repo_name
string
path
string
copies
string
size
string
content
string
license
string
TheTypoMaster/kernel_condor
sound/soc/pxa/e740_wm9705.c
5072
4903
/* * e740-wm9705.c -- SoC audio for e740 * * Copyright 2007 (c) Ian Molton <spyro@f2s.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 ONLY. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <mach/audio.h> #include <mach/eseries-gpio.h> #include <asm/mach-types.h> #include "../codecs/wm9705.h" #include "pxa2xx-ac97.h" #define E740_AUDIO_OUT 1 #define E740_AUDIO_IN 2 static int e740_audio_power; static void e740_sync_audio_power(int status) { gpio_set_value(GPIO_E740_WM9705_nAVDD2, !status); gpio_set_value(GPIO_E740_AMP_ON, (status & E740_AUDIO_OUT) ? 1 : 0); gpio_set_value(GPIO_E740_MIC_ON, (status & E740_AUDIO_IN) ? 1 : 0); } static int e740_mic_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { if (event & SND_SOC_DAPM_PRE_PMU) e740_audio_power |= E740_AUDIO_IN; else if (event & SND_SOC_DAPM_POST_PMD) e740_audio_power &= ~E740_AUDIO_IN; e740_sync_audio_power(e740_audio_power); return 0; } static int e740_output_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { if (event & SND_SOC_DAPM_PRE_PMU) e740_audio_power |= E740_AUDIO_OUT; else if (event & SND_SOC_DAPM_POST_PMD) e740_audio_power &= ~E740_AUDIO_OUT; e740_sync_audio_power(e740_audio_power); return 0; } static const struct snd_soc_dapm_widget e740_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Speaker", NULL), SND_SOC_DAPM_MIC("Mic (Internal)", NULL), SND_SOC_DAPM_PGA_E("Output Amp", SND_SOC_NOPM, 0, 0, NULL, 0, e740_output_amp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("Mic Amp", SND_SOC_NOPM, 0, 0, NULL, 0, e740_mic_amp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route audio_map[] = 
{ {"Output Amp", NULL, "LOUT"}, {"Output Amp", NULL, "ROUT"}, {"Output Amp", NULL, "MONOOUT"}, {"Speaker", NULL, "Output Amp"}, {"Headphone Jack", NULL, "Output Amp"}, {"MIC1", NULL, "Mic Amp"}, {"Mic Amp", NULL, "Mic (Internal)"}, }; static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_nc_pin(dapm, "HPOUTL"); snd_soc_dapm_nc_pin(dapm, "HPOUTR"); snd_soc_dapm_nc_pin(dapm, "PHONE"); snd_soc_dapm_nc_pin(dapm, "LINEINL"); snd_soc_dapm_nc_pin(dapm, "LINEINR"); snd_soc_dapm_nc_pin(dapm, "CDINL"); snd_soc_dapm_nc_pin(dapm, "CDINR"); snd_soc_dapm_nc_pin(dapm, "PCBEEP"); snd_soc_dapm_nc_pin(dapm, "MIC2"); snd_soc_dapm_new_controls(dapm, e740_dapm_widgets, ARRAY_SIZE(e740_dapm_widgets)); snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); return 0; } static struct snd_soc_dai_link e740_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", .init = e740_ac97_init, }, { .name = "AC97 Aux", .stream_name = "AC97 Aux", .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", }, }; static struct snd_soc_card e740 = { .name = "Toshiba e740", .owner = THIS_MODULE, .dai_link = e740_dai, .num_links = ARRAY_SIZE(e740_dai), }; static struct gpio e740_audio_gpios[] = { { GPIO_E740_MIC_ON, GPIOF_OUT_INIT_LOW, "Mic amp" }, { GPIO_E740_AMP_ON, GPIOF_OUT_INIT_LOW, "Output amp" }, { GPIO_E740_WM9705_nAVDD2, GPIOF_OUT_INIT_HIGH, "Audio power" }, }; static int __devinit e740_probe(struct platform_device *pdev) { struct snd_soc_card *card = &e740; int ret; ret = gpio_request_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios)); if (ret) return ret; card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: 
%d\n", ret); gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios)); } return ret; } static int __devexit e740_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios)); snd_soc_unregister_card(card); return 0; } static struct platform_driver e740_driver = { .driver = { .name = "e740-audio", .owner = THIS_MODULE, }, .probe = e740_probe, .remove = __devexit_p(e740_remove), }; module_platform_driver(e740_driver); /* Module information */ MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); MODULE_DESCRIPTION("ALSA SoC driver for e740"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:e740-audio");
gpl-2.0
aniketroxx/civic-sa77
arch/cris/arch-v10/kernel/fasttimer.c
7632
23311
/* * linux/arch/cris/kernel/fasttimer.c * * Fast timers for ETRAX100/ETRAX100LX * * Copyright (C) 2000-2007 Axis Communications AB, Lund, Sweden */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/delay.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/delay.h> #include <asm/rtc.h> #include <arch/svinto.h> #include <asm/fasttimer.h> #include <linux/proc_fs.h> #define DEBUG_LOG_INCLUDED #define FAST_TIMER_LOG /* #define FAST_TIMER_TEST */ #define FAST_TIMER_SANITY_CHECKS #ifdef FAST_TIMER_SANITY_CHECKS static int sanity_failed; #endif #define D1(x) #define D2(x) #define DP(x) static unsigned int fast_timer_running; static unsigned int fast_timers_added; static unsigned int fast_timers_started; static unsigned int fast_timers_expired; static unsigned int fast_timers_deleted; static unsigned int fast_timer_is_init; static unsigned int fast_timer_ints; struct fast_timer *fast_timer_list = NULL; #ifdef DEBUG_LOG_INCLUDED #define DEBUG_LOG_MAX 128 static const char * debug_log_string[DEBUG_LOG_MAX]; static unsigned long debug_log_value[DEBUG_LOG_MAX]; static unsigned int debug_log_cnt; static unsigned int debug_log_cnt_wrapped; #define DEBUG_LOG(string, value) \ { \ unsigned long log_flags; \ local_irq_save(log_flags); \ debug_log_string[debug_log_cnt] = (string); \ debug_log_value[debug_log_cnt] = (unsigned long)(value); \ if (++debug_log_cnt >= DEBUG_LOG_MAX) \ { \ debug_log_cnt = debug_log_cnt % DEBUG_LOG_MAX; \ debug_log_cnt_wrapped = 1; \ } \ local_irq_restore(log_flags); \ } #else #define DEBUG_LOG(string, value) #endif /* The frequencies for index = clkselx number in R_TIMER_CTRL */ #define NUM_TIMER_FREQ 15 #define MAX_USABLE_TIMER_FREQ 7 #define MAX_DELAY_US 853333L const unsigned long timer_freq_100[NUM_TIMER_FREQ] = { 3, /* 0 3333 - 
853333 us */ 6, /* 1 1666 - 426666 us */ 12, /* 2 833 - 213333 us */ 24, /* 3 416 - 106666 us */ 48, /* 4 208 - 53333 us */ 96, /* 5 104 - 26666 us */ 192, /* 6 52 - 13333 us */ 384, /* 7 26 - 6666 us */ 576, 1152, 2304, 4608, 9216, 18432, 62500, /* 15 = cascade */ }; #define NUM_TIMER_STATS 16 #ifdef FAST_TIMER_LOG struct fast_timer timer_added_log[NUM_TIMER_STATS]; struct fast_timer timer_started_log[NUM_TIMER_STATS]; struct fast_timer timer_expired_log[NUM_TIMER_STATS]; #endif int timer_div_settings[NUM_TIMER_STATS]; int timer_freq_settings[NUM_TIMER_STATS]; int timer_delay_settings[NUM_TIMER_STATS]; /* Not true gettimeofday, only checks the jiffies (uptime) + useconds */ inline void do_gettimeofday_fast(struct fasttime_t *tv) { tv->tv_jiff = jiffies; tv->tv_usec = GET_JIFFIES_USEC(); } inline int fasttime_cmp(struct fasttime_t *t0, struct fasttime_t *t1) { /* Compare jiffies. Takes care of wrapping */ if (time_before(t0->tv_jiff, t1->tv_jiff)) return -1; else if (time_after(t0->tv_jiff, t1->tv_jiff)) return 1; /* Compare us */ if (t0->tv_usec < t1->tv_usec) return -1; else if (t0->tv_usec > t1->tv_usec) return 1; return 0; } inline void start_timer1(unsigned long delay_us) { int freq_index = 0; /* This is the lowest resolution */ unsigned long upper_limit = MAX_DELAY_US; unsigned long div; /* Start/Restart the timer to the new shorter value */ /* t = 1/freq = 1/19200 = 53us * T=div*t, div = T/t = delay_us*freq/1000000 */ #if 1 /* Adaptive timer settings */ while (delay_us < upper_limit && freq_index < MAX_USABLE_TIMER_FREQ) { freq_index++; upper_limit >>= 1; /* Divide by 2 using shift */ } if (freq_index > 0) { freq_index--; } #else freq_index = 6; #endif div = delay_us * timer_freq_100[freq_index]/10000; if (div < 2) { /* Maybe increase timer freq? */ div = 2; } if (div > 255) { div = 0; /* This means 256, the max the timer takes */ /* If a longer timeout than the timer can handle is used, * then we must restart it when it goes off. 
*/ } timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = div; timer_freq_settings[fast_timers_started % NUM_TIMER_STATS] = freq_index; timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us; D1(printk(KERN_DEBUG "start_timer1 : %d us freq: %i div: %i\n", delay_us, freq_index, div)); /* Clear timer1 irq */ *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr); /* Set timer values */ *R_TIMER_CTRL = r_timer_ctrl_shadow = (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, timerdiv1) & ~IO_MASK(R_TIMER_CTRL, tm1) & ~IO_MASK(R_TIMER_CTRL, clksel1)) | IO_FIELD(R_TIMER_CTRL, timerdiv1, div) | IO_STATE(R_TIMER_CTRL, tm1, stop_ld) | IO_FIELD(R_TIMER_CTRL, clksel1, freq_index ); /* 6=c19k2Hz */ /* Ack interrupt */ *R_TIMER_CTRL = r_timer_ctrl_shadow | IO_STATE(R_TIMER_CTRL, i1, clr); /* Start timer */ *R_TIMER_CTRL = r_timer_ctrl_shadow = (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) | IO_STATE(R_TIMER_CTRL, tm1, run); /* Enable timer1 irq */ *R_IRQ_MASK0_SET = IO_STATE(R_IRQ_MASK0_SET, timer1, set); fast_timers_started++; fast_timer_running = 1; } /* In version 1.4 this function takes 27 - 50 us */ void start_one_shot_timer(struct fast_timer *t, fast_timer_function_type *function, unsigned long data, unsigned long delay_us, const char *name) { unsigned long flags; struct fast_timer *tmp; D1(printk("sft %s %d us\n", name, delay_us)); local_irq_save(flags); do_gettimeofday_fast(&t->tv_set); tmp = fast_timer_list; #ifdef FAST_TIMER_SANITY_CHECKS /* Check so this is not in the list already... 
*/ while (tmp != NULL) { if (tmp == t) { printk(KERN_WARNING "timer name: %s data: " "0x%08lX already in list!\n", name, data); sanity_failed++; goto done; } else tmp = tmp->next; } tmp = fast_timer_list; #endif t->delay_us = delay_us; t->function = function; t->data = data; t->name = name; t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000; t->tv_expires.tv_jiff = t->tv_set.tv_jiff + delay_us / 1000000 / HZ; if (t->tv_expires.tv_usec > 1000000) { t->tv_expires.tv_usec -= 1000000; t->tv_expires.tv_jiff += HZ; } #ifdef FAST_TIMER_LOG timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t; #endif fast_timers_added++; /* Check if this should timeout before anything else */ if (tmp == NULL || fasttime_cmp(&t->tv_expires, &tmp->tv_expires) < 0) { /* Put first in list and modify the timer value */ t->prev = NULL; t->next = fast_timer_list; if (fast_timer_list) { fast_timer_list->prev = t; } fast_timer_list = t; #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer1(delay_us); } else { /* Put in correct place in list */ while (tmp->next && fasttime_cmp(&t->tv_expires, &tmp->next->tv_expires) > 0) { tmp = tmp->next; } /* Insert t after tmp */ t->prev = tmp; t->next = tmp->next; if (tmp->next) { tmp->next->prev = t; } tmp->next = t; } D2(printk("start_one_shot_timer: %d us done\n", delay_us)); done: local_irq_restore(flags); } /* start_one_shot_timer */ static inline int fast_timer_pending (const struct fast_timer * t) { return (t->next != NULL) || (t->prev != NULL) || (t == fast_timer_list); } static inline int detach_fast_timer (struct fast_timer *t) { struct fast_timer *next, *prev; if (!fast_timer_pending(t)) return 0; next = t->next; prev = t->prev; if (next) next->prev = prev; if (prev) prev->next = next; else fast_timer_list = next; fast_timers_deleted++; return 1; } int del_fast_timer(struct fast_timer * t) { unsigned long flags; int ret; local_irq_save(flags); ret = detach_fast_timer(t); t->next = 
t->prev = NULL; local_irq_restore(flags); return ret; } /* del_fast_timer */ /* Interrupt routines or functions called in interrupt context */ /* Timer 1 interrupt handler */ static irqreturn_t timer1_handler(int irq, void *dev_id) { struct fast_timer *t; unsigned long flags; /* We keep interrupts disabled not only when we modify the * fast timer list, but any time we hold a reference to a * timer in the list, since del_fast_timer may be called * from (another) interrupt context. Thus, the only time * when interrupts are enabled is when calling the timer * callback function. */ local_irq_save(flags); /* Clear timer1 irq */ *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr); /* First stop timer, then ack interrupt */ /* Stop timer */ *R_TIMER_CTRL = r_timer_ctrl_shadow = (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) | IO_STATE(R_TIMER_CTRL, tm1, stop_ld); /* Ack interrupt */ *R_TIMER_CTRL = r_timer_ctrl_shadow | IO_STATE(R_TIMER_CTRL, i1, clr); fast_timer_running = 0; fast_timer_ints++; t = fast_timer_list; while (t) { struct fasttime_t tv; fast_timer_function_type *f; unsigned long d; /* Has it really expired? */ do_gettimeofday_fast(&tv); D1(printk(KERN_DEBUG "t: %is %06ius\n", tv.tv_jiff, tv.tv_usec)); if (fasttime_cmp(&t->tv_expires, &tv) <= 0) { /* Yes it has expired */ #ifdef FAST_TIMER_LOG timer_expired_log[fast_timers_expired % NUM_TIMER_STATS] = *t; #endif fast_timers_expired++; /* Remove this timer before call, since it may reuse the timer */ if (t->prev) { t->prev->next = t->next; } else { fast_timer_list = t->next; } if (t->next) { t->next->prev = t->prev; } t->prev = NULL; t->next = NULL; /* Save function callback data before enabling * interrupts, since the timer may be removed and * we don't know how it was allocated * (e.g. ->function and ->data may become overwritten * after deletion if the timer was stack-allocated). */ f = t->function; d = t->data; if (f != NULL) { /* Run callback with interrupts enabled. 
*/ local_irq_restore(flags); f(d); local_irq_save(flags); } else DEBUG_LOG("!timer1 %i function==NULL!\n", fast_timer_ints); } else { /* Timer is to early, let's set it again using the normal routines */ D1(printk(".\n")); } if ((t = fast_timer_list) != NULL) { /* Start next timer.. */ long us = 0; struct fasttime_t tv; do_gettimeofday_fast(&tv); /* time_after_eq takes care of wrapping */ if (time_after_eq(t->tv_expires.tv_jiff, tv.tv_jiff)) us = ((t->tv_expires.tv_jiff - tv.tv_jiff) * 1000000 / HZ + t->tv_expires.tv_usec - tv.tv_usec); if (us > 0) { if (!fast_timer_running) { #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer1(us); } break; } else { /* Timer already expired, let's handle it better late than never. * The normal loop handles it */ D1(printk("e! %d\n", us)); } } } local_irq_restore(flags); if (!t) { D1(printk("t1 stop!\n")); } return IRQ_HANDLED; } static void wake_up_func(unsigned long data) { wait_queue_head_t *sleep_wait_p = (wait_queue_head_t *)data; wake_up(sleep_wait_p); } /* Useful API */ void schedule_usleep(unsigned long us) { struct fast_timer t; wait_queue_head_t sleep_wait; init_waitqueue_head(&sleep_wait); D1(printk("schedule_usleep(%d)\n", us)); start_one_shot_timer(&t, wake_up_func, (unsigned long)&sleep_wait, us, "usleep"); /* Uninterruptible sleep on the fast timer. (The condition is somewhat * redundant since the timer is what wakes us up.) 
*/ wait_event(sleep_wait, !fast_timer_pending(&t)); D1(printk("done schedule_usleep(%d)\n", us)); } #ifdef CONFIG_PROC_FS static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused); static struct proc_dir_entry *fasttimer_proc_entry; #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_PROC_FS /* This value is very much based on testing */ #define BIG_BUF_SIZE (500 + NUM_TIMER_STATS * 300) static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused) { unsigned long flags; int i = 0; int num_to_show; struct fasttime_t tv; struct fast_timer *t, *nextt; static char *bigbuf = NULL; static unsigned long used; if (!bigbuf && !(bigbuf = vmalloc(BIG_BUF_SIZE))) { used = 0; if (buf) buf[0] = '\0'; return 0; } if (!offset || !used) { do_gettimeofday_fast(&tv); used = 0; used += sprintf(bigbuf + used, "Fast timers added: %i\n", fast_timers_added); used += sprintf(bigbuf + used, "Fast timers started: %i\n", fast_timers_started); used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n", fast_timer_ints); used += sprintf(bigbuf + used, "Fast timers expired: %i\n", fast_timers_expired); used += sprintf(bigbuf + used, "Fast timers deleted: %i\n", fast_timers_deleted); used += sprintf(bigbuf + used, "Fast timer running: %s\n", fast_timer_running ? 
"yes" : "no"); used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n", (unsigned long)tv.tv_jiff, (unsigned long)tv.tv_usec); #ifdef FAST_TIMER_SANITY_CHECKS used += sprintf(bigbuf + used, "Sanity failed: %i\n", sanity_failed); #endif used += sprintf(bigbuf + used, "\n"); #ifdef DEBUG_LOG_INCLUDED { int end_i = debug_log_cnt; i = 0; if (debug_log_cnt_wrapped) { i = debug_log_cnt; } while ((i != end_i || (debug_log_cnt_wrapped && !used)) && used+100 < BIG_BUF_SIZE) { used += sprintf(bigbuf + used, debug_log_string[i], debug_log_value[i]); i = (i+1) % DEBUG_LOG_MAX; } } used += sprintf(bigbuf + used, "\n"); #endif num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++) { int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS; #if 1 //ndef FAST_TIMER_LOG used += sprintf(bigbuf + used, "div: %i freq: %i delay: %i" "\n", timer_div_settings[cur], timer_freq_settings[cur], timer_delay_settings[cur] ); #endif #ifdef FAST_TIMER_LOG t = &timer_started_log[cur]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); #endif } used += sprintf(bigbuf + used, "\n"); #ifdef FAST_TIMER_LOG num_to_show = (fast_timers_added < NUM_TIMER_STATS ? 
fast_timers_added: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? fast_timers_expired: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); #endif used += sprintf(bigbuf + used, "Active timers:\n"); local_irq_save(flags); t = fast_timer_list; while (t != NULL && (used+100 < BIG_BUF_SIZE)) { nextt = t->next; local_irq_restore(flags); used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" /* " func: 0x%08lX" */ "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data /* , t->function */ ); local_irq_save(flags); if (t->next != nextt) { printk(KERN_WARNING "timer removed!\n"); } t = nextt; } local_irq_restore(flags); } if (used - offset < len) { len = used - offset; } memcpy(buf, bigbuf + offset, len); *start = buf; *eof = 1; return len; } #endif /* PROC_FS */ #ifdef 
FAST_TIMER_TEST static volatile unsigned long i = 0; static volatile int num_test_timeout = 0; static struct fast_timer tr[10]; static int exp_num[10]; static struct fasttime_t tv_exp[100]; static void test_timeout(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; num_test_timeout++; } static void test_timeout1(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; if (data < 7) { start_one_shot_timer(&tr[i], test_timeout1, i, 1000, "timeout1"); i++; } num_test_timeout++; } DP( static char buf0[2000]; static char buf1[2000]; static char buf2[2000]; static char buf3[2000]; static char buf4[2000]; ); static char buf5[6000]; static int j_u[1000]; static void fast_timer_test(void) { int prev_num; int j; struct fasttime_t tv, tv0, tv1, tv2; printk("fast_timer_test() start\n"); do_gettimeofday_fast(&tv); for (j = 0; j < 1000; j++) { j_u[j] = GET_JIFFIES_USEC(); } for (j = 0; j < 100; j++) { do_gettimeofday_fast(&tv_exp[j]); } printk(KERN_DEBUG "fast_timer_test() %is %06i\n", tv.tv_jiff, tv.tv_usec); for (j = 0; j < 1000; j++) { printk("%i %i %i %i %i\n",j_u[j], j_u[j+1], j_u[j+2], j_u[j+3], j_u[j+4]); j += 4; } for (j = 0; j < 100; j++) { printk(KERN_DEBUG "%i.%i %i.%i %i.%i %i.%i %i.%i\n", tv_exp[j].tv_jiff, tv_exp[j].tv_usec, tv_exp[j+1].tv_jiff, tv_exp[j+1].tv_usec, tv_exp[j+2].tv_jiff, tv_exp[j+2].tv_usec, tv_exp[j+3].tv_jiff, tv_exp[j+3].tv_usec, tv_exp[j+4].tv_jiff, tv_exp[j+4].tv_usec); j += 4; } do_gettimeofday_fast(&tv0); start_one_shot_timer(&tr[i], test_timeout, i, 50000, "test0"); DP(proc_fasttimer_read(buf0, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 70000, "test1"); DP(proc_fasttimer_read(buf1, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 40000, "test2"); DP(proc_fasttimer_read(buf2, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 60000, "test3"); DP(proc_fasttimer_read(buf3, NULL, 0, 0, 0)); i++; 
start_one_shot_timer(&tr[i], test_timeout1, i, 55000, "test4xx"); DP(proc_fasttimer_read(buf4, NULL, 0, 0, 0)); i++; do_gettimeofday_fast(&tv1); proc_fasttimer_read(buf5, NULL, 0, 0, 0); prev_num = num_test_timeout; while (num_test_timeout < i) { if (num_test_timeout != prev_num) { prev_num = num_test_timeout; } } do_gettimeofday_fast(&tv2); printk(KERN_DEBUG "Timers started %is %06i\n", tv0.tv_jiff, tv0.tv_usec); printk(KERN_DEBUG "Timers started at %is %06i\n", tv1.tv_jiff, tv1.tv_usec); printk(KERN_DEBUG "Timers done %is %06i\n", tv2.tv_jiff, tv2.tv_usec); DP(printk("buf0:\n"); printk(buf0); printk("buf1:\n"); printk(buf1); printk("buf2:\n"); printk(buf2); printk("buf3:\n"); printk(buf3); printk("buf4:\n"); printk(buf4); ); printk("buf5:\n"); printk(buf5); printk("timers set:\n"); for(j = 0; j<i; j++) { struct fast_timer *t = &tr[j]; printk("%-10s set: %6is %06ius exp: %6is %06ius " "data: 0x%08X func: 0x%08X\n", t->name, t->tv_set.tv_jiff, t->tv_set.tv_usec, t->tv_expires.tv_jiff, t->tv_expires.tv_usec, t->data, t->function ); printk(" del: %6ius did exp: %6is %06ius as #%i error: %6li\n", t->delay_us, tv_exp[j].tv_jiff, tv_exp[j].tv_usec, exp_num[j], (tv_exp[j].tv_jiff - t->tv_expires.tv_jiff) * 1000000 + tv_exp[j].tv_usec - t->tv_expires.tv_usec); } proc_fasttimer_read(buf5, NULL, 0, 0, 0); printk("buf5 after all done:\n"); printk(buf5); printk("fast_timer_test() done\n"); } #endif int fast_timer_init(void) { /* For some reason, request_irq() hangs when called froom time_init() */ if (!fast_timer_is_init) { #if 0 && defined(FAST_TIMER_TEST) int i; #endif printk(KERN_INFO "fast_timer_init()\n"); #if 0 && defined(FAST_TIMER_TEST) for (i = 0; i <= TIMER0_DIV; i++) { /* We must be careful not to get overflow... 
*/ printk("%3i %6u\n", i, timer0_value_us[i]); } #endif #ifdef CONFIG_PROC_FS if ((fasttimer_proc_entry = create_proc_entry( "fasttimer", 0, 0 ))) fasttimer_proc_entry->read_proc = proc_fasttimer_read; #endif /* PROC_FS */ if(request_irq(TIMER1_IRQ_NBR, timer1_handler, 0, "fast timer int", NULL)) { printk("err: timer1 irq\n"); } fast_timer_is_init = 1; #ifdef FAST_TIMER_TEST printk("do test\n"); fast_timer_test(); #endif } return 0; } __initcall(fast_timer_init);
gpl-2.0
IndieBeto/moggy
arch/mips/kernel/csrc-sb1250.c
7632
2151
/* * Copyright (C) 2000, 2001 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clocksource.h> #include <asm/addrspace.h> #include <asm/io.h> #include <asm/time.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> #define SB1250_HPT_NUM 3 #define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */ /* * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. 
*/ static cycle_t sb1250_hpt_read(struct clocksource *cs) { unsigned int count; count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)))); return SB1250_HPT_VALUE - count; } struct clocksource bcm1250_clocksource = { .name = "bcm1250-counter-3", .rating = 200, .read = sb1250_hpt_read, .mask = CLOCKSOURCE_MASK(23), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; void __init sb1250_clocksource_init(void) { struct clocksource *cs = &bcm1250_clocksource; /* Setup hpt using timer #3 but do not enable irq for it */ __raw_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); __raw_writeq(SB1250_HPT_VALUE, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_INIT))); __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); clocksource_register_hz(cs, V_SCD_TIMER_FREQ); }
gpl-2.0
go2ev-devteam/Gplus_2159_0801
openplatform/sdk/os/kernel-2.6.32/drivers/media/video/em28xx/em28xx-core.c
465
31086
/* em28xx-core.c - driver for Empia EM2800/EM2820/2840 USB video capture devices Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> Markus Rechberger <mrechberger@gmail.com> Mauro Carvalho Chehab <mchehab@infradead.org> Sascha Sommer <saschasommer@freenet.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <media/v4l2-common.h> #include "em28xx.h" /* #define ENABLE_DEBUG_ISOC_FRAMES */ static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); #define em28xx_coredbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int reg_debug; module_param(reg_debug, int, 0644); MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]"); #define em28xx_regdbg(fmt, arg...) do {\ if (reg_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static int alt = EM28XX_PINOUT; module_param(alt, int, 0644); MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint"); static unsigned int disable_vbi; module_param(disable_vbi, int, 0644); MODULE_PARM_DESC(disable_vbi, "disable vbi support"); /* FIXME */ #define em28xx_isocdbg(fmt, arg...) 
do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) /* * em28xx_read_reg_req() * reads data from the usb device specifying bRequest */ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, char *buf, int len) { int ret; int pipe = usb_rcvctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if (len > URB_MAX_CTRL_SIZE) return -EINVAL; if (reg_debug) { printk(KERN_DEBUG "(pipe 0x%08x): " "IN: %02x %02x %02x %02x %02x %02x %02x %02x ", pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, req, 0, 0, reg & 0xff, reg >> 8, len & 0xff, len >> 8); } mutex_lock(&dev->ctrl_urb_lock); ret = usb_control_msg(dev->udev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, reg, dev->urb_buf, len, HZ); if (ret < 0) { if (reg_debug) printk(" failed!\n"); mutex_unlock(&dev->ctrl_urb_lock); return ret; } if (len) memcpy(buf, dev->urb_buf, len); mutex_unlock(&dev->ctrl_urb_lock); if (reg_debug) { int byte; printk("<<<"); for (byte = 0; byte < len; byte++) printk(" %02x", (unsigned char)buf[byte]); printk("\n"); } return ret; } /* * em28xx_read_reg_req() * reads data from the usb device specifying bRequest */ int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg) { int ret; u8 val; ret = em28xx_read_reg_req_len(dev, req, reg, &val, 1); if (ret < 0) return ret; return val; } int em28xx_read_reg(struct em28xx *dev, u16 reg) { return em28xx_read_reg_req(dev, USB_REQ_GET_STATUS, reg); } /* * em28xx_write_regs_req() * sends data to the usb device, specifying bRequest */ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, int len) { int ret; int pipe = usb_sndctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((len < 1) || (len > URB_MAX_CTRL_SIZE)) return -EINVAL; if (reg_debug) { int byte; printk(KERN_DEBUG "(pipe 0x%08x): " "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>", pipe, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, req, 0, 
0, reg & 0xff, reg >> 8, len & 0xff, len >> 8); for (byte = 0; byte < len; byte++) printk(" %02x", (unsigned char)buf[byte]); printk("\n"); } mutex_lock(&dev->ctrl_urb_lock); memcpy(dev->urb_buf, buf, len); ret = usb_control_msg(dev->udev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, reg, dev->urb_buf, len, HZ); mutex_unlock(&dev->ctrl_urb_lock); if (dev->wait_after_write) msleep(dev->wait_after_write); return ret; } int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len) { int rc; rc = em28xx_write_regs_req(dev, USB_REQ_GET_STATUS, reg, buf, len); /* Stores GPO/GPIO values at the cache, if changed Only write values should be stored, since input on a GPIO register will return the input bits. Not sure what happens on reading GPO register. */ if (rc >= 0) { if (reg == dev->reg_gpo_num) dev->reg_gpo = buf[0]; else if (reg == dev->reg_gpio_num) dev->reg_gpio = buf[0]; } return rc; } /* Write a single register */ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val) { return em28xx_write_regs(dev, reg, &val, 1); } /* * em28xx_write_reg_bits() * sets only some bits (specified by bitmask) of a register, by first reading * the actual value */ static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val, u8 bitmask) { int oldval; u8 newval; /* Uses cache for gpo/gpio registers */ if (reg == dev->reg_gpo_num) oldval = dev->reg_gpo; else if (reg == dev->reg_gpio_num) oldval = dev->reg_gpio; else oldval = em28xx_read_reg(dev, reg); if (oldval < 0) return oldval; newval = (((u8) oldval) & ~bitmask) | (val & bitmask); return em28xx_write_regs(dev, reg, &newval, 1); } /* * em28xx_is_ac97_ready() * Checks if ac97 is ready */ static int em28xx_is_ac97_ready(struct em28xx *dev) { int ret, i; /* Wait up to 50 ms for AC97 command to complete */ for (i = 0; i < 10; i++, msleep(5)) { ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY); if (ret < 0) return ret; if (!(ret & 0x01)) return 0; } em28xx_warn("AC97 command still being executed: 
not handled properly!\n"); return -EBUSY; } /* * em28xx_read_ac97() * write a 16 bit value to the specified AC97 address (LSB first!) */ int em28xx_read_ac97(struct em28xx *dev, u8 reg) { int ret; u8 addr = (reg & 0x7f) | 0x80; u16 val; ret = em28xx_is_ac97_ready(dev); if (ret < 0) return ret; ret = em28xx_write_regs(dev, EM28XX_R42_AC97ADDR, &addr, 1); if (ret < 0) return ret; ret = dev->em28xx_read_reg_req_len(dev, 0, EM28XX_R40_AC97LSB, (u8 *)&val, sizeof(val)); if (ret < 0) return ret; return le16_to_cpu(val); } /* * em28xx_write_ac97() * write a 16 bit value to the specified AC97 address (LSB first!) */ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val) { int ret; u8 addr = reg & 0x7f; __le16 value; value = cpu_to_le16(val); ret = em28xx_is_ac97_ready(dev); if (ret < 0) return ret; ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *) &value, 2); if (ret < 0) return ret; ret = em28xx_write_regs(dev, EM28XX_R42_AC97ADDR, &addr, 1); if (ret < 0) return ret; return 0; } struct em28xx_vol_table { enum em28xx_amux mux; u8 reg; }; static struct em28xx_vol_table inputs[] = { { EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL }, { EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL }, { EM28XX_AMUX_PHONE, AC97_PHONE_VOL }, { EM28XX_AMUX_MIC, AC97_MIC_VOL }, { EM28XX_AMUX_CD, AC97_CD_VOL }, { EM28XX_AMUX_AUX, AC97_AUX_VOL }, { EM28XX_AMUX_PCM_OUT, AC97_PCM_OUT_VOL }, }; static int set_ac97_input(struct em28xx *dev) { int ret, i; enum em28xx_amux amux = dev->ctl_ainput; /* EM28XX_AMUX_VIDEO2 is a special case used to indicate that em28xx should point to LINE IN, while AC97 should use VIDEO */ if (amux == EM28XX_AMUX_VIDEO2) amux = EM28XX_AMUX_VIDEO; /* Mute all entres but the one that were selected */ for (i = 0; i < ARRAY_SIZE(inputs); i++) { if (amux == inputs[i].mux) ret = em28xx_write_ac97(dev, inputs[i].reg, 0x0808); else ret = em28xx_write_ac97(dev, inputs[i].reg, 0x8000); if (ret < 0) em28xx_warn("couldn't setup AC97 register %d\n", inputs[i].reg); } return 0; } static int 
em28xx_set_audio_source(struct em28xx *dev) { int ret; u8 input; if (dev->board.is_em2800) { if (dev->ctl_ainput == EM28XX_AMUX_VIDEO) input = EM2800_AUDIO_SRC_TUNER; else input = EM2800_AUDIO_SRC_LINE; ret = em28xx_write_regs(dev, EM2800_R08_AUDIOSRC, &input, 1); if (ret < 0) return ret; } if (dev->board.has_msp34xx) input = EM28XX_AUDIO_SRC_TUNER; else { switch (dev->ctl_ainput) { case EM28XX_AMUX_VIDEO: input = EM28XX_AUDIO_SRC_TUNER; break; default: input = EM28XX_AUDIO_SRC_LINE; break; } } if (dev->board.mute_gpio && dev->mute) em28xx_gpio_set(dev, dev->board.mute_gpio); else em28xx_gpio_set(dev, INPUT(dev->ctl_input)->gpio); ret = em28xx_write_reg_bits(dev, EM28XX_R0E_AUDIOSRC, input, 0xc0); if (ret < 0) return ret; msleep(5); switch (dev->audio_mode.ac97) { case EM28XX_NO_AC97: break; default: ret = set_ac97_input(dev); } return ret; } static const struct em28xx_vol_table outputs[] = { { EM28XX_AOUT_MASTER, AC97_MASTER_VOL }, { EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL }, { EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL }, { EM28XX_AOUT_LFE, AC97_LFE_MASTER_VOL }, { EM28XX_AOUT_SURR, AC97_SURR_MASTER_VOL }, }; int em28xx_audio_analog_set(struct em28xx *dev) { int ret, i; u8 xclk; if (!dev->audio_mode.has_audio) return 0; /* It is assumed that all devices use master volume for output. It would be possible to use also line output. 
*/ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { /* Mute all outputs */ for (i = 0; i < ARRAY_SIZE(outputs); i++) { ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000); if (ret < 0) em28xx_warn("couldn't setup AC97 register %d\n", outputs[i].reg); } } xclk = dev->board.xclk & 0x7f; if (!dev->mute) xclk |= EM28XX_XCLK_AUDIO_UNMUTE; ret = em28xx_write_reg(dev, EM28XX_R0F_XCLK, xclk); if (ret < 0) return ret; msleep(10); /* Selects the proper audio input */ ret = em28xx_set_audio_source(dev); /* Sets volume */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { int vol; em28xx_write_ac97(dev, AC97_POWER_DOWN_CTRL, 0x4200); em28xx_write_ac97(dev, AC97_EXT_AUD_CTRL, 0x0031); em28xx_write_ac97(dev, AC97_PCM_IN_SRATE, 0xbb80); /* LSB: left channel - both channels with the same level */ vol = (0x1f - dev->volume) | ((0x1f - dev->volume) << 8); /* Mute device, if needed */ if (dev->mute) vol |= 0x8000; /* Sets volume */ for (i = 0; i < ARRAY_SIZE(outputs); i++) { if (dev->ctl_aoutput & outputs[i].mux) ret = em28xx_write_ac97(dev, outputs[i].reg, vol); if (ret < 0) em28xx_warn("couldn't setup AC97 register %d\n", outputs[i].reg); } if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) { int sel = ac97_return_record_select(dev->ctl_aoutput); /* Use the same input for both left and right channels */ sel |= (sel << 8); em28xx_write_ac97(dev, AC97_RECORD_SELECT, sel); } } return ret; } EXPORT_SYMBOL_GPL(em28xx_audio_analog_set); int em28xx_audio_setup(struct em28xx *dev) { int vid1, vid2, feat, cfg; u32 vid; if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874) { /* Digital only device - don't load any alsa module */ dev->audio_mode.has_audio = 0; dev->has_audio_class = 0; dev->has_alsa_audio = 0; return 0; } /* If device doesn't support Usb Audio Class, use vendor class */ if (!dev->has_audio_class) dev->has_alsa_audio = 1; dev->audio_mode.has_audio = 1; /* See how this device is configured */ cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG); em28xx_info("Config register raw 
data: 0x%02x\n", cfg); if (cfg < 0) { /* Register read error? */ cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) { /* The device doesn't have vendor audio at all */ dev->has_alsa_audio = 0; dev->audio_mode.has_audio = 0; return 0; } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == EM28XX_CHIPCFG_I2S_3_SAMPRATES) { em28xx_info("I2S Audio (3 sample rates)\n"); dev->audio_mode.i2s_3rates = 1; } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == EM28XX_CHIPCFG_I2S_5_SAMPRATES) { em28xx_info("I2S Audio (5 sample rates)\n"); dev->audio_mode.i2s_5rates = 1; } if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) { /* Skip the code that does AC97 vendor detection */ dev->audio_mode.ac97 = EM28XX_NO_AC97; goto init_audio; } dev->audio_mode.ac97 = EM28XX_AC97_OTHER; vid1 = em28xx_read_ac97(dev, AC97_VENDOR_ID1); if (vid1 < 0) { /* Device likely doesn't support AC97 */ em28xx_warn("AC97 chip type couldn't be determined\n"); goto init_audio; } vid2 = em28xx_read_ac97(dev, AC97_VENDOR_ID2); if (vid2 < 0) goto init_audio; vid = vid1 << 16 | vid2; dev->audio_mode.ac97_vendor_id = vid; em28xx_warn("AC97 vendor ID = 0x%08x\n", vid); feat = em28xx_read_ac97(dev, AC97_RESET); if (feat < 0) goto init_audio; dev->audio_mode.ac97_feat = feat; em28xx_warn("AC97 features = 0x%04x\n", feat); /* Try to identify what audio processor we have */ if ((vid == 0xffffffff) && (feat == 0x6a90)) dev->audio_mode.ac97 = EM28XX_AC97_EM202; else if ((vid >> 8) == 0x838476) dev->audio_mode.ac97 = EM28XX_AC97_SIGMATEL; init_audio: /* Reports detected AC97 processor */ switch (dev->audio_mode.ac97) { case EM28XX_NO_AC97: em28xx_info("No AC97 audio processor\n"); break; case EM28XX_AC97_EM202: em28xx_info("Empia 202 AC97 audio processor detected\n"); break; case EM28XX_AC97_SIGMATEL: em28xx_info("Sigmatel audio processor detected(stac 97%02x)\n", dev->audio_mode.ac97_vendor_id & 0xff); break; case EM28XX_AC97_OTHER: em28xx_warn("Unknown AC97 audio processor 
detected!\n"); break; default: break; } return em28xx_audio_analog_set(dev); } EXPORT_SYMBOL_GPL(em28xx_audio_setup); int em28xx_colorlevels_set_default(struct em28xx *dev) { em28xx_write_reg(dev, EM28XX_R20_YGAIN, 0x10); /* contrast */ em28xx_write_reg(dev, EM28XX_R21_YOFFSET, 0x00); /* brightness */ em28xx_write_reg(dev, EM28XX_R22_UVGAIN, 0x10); /* saturation */ em28xx_write_reg(dev, EM28XX_R23_UOFFSET, 0x00); em28xx_write_reg(dev, EM28XX_R24_VOFFSET, 0x00); em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, 0x00); em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20); em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20); em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20); em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20); em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00); em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00); return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00); } int em28xx_capture_start(struct em28xx *dev, int start) { int rc; if (dev->chip_id == CHIP_ID_EM2874) { /* The Transport Stream Enable Register moved in em2874 */ if (!start) { rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE, 0x00, EM2874_TS1_CAPTURE_ENABLE); return rc; } /* Enable Transport Stream */ rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE, EM2874_TS1_CAPTURE_ENABLE, EM2874_TS1_CAPTURE_ENABLE); return rc; } /* FIXME: which is the best order? */ /* video registers are sampled by VREF */ rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP, start ? 
0x10 : 0x00, 0x10); if (rc < 0) return rc; if (!start) { /* disable video capture */ rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27); return rc; } if (dev->board.is_webcam) rc = em28xx_write_reg(dev, 0x13, 0x0c); /* enable video capture */ rc = em28xx_write_reg(dev, 0x48, 0x00); if (dev->mode == EM28XX_ANALOG_MODE) rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67); else rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37); msleep(6); return rc; } int em28xx_vbi_supported(struct em28xx *dev) { /* Modprobe option to manually disable */ if (disable_vbi == 1) return 0; if (dev->chip_id == CHIP_ID_EM2860 || dev->chip_id == CHIP_ID_EM2883) return 1; /* Version of em28xx that does not support VBI */ return 0; } int em28xx_set_outfmt(struct em28xx *dev) { int ret; u8 vinctrl; ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, dev->format->reg | 0x20, 0xff); if (ret < 0) return ret; ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode); if (ret < 0) return ret; vinctrl = dev->vinctl; if (em28xx_vbi_supported(dev) == 1) { vinctrl |= EM28XX_VINCTRL_VBI_RAW; em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00); em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09); em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, 0xb4); em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, 0x0c); } return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl); } static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, u8 ymin, u8 ymax) { em28xx_coredbg("em28xx Scale: (%d,%d)-(%d,%d)\n", xmin, ymin, xmax, ymax); em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1); em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1); em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1); return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1); } static int em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart, u16 width, u16 height) { u8 cwidth = width; u8 cheight = height; u8 overflow = (height >> 7 & 0x02) | (width >> 8 & 0x01); em28xx_coredbg("em28xx Area Set: (%d,%d)\n", (width | 
(overflow & 2) << 7), (height | (overflow & 1) << 8)); em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1); em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1); em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1); em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1); return em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1); } static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v) { u8 mode; /* the em2800 scaler only supports scaling down to 50% */ if (dev->board.is_em2800) { mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); } else { u8 buf[2]; buf[0] = h; buf[1] = h >> 8; em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2); buf[0] = v; buf[1] = v >> 8; em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2); /* it seems that both H and V scalers must be active to work correctly */ mode = (h || v) ? 0x30 : 0x00; } return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30); } /* FIXME: this only function read values from dev */ int em28xx_resolution_set(struct em28xx *dev) { int width, height; width = norm_maxw(dev); height = norm_maxh(dev); if (!dev->progressive) height >>= norm_maxh(dev); em28xx_set_outfmt(dev); em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2); /* If we don't set the start position to 4 in VBI mode, we end up with line 21 being YUYV encoded instead of being in 8-bit greyscale */ if (em28xx_vbi_supported(dev) == 1) em28xx_capture_area_set(dev, 0, 4, width >> 2, height >> 2); else em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2); return em28xx_scaler_set(dev, dev->hscale, dev->vscale); } int em28xx_set_alternate(struct em28xx *dev) { int errCode, prev_alt = dev->alt; int i; unsigned int min_pkt_size = dev->width * 2 + 4; /* When image size is bigger than a certain value, the frame size should be increased, otherwise, only green screen will be received. 
*/ if (dev->width * 2 * dev->height > 720 * 240 * 2) min_pkt_size *= 2; for (i = 0; i < dev->num_alt; i++) { /* stop when the selected alt setting offers enough bandwidth */ if (dev->alt_max_pkt_size[i] >= min_pkt_size) { dev->alt = i; break; /* otherwise make sure that we end up with the maximum bandwidth because the min_pkt_size equation might be wrong... */ } else if (dev->alt_max_pkt_size[i] > dev->alt_max_pkt_size[dev->alt]) dev->alt = i; } if (dev->alt != prev_alt) { em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n", min_pkt_size, dev->alt); dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt]; em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n", dev->alt, dev->max_pkt_size); errCode = usb_set_interface(dev->udev, 0, dev->alt); if (errCode < 0) { em28xx_errdev("cannot change alternate number to %d (error=%i)\n", dev->alt, errCode); return errCode; } } return 0; } int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio) { int rc = 0; if (!gpio) return rc; if (dev->mode != EM28XX_SUSPEND) { em28xx_write_reg(dev, 0x48, 0x00); if (dev->mode == EM28XX_ANALOG_MODE) em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67); else em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37); msleep(6); } /* Send GPIO reset sequences specified at board entry */ while (gpio->sleep >= 0) { if (gpio->reg >= 0) { rc = em28xx_write_reg_bits(dev, gpio->reg, gpio->val, gpio->mask); if (rc < 0) return rc; } if (gpio->sleep > 0) msleep(gpio->sleep); gpio++; } return rc; } int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode) { if (dev->mode == set_mode) return 0; if (set_mode == EM28XX_SUSPEND) { dev->mode = set_mode; /* FIXME: add suspend support for ac97 */ return em28xx_gpio_set(dev, dev->board.suspend_gpio); } dev->mode = set_mode; if (dev->mode == EM28XX_DIGITAL_MODE) return em28xx_gpio_set(dev, dev->board.dvb_gpio); else return em28xx_gpio_set(dev, INPUT(dev->ctl_input)->gpio); } EXPORT_SYMBOL_GPL(em28xx_set_mode); /* 
------------------------------------------------------------------ URB control ------------------------------------------------------------------*/ /* * IRQ callback, called by URB callback */ static void em28xx_irq_callback(struct urb *urb) { struct em28xx *dev = urb->context; int rc, i; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ em28xx_isocdbg("urb completition error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock(&dev->slock); rc = dev->isoc_ctl.isoc_copy(dev, urb); spin_unlock(&dev->slock); /* Reset urb buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = 0; urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { em28xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /* * Stop and Deallocate URBs */ void em28xx_uninit_isoc(struct em28xx *dev) { struct urb *urb; int i; em28xx_isocdbg("em28xx: called em28xx_uninit_isoc\n"); dev->isoc_ctl.nfields = -1; for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = dev->isoc_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->isoc_ctl.transfer_buffer[i]) { usb_buffer_free(dev->udev, urb->transfer_buffer_length, dev->isoc_ctl.transfer_buffer[i], urb->transfer_dma); } usb_free_urb(urb); dev->isoc_ctl.urb[i] = NULL; } dev->isoc_ctl.transfer_buffer[i] = NULL; } kfree(dev->isoc_ctl.urb); kfree(dev->isoc_ctl.transfer_buffer); dev->isoc_ctl.urb = NULL; dev->isoc_ctl.transfer_buffer = NULL; dev->isoc_ctl.num_bufs = 0; em28xx_capture_start(dev, 0); } EXPORT_SYMBOL_GPL(em28xx_uninit_isoc); /* * Allocate URBs and start IRQ */ int em28xx_init_isoc(struct em28xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct em28xx *dev, struct urb *urb)) { struct em28xx_dmaqueue *dma_q = &dev->vidq; struct 
em28xx_dmaqueue *vbi_dma_q = &dev->vbiq; int i; int sb_size, pipe; struct urb *urb; int j, k; int rc; em28xx_isocdbg("em28xx: called em28xx_prepare_isoc\n"); /* De-allocates all pending stuff */ em28xx_uninit_isoc(dev); dev->isoc_ctl.isoc_copy = isoc_copy; dev->isoc_ctl.num_bufs = num_bufs; dev->isoc_ctl.urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.urb) { em28xx_errdev("cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.transfer_buffer) { em28xx_errdev("cannot allocate memory for usb transfer\n"); kfree(dev->isoc_ctl.urb); return -ENOMEM; } dev->isoc_ctl.max_pkt_size = max_pkt_size; dev->isoc_ctl.vid_buf = NULL; dev->isoc_ctl.vbi_buf = NULL; sb_size = max_packets * dev->isoc_ctl.max_pkt_size; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { em28xx_err("cannot alloc isoc_ctl.urb %i\n", i); em28xx_uninit_isoc(dev); return -ENOMEM; } dev->isoc_ctl.urb[i] = urb; dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->isoc_ctl.transfer_buffer[i]) { em28xx_err("unable to allocate %i bytes for transfer" " buffer %i%s\n", sb_size, i, in_interrupt() ? " while in int" : ""); em28xx_uninit_isoc(dev); return -ENOMEM; } memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size); /* FIXME: this is a hack - should be 'desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK' should also be using 'desc.bInterval' */ pipe = usb_rcvisocpipe(dev->udev, dev->mode == EM28XX_ANALOG_MODE ? 
0x82 : 0x84); usb_fill_int_urb(urb, dev->udev, pipe, dev->isoc_ctl.transfer_buffer[i], sb_size, em28xx_irq_callback, dev, 1); urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; k = 0; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = dev->isoc_ctl.max_pkt_size; k += dev->isoc_ctl.max_pkt_size; } } init_waitqueue_head(&dma_q->wq); init_waitqueue_head(&vbi_dma_q->wq); em28xx_capture_start(dev, 1); /* submit urbs and enables IRQ */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { em28xx_err("submit of urb %i failed (error=%i)\n", i, rc); em28xx_uninit_isoc(dev); return rc; } } return 0; } EXPORT_SYMBOL_GPL(em28xx_init_isoc); /* Determine the packet size for the DVB stream for the given device (underlying value programmed into the eeprom) */ int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev) { unsigned int chip_cfg2; unsigned int packet_size = 564; if (dev->chip_id == CHIP_ID_EM2874) { /* FIXME - for now assume 564 like it was before, but the em2874 code should be added to return the proper value... 
*/ packet_size = 564; } else { /* TS max packet size stored in bits 1-0 of R01 */ chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2); switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) { case EM28XX_CHIPCFG2_TS_PACKETSIZE_188: packet_size = 188; break; case EM28XX_CHIPCFG2_TS_PACKETSIZE_376: packet_size = 376; break; case EM28XX_CHIPCFG2_TS_PACKETSIZE_564: packet_size = 564; break; case EM28XX_CHIPCFG2_TS_PACKETSIZE_752: packet_size = 752; break; } } em28xx_coredbg("dvb max packet size=%d\n", packet_size); return packet_size; } EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize); /* * em28xx_wake_i2c() * configure i2c attached devices */ void em28xx_wake_i2c(struct em28xx *dev) { v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0); v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing, INPUT(dev->ctl_input)->vmux, 0, 0); v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); } /* * Device control list */ static LIST_HEAD(em28xx_devlist); static DEFINE_MUTEX(em28xx_devlist_mutex); struct em28xx *em28xx_get_device(int minor, enum v4l2_buf_type *fh_type, int *has_radio) { struct em28xx *h, *dev = NULL; *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; *has_radio = 0; mutex_lock(&em28xx_devlist_mutex); list_for_each_entry(h, &em28xx_devlist, devlist) { if (h->vdev->minor == minor) dev = h; if (h->vbi_dev && h->vbi_dev->minor == minor) { dev = h; *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE; } if (h->radio_dev && h->radio_dev->minor == minor) { dev = h; *has_radio = 1; } } mutex_unlock(&em28xx_devlist_mutex); return dev; } /* * em28xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void em28xx_remove_from_devlist(struct em28xx *dev) { mutex_lock(&em28xx_devlist_mutex); list_del(&dev->devlist); mutex_unlock(&em28xx_devlist_mutex); }; void em28xx_add_into_devlist(struct em28xx *dev) { mutex_lock(&em28xx_devlist_mutex); list_add_tail(&dev->devlist, &em28xx_devlist); 
mutex_unlock(&em28xx_devlist_mutex); }; /* * Extension interface */ static LIST_HEAD(em28xx_extension_devlist); static DEFINE_MUTEX(em28xx_extension_devlist_lock); int em28xx_register_extension(struct em28xx_ops *ops) { struct em28xx *dev = NULL; mutex_lock(&em28xx_devlist_mutex); mutex_lock(&em28xx_extension_devlist_lock); list_add_tail(&ops->next, &em28xx_extension_devlist); list_for_each_entry(dev, &em28xx_devlist, devlist) { if (dev) ops->init(dev); } printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name); mutex_unlock(&em28xx_extension_devlist_lock); mutex_unlock(&em28xx_devlist_mutex); return 0; } EXPORT_SYMBOL(em28xx_register_extension); void em28xx_unregister_extension(struct em28xx_ops *ops) { struct em28xx *dev = NULL; mutex_lock(&em28xx_devlist_mutex); list_for_each_entry(dev, &em28xx_devlist, devlist) { if (dev) ops->fini(dev); } mutex_lock(&em28xx_extension_devlist_lock); printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name); list_del(&ops->next); mutex_unlock(&em28xx_extension_devlist_lock); mutex_unlock(&em28xx_devlist_mutex); } EXPORT_SYMBOL(em28xx_unregister_extension); void em28xx_init_extension(struct em28xx *dev) { struct em28xx_ops *ops = NULL; mutex_lock(&em28xx_extension_devlist_lock); if (!list_empty(&em28xx_extension_devlist)) { list_for_each_entry(ops, &em28xx_extension_devlist, next) { if (ops->init) ops->init(dev); } } mutex_unlock(&em28xx_extension_devlist_lock); } void em28xx_close_extension(struct em28xx *dev) { struct em28xx_ops *ops = NULL; mutex_lock(&em28xx_extension_devlist_lock); if (!list_empty(&em28xx_extension_devlist)) { list_for_each_entry(ops, &em28xx_extension_devlist, next) { if (ops->fini) ops->fini(dev); } } mutex_unlock(&em28xx_extension_devlist_lock); }
gpl-2.0
scjen/rts-pj2
fs/ext2/inode.c
465
43613
/* * linux/fs/ext2/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie * (sct@dcs.ed.ac.uk), 1993, 1998 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * * Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000 */ #include <linux/smp_lock.h> #include <linux/time.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/module.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/mpage.h> #include <linux/fiemap.h> #include <linux/namei.h> #include "ext2.h" #include "acl.h" #include "xip.h" MODULE_AUTHOR("Remy Card and others"); MODULE_DESCRIPTION("Second Extended Filesystem"); MODULE_LICENSE("GPL"); /* * Test whether an inode is a fast symlink. */ static inline int ext2_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT2_I(inode)->i_file_acl ? (inode->i_sb->s_blocksize >> 9) : 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } /* * Called at the last iput() if i_nlink is zero. */ void ext2_delete_inode (struct inode * inode) { truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) goto no_delete; EXT2_I(inode)->i_dtime = get_seconds(); mark_inode_dirty(inode); ext2_write_inode(inode, inode_needs_sync(inode)); inode->i_size = 0; if (inode->i_blocks) ext2_truncate (inode); ext2_free_inode (inode); return; no_delete: clear_inode(inode); /* We must guarantee clearing of inode... 
*/ } typedef struct { __le32 *p; __le32 key; struct buffer_head *bh; } Indirect; static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } /** * ext2_block_to_path - parse the block number into array of offsets * @inode: inode in question (we are only interested in its superblock) * @i_block: block number to be parsed * @offsets: array to store the offsets in * @boundary: set this non-zero if the referred-to block is likely to be * followed (on disk) by an indirect block. * To store the locations of file's data ext2 uses a data structure common * for UNIX filesystems - tree of pointers anchored in the inode, with * data blocks at leaves and indirect blocks in intermediate nodes. * This function translates the block number into path in that tree - * return value is the path length and @offsets[n] is the offset of * pointer to (n+1)th node in the nth one. If @block is out of range * (negative or too large) warning is printed and zero returned. * * Note: function doesn't find node addresses, so no IO is needed. All * we need to know is the capacity of indirect blocks (taken from the * inode->i_sb). */ /* * Portability note: the last comparison (check that we fit into triple * indirect block) is spelled differently, because otherwise on an * architecture with 32-bit longs and 8Kb pages we might get into trouble * if our filesystem had 8Kb blocks. We might use long long, but that would * kill us on x86. Oh, well, at least the sign propagation does not matter - * i_block would have to be negative in the very beginning, so we would not * get there at all. 
*/ static int ext2_block_to_path(struct inode *inode, long i_block, int offsets[4], int *boundary) { int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb); int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb); const long direct_blocks = EXT2_NDIR_BLOCKS, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; int final = 0; if (i_block < 0) { ext2_warning (inode->i_sb, "ext2_block_to_path", "block < 0"); } else if (i_block < direct_blocks) { offsets[n++] = i_block; final = direct_blocks; } else if ( (i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = EXT2_IND_BLOCK; offsets[n++] = i_block; final = ptrs; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = EXT2_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = EXT2_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else { ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big"); } if (boundary) *boundary = final - 1 - (i_block & (ptrs - 1)); return n; } /** * ext2_get_branch - read the chain of indirect blocks leading to data * @inode: inode in question * @depth: depth of the chain (1 - direct pointer, etc.) * @offsets: offsets of pointers in inode/indirect blocks * @chain: place to store the result * @err: here we store the error value * * Function fills the array of triples <key, p, bh> and returns %NULL * if everything went OK or the pointer to the last filled triple * (incomplete one) otherwise. Upon the return chain[i].key contains * the number of (i+1)-th block in the chain (as it is stored in memory, * i.e. 
little-endian 32-bit), chain[i].p contains the address of that * number (it points into struct inode for i==0 and into the bh->b_data * for i>0) and chain[i].bh points to the buffer_head of i-th indirect * block for i>0 and NULL for i==0. In other words, it holds the block * numbers of the chain, addresses they were taken from (and where we can * verify that chain did not change) and buffer_heads hosting these * numbers. * * Function stops when it stumbles upon zero pointer (absent block) * (pointer to last triple returned, *@err == 0) * or when it gets an IO error reading an indirect block * (ditto, *@err == -EIO) * or when it notices that chain had been changed while it was reading * (ditto, *@err == -EAGAIN) * or when it reads all @depth-1 indirect blocks successfully and finds * the whole chain, all way to the data (returns %NULL, *err == 0). */ static Indirect *ext2_get_branch(struct inode *inode, int depth, int *offsets, Indirect chain[4], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; /* i_data is not going away, no lock needed */ add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets); if (!p->key) goto no_block; while (--depth) { bh = sb_bread(sb, le32_to_cpu(p->key)); if (!bh) goto failure; read_lock(&EXT2_I(inode)->i_meta_lock); if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (__le32*)bh->b_data + *++offsets); read_unlock(&EXT2_I(inode)->i_meta_lock); if (!p->key) goto no_block; } return NULL; changed: read_unlock(&EXT2_I(inode)->i_meta_lock); brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } /** * ext2_find_near - find a place for allocation with sufficient locality * @inode: owner * @ind: descriptor of indirect block. * * This function returns the preferred place for block allocation. * It is used when heuristic for sequential allocation fails. * Rules are: * + if there is a block to the left of our position - allocate near it. 
* + if pointer will live in indirect block - allocate near that block. * + if pointer will live in inode - allocate in the same cylinder group. * * In the latter case we colour the starting block by the callers PID to * prevent it from clashing with concurrent allocations for a different inode * in the same block group. The PID is used here so that functionally related * files will be close-by on-disk. * * Caller must make sure that @ind is valid and will stay that way. */ static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind) { struct ext2_inode_info *ei = EXT2_I(inode); __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; __le32 *p; ext2_fsblk_t bg_start; ext2_fsblk_t colour; /* Try to find previous block */ for (p = ind->p - 1; p >= start; p--) if (*p) return le32_to_cpu(*p); /* No such thing, so let's try location of indirect block */ if (ind->bh) return ind->bh->b_blocknr; /* * It is going to be refered from inode itself? OK, just put it into * the same cylinder group then. */ bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group); colour = (current->pid % 16) * (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16); return bg_start + colour; } /** * ext2_find_goal - find a preferred place for allocation. * @inode: owner * @block: block we want * @partial: pointer to the last triple within a chain * * Returns preferred place for a block (the goal). */ static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block, Indirect *partial) { struct ext2_block_alloc_info *block_i; block_i = EXT2_I(inode)->i_block_alloc_info; /* * try the heuristic for sequential allocation, * failing that at least try to get decent locality. 
*/ if (block_i && (block == block_i->last_alloc_logical_block + 1) && (block_i->last_alloc_physical_block != 0)) { return block_i->last_alloc_physical_block + 1; } return ext2_find_near(inode, partial); } /** * ext2_blks_to_allocate: Look up the block map and count the number * of direct blocks need to be allocated for the given branch. * * @branch: chain of indirect blocks * @k: number of blocks need for indirect blocks * @blks: number of data blocks to be mapped. * @blocks_to_boundary: the offset in the indirect block * * return the total number of blocks to be allocate, including the * direct and indirect blocks. */ static int ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks, int blocks_to_boundary) { unsigned long count = 0; /* * Simple case, [t,d]Indirect block(s) has not allocated yet * then it's clear blocks on that path have not allocated */ if (k > 0) { /* right now don't hanel cross boundary allocation */ if (blks < blocks_to_boundary + 1) count += blks; else count += blocks_to_boundary + 1; return count; } count++; while (count < blks && count <= blocks_to_boundary && le32_to_cpu(*(branch[0].p + count)) == 0) { count++; } return count; } /** * ext2_alloc_blocks: multiple allocate blocks needed for a branch * @indirect_blks: the number of blocks need to allocate for indirect * blocks * * @new_blocks: on return it will store the new block numbers for * the indirect blocks(if needed) and the first direct block, * @blks: on return it will store the total number of allocated * direct blocks */ static int ext2_alloc_blocks(struct inode *inode, ext2_fsblk_t goal, int indirect_blks, int blks, ext2_fsblk_t new_blocks[4], int *err) { int target, i; unsigned long count = 0; int index = 0; ext2_fsblk_t current_block = 0; int ret = 0; /* * Here we try to allocate the requested multiple blocks at once, * on a best-effort basis. 
* To build a branch, we should allocate blocks for * the indirect blocks(if not allocated yet), and at least * the first direct block of this branch. That's the * minimum number of blocks need to allocate(required) */ target = blks + indirect_blks; while (1) { count = target; /* allocating blocks for indirect blocks and direct blocks */ current_block = ext2_new_blocks(inode,goal,&count,err); if (*err) goto failed_out; target -= count; /* allocate blocks for indirect blocks */ while (index < indirect_blks && count) { new_blocks[index++] = current_block++; count--; } if (count > 0) break; } /* save the new block number for the first direct block */ new_blocks[index] = current_block; /* total number of blocks allocated for direct blocks */ ret = count; *err = 0; return ret; failed_out: for (i = 0; i <index; i++) ext2_free_blocks(inode, new_blocks[i], 1); return ret; } /** * ext2_alloc_branch - allocate and set up a chain of blocks. * @inode: owner * @num: depth of the chain (number of blocks to allocate) * @offsets: offsets (in the blocks) to store the pointers to next. * @branch: place to store the chain in. * * This function allocates @num blocks, zeroes out all but the last one, * links them into chain and (if we are synchronous) writes them to disk. * In other words, it prepares a branch that can be spliced onto the * inode. It stores the information about that chain in the branch[], in * the same format as ext2_get_branch() would do. We are calling it after * we had read the existing part of chain and partial points to the last * triple of that (one with zero ->key). Upon the exit we have the same * picture as after the successful ext2_get_block(), excpet that in one * place chain is disconnected - *branch->p is still zero (we did not * set the last link), but branch->key contains the number that should * be placed into *branch->p to fill that gap. 
* * If allocation fails we free all blocks we've allocated (and forget * their buffer_heads) and return the error value the from failed * ext2_alloc_block() (normally -ENOSPC). Otherwise we set the chain * as described above and return 0. */ static int ext2_alloc_branch(struct inode *inode, int indirect_blks, int *blks, ext2_fsblk_t goal, int *offsets, Indirect *branch) { int blocksize = inode->i_sb->s_blocksize; int i, n = 0; int err = 0; struct buffer_head *bh; int num; ext2_fsblk_t new_blocks[4]; ext2_fsblk_t current_block; num = ext2_alloc_blocks(inode, goal, indirect_blks, *blks, new_blocks, &err); if (err) return err; branch[0].key = cpu_to_le32(new_blocks[0]); /* * metadata blocks and data blocks are allocated. */ for (n = 1; n <= indirect_blks; n++) { /* * Get buffer_head for parent block, zero it out * and set the pointer to new one, then send * parent to disk. */ bh = sb_getblk(inode->i_sb, new_blocks[n-1]); branch[n].bh = bh; lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].p = (__le32 *) bh->b_data + offsets[n]; branch[n].key = cpu_to_le32(new_blocks[n]); *branch[n].p = branch[n].key; if ( n == indirect_blks) { current_block = new_blocks[n]; /* * End of chain, update the last new metablock of * the chain to point to the new allocated * data blocks numbers */ for (i=1; i < num; i++) *(branch[n].p + i) = cpu_to_le32(++current_block); } set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); /* We used to sync bh here if IS_SYNC(inode). * But we now rely upon generic_write_sync() * and b_inode_buffers. But not for directories. */ if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) sync_dirty_buffer(bh); } *blks = num; return err; } /** * ext2_splice_branch - splice the allocated branch onto inode. 
* @inode: owner * @block: (logical) number of block we are adding * @where: location of missing link * @num: number of indirect blocks we are adding * @blks: number of direct blocks we are adding * * This function fills the missing link and does all housekeeping needed in * inode (->i_blocks, etc.). In case of success we end up with the full * chain to new block and return 0. */ static void ext2_splice_branch(struct inode *inode, long block, Indirect *where, int num, int blks) { int i; struct ext2_block_alloc_info *block_i; ext2_fsblk_t current_block; block_i = EXT2_I(inode)->i_block_alloc_info; /* XXX LOCKING probably should have i_meta_lock ?*/ /* That's it */ *where->p = where->key; /* * Update the host buffer_head or inode to point to more just allocated * direct blocks blocks */ if (num == 0 && blks > 1) { current_block = le32_to_cpu(where->key) + 1; for (i = 1; i < blks; i++) *(where->p + i ) = cpu_to_le32(current_block++); } /* * update the most recently allocated logical & physical block * in i_block_alloc_info, to assist find the proper goal block for next * allocation */ if (block_i) { block_i->last_alloc_logical_block = block + blks - 1; block_i->last_alloc_physical_block = le32_to_cpu(where[num].key) + blks - 1; } /* We are done with atomic stuff, now do the rest of housekeeping */ /* had we spliced it onto indirect block? */ if (where->bh) mark_buffer_dirty_inode(where->bh, inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); } /* * Allocation strategy is simple: if we have to allocate something, we will * have to go the whole way to leaf. So let's do it before attaching anything * to tree, set linkage between the newborn blocks, write them if sync is * required, recheck the path, free and repeat if check fails, otherwise * set the last missing link (that will protect us from any truncate-generated * removals - all blocks on the path are immune now) and possibly force the * write on the parent block. 
* That has a nice additional property: no special recovery from the failed * allocations is needed - we simply release blocks and do not touch anything * reachable from inode. * * `handle' can be NULL if create == 0. * * return > 0, # of blocks mapped or allocated. * return = 0, if plain lookup failed. * return < 0, error case. */ static int ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, int create) { int err = -EIO; int offsets[4]; Indirect chain[4]; Indirect *partial; ext2_fsblk_t goal; int indirect_blks; int blocks_to_boundary = 0; int depth; struct ext2_inode_info *ei = EXT2_I(inode); int count = 0; ext2_fsblk_t first_block = 0; depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); if (depth == 0) return (err); partial = ext2_get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { first_block = le32_to_cpu(chain[depth - 1].key); clear_buffer_new(bh_result); /* What's this do? */ count++; /*map more blocks*/ while (count < maxblocks && count <= blocks_to_boundary) { ext2_fsblk_t blk; if (!verify_chain(chain, chain + depth - 1)) { /* * Indirect block might be removed by * truncate while we were reading it. * Handling of that case: forget what we've * got now, go to reread. 
*/ err = -EAGAIN; count = 0; break; } blk = le32_to_cpu(*(chain[depth-1].p + count)); if (blk == first_block + count) count++; else break; } if (err != -EAGAIN) goto got_it; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) goto cleanup; mutex_lock(&ei->truncate_mutex); /* * If the indirect block is missing while we are reading * the chain(ext3_get_branch() returns -EAGAIN err), or * if the chain has been changed after we grab the semaphore, * (either because another process truncated this branch, or * another get_block allocated this branch) re-grab the chain to see if * the request block has been allocated or not. * * Since we already block the truncate/other get_block * at this point, we will have the current copy of the chain when we * splice the branch into the tree. */ if (err == -EAGAIN || !verify_chain(chain, partial)) { while (partial > chain) { brelse(partial->bh); partial--; } partial = ext2_get_branch(inode, depth, offsets, chain, &err); if (!partial) { count++; mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; clear_buffer_new(bh_result); goto got_it; } } /* * Okay, we need to do block allocation. Lazily initialize the block * allocation info here if necessary */ if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) ext2_init_block_alloc_info(inode); goal = ext2_find_goal(inode, iblock, partial); /* the number of blocks need to allocate for [d,t]indirect blocks */ indirect_blks = (chain + depth) - partial - 1; /* * Next look up the indirect map to count the totoal number of * direct blocks to allocate for this branch. */ count = ext2_blks_to_allocate(partial, indirect_blks, maxblocks, blocks_to_boundary); /* * XXX ???? 
Block out ext2_truncate while we alter the tree */ err = ext2_alloc_branch(inode, indirect_blks, &count, goal, offsets + (partial - chain), partial); if (err) { mutex_unlock(&ei->truncate_mutex); goto cleanup; } if (ext2_use_xip(inode->i_sb)) { /* * we need to clear the block */ err = ext2_clear_xip_target (inode, le32_to_cpu(chain[depth-1].key)); if (err) { mutex_unlock(&ei->truncate_mutex); goto cleanup; } } ext2_splice_branch(inode, iblock, partial, indirect_blks, count); mutex_unlock(&ei->truncate_mutex); set_buffer_new(bh_result); got_it: map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); if (count > blocks_to_boundary) set_buffer_boundary(bh_result); err = count; /* Clean up and exit */ partial = chain + depth - 1; /* the whole chain */ cleanup: while (partial > chain) { brelse(partial->bh); partial--; } return err; } int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int ret = ext2_get_blocks(inode, iblock, max_blocks, bh_result, create); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } return ret; } int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { return generic_block_fiemap(inode, fieinfo, start, len, ext2_get_block); } static int ext2_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, ext2_get_block, wbc); } static int ext2_readpage(struct file *file, struct page *page) { return mpage_readpage(page, ext2_get_block); } static int ext2_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, ext2_get_block); } int __ext2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { return block_write_begin(file, mapping, pos, len, flags, pagep, 
fsdata, ext2_get_block); } static int ext2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { *pagep = NULL; return __ext2_write_begin(file, mapping, pos, len, flags, pagep,fsdata); } static int ext2_nobh_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { /* * Dir-in-pagecache still uses ext2_write_begin. Would have to rework * directory handling code to pass around offsets rather than struct * pages in order to make this work easily. */ return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata, ext2_get_block); } static int ext2_nobh_writepage(struct page *page, struct writeback_control *wbc) { return nobh_writepage(page, ext2_get_block, wbc); } static sector_t ext2_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,ext2_get_block); } static ssize_t ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, ext2_get_block, NULL); } static int ext2_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, ext2_get_block); } const struct address_space_operations ext2_aops = { .readpage = ext2_readpage, .readpages = ext2_readpages, .writepage = ext2_writepage, .sync_page = block_sync_page, .write_begin = ext2_write_begin, .write_end = generic_write_end, .bmap = ext2_bmap, .direct_IO = ext2_direct_IO, .writepages = ext2_writepages, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; const struct address_space_operations ext2_aops_xip = { .bmap = ext2_bmap, .get_xip_mem = 
ext2_get_xip_mem, }; const struct address_space_operations ext2_nobh_aops = { .readpage = ext2_readpage, .readpages = ext2_readpages, .writepage = ext2_nobh_writepage, .sync_page = block_sync_page, .write_begin = ext2_nobh_write_begin, .write_end = nobh_write_end, .bmap = ext2_bmap, .direct_IO = ext2_direct_IO, .writepages = ext2_writepages, .migratepage = buffer_migrate_page, .error_remove_page = generic_error_remove_page, }; /* * Probably it should be a library function... search for first non-zero word * or memcmp with zero_page, whatever is better for particular architecture. * Linus? */ static inline int all_zeroes(__le32 *p, __le32 *q) { while (p < q) if (*p++) return 0; return 1; } /** * ext2_find_shared - find the indirect blocks for partial truncation. * @inode: inode in question * @depth: depth of the affected branch * @offsets: offsets of pointers in that branch (see ext2_block_to_path) * @chain: place to store the pointers to partial indirect blocks * @top: place to the (detached) top of branch * * This is a helper function used by ext2_truncate(). * * When we do truncate() we may have to clean the ends of several indirect * blocks but leave the blocks themselves alive. Block is partially * truncated if some data below the new i_size is refered from it (and * it is on the path to the first completely truncated data block, indeed). * We have to free the top of that path along with everything to the right * of the path. Since no allocation past the truncation point is possible * until ext2_truncate() finishes, we may safely do the latter, but top * of branch may require special attention - pageout below the truncation * point might try to populate it. * * We atomically detach the top of branch from the tree, store the block * number of its root in *@top, pointers to buffer_heads of partially * truncated blocks - in @chain[].bh and pointers to their last elements * that should not be removed - in @chain[].p. 
Return value is the pointer * to last filled element of @chain. * * The work left to caller to do the actual freeing of subtrees: * a) free the subtree starting from *@top * b) free the subtrees whose roots are stored in * (@chain[i].p+1 .. end of @chain[i].bh->b_data) * c) free the subtrees growing from the inode past the @chain[0].p * (no partially truncated stuff there). */ static Indirect *ext2_find_shared(struct inode *inode, int depth, int offsets[4], Indirect chain[4], __le32 *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = ext2_get_branch(inode, k, offsets, chain, &err); if (!partial) partial = chain + k-1; /* * If the branch acquired continuation since we've looked at it - * fine, it should all survive and (new) top doesn't belong to us. */ write_lock(&EXT2_I(inode)->i_meta_lock); if (!partial->key && *partial->p) { write_unlock(&EXT2_I(inode)->i_meta_lock); goto no_top; } for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--) ; /* * OK, we've found the last block that must survive. The rest of our * branch should be detached before unlocking. However, if that rest * of branch is all ours and does not grow immediately from the inode * it's easier to cheat and just decrement partial->p. */ if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&EXT2_I(inode)->i_meta_lock); while(partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } /** * ext2_free_data - free a list of data blocks * @inode: inode we are dealing with * @p: array of block numbers * @q: points immediately past the end of array * * We are freeing all blocks refered from that array (numbers are * stored as little-endian 32-bit) and updating @inode->i_blocks * appropriately. 
*/ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q) { unsigned long block_to_free = 0, count = 0; unsigned long nr; for ( ; p < q ; p++) { nr = le32_to_cpu(*p); if (nr) { *p = 0; /* accumulate blocks to free if they're contiguous */ if (count == 0) goto free_this; else if (block_to_free == nr - count) count++; else { mark_inode_dirty(inode); ext2_free_blocks (inode, block_to_free, count); free_this: block_to_free = nr; count = 1; } } } if (count > 0) { mark_inode_dirty(inode); ext2_free_blocks (inode, block_to_free, count); } } /** * ext2_free_branches - free an array of branches * @inode: inode we are dealing with * @p: array of block numbers * @q: pointer immediately past the end of array * @depth: depth of the branches to free * * We are freeing all blocks refered from these branches (numbers are * stored as little-endian 32-bit) and updating @inode->i_blocks * appropriately. */ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth) { struct buffer_head * bh; unsigned long nr; if (depth--) { int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb); for ( ; p < q ; p++) { nr = le32_to_cpu(*p); if (!nr) continue; *p = 0; bh = sb_bread(inode->i_sb, nr); /* * A read failure? Report error and clear slot * (should be rare). 
*/ if (!bh) { ext2_error(inode->i_sb, "ext2_free_branches", "Read failure, inode=%ld, block=%ld", inode->i_ino, nr); continue; } ext2_free_branches(inode, (__le32*)bh->b_data, (__le32*)bh->b_data + addr_per_block, depth); bforget(bh); ext2_free_blocks(inode, nr, 1); mark_inode_dirty(inode); } } else ext2_free_data(inode, p, q); } void ext2_truncate(struct inode *inode) { __le32 *i_data = EXT2_I(inode)->i_data; struct ext2_inode_info *ei = EXT2_I(inode); int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb); int offsets[4]; Indirect chain[4]; Indirect *partial; __le32 nr = 0; int n; long iblock; unsigned blocksize; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; if (ext2_inode_is_fast_symlink(inode)) return; if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return; blocksize = inode->i_sb->s_blocksize; iblock = (inode->i_size + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb); if (mapping_is_xip(inode->i_mapping)) xip_truncate_page(inode->i_mapping, inode->i_size); else if (test_opt(inode->i_sb, NOBH)) nobh_truncate_page(inode->i_mapping, inode->i_size, ext2_get_block); else block_truncate_page(inode->i_mapping, inode->i_size, ext2_get_block); n = ext2_block_to_path(inode, iblock, offsets, NULL); if (n == 0) return; /* * From here we block out all ext2_get_block() callers who want to * modify the block allocation tree. 
*/ mutex_lock(&ei->truncate_mutex); if (n == 1) { ext2_free_data(inode, i_data+offsets[0], i_data + EXT2_NDIR_BLOCKS); goto do_indirects; } partial = ext2_find_shared(inode, n, offsets, chain, &nr); /* Kill the top of shared branch (already detached) */ if (nr) { if (partial == chain) mark_inode_dirty(inode); else mark_buffer_dirty_inode(partial->bh, inode); ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { ext2_free_branches(inode, partial->p + 1, (__le32*)partial->bh->b_data+addr_per_block, (chain+n-1) - partial); mark_buffer_dirty_inode(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ switch (offsets[0]) { default: nr = i_data[EXT2_IND_BLOCK]; if (nr) { i_data[EXT2_IND_BLOCK] = 0; mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 1); } case EXT2_IND_BLOCK: nr = i_data[EXT2_DIND_BLOCK]; if (nr) { i_data[EXT2_DIND_BLOCK] = 0; mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 2); } case EXT2_DIND_BLOCK: nr = i_data[EXT2_TIND_BLOCK]; if (nr) { i_data[EXT2_TIND_BLOCK] = 0; mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 3); } case EXT2_TIND_BLOCK: ; } ext2_discard_reservation(inode); mutex_unlock(&ei->truncate_mutex); inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; if (inode_needs_sync(inode)) { sync_mapping_buffers(inode->i_mapping); ext2_sync_inode (inode); } else { mark_inode_dirty(inode); } } static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, struct buffer_head **p) { struct buffer_head * bh; unsigned long block_group; unsigned long block; unsigned long offset; struct ext2_group_desc * gdp; *p = NULL; if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) || ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count)) goto Einval; block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb); gdp = ext2_get_group_desc(sb, block_group, NULL); if 
(!gdp) goto Egdp; /* * Figure out the offset within the block group inode table */ offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb); block = le32_to_cpu(gdp->bg_inode_table) + (offset >> EXT2_BLOCK_SIZE_BITS(sb)); if (!(bh = sb_bread(sb, block))) goto Eio; *p = bh; offset &= (EXT2_BLOCK_SIZE(sb) - 1); return (struct ext2_inode *) (bh->b_data + offset); Einval: ext2_error(sb, "ext2_get_inode", "bad inode number: %lu", (unsigned long) ino); return ERR_PTR(-EINVAL); Eio: ext2_error(sb, "ext2_get_inode", "unable to read inode block - inode=%lu, block=%lu", (unsigned long) ino, block); Egdp: return ERR_PTR(-EIO); } void ext2_set_inode_flags(struct inode *inode) { unsigned int flags = EXT2_I(inode)->i_flags; inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); if (flags & EXT2_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT2_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & EXT2_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; if (flags & EXT2_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; } /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */ void ext2_get_inode_flags(struct ext2_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL| EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT2_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT2_APPEND_FL; if (flags & S_IMMUTABLE) ei->i_flags |= EXT2_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT2_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT2_DIRSYNC_FL; } struct inode *ext2_iget (struct super_block *sb, unsigned long ino) { struct ext2_inode_info *ei; struct buffer_head * bh; struct ext2_inode *raw_inode; struct inode *inode; long ret = -EIO; int n; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT2_I(inode); ei->i_block_alloc_info = NULL; raw_inode = 
ext2_get_inode(inode->i_sb, ino, &bh); if (IS_ERR(raw_inode)) { ret = PTR_ERR(raw_inode); goto bad_inode; } inode->i_mode = le16_to_cpu(raw_inode->i_mode); inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (!(test_opt (inode->i_sb, NO_UID32))) { inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes * the test is that same one that e2fsck uses * NeilBrown 1999oct15 */ if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { /* this inode is deleted */ brelse (bh); ret = -ESTALE; goto bad_inode; } inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); ei->i_flags = le32_to_cpu(raw_inode->i_flags); ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); ei->i_frag_no = raw_inode->i_frag; ei->i_frag_size = raw_inode->i_fsize; ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); ei->i_dir_acl = 0; if (S_ISREG(inode->i_mode)) inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; else ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl); ei->i_dtime = 0; inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_state = 0; ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb); ei->i_dir_start_lookup = 0; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! 
*/ for (n = 0; n < EXT2_N_BLOCKS; n++) ei->i_data[n] = raw_inode->i_block[n]; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext2_file_inode_operations; if (ext2_use_xip(inode->i_sb)) { inode->i_mapping->a_ops = &ext2_aops_xip; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; } else { inode->i_mapping->a_ops = &ext2_aops; inode->i_fop = &ext2_file_operations; } } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext2_dir_inode_operations; inode->i_fop = &ext2_dir_operations; if (test_opt(inode->i_sb, NOBH)) inode->i_mapping->a_ops = &ext2_nobh_aops; else inode->i_mapping->a_ops = &ext2_aops; } else if (S_ISLNK(inode->i_mode)) { if (ext2_inode_is_fast_symlink(inode)) { inode->i_op = &ext2_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext2_symlink_inode_operations; if (test_opt(inode->i_sb, NOBH)) inode->i_mapping->a_ops = &ext2_nobh_aops; else inode->i_mapping->a_ops = &ext2_aops; } } else { inode->i_op = &ext2_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } brelse (bh); ext2_set_inode_flags(inode); unlock_new_inode(inode); return inode; bad_inode: iget_failed(inode); return ERR_PTR(ret); } int ext2_write_inode(struct inode *inode, int do_sync) { struct ext2_inode_info *ei = EXT2_I(inode); struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; uid_t uid = inode->i_uid; gid_t gid = inode->i_gid; struct buffer_head * bh; struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh); int n; int err = 0; if (IS_ERR(raw_inode)) return -EIO; /* For fields not not tracking in the in-memory inode, * initialise them to zero for new inodes. 
*/ if (ei->i_state & EXT2_STATE_NEW) memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size); ext2_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if (!(test_opt(sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (!ei->i_dtime) { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le32(inode->i_size); raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags); raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); raw_inode->i_frag = ei->i_frag_no; raw_inode->i_fsize = ei->i_frag_size; raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); if (!S_ISREG(inode->i_mode)) raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); else { raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32); if (inode->i_size > 0x7fffffffULL) { if (!EXT2_HAS_RO_COMPAT_FEATURE(sb, EXT2_FEATURE_RO_COMPAT_LARGE_FILE) || EXT2_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT2_GOOD_OLD_REV)) { /* If this is the first large file * created, add a flag to the superblock. 
*/ lock_kernel(); ext2_update_dynamic_rev(sb); EXT2_SET_RO_COMPAT_FEATURE(sb, EXT2_FEATURE_RO_COMPAT_LARGE_FILE); unlock_kernel(); ext2_write_super(sb); } } } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else for (n = 0; n < EXT2_N_BLOCKS; n++) raw_inode->i_block[n] = ei->i_data[n]; mark_buffer_dirty(bh); if (do_sync) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing ext2 inode [%s:%08lx]\n", sb->s_id, (unsigned long) ino); err = -EIO; } } ei->i_state &= ~EXT2_STATE_NEW; brelse (bh); return err; } int ext2_sync_inode(struct inode *inode) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, /* sys_fsync did this */ }; return sync_inode(inode, &wbc); } int ext2_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; int error; error = inode_change_ok(inode, iattr); if (error) return error; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0; if (error) return error; } error = inode_setattr(inode, iattr); if (!error && (iattr->ia_valid & ATTR_MODE)) error = ext2_acl_chmod(inode); return error; }
gpl-2.0
CyanogenMod/semc-kernel-msm7x30
drivers/scsi/lpfc/lpfc_hbadisc.c
465
133064
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* AlpaArray for assignment of scsid for scan-down and bind_method */ static uint8_t lpfcAlpaArray[] = { 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 }; static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_terminate_rport_io(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; struct lpfc_hba *phba; rdata = rport->dd_data; ndlp = rdata->pnode; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) printk(KERN_ERR "Cannot find remote node" " to terminate I/O Data x%x\n", rport->port_id); return; } phba = 
ndlp->phba; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, "rport terminate: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) { lpfc_sli_abort_iocb(ndlp->vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } } /* * This function will be called when dev_loss_tmo fire. */ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_work_evt *evtp; int put_node; int put_rport; rdata = rport->dd_data; ndlp = rdata->pnode; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) return; vport = ndlp->vport; phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosscb: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. */ if (vport->load_flag & FC_UNLOADING) { put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); return; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) return; spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); } spin_unlock_irq(&phba->hbalock); return; } /* * This function is called from the worker thread when dev_loss_tmo * expire. 
*/ static void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) { struct lpfc_rport_data *rdata; struct fc_rport *rport; struct lpfc_vport *vport; struct lpfc_hba *phba; uint8_t *name; int put_node; int put_rport; int warn_on = 0; rport = ndlp->rport; if (!rport) return; rdata = rport->dd_data; name = (uint8_t *) &ndlp->nlp_portname; vport = ndlp->vport; phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosstmo:did:x%x type:x%x id:x%x", ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. */ if (vport->load_flag & FC_UNLOADING) { if (ndlp->nlp_sid != NLP_NO_SID) { /* flush the target */ lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); return; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0284 Devloss timeout Ignored on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); return; } if (ndlp->nlp_type & NLP_FABRIC) { /* We will clean up these Nodes in linkup */ put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); return; } if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; /* flush the target */ lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } if (warn_on) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x 
Data: x%x x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x Data: x%x x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); if (!(vport->load_flag & FC_UNLOADING) && !(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); lpfc_unregister_unused_fcf(phba); } /** * lpfc_alloc_fast_evt - Allocates data structure for posting event * @phba: Pointer to hba context object. * * This function is called from the functions which need to post * events from interrupt context. This function allocates data * structure required for posting event. It also keeps track of * number of events pending and prevent event storm when there are * too many events. **/ struct lpfc_fast_path_event * lpfc_alloc_fast_evt(struct lpfc_hba *phba) { struct lpfc_fast_path_event *ret; /* If there are lot of fast event do not exhaust memory due to this */ if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) return NULL; ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC); if (ret) { atomic_inc(&phba->fast_event_count); INIT_LIST_HEAD(&ret->work_evt.evt_listp); ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; } return ret; } /** * lpfc_free_fast_evt - Frees event data structure * @phba: Pointer to hba context object. * @evt: Event object which need to be freed. 
* * This function frees the data structure required for posting * events. **/ void lpfc_free_fast_evt(struct lpfc_hba *phba, struct lpfc_fast_path_event *evt) { atomic_dec(&phba->fast_event_count); kfree(evt); } /** * lpfc_send_fastpath_evt - Posts events generated from fast path * @phba: Pointer to hba context object. * @evtp: Event data structure. * * This function is called from worker thread, when the interrupt * context need to post an event. This function posts the event * to fc transport netlink interface. **/ static void lpfc_send_fastpath_evt(struct lpfc_hba *phba, struct lpfc_work_evt *evtp) { unsigned long evt_category, evt_sub_category; struct lpfc_fast_path_event *fast_evt_data; char *evt_data; uint32_t evt_data_size; struct Scsi_Host *shost; fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, work_evt); evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; evt_sub_category = (unsigned long) fast_evt_data->un. fabric_evt.subcategory; shost = lpfc_shost_from_vport(fast_evt_data->vport); if (evt_category == FC_REG_FABRIC_EVENT) { if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { evt_data = (char *) &fast_evt_data->un.read_check_error; evt_data_size = sizeof(fast_evt_data->un. read_check_error); } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { evt_data = (char *) &fast_evt_data->un.fabric_evt; evt_data_size = sizeof(fast_evt_data->un.fabric_evt); } else { lpfc_free_fast_evt(phba, fast_evt_data); return; } } else if (evt_category == FC_REG_SCSI_EVENT) { switch (evt_sub_category) { case LPFC_EVENT_QFULL: case LPFC_EVENT_DEVBSY: evt_data = (char *) &fast_evt_data->un.scsi_evt; evt_data_size = sizeof(fast_evt_data->un.scsi_evt); break; case LPFC_EVENT_CHECK_COND: evt_data = (char *) &fast_evt_data->un.check_cond_evt; evt_data_size = sizeof(fast_evt_data->un. 
check_cond_evt); break; case LPFC_EVENT_VARQUEDEPTH: evt_data = (char *) &fast_evt_data->un.queue_depth_evt; evt_data_size = sizeof(fast_evt_data->un. queue_depth_evt); break; default: lpfc_free_fast_evt(phba, fast_evt_data); return; } } else { lpfc_free_fast_evt(phba, fast_evt_data); return; } fc_host_post_vendor_event(shost, fc_get_event_number(), evt_data_size, evt_data, LPFC_NL_VENDOR_ID); lpfc_free_fast_evt(phba, fast_evt_data); return; } static void lpfc_work_list_done(struct lpfc_hba *phba) { struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; int free_evt; spin_lock_irq(&phba->hbalock); while (!list_empty(&phba->work_list)) { list_remove_head((&phba->work_list), evtp, typeof(*evtp), evt_listp); spin_unlock_irq(&phba->hbalock); free_evt = 1; switch (evtp->evt) { case LPFC_EVT_ELS_RETRY: ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); lpfc_els_retry_delay_handler(ndlp); free_evt = 0; /* evt is part of ndlp */ /* decrement the node reference count held * for this queued work */ lpfc_nlp_put(ndlp); break; case LPFC_EVT_DEV_LOSS: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); lpfc_dev_loss_tmo_handler(ndlp); free_evt = 0; /* decrement the node reference count held for * this queued work */ lpfc_nlp_put(ndlp); break; case LPFC_EVT_ONLINE: if (phba->link_state < LPFC_LINK_DOWN) *(int *) (evtp->evt_arg1) = lpfc_online(phba); else *(int *) (evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_OFFLINE_PREP: if (phba->link_state >= LPFC_LINK_DOWN) lpfc_offline_prep(phba); *(int *)(evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_OFFLINE: lpfc_offline(phba); lpfc_sli_brdrestart(phba); *(int *)(evtp->evt_arg1) = lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_WARM_START: lpfc_offline(phba); lpfc_reset_barrier(phba); lpfc_sli_brdreset(phba); lpfc_hba_down_post(phba); *(int 
*)(evtp->evt_arg1) = lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_KILL: lpfc_offline(phba); *(int *)(evtp->evt_arg1) = (phba->pport->stopped) ? 0 : lpfc_sli_brdkill(phba); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_FASTPATH_MGMT_EVT: lpfc_send_fastpath_evt(phba, evtp); free_evt = 0; break; } if (free_evt) kfree(evtp); spin_lock_irq(&phba->hbalock); } spin_unlock_irq(&phba->hbalock); } static void lpfc_work_done(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; uint32_t ha_copy, status, control, work_port_events; struct lpfc_vport **vports; struct lpfc_vport *vport; int i; spin_lock_irq(&phba->hbalock); ha_copy = phba->work_ha; phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); /* First, try to post the next mailbox command to SLI4 device */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) lpfc_sli4_post_async_mbox(phba); if (ha_copy & HA_ERATT) /* Handle the error attention event */ lpfc_handle_eratt(phba); if (ha_copy & HA_MBATT) lpfc_sli_handle_mb_event(phba); if (ha_copy & HA_LATT) lpfc_handle_latt(phba); /* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { if (phba->hba_flag & FCP_XRI_ABORT_EVENT) lpfc_sli4_fcp_xri_abort_event_proc(phba); if (phba->hba_flag & ELS_XRI_ABORT_EVENT) lpfc_sli4_els_xri_abort_event_proc(phba); if (phba->hba_flag & ASYNC_EVENT) lpfc_sli4_async_event_proc(phba); if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; spin_unlock_irq(&phba->hbalock); lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } if (phba->hba_flag & HBA_RECEIVE_BUFFER) lpfc_sli4_handle_received_buffer(phba); } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport */ if (vports[i] == NULL && i == 0) 
vport = phba->pport; else vport = vports[i]; if (vport == NULL) break; spin_lock_irq(&vport->work_port_lock); work_port_events = vport->work_port_events; vport->work_port_events &= ~work_port_events; spin_unlock_irq(&vport->work_port_lock); if (work_port_events & WORKER_DISC_TMO) lpfc_disc_timeout_handler(vport); if (work_port_events & WORKER_ELS_TMO) lpfc_els_timeout_handler(vport); if (work_port_events & WORKER_HB_TMO) lpfc_hb_timeout_handler(phba); if (work_port_events & WORKER_MBOX_TMO) lpfc_mbox_timeout_handler(phba); if (work_port_events & WORKER_FABRIC_BLOCK_TMO) lpfc_unblock_fabric_iocbs(phba); if (work_port_events & WORKER_FDMI_TMO) lpfc_fdmi_timeout_handler(vport); if (work_port_events & WORKER_RAMP_DOWN_QUEUE) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_RAMP_UP_QUEUE) lpfc_ramp_up_queue_handler(phba); } lpfc_destroy_vport_work_array(phba, vports); pring = &phba->sli.ring[LPFC_ELS_RING]; status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if ((status & HA_RXMASK) || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } else { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, (status & HA_RXMASK)); } /* * Turn on Ring interrupts */ if (phba->sli_rev <= LPFC_SLI_REV3) { spin_lock_irq(&phba->hbalock); control = readl(phba->HCregaddr); if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { lpfc_debugfs_slow_ring_trc(phba, "WRK Enable ring: cntl:x%x hacopy:x%x", control, ha_copy, 0); control |= (HC_R0INT_ENA << LPFC_ELS_RING); writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } else { lpfc_debugfs_slow_ring_trc(phba, "WRK Ring ok: cntl:x%x hacopy:x%x", control, ha_copy, 0); } spin_unlock_irq(&phba->hbalock); } } lpfc_work_list_done(phba); } int lpfc_do_work(void *p) { struct lpfc_hba *phba = p; int 
rc; set_user_nice(current, -20); phba->data_flags = 0; while (!kthread_should_stop()) { /* wait and check worker queue activities */ rc = wait_event_interruptible(phba->work_waitq, (test_and_clear_bit(LPFC_DATA_READY, &phba->data_flags) || kthread_should_stop())); /* Signal wakeup shall terminate the worker thread */ if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_ELS, "0433 Wakeup on signal: rc=x%x\n", rc); break; } /* Attend pending lpfc data processing */ lpfc_work_done(phba); } phba->worker_thread = NULL; lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0432 Worker thread stopped.\n"); return 0; } /* * This is only called to handle FC worker events. Since this a rare * occurance, we allocate a struct lpfc_work_evt structure here instead of * embedding it in the IOCB. */ int lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, uint32_t evt) { struct lpfc_work_evt *evtp; unsigned long flags; /* * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will * be queued to worker thread for processing */ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); if (!evtp) return 0; evtp->evt_arg1 = arg1; evtp->evt_arg2 = arg2; evtp->evt = evt; spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_worker_wake_up(phba); return 1; } void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; int rc; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || ((vport->port_type == LPFC_NPIV_PORT) && (ndlp->nlp_DID == NameServer_DID))) lpfc_unreg_rpi(vport, ndlp); /* Leave Fabric nodes alone on link down */ if (!remove && ndlp->nlp_type & NLP_FABRIC) continue; rc = 
lpfc_disc_state_machine(vport, ndlp, NULL, remove ? NLP_EVT_DEVICE_RM : NLP_EVT_DEVICE_RECOVERY); } if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); } } void lpfc_port_link_failure(struct lpfc_vport *vport) { /* Cleanup any outstanding RSCN activity */ lpfc_els_flush_rscn(vport); /* Cleanup any outstanding ELS commands */ lpfc_els_flush_cmd(vport); lpfc_cleanup_rpis(vport, 0); /* Turn off discovery timer if its running */ lpfc_can_disctmo(vport); } void lpfc_linkdown_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Down: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); lpfc_port_link_failure(vport); } int lpfc_linkdown(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_vport **vports; LPFC_MBOXQ_t *mb; int i; if (phba->link_state == LPFC_LINK_DOWN) return 0; spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; phba->pport->fc_flag &= ~FC_LBIT; } spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); } lpfc_destroy_vport_work_array(phba, vports); /* Clean up any firmware default rpi's */ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); mb->vport = vport; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, phba->mbox_mem_pool); } 
} /* Setup myDID for link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { phba->pport->fc_myDID = 0; mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_config_link(phba, mb); mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mb->vport = vport; if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, phba->mbox_mem_pool); } } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); spin_unlock_irq(shost->host_lock); } return 0; } static void lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* On Linkup its safe to clean up the ndlp * from Fabric connections. */ if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device is * marked for PLOGI. 
*/ lpfc_unreg_rpi(vport, ndlp); } } } static void lpfc_linkup_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; if ((vport->load_flag & FC_UNLOADING) != 0) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Up: top:x%x speed:x%x flg:x%x", phba->fc_topology, phba->fc_linkspeed, phba->link_flag); /* If NPIV is not enabled, only bring the physical port up */ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (vport != phba->pport)) return; fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); vport->fc_flag |= FC_NDISC_ACTIVE; vport->fc_ns_retry = 0; spin_unlock_irq(shost->host_lock); if (vport->fc_flag & FC_LBIT) lpfc_linkup_cleanup_nodes(vport); } static int lpfc_linkup(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; phba->link_state = LPFC_LINK_UP; /* Unblock fabric iocbs if they are blocked */ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); del_timer_sync(&phba->fabric_block_timer); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (phba->sli_rev < LPFC_SLI_REV4)) lpfc_issue_clear_la(phba, phba->pport); return 0; } /* * This routine handles processing a CLEAR_LA mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. 
*/ static void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_sli *psli = &phba->sli; MAILBOX_t *mb = &pmb->u.mb; uint32_t control; /* Since we don't do discovery right now, turn these off here */ psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0320 CLEAR_LA mbxStatus error x%x hba " "state x%x\n", mb->mbxStatus, vport->port_state); phba->link_state = LPFC_HBA_ERROR; goto out; } if (vport->port_type == LPFC_PHYSICAL_PORT) phba->link_state = LPFC_HBA_READY; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return; out: /* Device Discovery completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0225 Device Discovery completes\n"); mempool_free(pmb, phba->mbox_mem_pool); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_ABORT_DISCOVERY; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); /* turn on Link Attention interrupts */ spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); return; } static void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; if (pmb->u.mb.mbxStatus) goto out; mempool_free(pmb, phba->mbox_mem_pool); if (phba->fc_topology == TOPOLOGY_LOOP && 
vport->fc_flag & FC_PUBLIC_LOOP && !(vport->fc_flag & FC_LBIT)) { /* Need to wait for FAN - use discovery timer * for timeout. port_state is identically * LPFC_LOCAL_CFG_LINK while waiting for FAN */ lpfc_set_disctmo(vport); return; } /* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ if (vport->port_state != LPFC_FLOGI) { lpfc_initial_flogi(vport); } return; out: lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", pmb->u.mb.mbxStatus, vport->port_state); mempool_free(pmb, phba->mbox_mem_pool); lpfc_linkdown(phba); lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0200 CONFIG_LINK bad hba state x%x\n", vport->port_state); lpfc_issue_clear_la(phba, vport); return; } static void lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; unsigned long flags; if (mboxq->u.mb.mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "2017 REG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); mempool_free(mboxq, phba->mbox_mem_pool); return; } /* Start FCoE discovery by sending a FLOGI. */ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); /* Set the FCFI registered flag */ spin_lock_irqsave(&phba->hbalock, flags); phba->fcf.fcf_flag |= FCF_REGISTERED; spin_unlock_irqrestore(&phba->hbalock, flags); /* If there is a pending FCoE event, restart FCF table scan. */ if (lpfc_check_pending_fcoe_event(phba, 1)) { mempool_free(mboxq, phba->mbox_mem_pool); return; } if (vport->port_state != LPFC_FLOGI) { spin_lock_irqsave(&phba->hbalock, flags); phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); phba->hba_flag &= ~FCF_DISC_INPROGRESS; spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_initial_flogi(vport); } mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_fab_name_match - Check if the fcf fabric name match. * @fab_name: pointer to fabric name. 
* @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's fabric name with provided * fabric name. If the fabric name are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) { if ((fab_name[0] == bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && (fab_name[1] == bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && (fab_name[2] == bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && (fab_name[3] == bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && (fab_name[4] == bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && (fab_name[5] == bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && (fab_name[6] == bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && (fab_name[7] == bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) return 1; else return 0; } /** * lpfc_sw_name_match - Check if the fcf switch name match. * @fab_name: pointer to fabric name. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's switch name with provided * switch name. If the switch name are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) { if ((sw_name[0] == bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) && (sw_name[1] == bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) && (sw_name[2] == bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) && (sw_name[3] == bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) && (sw_name[4] == bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) && (sw_name[5] == bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) && (sw_name[6] == bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) && (sw_name[7] == bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))) return 1; else return 0; } /** * lpfc_mac_addr_match - Check if the fcf mac address match. 
* @phba: pointer to lpfc hba data structure. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's mac address with HBA's * FCF mac address. If the mac addresses are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) { if ((phba->fcf.mac_addr[0] == bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && (phba->fcf.mac_addr[1] == bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && (phba->fcf.mac_addr[2] == bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && (phba->fcf.mac_addr[3] == bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && (phba->fcf.mac_addr[4] == bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && (phba->fcf.mac_addr[5] == bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) return 1; else return 0; } /** * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. * @phba: pointer to lpfc hba data structure. * @new_fcf_record: pointer to fcf record. * * This routine copies the FCF information from the FCF * record to lpfc_hba data structure. 
**/ static void lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) { phba->fcf.fabric_name[0] = bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); phba->fcf.fabric_name[1] = bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); phba->fcf.fabric_name[2] = bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); phba->fcf.fabric_name[3] = bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); phba->fcf.fabric_name[4] = bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); phba->fcf.fabric_name[5] = bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); phba->fcf.fabric_name[6] = bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); phba->fcf.fabric_name[7] = bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); phba->fcf.mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); phba->fcf.mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); phba->fcf.mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); phba->fcf.mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); phba->fcf.mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); phba->fcf.mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); phba->fcf.priority = new_fcf_record->fip_priority; phba->fcf.switch_name[0] = bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); phba->fcf.switch_name[1] = bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); phba->fcf.switch_name[2] = bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); phba->fcf.switch_name[3] = bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); phba->fcf.switch_name[4] = bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); phba->fcf.switch_name[5] = bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); phba->fcf.switch_name[6] = bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); phba->fcf.switch_name[7] = bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); } /** * lpfc_register_fcf - Register the FCF with hba. 
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA. If the FCF is already registered it only starts
 * discovery (FLOGI) instead of re-registering.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	/* fcf_flag and hba_flag are protected by hbalock */
	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		/* Kick off FLOGI unless one is already in progress */
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Issue a non-blocking REG_FCFI mailbox command */
	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq)
		return;

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	/* On issue failure the mailbox was not queued; free it here */
	if (rc == MBX_NOT_FINISHED)
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);

	return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 *
 * This routine compares the fcf record with connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else return zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record need to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function.
If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	/* FIP disabled: accept any available FCF, no boot preference */
	if (!phba->cfg_enable_fip) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entry, driver connect to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		*vlan_id = 0xFFFF;
		return 1;
	}

	/* Walk the connection table; first matching entry wins */
	list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		/* Fabric name filter, when the entry specifies one */
		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					     new_fcf_record))
			continue;
		/* Switch name filter, when the entry specifies one */
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					    new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* Report the VLAN tag the matching entry demands, if any */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table need to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1 else return 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	LPFC_MBOXQ_t *mbox;
	int rc;
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	/* Events occurred: invalidate the FCF selected so far */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	/* Restart the table scan from the beginning if link is still up */
	if (phba->link_state >= LPFC_LINK_UP)
		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		/* Issue a non-blocking UNREG_FCFI for the stale FCF */
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox) {
			lpfc_printf_log(phba, KERN_ERR,
				LOG_DISCOVERY|LOG_MBOX,
				"2610 UNREG_FCFI mbox allocation failed\n");
			return 1;
		}
		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
		mbox->vport = phba->pport;
		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2611 UNREG_FCFI issue mbox failed\n");
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}

	return 1;
}

/**
 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks start
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use a FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;
	int rc;
	uint32_t boot_flag, addr_mode;
	uint32_t next_fcf_index;
	unsigned long flags;
	uint16_t vlan_id;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, 0)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		goto out;
	}
	virt_addr = mboxq->sge_array->addr[0];

	/* The SLI4 config header leads the response payload */
	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/*
	 * The FCF Record was read and there is no reason for the driver
	 * to maintain the FCF record data or memory. Instead, just need
	 * to book keeping the FCFIs can be used.
	 */
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2521 READ_FCF_RECORD mailbox failed "
				"with status x%x add_status x%x, mbx\n",
				shdr_status, shdr_add_status);
		goto out;
	}
	/* Interpreting the returned information of FCF records */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	/* byte-swap in place (src == dst) via the pcimem copy helper */
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);

	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      sizeof(struct fcf_record));
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);

	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
				      &boot_flag, &addr_mode,
					&vlan_id);
	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry.
	 */
	if (!rc)
		goto read_next_fcf;
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		/* Only accept a record identical to the in-use FCF */
		if (lpfc_fab_name_match(phba->fcf.fabric_name,
					new_fcf_record) &&
		    lpfc_sw_name_match(phba->fcf.switch_name,
					new_fcf_record) &&
		    lpfc_mac_addr_match(phba, new_fcf_record)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto read_next_fcf;
	}
	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the current FCF record does not have boot flag
		 * set and new fcf record has boot flag set, use the
		 * new fcf record.
		 */
		if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
			/* Use this FCF record */
			lpfc_copy_fcf_record(phba, new_fcf_record);
			phba->fcf.addr_mode = addr_mode;
			phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
			if (vlan_id != 0xFFFF) {
				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
				phba->fcf.vlan_id = vlan_id;
			}
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		/*
		 * If the current FCF record has boot flag set and the
		 * new FCF record does not have boot flag, read the next
		 * FCF record.
		 */
		if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		/*
		 * If there is a record with lower priority value for
		 * the current FCF, use that record.
		 */
		if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
			&& (new_fcf_record->fip_priority <
				phba->fcf.priority)) {
			/* Use this FCF record */
			lpfc_copy_fcf_record(phba, new_fcf_record);
			phba->fcf.addr_mode = addr_mode;
			if (vlan_id != 0xFFFF) {
				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
				phba->fcf.vlan_id = vlan_id;
			}
			spin_unlock_irqrestore(&phba->hbalock, flags);
			goto read_next_fcf;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto read_next_fcf;
	}
	/*
	 * This is the first available FCF record, use this
	 * record.
	 */
	lpfc_copy_fcf_record(phba, new_fcf_record);
	phba->fcf.addr_mode = addr_mode;
	if (boot_flag)
		phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
	phba->fcf.fcf_flag |= FCF_AVAILABLE;
	if (vlan_id != 0xFFFF) {
		phba->fcf.fcf_flag |= FCF_VALID_VLAN;
		phba->fcf.vlan_id = vlan_id;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	goto read_next_fcf;

read_next_fcf:
	/* Continue the scan, or register the chosen FCF if table ended */
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
		lpfc_register_fcf(phba);
	else
		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	/* VPI now initialized; the vport no longer needs INIT_VPI */
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	return;
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* The physical port FLOGIs; only NPIV ports FDISC */
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			/* NPIV is not supported in loop topology */
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				/* INIT_VPI first; its completion handler
				 * (lpfc_init_vpi_cmpl) issues the FDISC.
				 */
				mboxq = mempool_alloc(phba->mbox_mem_pool,
						      GFP_KERNEL);
				if (!mboxq) {
					lpfc_printf_vlog(vports[i], KERN_ERR,
						LOG_MBOX,
						"2607 Failed to allocate "
						"init_vpi mailbox\n");
					continue;
				}
				lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
				mboxq->vport = vports[i];
				mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
				rc = lpfc_sli_issue_mbox(phba, mboxq,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					lpfc_printf_vlog(vports[i], KERN_ERR,
						LOG_MBOX,
						"2608 Failed to issue "
						"init_vpi mailbox\n");
					mempool_free(mboxq,
						     phba->mbox_mem_pool);
				}
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/*
 * Completion handler for the REG_VFI mailbox command. On success it
 * marks the vport VFI-registered and kicks off per-vport FDISCs and
 * nameserver PLOGI; on failure it falls back to loop discovery or
 * fails the vport. Always frees the mailbox and its DMA buffer.
 */
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* Mark the vport has registered with its VFI */
	vport->vfi_state |= LPFC_VFI_REGISTERED;

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

/*
 * Completion handler for READ_SPARAM. Copies the service parameters
 * returned by the HBA into the vport (applying any soft WWNN/WWPN
 * overrides) and, for the physical port, into the HBA itself.
 */
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	/* Apply user-configured soft WWNs, if set */
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		/* NOTE(review): sizeof(phba->wwnn) used for the wwpn copy;
		 * presumably wwnn and wwpn are the same size — confirm.
		 */
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * Process a link-up attention event: record link speed and topology,
 * set up the local DID (loop vs. fabric), then issue READ_SPARAM and
 * either CONFIG_LINK (FC) or an FCF table scan (FCoE).
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	/* Allocated early; failure is tolerated (checked before use) */
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	case LA_10GHZ_LINK:
		phba->fc_linkspeed = LA_10GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		if (phba->cfg_enable_npiv)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				/* Dump the ALPA map 16 bytes at a time */
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			/* Not queued: free the DMA buffer and mailbox here */
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (phba->cfg_enable_fip == 0) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memmory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
			/* A scan is already running; nothing more to do */
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		rc = lpfc_sli4_read_fcf_record(phba,
					LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			goto out;
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
		"0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
		vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

/*
 * Re-enable link attention processing and, for SLI-3 and earlier,
 * turn the link-attention interrupt enable bit back on in HC.
 */
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

/* Take the link down and re-arm link attention handling. */
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

	/* The DMA buffer carries the 128-byte ALPA map */
	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		/* On a repeated/newer link-up, drop the old link first */
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	/* Track Menlo maintenance mode state */
	if (la->mm)
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;

	if (la->attType == AT_LINK_UP && (!la->mm)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0],
					la->mm, la->fa,
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (la->attType == AT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				la->mm, la->fa);
		}
		lpfc_mbx_issue_link_down(phba);
	}
	/* Link up while in Menlo maintenance mode */
	if (la->mm && la->attType == AT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1310 Menlo Maint Mode Link up Event x%x rcvd "
			"Data: x%x x%x x%x\n",
			la->eventTag, phba->fc_eventTag,
			phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (la->fa) {
		if (la->mm)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n", la->fa);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}

/*
 * Completion handler for UNREG_VPI. Logs selected benign failure
 * statuses, marks the unregistration complete and, during unload,
 * drops the shost reference taken by lpfc_vport_delete().
 */
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}

/*
 * Issue a non-blocking UNREG_VPI mailbox command for @vport.
 * Returns 0 when the command was queued, non-zero on failure
 * (allocation failure returns 1; issue failure returns the SLI rc).
 */
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}

/*
 * Completion handler for REG_VPI. On known failure statuses the vport
 * is failed and its fabric flags cleared; on success NPR nodes get
 * PLOGIs and the vport transitions to READY.
 */
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0542 lpfc_create_static_vport failed to"
			" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0543 lpfc_create_static_vport failed to"
			" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	/* The config region may be returned in several DUMP chunks;
	 * accumulate them into vport_info until it is full or the HBA
	 * returns no more data.
	 */
	do {
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: payload arrives in a DMA buffer */
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *) pmb->context2;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			/* SLI3: payload is embedded in the mailbox itself */
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	/* Validate the signature and revision of the region header */
	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);

		/* Empty slots have zero WWNs */
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	/* On MBX_TIMEOUT the mailbox is still owned by the firmware and
	 * must not be freed here.
	 */
	if (rc != MBX_TIMEOUT) {
		if (pmb->context2) {
			mp = (struct lpfc_dmabuf *) pmb->context2;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
*/
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	/* Detach the buffers from the mailbox so the default mbox
	 * completion path cannot free them a second time.
	 */
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);

			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);

		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	/* REG_LOGIN succeeded: record the RPI the adapter assigned and
	 * promote the fabric node to UNMAPPED.
	 */
	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* Kick off NPIV FDISCs and the NameServer PLOGI/SCR chain */
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
		/* NOTE: this label sits inside the error branch on purpose —
		 * the "cannot issue GID_FT" path at the bottom jumps back
		 * here to reuse the full error cleanup (puts the callback's
		 * node reference, frees the mailbox resources, and restarts
		 * or fails discovery).
		 */
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	pmb->context1 = NULL;

	/* Record the RPI for the NameServer login and mark it usable */
	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
*/
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * Register @ndlp with the FC transport as a remote port and cache the
 * transport's scsi_target_id back into the node when it is presentable.
 */
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	/* Take a node reference on behalf of the rport's dd_data back-link */
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

/* Remove @ndlp's rport from the FC transport (the rport/node linkage and
 * the dd_data node reference are deliberately left in place — see the
 * comment in lpfc_register_remote_port).
 */
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}

/* Adjust the per-vport node-state counter for @state by @count (+1/-1),
 * under host_lock.
 */
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}

/* Perform the side effects of a node state transition: fix up nlp_type/
 * nlp_flag bits and register/unregister the node with the FC transport.
 */
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already.
 If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
		(vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

/* Render a node state number into @buffer for logging; always returns
 * @buffer (never the static table entry itself).
 */
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

/* Transition @ndlp to @state: update the per-state counters, make sure the
 * node is on the vport's fc_nodes list, then run the transition side
 * effects in lpfc_nlp_state_cleanup().
 */
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	/* Leaving NPR cancels any pending retry-delay timer */
	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		/* Node was off-list: add it; no old-state counter to undo */
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

/* Put @ndlp on the vport's fc_nodes list if it is not already there */
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

/* Remove @ndlp from the vport's fc_nodes list, undoing its state counter
 * and running the UNUSED-state cleanup side effects.
 */
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/* Like lpfc_dequeue_node() but leaves the node on the list — only the
 * counter and state-cleanup side effects are applied.
 */
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when node object need to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through it's reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is due
 * to the life-span of the @ndlp might go beyond the existence of @vport as
 * the final release of ndlp is determined by its reference count. And, the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	/* Fresh reference count; mark the node active in the usage map */
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}

/* Reactivate an inactive ndlp: wipe everything except its list linkage,
 * re-initialize it with its original DID, and optionally move it to
 * @state. Returns the node, or NULL if it is being freed or is already
 * active.
 */
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer
	 * (nlp_listp is the first member, so skip sizeof(struct list_head))
	 */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	/* (Re)arm the timer; mod_timer restarts it if already pending */
	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
			/* fall through - also try the remoteID match */
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
			/* fall through - also try the context1 match */
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {
		/* extra ring carries no per-node traffic: never a match */
	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type &
NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}

		/* FCP iocbs match on the node's registered RPI */
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {
		/* no per-node traffic on this ring either */
	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t rpi, i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so dequeue and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Best-effort UNREG_LOGIN; if the mailbox cannot be issued
		 * we still invalidate the local RPI state below.
		 */
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}

/* Issue an UNREG_LOGIN for every RPI on @vport (rpi 0xffff = all) and
 * wait for it to complete.
 */
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		/* On timeout the SLI layer still owns the mailbox */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}

/* Issue an UNREG_DID for the firmware's default RPIs on @vport
 * (did 0xffffffff = all) and wait for it to complete.
 */
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		/* On timeout the SLI layer still owns the mailbox */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
*/
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		/* Node is on its way to being freed: take it off the list */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		/* Otherwise just deactivate it in place */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			/* In-flight mailbox: just detach the node and let the
			 * default completion handler clean up.
			 */
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			}
			else {
				/* Register then immediately unregister: the
				 * completion handler releases the default rpi.
				 */
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage.  So if we
	 * do, make sure we don't leaving any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}

/* Return 1 if @ndlp's DID should be treated as matching @did, taking the
 * FC private-loop "area/domain == 0" addressing rules into account.
 */
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	/* Caller must hold host_lock — see lpfc_findnode_did() */
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}

/* Locked wrapper around __lpfc_findnode_did() */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/* Find-or-create a node for @did and mark it for discovery
 * (NLP_NPR_2B_DISC). Returns NULL when the DID should not be discovered
 * (not in the RSCN payload, already PLOGI'd, or already being discovered).
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		/* In RSCN mode only DIDs named by the RSCN are of interest */
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* Node exists but is inactive: bring it back to life */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		/* alpa_map[0] holds the count of valid entries */
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			/* Skip ourselves and the reserved ALPA 0 */
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}

/* Issue a CLEAR_LA mailbox command to resume link attention processing */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already send
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
		(phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			/* Could not issue: flush discovery, release the
			 * rings and put the HBA in error state.
			 */
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	/* Arm the discovery rescue timer before issuing anything */
	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				/* Nothing outstanding: discovery is done */
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
*/ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->context1 != ndlp) { continue; } icmd = &iocb->iocb; if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { list_move_tail(&iocb->list, &completions); pring->txq_cnt--; } } /* Next check the txcmplq */ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->context1 != ndlp) { continue; } icmd = &iocb->iocb; if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR || icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) { lpfc_sli_issue_abort_iotag(phba, pring, iocb); } } spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } static void lpfc_disc_flush_list(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_hba *phba = vport->phba; if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { lpfc_free_tx(phba, ndlp); } } } } void lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) { lpfc_els_flush_rscn(vport); lpfc_els_flush_cmd(vport); lpfc_disc_flush_list(vport); } /*****************************************************************************/ /* * NAME: lpfc_disc_timeout * * FUNCTION: Fibre Channel driver discovery timeout routine. 
* * EXECUTION ENVIRONMENT: interrupt only * * CALLED FROM: * Timer function * * RETURNS: * none */ /*****************************************************************************/ void lpfc_disc_timeout(unsigned long ptr) { struct lpfc_vport *vport = (struct lpfc_vport *) ptr; struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long flags = 0; if (unlikely(!phba)) return; spin_lock_irqsave(&vport->work_port_lock, flags); tmo_posted = vport->work_port_events & WORKER_DISC_TMO; if (!tmo_posted) vport->work_port_events |= WORKER_DISC_TMO; spin_unlock_irqrestore(&vport->work_port_lock, flags); if (!tmo_posted) lpfc_worker_wake_up(phba); return; } static void lpfc_disc_timeout_handler(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct lpfc_nodelist *ndlp, *next_ndlp; LPFC_MBOXQ_t *initlinkmbox; int rc, clrlaerr = 0; if (!(vport->fc_flag & FC_DISC_TMO)) return; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_DISC_TMO; spin_unlock_irq(shost->host_lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "disc timeout: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for * FAN */ /* FAN timeout */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0221 FAN timeout\n"); /* Start discovery by sending FLOGI, clean up old rpis */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
*/ lpfc_unreg_rpi(vport, ndlp); } } if (vport->port_state != LPFC_FLOGI) { lpfc_initial_flogi(vport); return; } break; case LPFC_FDISC: case LPFC_FLOGI: /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ /* Initial FLOGI timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0222 Initial %s timeout\n", vport->vpi ? "FDISC" : "FLOGI"); /* Assume no Fabric and go on with discovery. * Check for outstanding ELS FLOGI to abort. */ /* FLOGI failed, so just use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); break; case LPFC_FABRIC_CFG_LINK: /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0223 Timeout while waiting for " "NameServer login\n"); /* Next look for NameServer ndlp */ ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp)) lpfc_els_abort(phba, ndlp); /* ReStart discovery */ goto restart_disc; case LPFC_NS_QRY: /* Check for wait for NameServer Rsp timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0224 NameServer Query timeout " "Data: x%x x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY); if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { /* Try it one more time */ vport->fc_ns_retry++; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, 0); if (rc == 0) break; } vport->fc_ns_retry = 0; restart_disc: /* * Discovery is over. * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. 
*/ if (phba->sli_rev < LPFC_SLI_REV4) { if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_reg_vpi(phba, vport); else { /* NPIV Not enabled */ lpfc_issue_clear_la(phba, vport); vport->port_state = LPFC_VPORT_READY; } } /* Setup and issue mailbox INITIALIZE LINK command */ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0206 Device Discovery " "completion error\n"); phba->link_state = LPFC_HBA_ERROR; break; } lpfc_linkdown(phba); lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc == MBX_NOT_FINISHED) mempool_free(initlinkmbox, phba->mbox_mem_pool); break; case LPFC_DISC_AUTH: /* Node Authentication timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0227 Node Authentication timeout\n"); lpfc_disc_flush_list(vport); /* * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. 
*/ if (phba->sli_rev < LPFC_SLI_REV4) { if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_reg_vpi(phba, vport); else { /* NPIV Not enabled */ lpfc_issue_clear_la(phba, vport); vport->port_state = LPFC_VPORT_READY; } } break; case LPFC_VPORT_READY: if (vport->fc_flag & FC_RSCN_MODE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0231 RSCN timeout Data: x%x " "x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY); /* Cleanup any outstanding ELS commands */ lpfc_els_flush_cmd(vport); lpfc_els_flush_rscn(vport); lpfc_disc_flush_list(vport); } break; default: lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0273 Unexpected discovery timeout, " "vport State x%x\n", vport->port_state); break; } switch (phba->link_state) { case LPFC_CLEAR_LA: /* CLEAR LA timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0228 CLEAR LA timeout\n"); clrlaerr = 1; break; case LPFC_LINK_UP: lpfc_issue_clear_la(phba, vport); /* Drop thru */ case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0230 Unexpected timeout, hba link " "state x%x\n", phba->link_state); clrlaerr = 1; break; case LPFC_HBA_READY: break; } if (clrlaerr) { lpfc_disc_flush_list(vport); psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; vport->port_state = LPFC_VPORT_READY; } return; } /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. 
*/ void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); /* * Start issuing Fabric-Device Management Interface (FDMI) command to * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if * fdmi-on=2 (supporting RPA/hostnmae) */ if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); /* decrement the node reference count held for this callback * function. */ lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); return; } static int lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) { uint16_t *rpi = param; return ndlp->nlp_rpi == *rpi; } static int lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) { return memcmp(&ndlp->nlp_portname, param, sizeof(ndlp->nlp_portname)) == 0; } static struct lpfc_nodelist * __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (filter(ndlp, param)) return ndlp; } return NULL; } /* * This routine looks up the ndlp lists for the given RPI. If rpi found it * returns the node list element pointer else return NULL. */ struct lpfc_nodelist * __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); } /* * This routine looks up the ndlp lists for the given WWPN. If WWPN found it * returns the node element list pointer else return NULL. 
*/ struct lpfc_nodelist * lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; spin_lock_irq(shost->host_lock); ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); spin_unlock_irq(shost->host_lock); return ndlp; } void lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t did) { memset(ndlp, 0, sizeof (struct lpfc_nodelist)); lpfc_initialize_node(vport, ndlp, did); INIT_LIST_HEAD(&ndlp->nlp_listp); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", ndlp->nlp_DID, 0, 0); return; } /* This routine releases all resources associated with a specifc NPort's ndlp * and mempool_free's the nodelist. */ static void lpfc_nlp_release(struct kref *kref) { struct lpfc_hba *phba; unsigned long flags; struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, kref); lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node release: did:x%x flg:x%x type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "0279 lpfc_nlp_release: ndlp:x%p " "usgmap:x%x refcnt:%d\n", (void *)ndlp, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount)); /* remove ndlp from action. */ lpfc_nlp_remove(ndlp->vport, ndlp); /* clear the ndlp active flag for all release cases */ phba = ndlp->phba; spin_lock_irqsave(&phba->ndlp_lock, flags); NLP_CLR_NODE_ACT(ndlp); spin_unlock_irqrestore(&phba->ndlp_lock, flags); /* free ndlp memory for final ndlp release */ if (NLP_CHK_FREE_REQ(ndlp)) { kfree(ndlp->lat_data); mempool_free(ndlp, ndlp->phba->nlp_mem_pool); } } /* This routine bumps the reference count for a ndlp structure to ensure * that one discovery thread won't free a ndlp while another discovery thread * is using it. 
*/ struct lpfc_nodelist * lpfc_nlp_get(struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba; unsigned long flags; if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node get: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount)); /* The check of ndlp usage to prevent incrementing the * ndlp reference count that is in the process of being * released. */ phba = ndlp->phba; spin_lock_irqsave(&phba->ndlp_lock, flags); if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, "0276 lpfc_nlp_get: ndlp:x%p " "usgmap:x%x refcnt:%d\n", (void *)ndlp, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount)); return NULL; } else kref_get(&ndlp->kref); spin_unlock_irqrestore(&phba->ndlp_lock, flags); } return ndlp; } /* This routine decrements the reference count for a ndlp structure. If the * count goes to 0, this indicates the the associated nodelist should be * freed. Returning 1 indicates the ndlp resource has been released; on the * other hand, returning 0 indicates the ndlp resource has not been released * yet. */ int lpfc_nlp_put(struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba; unsigned long flags; if (!ndlp) return 1; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node put: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount)); phba = ndlp->phba; spin_lock_irqsave(&phba->ndlp_lock, flags); /* Check the ndlp memory free acknowledge flag to avoid the * possible race condition that kref_put got invoked again * after previous one has done ndlp memory free. 
*/ if (NLP_CHK_FREE_ACK(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, "0274 lpfc_nlp_put: ndlp:x%p " "usgmap:x%x refcnt:%d\n", (void *)ndlp, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount)); return 1; } /* Check the ndlp inactivate log flag to avoid the possible * race condition that kref_put got invoked again after ndlp * is already in inactivating state. */ if (NLP_CHK_IACT_REQ(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, "0275 lpfc_nlp_put: ndlp:x%p " "usgmap:x%x refcnt:%d\n", (void *)ndlp, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount)); return 1; } /* For last put, mark the ndlp usage flags to make sure no * other kref_get and kref_put on the same ndlp shall get * in between the process when the final kref_put has been * invoked on this ndlp. */ if (atomic_read(&ndlp->kref.refcount) == 1) { /* Indicate ndlp is put to inactive state. */ NLP_SET_IACT_REQ(ndlp); /* Acknowledge ndlp memory free has been seen. */ if (NLP_CHK_FREE_REQ(ndlp)) NLP_SET_FREE_ACK(ndlp); } spin_unlock_irqrestore(&phba->ndlp_lock, flags); /* Note, the kref_put returns 1 when decrementing a reference * count that was 1, it invokes the release callback function, * but it still left the reference count as 1 (not actually * performs the last decrementation). Otherwise, it actually * decrements the reference count and returns 0. */ return kref_put(&ndlp->kref, lpfc_nlp_release); } /* This routine free's the specified nodelist if it is not in use * by any other discovery thread. This routine returns 1 if the * ndlp has been freed. A return value of 0 indicates the ndlp is * not yet been released. 
*/ int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node not used: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount)); if (atomic_read(&ndlp->kref.refcount) == 1) if (lpfc_nlp_put(ndlp)) return 1; return 0; } /** * lpfc_fcf_inuse - Check if FCF can be unregistered. * @phba: Pointer to hba context object. * * This function iterate through all FC nodes associated * will all vports to check if there is any node with * fc_rports associated with it. If there is an fc_rport * associated with the node, then the node is either in * discovered state or its devloss_timer is pending. */ static int lpfc_fcf_inuse(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i, ret = 0; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; vports = lpfc_create_vport_work_array(phba); for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { ret = 1; spin_unlock_irq(shost->host_lock); goto out; } } spin_unlock_irq(shost->host_lock); } out: lpfc_destroy_vport_work_array(phba, vports); return ret; } /** * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. * @phba: Pointer to hba context object. * @mboxq: Pointer to mailbox object. * * This function frees memory associated with the mailbox command. */ static void lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; if (mboxq->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2555 UNREG_VFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); } mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. 
* @phba: Pointer to hba context object. * @mboxq: Pointer to mailbox object. * * This function frees memory associated with the mailbox command. */ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; if (mboxq->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2550 UNREG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); } mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. * @phba: Pointer to hba context object. * * This function check if there are any connected remote port for the FCF and * if all the devices are disconnected, this function unregister FCFI. * This function also tries to use another FCF for discovery. */ void lpfc_unregister_unused_fcf(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mbox; int rc; struct lpfc_vport **vports; int i; spin_lock_irq(&phba->hbalock); /* * If HBA is not running in FIP mode or * If HBA does not support FCoE or * If FCF is not registered. * do nothing. 
*/ if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || !(phba->fcf.fcf_flag & FCF_REGISTERED) || (phba->cfg_enable_fip == 0)) { spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); if (lpfc_fcf_inuse(phba)) return; /* Unregister VPIs */ vports = lpfc_create_vport_work_array(phba); if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_mbx_unreg_vpi(vports[i]); vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; } lpfc_destroy_vport_work_array(phba, vports); /* Unregister VFI */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2556 UNREG_VFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return; } lpfc_unreg_vfi(mbox, phba->pport->vfi); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2557 UNREG_VFI issue mbox failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); mempool_free(mbox, phba->mbox_mem_pool); return; } /* Unregister FCF */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2551 UNREG_FCFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return; } lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2552 UNREG_FCFI issue mbox failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); mempool_free(mbox, phba->mbox_mem_pool); return; } spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | 
FCF_VALID_VLAN); spin_unlock_irq(&phba->hbalock); /* * If driver is not unloading, check if there is any other * FCF record that can be used for discovery. */ if ((phba->pport->load_flag & FC_UNLOADING) || (phba->link_state < LPFC_LINK_UP)) return; rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2553 lpfc_unregister_unused_fcf failed to read FCF" " record HBA state x%x\n", phba->pport->port_state); } /** * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. * @phba: Pointer to hba context object. * @buff: Buffer containing the FCF connection table as in the config * region. * This function create driver data structure for the FCF connection * record table read from config region 23. */ static void lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, uint8_t *buff) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; struct lpfc_fcf_conn_hdr *conn_hdr; struct lpfc_fcf_conn_rec *conn_rec; uint32_t record_count; int i; /* Free the current connect table */ list_for_each_entry_safe(conn_entry, next_conn_entry, &phba->fcf_conn_rec_list, list) kfree(conn_entry); conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; record_count = conn_hdr->length * sizeof(uint32_t)/ sizeof(struct lpfc_fcf_conn_rec); conn_rec = (struct lpfc_fcf_conn_rec *) (buff + sizeof(struct lpfc_fcf_conn_hdr)); for (i = 0; i < record_count; i++) { if (!(conn_rec[i].flags & FCFCNCT_VALID)) continue; conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), GFP_KERNEL); if (!conn_entry) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2566 Failed to allocate connection" " table entry\n"); return; } memcpy(&conn_entry->conn_rec, &conn_rec[i], sizeof(struct lpfc_fcf_conn_rec)); conn_entry->conn_rec.vlan_tag = le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; conn_entry->conn_rec.flags = le16_to_cpu(conn_entry->conn_rec.flags); list_add_tail(&conn_entry->list, &phba->fcf_conn_rec_list); } } /** * lpfc_read_fcoe_param - Read FCoe 
parameters from conf region.. * @phba: Pointer to hba context object. * @buff: Buffer containing the FCoE parameter data structure. * * This function update driver data structure with config * parameters read from config region 23. */ static void lpfc_read_fcoe_param(struct lpfc_hba *phba, uint8_t *buff) { struct lpfc_fip_param_hdr *fcoe_param_hdr; struct lpfc_fcoe_params *fcoe_param; fcoe_param_hdr = (struct lpfc_fip_param_hdr *) buff; fcoe_param = (struct lpfc_fcoe_params *) (buff + sizeof(struct lpfc_fip_param_hdr)); if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) return; if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == FIPP_MODE_ON) phba->cfg_enable_fip = 1; if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == FIPP_MODE_OFF) phba->cfg_enable_fip = 0; if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { phba->valid_vlan = 1; phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF; } phba->fc_map[0] = fcoe_param->fc_map[0]; phba->fc_map[1] = fcoe_param->fc_map[1]; phba->fc_map[2] = fcoe_param->fc_map[2]; return; } /** * lpfc_get_rec_conf23 - Get a record type in config region data. * @buff: Buffer containing config region 23 data. * @size: Size of the data buffer. * @rec_type: Record type to be searched. * * This function searches config region data to find the begining * of the record specified by record_type. If record found, this * function return pointer to the record else return NULL. */ static uint8_t * lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) { uint32_t offset = 0, rec_length; if ((buff[0] == LPFC_REGION23_LAST_REC) || (size < sizeof(uint32_t))) return NULL; rec_length = buff[offset + 1]; /* * One TLV record has one word header and number of data words * specified in the rec_length field of the record header. 
*/ while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) <= size) { if (buff[offset] == rec_type) return &buff[offset]; if (buff[offset] == LPFC_REGION23_LAST_REC) return NULL; offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); rec_length = buff[offset + 1]; } return NULL; } /** * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. * @phba: Pointer to lpfc_hba data structure. * @buff: Buffer containing config region 23 data. * @size: Size of the data buffer. * * This fuction parse the FCoE config parameters in config region 23 and * populate driver data structure with the parameters. */ void lpfc_parse_fcoe_conf(struct lpfc_hba *phba, uint8_t *buff, uint32_t size) { uint32_t offset = 0, rec_length; uint8_t *rec_ptr; /* * If data size is less than 2 words signature and version cannot be * verified. */ if (size < 2*sizeof(uint32_t)) return; /* Check the region signature first */ if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2567 Config region 23 has bad signature\n"); return; } offset += 4; /* Check the data structure version */ if (buff[offset] != LPFC_REGION23_VERSION) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2568 Config region 23 has bad version\n"); return; } offset += 4; rec_length = buff[offset + 1]; /* Read FCoE param record */ rec_ptr = lpfc_get_rec_conf23(&buff[offset], size - offset, FCOE_PARAM_TYPE); if (rec_ptr) lpfc_read_fcoe_param(phba, rec_ptr); /* Read FCF connection table */ rec_ptr = lpfc_get_rec_conf23(&buff[offset], size - offset, FCOE_CONN_TBL_TYPE); if (rec_ptr) lpfc_read_fcf_conn_tbl(phba, rec_ptr); }
gpl-2.0
coldnew/linux
drivers/usb/storage/realtek_cr.c
721
27303
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/cdrom.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/usb_usual.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Realtek USB Card Reader"); MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.03"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; module_param(ss_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_en, "enable selective suspend"); static int ss_delay = 50; module_param(ss_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_delay, "seconds to delay before entering selective suspend"); enum RTS51X_STAT { RTS51X_STAT_INIT, RTS51X_STAT_IDLE, RTS51X_STAT_RUN, RTS51X_STAT_SS }; #define POLLING_INTERVAL 50 #define rts51x_set_stat(chip, stat) \ 
((chip)->state = (enum RTS51X_STAT)(stat)) #define rts51x_get_stat(chip) ((chip)->state) #define SET_LUN_READY(chip, lun) ((chip)->lun_ready |= ((u8)1 << (lun))) #define CLR_LUN_READY(chip, lun) ((chip)->lun_ready &= ~((u8)1 << (lun))) #define TST_LUN_READY(chip, lun) ((chip)->lun_ready & ((u8)1 << (lun))) #endif struct rts51x_status { u16 vid; u16 pid; u8 cur_lun; u8 card_type; u8 total_lun; u16 fw_ver; u8 phy_exist; u8 multi_flag; u8 multi_card; u8 log_exist; union { u8 detailed_type1; u8 detailed_type2; } detailed_type; u8 function[2]; }; struct rts51x_chip { u16 vendor_id; u16 product_id; char max_lun; struct rts51x_status *status; int status_len; u32 flag; struct us_data *us; #ifdef CONFIG_REALTEK_AUTOPM struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; u8 lun_ready; enum RTS51X_STAT state; int support_auto_delink; #endif /* used to back up the protocol chosen in probe1 phase */ proto_cmnd proto_handler_backup; }; /* flag definition */ #define FLIDX_AUTO_DELINK 0x01 #define SCSI_LUN(srb) ((srb)->device->lun) /* Bit Operation */ #define SET_BIT(data, idx) ((data) |= 1 << (idx)) #define CLR_BIT(data, idx) ((data) &= ~(1 << (idx))) #define CHK_BIT(data, idx) ((data) & (1 << (idx))) #define SET_AUTO_DELINK(chip) ((chip)->flag |= FLIDX_AUTO_DELINK) #define CLR_AUTO_DELINK(chip) ((chip)->flag &= ~FLIDX_AUTO_DELINK) #define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK) #define RTS51X_GET_VID(chip) ((chip)->vendor_id) #define RTS51X_GET_PID(chip) ((chip)->product_id) #define VENDOR_ID(chip) ((chip)->status[0].vid) #define PRODUCT_ID(chip) ((chip)->status[0].pid) #define FW_VERSION(chip) ((chip)->status[0].fw_ver) #define STATUS_LEN(chip) ((chip)->status_len) #define STATUS_SUCCESS 0 #define STATUS_FAIL 1 /* Check card reader function */ #define SUPPORT_DETAILED_TYPE1(chip) \ CHK_BIT((chip)->status[0].function[0], 1) #define SUPPORT_OT(chip) \ CHK_BIT((chip)->status[0].function[0], 2) #define SUPPORT_OC(chip) \ 
CHK_BIT((chip)->status[0].function[0], 3) #define SUPPORT_AUTO_DELINK(chip) \ CHK_BIT((chip)->status[0].function[0], 4) #define SUPPORT_SDIO(chip) \ CHK_BIT((chip)->status[0].function[1], 0) #define SUPPORT_DETAILED_TYPE2(chip) \ CHK_BIT((chip)->status[0].function[1], 1) #define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid)) #define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver)) #define CHECK_ID(chip, pid, fw_ver) \ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver))) static int init_realtek_cr(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ {\ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) \ } static const struct usb_device_id realtek_cr_ids[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, realtek_cr_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev realtek_cr_unusual_dev_list[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; #undef UNUSUAL_DEV static int rts51x_bulk_transport(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf; int result; unsigned int residue; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir 
== DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) { usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n", le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN); return USB_STOR_TRANSPORT_ERROR; } residue = bcs->Residue; if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; /* try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue) residue = residue < buf_len ? 
residue : buf_len; if (act_len) *act_len = buf_len - residue; /* based on the status code, we report good or bad */ switch (bcs->Status) { case US_BULK_STAT_OK: /* command good -- note that data could be short */ return USB_STOR_TRANSPORT_GOOD; case US_BULK_STAT_FAIL: /* command failed */ return USB_STOR_TRANSPORT_FAILED; case US_BULK_STAT_PHASE: /* phase error -- note that a transport reset will be * invoked by the invoke_transport() function */ return USB_STOR_TRANSPORT_ERROR; } /* we should never get here, but if we do, we're in trouble */ return USB_STOR_TRANSPORT_ERROR; } static int rts51x_bulk_transport_special(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen, 250); return result; } /* Determine what the maximum LUN supported is */ static int rts51x_get_max_lun(struct us_data *us) { int result; /* issue the command */ us->iobuf[0] = 0; result = usb_stor_control_msg(us, us->recv_ctrl_pipe, US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, us->ifnum, us->iobuf, 1, 10 * HZ); usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); /* if we have a successful request, return the result */ if (result > 0) return us->iobuf[0]; return 0; } static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0D; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_FROM_DEVICE, NULL); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(data, buf, len); kfree(buf); return 0; } static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmemdup(data, len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) return -EIO; return 0; } static int 
rts51x_read_status(struct us_data *us,
		   u8 lun, u8 *status, int len, int *actlen)
{
	int retval;
	u8 cmnd[12] = { 0 };
	u8 *buf;

	/*
	 * Bounce buffer: callers (see rts51x_check_status) pass stack
	 * arrays, so the transfer goes through a kmalloc'd buffer.
	 */
	buf = kmalloc(len, GFP_NOIO);
	if (buf == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	usb_stor_dbg(us, "lun = %d\n", lun);

	/* Vendor-specific command 0xF0/0x09: read the device status block */
	cmnd[0] = 0xF0;
	cmnd[1] = 0x09;

	retval = rts51x_bulk_transport(us, lun, cmnd, 12, buf, len,
				       DMA_FROM_DEVICE, actlen);
	if (retval != USB_STOR_TRANSPORT_GOOD) {
		kfree(buf);
		return -EIO;
	}

	memcpy(status, buf, len);

	kfree(buf);

	return 0;
}

/*
 * Read the status block for @lun and unpack it into chip->status[lun].
 * Bytes 13-15 (detailed type / function flags) are only valid when the
 * device returned a full 16-byte status (chip->status_len == 16).
 */
static int rts51x_check_status(struct us_data *us, u8 lun)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 buf[16];

	retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len));
	if (retval != STATUS_SUCCESS)
		return -EIO;

	usb_stor_dbg(us, "chip->status_len = %d\n", chip->status_len);

	chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1];
	chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3];
	chip->status[lun].cur_lun = buf[4];
	chip->status[lun].card_type = buf[5];
	chip->status[lun].total_lun = buf[6];
	chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8];
	chip->status[lun].phy_exist = buf[9];
	chip->status[lun].multi_flag = buf[10];
	chip->status[lun].multi_card = buf[11];
	chip->status[lun].log_exist = buf[12];
	if (chip->status_len == 16) {
		chip->status[lun].detailed_type.detailed_type1 = buf[13];
		chip->status[lun].function[0] = buf[14];
		chip->status[lun].function[1] = buf[15];
	}

	return 0;
}

/*
 * Set bit 2 of chip register 0xFE77, then read it back to verify the
 * bit stuck; -EIO if any access fails or the read-back is clear.
 */
static int enable_oscillator(struct us_data *us)
{
	int retval;
	u8 value;

	retval = rts51x_read_mem(us, 0xFE77, &value, 1);
	if (retval < 0)
		return -EIO;

	value |= 0x04;
	retval = rts51x_write_mem(us, 0xFE77, &value, 1);
	if (retval < 0)
		return -EIO;

	retval = rts51x_read_mem(us, 0xFE77, &value, 1);
	if (retval < 0)
		return -EIO;

	if (!(value & 0x04))
		return -EIO;

	return 0;
}

/*
 * Write @len bytes to chip register 0xFE47 via the "special" bulk
 * transport, which uses a short (250 ms) CSW timeout instead of the
 * normal wait.  NOTE(review): presumably because this write can make
 * the device delink and never return a CSW — confirm against the
 * device spec.
 */
static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
{
	int retval;
	u8 cmnd[12] = {0};
	u8 *buf;

	usb_stor_dbg(us, "addr = 0xfe47, len = %d\n", len);

	buf = kmemdup(data, len, GFP_NOIO);
	if (!buf)
return USB_STOR_TRANSPORT_ERROR; cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = 0xfe; cmnd[3] = 0x47; cmnd[4] = (u8)(len >> 8); cmnd[5] = (u8)len; retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) { return -EIO; } return 0; } static int do_config_autodelink(struct us_data *us, int enable, int force) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (enable) { if (force) value |= 0x03; else value |= 0x01; } else { value &= ~0x03; } usb_stor_dbg(us, "set 0xfe47 to 0x%x\n", value); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; return 0; } static int config_autodelink_after_power_on(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (auto_delink_en) { CLR_BIT(value, 0); CLR_BIT(value, 1); SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); SET_BIT(value, 7); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; retval = enable_oscillator(us); if (retval == 0) (void)do_config_autodelink(us, 1, 0); } else { /* Autodelink controlled by firmware */ SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { CLR_BIT(value, 0); CLR_BIT(value, 7); } /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0xFF; retval = rts51x_write_mem(us, 0xFE79, &value, 1); if (retval < 0) return -EIO; value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } 
} return 0; } #ifdef CONFIG_PM static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; if (auto_delink_en) { retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 0); if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; } else { if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880) || CHECK_ID(chip, 0x0138, 0x3882)) { retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { SET_BIT(value, 0); SET_BIT(value, 7); } if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; } if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } static void fw5895_init(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 val; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, &val, 1); if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) { val = 0x1F; retval = rts51x_write_mem(us, 0xFD70, &val, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail, OR (val & 0x1F) != 0\n"); } 
} } #endif #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[13]; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, buf, 1); if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) { /* SD Exist and SD WP */ retval = rts51x_read_mem(us, 0xD04E, buf, 1); if (retval == STATUS_SUCCESS) { buf[0] |= 0x04; retval = rts51x_write_mem(us, 0xFD70, buf, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail\n"); } } else { usb_stor_dbg(us, "Read memory fail, OR (buf[0]&0x24)!=0x24\n"); } } } static void rts51x_modi_suspend_timer(struct rts51x_chip *chip) { struct us_data *us = chip->us; usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay); mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires); } static void rts51x_suspend_timer_fn(unsigned long data) { struct rts51x_chip *chip = (struct rts51x_chip *)data; struct us_data *us = chip->us; switch (rts51x_get_stat(chip)) { case RTS51X_STAT_INIT: case RTS51X_STAT_RUN: rts51x_modi_suspend_timer(chip); break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n", atomic_read(&us->pusb_intf->pm_usage_cnt), atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n", atomic_read(&us->pusb_intf->pm_usage_cnt), 
				     atomic_read(&us->pusb_intf->dev.power.usage_count));
		}
		break;
	default:
		usb_stor_dbg(us, "Unknown state !!!\n");
		break;
	}
}

/*
 * Returns 0 for the two "polling" commands (TEST_UNIT_READY,
 * ALLOW_MEDIUM_REMOVAL) that the autosuspend logic can answer without
 * waking the device; 1 for commands that do real work.
 */
static inline int working_scsi(struct scsi_cmnd *srb)
{
	if ((srb->cmnd[0] == TEST_UNIT_READY) ||
	    (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) {
		return 0;
	}

	return 1;
}

/*
 * Replacement proto_handler installed by realtek_cr_autosuspend_setup().
 * Real commands resume the interface and are forwarded to the original
 * handler (proto_handler_backup); while the device is selectively
 * suspended (RTS51X_STAT_SS), TEST_UNIT_READY and ALLOW_MEDIUM_REMOVAL
 * are answered locally with synthesized sense data so the device is not
 * woken up by periodic polling.
 */
static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	static int card_first_show = 1;
	/* fixed-format sense: NOT READY, ASC 0x3A (medium not present) */
	static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10,
		0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
	};
	/* fixed-format sense: ILLEGAL REQUEST, ASC 0x24 (invalid field in CDB) */
	static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10,
		0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
	};
	int ret;

	if (working_scsi(srb)) {
		usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
			     atomic_read(&us->pusb_intf->pm_usage_cnt),
			     atomic_read(&us->pusb_intf->dev.power.usage_count));
		if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
			ret = usb_autopm_get_interface(us->pusb_intf);
			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
		}
		if (rts51x_get_stat(chip) != RTS51X_STAT_RUN)
			rts51x_set_stat(chip, RTS51X_STAT_RUN);
		chip->proto_handler_backup(srb, us);
	} else {
		if (rts51x_get_stat(chip) == RTS51X_STAT_SS) {
			usb_stor_dbg(us, "NOT working scsi\n");
			if ((srb->cmnd[0] == TEST_UNIT_READY) &&
			    (chip->pwr_state == US_SUSPEND)) {
				if (TST_LUN_READY(chip, srb->device->lun)) {
					srb->result = SAM_STAT_GOOD;
				} else {
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       media_not_present,
					       US_SENSE_SIZE);
				}
				usb_stor_dbg(us, "TEST_UNIT_READY\n");
				goto out;
			}
			if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
				int prevent = srb->cmnd[4] & 0x1;
				if (prevent) {
					/* cannot honor PREVENT while suspended */
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       invalid_cmd_field,
					       US_SENSE_SIZE);
				} else {
					srb->result = SAM_STAT_GOOD;
				}
				usb_stor_dbg(us, "ALLOW_MEDIUM_REMOVAL\n");
				goto out;
			}
		} else {
			usb_stor_dbg(us, "NOT working scsi, not SS\n");
			chip->proto_handler_backup(srb, us);
			/* Check whether card is plugged in */
			if (srb->cmnd[0] == TEST_UNIT_READY) {
				if (srb->result == SAM_STAT_GOOD) {
					SET_LUN_READY(chip, srb->device->lun);
					if (card_first_show) {
						card_first_show = 0;
						fw5895_set_mmc_wp(us);
					}
				} else {
					CLR_LUN_READY(chip, srb->device->lun);
					card_first_show = 1;
				}
			}
			if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE)
				rts51x_set_stat(chip, RTS51X_STAT_IDLE);
		}
	}
out:
	usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip));
	/* re-arm the suspend timer while commands are flowing */
	if (rts51x_get_stat(chip) == RTS51X_STAT_RUN)
		rts51x_modi_suspend_timer(chip);
}

/*
 * One-time autosuspend setup: read the LUN-0 status block into
 * chip->status[0], hook rts51x_invoke_transport in front of the
 * original protocol handler, arm the suspend timer, and enable USB
 * autosuspend with a zero autosuspend delay.
 * Returns 0 on success, -EIO if the status read fails.
 */
static int realtek_cr_autosuspend_setup(struct us_data *us)
{
	struct rts51x_chip *chip;
	struct rts51x_status *status = NULL;
	u8 buf[16];
	int retval;

	chip = (struct rts51x_chip *)us->extra;
	chip->support_auto_delink = 0;
	chip->pwr_state = US_RESUME;
	chip->lun_ready = 0;
	rts51x_set_stat(chip, RTS51X_STAT_INIT);

	retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len));
	if (retval != STATUS_SUCCESS) {
		usb_stor_dbg(us, "Read status fail\n");
		return -EIO;
	}
	status = chip->status;
	status->vid = ((u16) buf[0] << 8) | buf[1];
	status->pid = ((u16) buf[2] << 8) | buf[3];
	status->cur_lun = buf[4];
	status->card_type = buf[5];
	status->total_lun = buf[6];
	status->fw_ver = ((u16) buf[7] << 8) | buf[8];
	status->phy_exist = buf[9];
	status->multi_flag = buf[10];
	status->multi_card = buf[11];
	status->log_exist = buf[12];
	if (chip->status_len == 16) {
		status->detailed_type.detailed_type1 = buf[13];
		status->function[0] = buf[14];
		status->function[1] = buf[15];
	}

	/* back up the proto_handler in us->extra */
	chip = (struct rts51x_chip *)(us->extra);
	chip->proto_handler_backup = us->proto_handler;
	/* Set the autosuspend_delay to 0 */
	pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0);
	/* override us->proto_handler set in get_protocol() */
	us->proto_handler = rts51x_invoke_transport;

	chip->timer_expires = 0;
	setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
		    (unsigned long)chip);
	fw5895_init(us);

	/* enable autosuspend function of the usb device */
	usb_enable_autosuspend(us->pusb_dev);

	return 0;
}
#endif

/*
 * us->extra destructor: stop the autosuspend timer (when autosuspend is
 * enabled) and free the per-LUN status array allocated in
 * init_realtek_cr().  Safe to call with a NULL extra pointer.
 */
static void realtek_cr_destructor(void *extra)
{
	struct rts51x_chip *chip = extra;

	if (!chip)
		return;

#ifdef CONFIG_REALTEK_AUTOPM
	if (ss_en) {
		del_timer(&chip->rts51x_suspend_timer);
		chip->timer_expires = 0;
	}
#endif
	kfree(chip->status);
}

#ifdef CONFIG_PM
/* System suspend: configure auto-delink for power-down under dev_mutex */
static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* wait until no command is running */
	mutex_lock(&us->dev_mutex);

	config_autodelink_before_power_down(us);

	mutex_unlock(&us->dev_mutex);

	return 0;
}

/* System resume: re-apply the fw5895 workaround and auto-delink config */
static int realtek_cr_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	fw5895_init(us);
	config_autodelink_after_power_on(us);

	return 0;
}
#else
#define realtek_cr_suspend NULL
#define realtek_cr_resume NULL
#endif

/*
 * Per-device init hook (referenced from the unusual_realtek.h table).
 * Allocates the chip state in us->extra, reads the status block of
 * every LUN, and decides (by firmware version / reported capability)
 * whether auto-delink is supported.  On failure us->extra is freed and
 * cleared, so the registered destructor becomes a no-op.
 */
static int init_realtek_cr(struct us_data *us)
{
	struct rts51x_chip *chip;
	int size, i, retval;

	chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	us->extra = chip;
	us->extra_destructor = realtek_cr_destructor;
	us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
	chip->us = us;

	usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);

	size = (chip->max_lun + 1) * sizeof(struct rts51x_status);
	chip->status = kzalloc(size, GFP_KERNEL);
	if (!chip->status)
		goto INIT_FAIL;

	for (i = 0; i <= (int)(chip->max_lun); i++) {
		retval = rts51x_check_status(us, (u8) i);
		if (retval < 0)
			goto INIT_FAIL;
	}

	/* these firmware revisions auto-delink regardless of the flag bits */
	if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
	    CHECK_FW_VER(chip, 0x5901))
		SET_AUTO_DELINK(chip);
	if (STATUS_LEN(chip) == 16) {
		if (SUPPORT_AUTO_DELINK(chip))
			SET_AUTO_DELINK(chip);
	}
#ifdef CONFIG_REALTEK_AUTOPM
	if (ss_en)
		realtek_cr_autosuspend_setup(us);
#endif

	usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);

	(void)config_autodelink_after_power_on(us);

	return 0;

INIT_FAIL:
	if (us->extra) {
		kfree(chip->status);
		kfree(us->extra);
		/* clear so realtek_cr_destructor() won't touch freed memory */
		us->extra = NULL;
	}

	return -EIO;
}

static int realtek_cr_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	dev_dbg(&intf->dev, "Probe Realtek Card Reader!\n");

	/* the id-table index selects the matching unusual_dev entry */
	result = usb_stor_probe1(&us, intf, id,
				 (id - realtek_cr_ids) +
				 realtek_cr_unusual_dev_list);
	if (result)
		return result;

	result = usb_stor_probe2(us);

	return result;
}

static struct usb_driver realtek_cr_driver = {
	.name = "ums-realtek",
	.probe = realtek_cr_probe,
	.disconnect = usb_stor_disconnect,
	/* .suspend = usb_stor_suspend, */
	/* .resume = usb_stor_resume, */
	.reset_resume = usb_stor_reset_resume,
	.suspend = realtek_cr_suspend,
	.resume = realtek_cr_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = realtek_cr_ids,
	.soft_unbind = 1,
	.supports_autosuspend = 1,
	.no_dynamic_id = 1,
};

module_usb_driver(realtek_cr_driver);
gpl-2.0
defconoi/nexusplayer
arch/arm/mach-omap2/cm3xxx.c
2257
21481
/* * OMAP3xxx CM module functions * * Copyright (C) 2009 Nokia Corporation * Copyright (C) 2008-2010, 2012 Texas Instruments, Inc. * Paul Walmsley * Rajendra Nayak <rnayak@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "prm2xxx_3xxx.h" #include "cm.h" #include "cm3xxx.h" #include "cm-regbits-34xx.h" #include "clockdomain.h" static const u8 omap3xxx_cm_idlest_offs[] = { CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3 }; /* * */ static void _write_clktrctrl(u8 c, s16 module, u32 mask) { u32 v; v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL); v &= ~mask; v |= c << __ffs(mask); omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL); } bool omap3xxx_cm_is_clkdm_in_hwsup(s16 module, u32 mask) { u32 v; v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL); v &= mask; v >>= __ffs(mask); return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 
		1 : 0;
}

/* Put the clockdomain into hardware-supervised idle */
void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

/* Disable hardware-supervised idle for the clockdomain */
void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}

/* Force the clockdomain to sleep (software-forced sleep mode) */
void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, module, mask);
}

/* Force the clockdomain awake (software-forced wakeup mode) */
void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, module, mask);
}

/*
 *
 */

/**
 * omap3xxx_cm_wait_module_ready - wait for a module to leave idle or standby
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is clocked.  Return 0 upon
 * success or -EBUSY if the module doesn't enable in time.
 */
int omap3xxx_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
{
	int ena = 0, i = 0;
	u8 cm_idlest_reg;
	u32 mask;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(omap3xxx_cm_idlest_offs)))
		return -EINVAL;

	cm_idlest_reg = omap3xxx_cm_idlest_offs[idlest_id - 1];

	mask = 1 << idlest_shift;
	ena = 0;

	/* poll until the IDLEST bit clears or the timeout expires */
	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
			    mask) == ena), MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}

/**
 * omap3xxx_cm_split_idlest_reg - split CM_IDLEST reg addr into its components
 * @idlest_reg: CM_IDLEST* virtual address
 * @prcm_inst: pointer to an s16 to return the PRCM instance offset
 * @idlest_reg_id: pointer to a u8 to return the CM_IDLESTx register ID
 *
 * XXX This function is only needed until absolute register addresses are
 * removed from the OMAP struct clk records.
 */
int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
				u8 *idlest_reg_id)
{
	unsigned long offs;
	u8 idlest_offs;
	int i;

	/* reject addresses outside the CM register range */
	if (idlest_reg < (cm_base + OMAP3430_IVA2_MOD) ||
	    idlest_reg > (cm_base + 0x1ffff))
		return -EINVAL;

	/* low byte selects the CM_IDLESTx register ID */
	idlest_offs = (unsigned long)idlest_reg & 0xff;
	for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) {
		if (idlest_offs == omap3xxx_cm_idlest_offs[i]) {
			*idlest_reg_id = i + 1;
			break;
		}
	}

	if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs))
		return -EINVAL;

	/* bits 15:8 of the offset identify the PRCM instance */
	offs = idlest_reg - cm_base;
	offs &= 0xff00;
	*prcm_inst = offs;

	return 0;
}

/* Clockdomain low-level operations */

static int omap3xxx_clkdm_add_sleepdep(struct clockdomain *clkdm1,
				       struct clockdomain *clkdm2)
{
	omap2_cm_set_mod_reg_bits((1 << clkdm2->dep_bit),
				  clkdm1->pwrdm.ptr->prcm_offs,
				  OMAP3430_CM_SLEEPDEP);
	return 0;
}

static int omap3xxx_clkdm_del_sleepdep(struct clockdomain *clkdm1,
				       struct clockdomain *clkdm2)
{
	omap2_cm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
				    clkdm1->pwrdm.ptr->prcm_offs,
				    OMAP3430_CM_SLEEPDEP);
	return 0;
}

static int omap3xxx_clkdm_read_sleepdep(struct clockdomain *clkdm1,
					struct clockdomain *clkdm2)
{
	return omap2_cm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
					    OMAP3430_CM_SLEEPDEP,
					    (1 << clkdm2->dep_bit));
}

/* Clear every sleep dependency sourced from @clkdm in one register write */
static int omap3xxx_clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
{
	struct clkdm_dep *cd;
	u32 mask = 0;

	for (cd = clkdm->sleepdep_srcs; cd && cd->clkdm_name; cd++) {
		if (!cd->clkdm)
			continue; /* only happens if data is erroneous */

		mask |= 1 << cd->clkdm->dep_bit;
		cd->sleepdep_usecount = 0;
	}
	omap2_cm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
				    OMAP3430_CM_SLEEPDEP);
	return 0;
}

static int omap3xxx_clkdm_sleep(struct clockdomain *clkdm)
{
	omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
				      clkdm->clktrctrl_mask);
	return 0;
}

static int omap3xxx_clkdm_wakeup(struct clockdomain *clkdm)
{
	omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
				       clkdm->clktrctrl_mask);
	return 0;
}

static void
omap3xxx_clkdm_allow_idle(struct clockdomain *clkdm) { if (clkdm->usecount > 0) clkdm_add_autodeps(clkdm); omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); } static void omap3xxx_clkdm_deny_idle(struct clockdomain *clkdm) { omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); if (clkdm->usecount > 0) clkdm_del_autodeps(clkdm); } static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm) { bool hwsup = false; if (!clkdm->clktrctrl_mask) return 0; /* * The CLKDM_MISSING_IDLE_REPORTING flag documentation has * more details on the unpleasant problem this is working * around */ if ((clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) && (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)) { omap3xxx_clkdm_wakeup(clkdm); return 0; } hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); if (hwsup) { /* Disable HW transitions when we are changing deps */ omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); clkdm_add_autodeps(clkdm); omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); } else { if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) omap3xxx_clkdm_wakeup(clkdm); } return 0; } static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm) { bool hwsup = false; if (!clkdm->clktrctrl_mask) return 0; /* * The CLKDM_MISSING_IDLE_REPORTING flag documentation has * more details on the unpleasant problem this is working * around */ if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING && !(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) { omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); return 0; } hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); if (hwsup) { /* Disable HW transitions when we are changing deps */ omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); clkdm_del_autodeps(clkdm); 
omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, clkdm->clktrctrl_mask); } else { if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP) omap3xxx_clkdm_sleep(clkdm); } return 0; } struct clkdm_ops omap3_clkdm_operations = { .clkdm_add_wkdep = omap2_clkdm_add_wkdep, .clkdm_del_wkdep = omap2_clkdm_del_wkdep, .clkdm_read_wkdep = omap2_clkdm_read_wkdep, .clkdm_clear_all_wkdeps = omap2_clkdm_clear_all_wkdeps, .clkdm_add_sleepdep = omap3xxx_clkdm_add_sleepdep, .clkdm_del_sleepdep = omap3xxx_clkdm_del_sleepdep, .clkdm_read_sleepdep = omap3xxx_clkdm_read_sleepdep, .clkdm_clear_all_sleepdeps = omap3xxx_clkdm_clear_all_sleepdeps, .clkdm_sleep = omap3xxx_clkdm_sleep, .clkdm_wakeup = omap3xxx_clkdm_wakeup, .clkdm_allow_idle = omap3xxx_clkdm_allow_idle, .clkdm_deny_idle = omap3xxx_clkdm_deny_idle, .clkdm_clk_enable = omap3xxx_clkdm_clk_enable, .clkdm_clk_disable = omap3xxx_clkdm_clk_disable, }; /* * Context save/restore code - OMAP3 only */ struct omap3_cm_regs { u32 iva2_cm_clksel1; u32 iva2_cm_clksel2; u32 cm_sysconfig; u32 sgx_cm_clksel; u32 dss_cm_clksel; u32 cam_cm_clksel; u32 per_cm_clksel; u32 emu_cm_clksel; u32 emu_cm_clkstctrl; u32 pll_cm_autoidle; u32 pll_cm_autoidle2; u32 pll_cm_clksel4; u32 pll_cm_clksel5; u32 pll_cm_clken2; u32 cm_polctrl; u32 iva2_cm_fclken; u32 iva2_cm_clken_pll; u32 core_cm_fclken1; u32 core_cm_fclken3; u32 sgx_cm_fclken; u32 wkup_cm_fclken; u32 dss_cm_fclken; u32 cam_cm_fclken; u32 per_cm_fclken; u32 usbhost_cm_fclken; u32 core_cm_iclken1; u32 core_cm_iclken2; u32 core_cm_iclken3; u32 sgx_cm_iclken; u32 wkup_cm_iclken; u32 dss_cm_iclken; u32 cam_cm_iclken; u32 per_cm_iclken; u32 usbhost_cm_iclken; u32 iva2_cm_autoidle2; u32 mpu_cm_autoidle2; u32 iva2_cm_clkstctrl; u32 mpu_cm_clkstctrl; u32 core_cm_clkstctrl; u32 sgx_cm_clkstctrl; u32 dss_cm_clkstctrl; u32 cam_cm_clkstctrl; u32 per_cm_clkstctrl; u32 neon_cm_clkstctrl; u32 usbhost_cm_clkstctrl; u32 core_cm_autoidle1; u32 core_cm_autoidle2; u32 core_cm_autoidle3; u32 wkup_cm_autoidle; u32 
dss_cm_autoidle; u32 cam_cm_autoidle; u32 per_cm_autoidle; u32 usbhost_cm_autoidle; u32 sgx_cm_sleepdep; u32 dss_cm_sleepdep; u32 cam_cm_sleepdep; u32 per_cm_sleepdep; u32 usbhost_cm_sleepdep; u32 cm_clkout_ctrl; }; static struct omap3_cm_regs cm_context; void omap3_cm_save_context(void) { cm_context.iva2_cm_clksel1 = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1); cm_context.iva2_cm_clksel2 = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2); cm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG); cm_context.sgx_cm_clksel = omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL); cm_context.dss_cm_clksel = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL); cm_context.cam_cm_clksel = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL); cm_context.per_cm_clksel = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL); cm_context.emu_cm_clksel = omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1); cm_context.emu_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); /* * As per erratum i671, ROM code does not respect the PER DPLL * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. * In this case, even though this register has been saved in * scratchpad contents, we need to restore AUTO_PERIPH_DPLL * by ourselves. So, we need to save it anyway. 
*/ cm_context.pll_cm_autoidle = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); cm_context.pll_cm_autoidle2 = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2); cm_context.pll_cm_clksel4 = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4); cm_context.pll_cm_clksel5 = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5); cm_context.pll_cm_clken2 = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2); cm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL); cm_context.iva2_cm_fclken = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN); cm_context.iva2_cm_clken_pll = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL); cm_context.core_cm_fclken1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); cm_context.core_cm_fclken3 = omap2_cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3); cm_context.sgx_cm_fclken = omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN); cm_context.wkup_cm_fclken = omap2_cm_read_mod_reg(WKUP_MOD, CM_FCLKEN); cm_context.dss_cm_fclken = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN); cm_context.cam_cm_fclken = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN); cm_context.per_cm_fclken = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN); cm_context.usbhost_cm_fclken = omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN); cm_context.core_cm_iclken1 = omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN1); cm_context.core_cm_iclken2 = omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN2); cm_context.core_cm_iclken3 = omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN3); cm_context.sgx_cm_iclken = omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN); cm_context.wkup_cm_iclken = omap2_cm_read_mod_reg(WKUP_MOD, CM_ICLKEN); cm_context.dss_cm_iclken = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN); cm_context.cam_cm_iclken = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN); cm_context.per_cm_iclken = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN); cm_context.usbhost_cm_iclken = omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN); 
cm_context.iva2_cm_autoidle2 = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2); cm_context.mpu_cm_autoidle2 = omap2_cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2); cm_context.iva2_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); cm_context.mpu_cm_clkstctrl = omap2_cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL); cm_context.core_cm_clkstctrl = omap2_cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL); cm_context.sgx_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL); cm_context.dss_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL); cm_context.cam_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL); cm_context.per_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL); cm_context.neon_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL); cm_context.usbhost_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL); cm_context.core_cm_autoidle1 = omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1); cm_context.core_cm_autoidle2 = omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2); cm_context.core_cm_autoidle3 = omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3); cm_context.wkup_cm_autoidle = omap2_cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE); cm_context.dss_cm_autoidle = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE); cm_context.cam_cm_autoidle = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE); cm_context.per_cm_autoidle = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); cm_context.usbhost_cm_autoidle = omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE); cm_context.sgx_cm_sleepdep = omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP); cm_context.dss_cm_sleepdep = omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP); cm_context.cam_cm_sleepdep = omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP); cm_context.per_cm_sleepdep = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, 
OMAP3430_CM_SLEEPDEP);
	cm_context.usbhost_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
				      OMAP3430_CM_SLEEPDEP);
	/* CLKOUT2 configuration lives in the CCR module, not a clock domain */
	cm_context.cm_clkout_ctrl = omap2_cm_read_mod_reg(OMAP3430_CCR_MOD,
						OMAP3_CM_CLKOUT_CTRL_OFFSET);
}

/*
 * omap3_cm_restore_context - restore the CM register state captured by
 * the save function above, after the CM register file has lost context
 * (e.g. an off-mode power transition).
 *
 * Writes back every field of cm_context, field-for-field with the save
 * side.  Note the ordering: clock-select and EMU registers are restored
 * first, then the DPLL autoidle/clken registers, then the per-module
 * FCLKEN/ICLKEN enables, and finally clockdomain control, autoidle and
 * sleep-dependency registers.  Do not reorder without checking the
 * OMAP3 PRCM documentation.
 */
void omap3_cm_restore_context(void)
{
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
			       CM_CLKSEL2);
	/* CM_SYSCONFIG is a raw register, not behind the mod_reg helpers */
	__raw_writel(cm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clksel, OMAP3430_PER_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
	 * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves.
	 */
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL4);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel5, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL5);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clken2, PLL_MOD,
			       OMAP3430ES2_CM_CLKEN2);
	__raw_writel(cm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
	/* functional clock enables */
	omap2_cm_write_mod_reg(cm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
			       OMAP3430_CM_CLKEN_PLL);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken1, CORE_MOD,
			       CM_FCLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken3, CORE_MOD,
			       OMAP3430ES2_CM_FCLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_fclken, OMAP3430_PER_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_fclken,
			       OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
	/* interface clock enables */
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken1, CORE_MOD,
			       CM_ICLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken2, CORE_MOD,
			       CM_ICLKEN2);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken3, CORE_MOD,
			       CM_ICLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_iclken, OMAP3430_PER_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_iclken,
			       OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
	/* autoidle and clockdomain state control */
	omap2_cm_write_mod_reg(cm_context.iva2_cm_autoidle2, OMAP3430_IVA2_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_autoidle2, MPU_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_clkstctrl, MPU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_clkstctrl, CORE_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clkstctrl, OMAP3430ES2_SGX_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_clkstctrl,
			       OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle1, CORE_MOD,
			       CM_AUTOIDLE1);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle2, CORE_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle3, CORE_MOD,
			       CM_AUTOIDLE3);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_autoidle, WKUP_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.per_cm_autoidle, OMAP3430_PER_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_autoidle,
			       OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
	/* sleep dependencies */
	omap2_cm_write_mod_reg(cm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_sleepdep,
			       OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
			       OMAP3_CM_CLKOUT_CTRL_OFFSET);
}

/*
 * Low-level CM operations handed to the common OMAP2+ CM code via
 * cm_register(); both callbacks are defined elsewhere in this file.
 */
static struct cm_ll_data omap3xxx_cm_ll_data = {
	.split_idlest_reg	= &omap3xxx_cm_split_idlest_reg,
	.wait_module_ready	= &omap3xxx_cm_wait_module_ready,
};

/*
 * omap3xxx_cm_init - register the OMAP3 CM low-level operations.
 * Returns 0 on non-OMAP34xx SoCs (nothing to do), otherwise the
 * return value of cm_register().
 */
int __init omap3xxx_cm_init(void)
{
	if (!cpu_is_omap34xx())
		return 0;
	return cm_register(&omap3xxx_cm_ll_data);
}

/* Unregister the CM callbacks on module exit; mismatch should not occur. */
static void __exit omap3xxx_cm_exit(void)
{
	if (!cpu_is_omap34xx())
		return;	/* Should never happen */
	WARN(cm_unregister(&omap3xxx_cm_ll_data),
	     "%s: cm_ll_data function pointer mismatch\n", __func__);
}
__exitcall(omap3xxx_cm_exit);
gpl-2.0
vartanjean/yuandao-n90-window-dual-core-2
drivers/media/video/gspca/zc3xx.c
2769
260234
/* * Z-Star/Vimicro zc301/zc302p/vc30x library * * Copyright (C) 2009-2011 Jean-Francois Moine <http://moinejf.free.fr> * Copyright (C) 2004 2005 2006 Michel Xhaard mxhaard@magic.fr * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define MODULE_NAME "zc3xx" #include <linux/input.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, " "Serge A. Suchkov <Serge.A.S@tochka.ru>"); MODULE_DESCRIPTION("GSPCA ZC03xx/VC3xx USB Camera Driver"); MODULE_LICENSE("GPL"); static int force_sensor = -1; #define QUANT_VAL 1 /* quantization table */ #include "zc3xx-reg.h" /* controls */ enum e_ctrl { BRIGHTNESS, CONTRAST, EXPOSURE, GAMMA, AUTOGAIN, LIGHTFREQ, SHARPNESS, NCTRLS /* number of controls */ }; #define AUTOGAIN_DEF 1 /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; u8 quality; /* image quality */ #define QUALITY_MIN 50 #define QUALITY_MAX 80 #define QUALITY_DEF 70 u8 bridge; u8 sensor; /* Type of image sensor chip */ u16 chip_revision; u8 jpeg_hdr[JPEG_HDR_SZ]; }; enum bridges { BRIDGE_ZC301, BRIDGE_ZC303, }; enum sensors { SENSOR_ADCM2700, SENSOR_CS2102, SENSOR_CS2102K, SENSOR_GC0303, SENSOR_GC0305, SENSOR_HDCS2020, SENSOR_HV7131B, SENSOR_HV7131R, SENSOR_ICM105A, SENSOR_MC501CB, SENSOR_MT9V111_1, /* (mi360soc) zc301 */ SENSOR_MT9V111_3, /* (mi360soc) zc303 */ SENSOR_OV7620, /* OV7648 - same values */ SENSOR_OV7630C, SENSOR_PAS106, SENSOR_PAS202B, SENSOR_PB0330, SENSOR_PO2030, SENSOR_TAS5130C, SENSOR_MAX }; /* V4L2 controls supported by the driver */ static void setcontrast(struct gspca_dev *gspca_dev); static void setexposure(struct gspca_dev *gspca_dev); static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); static void setlightfreq(struct gspca_dev *gspca_dev); static void setsharpness(struct gspca_dev *gspca_dev); static const struct ctrl sd_ctrls[NCTRLS] = { [BRIGHTNESS] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = 128, }, .set_control = setcontrast }, [CONTRAST] = { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 1, .default_value = 128, }, .set_control = setcontrast }, [EXPOSURE] = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 0x30d, .maximum = 0x493e, .step = 1, .default_value = 0x927 }, .set_control = setexposure }, [GAMMA] = { { .id = V4L2_CID_GAMMA, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = 1, .maximum = 6, .step = 1, .default_value = 4, }, .set_control = setcontrast }, [AUTOGAIN] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Gain", .minimum = 0, .maximum = 1, .step = 1, .default_value 
= AUTOGAIN_DEF,
		.flags = V4L2_CTRL_FLAG_UPDATE
	    },
	    .set = sd_setautogain
	},
[LIGHTFREQ] = {
	    {
		.id = V4L2_CID_POWER_LINE_FREQUENCY,
		.type = V4L2_CTRL_TYPE_MENU,
		.name = "Light frequency filter",
		.minimum = 0,
		.maximum = 2,	/* 0: 0, 1: 50Hz, 2:60Hz */
		.step = 1,
		.default_value = 0,
	    },
	    .set_control = setlightfreq
	},
[SHARPNESS] = {
	    {
		.id = V4L2_CID_SHARPNESS,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Sharpness",
		.minimum = 0,
		.maximum = 3,
		.step = 1,
		.default_value = 2,
	    },
	    .set_control = setsharpness
	},
};

/* JPEG frame formats advertised to V4L2 (.priv selects the scale mode) */
static const struct v4l2_pix_format vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 1},
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 0},
};

/* slightly cropped modes for sensors that cannot deliver full VGA */
static const struct v4l2_pix_format broken_vga_mode[] = {
	{320, 232, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		/* NOTE(review): 4/8 factor here vs 3/8 elsewhere - as in
		 * the original source, presumably intentional headroom */
		.sizeimage = 320 * 232 * 4 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 1},
	{640, 472, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 472 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 0},
};

static const struct v4l2_pix_format sif_mode[] = {
	{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 176,
		.sizeimage = 176 * 144 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 1},
	{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 352,
		.sizeimage = 352 * 288 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 0},
};

/*
 * usb exchanges
 * One element of a register-write script: req selects the access type
 * (0xa0 bridge write, 0xaa/0xbb sensor i2c, 0xdd delay - see the table
 * comments), val/idx are the request payload.  Scripts end with {}.
 */
struct usb_action {
	u8	req;
	u8	val;
	u16	idx;
};

/* ADCM2700 sensor: full-size (640x480) init sequence */
static const struct usb_action adcm2700_Initial[] = {
	{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},		/* 00,00,01,cc */
	{0xa0, 0x04, ZC3XX_R002_CLOCKSELECT},		/* 00,02,04,cc */
	{0xa0, 0x00, ZC3XX_R008_CLOCKSETTING},		/* 00,08,03,cc */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR},		/* 00,8b,d3,cc */
	{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},	/* 00,03,02,cc */
	{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},		/* 00,04,80,cc */
	{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},	/* 00,05,01,cc */
	{0xa0, 0xd8, ZC3XX_R006_FRAMEHEIGHTLOW},	/* 00,06,d8,cc */
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},	/* 00,01,01,cc */
	{0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,03,cc */
	{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,01,cc */
	{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,05,cc */
	{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},		/* 00,98,00,cc */
	{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},		/* 00,9a,00,cc */
	{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},		/* 01,1a,00,cc */
	{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},		/* 01,1c,00,cc */
	{0xa0, 0xde, ZC3XX_R09C_WINHEIGHTLOW},		/* 00,9c,de,cc */
	{0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW},		/* 00,9e,86,cc */
	{0xbb, 0x00, 0x0400},				/* 04,00,00,bb */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x0f, 0x140f},				/* 14,0f,0f,bb */
	{0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},	/* 01,01,37,cc */
	{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},		/* 01,00,0d,cc */
	{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},		/* 01,89,06,cc */
	{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},		/* 01,c5,03,cc */
	{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},		/* 01,cb,13,cc */
	{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},	/* 02,50,08,cc */
	{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},		/* 03,01,08,cc */
	{0xa0, 0x58, ZC3XX_R116_RGAIN},			/* 01,16,58,cc */
	{0xa0, 0x5a, ZC3XX_R118_BGAIN},			/* 01,18,5a,cc */
	{0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE},	/* 01,80,02,cc */
	{0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR},		/* 00,8b,d3,cc */
	{0xbb, 0x00, 0x0408},				/* 04,00,08,bb */
	{0xdd, 0x00, 0x0200},				/* 00,02,00,dd */
	{0xbb, 0x00, 0x0400},				/* 04,00,00,bb */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x0f, 0x140f},				/* 14,0f,0f,bb */
	{0xbb, 0xe0, 0x0c2e},				/* 0c,e0,2e,bb */
	{0xbb, 0x01, 0x2000},				/* 20,01,00,bb */
	{0xbb, 0x96, 0x2400},				/* 24,96,00,bb */
	{0xbb, 0x06, 0x1006},				/* 10,06,06,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x5f, 0x2090},				/* 20,5f,90,bb */
	{0xbb, 0x01, 0x8000},				/* 80,01,00,bb */
	{0xbb, 0x09, 0x8400},				/* 84,09,00,bb */
	{0xbb, 0x86, 0x0002},				/* 00,86,02,bb */
	{0xbb, 0xe6, 0x0401},				/* 04,e6,01,bb */
	{0xbb, 0x86, 0x0802},				/* 08,86,02,bb */
	{0xbb, 0xe6, 0x0c01},				/* 0c,e6,01,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0000},				/* 00,fe,00,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0020},				/* 00,fe,20,aa */
/*mswin+*/
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},
	{0xaa, 0xfe, 0x0002},
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
	{0xaa, 0xb4, 0xcd37},
	{0xaa, 0xa4, 0x0004},
	{0xaa, 0xa8, 0x0007},
	{0xaa, 0xac, 0x0004},
/*mswin-*/
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0000},				/* 00,fe,00,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x04, 0x0400},				/* 04,04,00,bb */
	{0xdd, 0x00, 0x0100},				/* 00,01,00,dd */
	{0xbb, 0x01, 0x0400},				/* 04,01,00,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xbb, 0x41, 0x2803},				/* 28,41,03,bb */
	{0xbb, 0x40, 0x2c03},				/* 2c,40,03,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0010},				/* 00,fe,10,aa */
	{}
};

/* ADCM2700 sensor: scaled (320x240) init sequence */
static const struct usb_action adcm2700_InitialScale[] = {
	{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},		/* 00,00,01,cc */
	{0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},		/* 00,02,10,cc */
	{0xa0, 0x00, ZC3XX_R008_CLOCKSETTING},
/* 00,08,03,cc */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR},		/* 00,8b,d3,cc */
	{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},	/* 00,03,02,cc */
	{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},		/* 00,04,80,cc */
	{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},	/* 00,05,01,cc */
	{0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW},	/* 00,06,d0,cc */
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},	/* 00,01,01,cc */
	{0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,03,cc */
	{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,01,cc */
	{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},	/* 00,12,05,cc */
	{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},		/* 00,98,00,cc */
	{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},		/* 00,9a,00,cc */
	{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},		/* 01,1a,00,cc */
	{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},		/* 01,1c,00,cc */
	{0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW},		/* 00,9c,d8,cc */
	{0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW},		/* 00,9e,88,cc */
	{0xbb, 0x00, 0x0400},				/* 04,00,00,bb */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x0f, 0x140f},				/* 14,0f,0f,bb */
	{0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},	/* 01,01,37,cc */
	{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},		/* 01,00,0d,cc */
	{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},		/* 01,89,06,cc */
	{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},		/* 01,c5,03,cc */
	{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},		/* 01,cb,13,cc */
	{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},	/* 02,50,08,cc */
	{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},		/* 03,01,08,cc */
	{0xa0, 0x58, ZC3XX_R116_RGAIN},			/* 01,16,58,cc */
	{0xa0, 0x5a, ZC3XX_R118_BGAIN},			/* 01,18,5a,cc */
	{0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE},	/* 01,80,02,cc */
	{0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR},		/* 00,8b,d3,cc */
	{0xbb, 0x00, 0x0408},				/* 04,00,08,bb */
	{0xdd, 0x00, 0x0200},				/* 00,02,00,dd */
	{0xbb, 0x00, 0x0400},				/* 04,00,00,bb */
	{0xdd, 0x00, 0x0050},				/* 00,00,50,dd */
	{0xbb, 0x0f, 0x140f},				/* 14,0f,0f,bb */
	{0xbb, 0xe0, 0x0c2e},				/* 0c,e0,2e,bb */
	{0xbb, 0x01, 0x2000},				/* 20,01,00,bb */
	{0xbb, 0x96, 0x2400},				/* 24,96,00,bb */
	{0xbb, 0x06, 0x1006},				/* 10,06,06,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x5f, 0x2090},				/* 20,5f,90,bb */
	{0xbb, 0x01, 0x8000},				/* 80,01,00,bb */
	{0xbb, 0x09, 0x8400},				/* 84,09,00,bb */
	{0xbb, 0x86, 0x0002},				/* 00,88,02,bb */
	{0xbb, 0xe6, 0x0401},				/* 04,e6,01,bb */
	{0xbb, 0x86, 0x0802},				/* 08,88,02,bb */
	{0xbb, 0xe6, 0x0c01},				/* 0c,e6,01,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0000},				/* 00,fe,00,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0020},				/* 00,fe,20,aa */
/*******/
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xaa, 0xfe, 0x0000},				/* 00,fe,00,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xdd, 0x00, 0x0010},				/* 00,00,10,dd */
	{0xbb, 0x04, 0x0400},				/* 04,04,00,bb */
	{0xdd, 0x00, 0x0100},				/* 00,01,00,dd */
	{0xbb, 0x01, 0x0400},				/* 04,01,00,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xbb, 0x41, 0x2803},				/* 28,41,03,bb */
	{0xbb, 0x40, 0x2c03},				/* 2c,40,03,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0010},				/* 00,fe,10,aa */
	{}
};

/* ADCM2700: 50 Hz power-line anti-flicker settings */
static const struct usb_action adcm2700_50HZ[] = {
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xbb, 0x05, 0x8400},				/* 84,05,00,bb */
	{0xbb, 0xd0, 0xb007},				/* b0,d0,07,bb */
{0xbb, 0xa0, 0xb80f},				/* b8,a0,0f,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0010},				/* 00,fe,10,aa */
	{0xaa, 0x26, 0x00d0},				/* 00,26,d0,aa */
	{0xaa, 0x28, 0x0002},				/* 00,28,02,aa */
	{}
};

/* ADCM2700: 60 Hz power-line anti-flicker settings */
static const struct usb_action adcm2700_60HZ[] = {
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xbb, 0x07, 0x8400},				/* 84,07,00,bb */
	{0xbb, 0x82, 0xb006},				/* b0,82,06,bb */
	{0xbb, 0x04, 0xb80d},				/* b8,04,0d,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0010},				/* 00,fe,10,aa */
	{0xaa, 0x26, 0x0057},				/* 00,26,57,aa */
	{0xaa, 0x28, 0x0002},				/* 00,28,02,aa */
	{}
};

/* ADCM2700: anti-flicker disabled */
static const struct usb_action adcm2700_NoFliker[] = {
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0002},				/* 00,fe,02,aa */
	{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,0a,cc */
	{0xbb, 0x07, 0x8400},				/* 84,07,00,bb */
	{0xbb, 0x05, 0xb000},				/* b0,05,00,bb */
	{0xbb, 0xa0, 0xb801},				/* b8,a0,01,bb */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xaa, 0xfe, 0x0010},				/* 00,fe,10,aa */
	{}
};

/* CS2102 sensor: scaled init sequence */
static const struct usb_action cs2102_InitialScale[] = {	/* 320x240 */
	{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
	{0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
	{0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT},
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
	{0xa0, 0x20, ZC3XX_R080_HBLANKHIGH},
	{0xa0, 0x21, ZC3XX_R081_HBLANKLOW},
	{0xa0, 0x30, ZC3XX_R083_RGAINADDR},
	{0xa0, 0x31, ZC3XX_R084_GGAINADDR},
	{0xa0, 0x32, ZC3XX_R085_BGAINADDR},
	{0xa0, 0x23, ZC3XX_R086_EXPTIMEHIGH},
	{0xa0, 0x24, ZC3XX_R087_EXPTIMEMID},
	{0xa0, 0x25, ZC3XX_R088_EXPTIMELOW},
	{0xa0, 0xb3, ZC3XX_R08B_I2CDEVICEADDR},
	{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},	/* 00 */
	{0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
	{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
	{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
	{0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
	{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
	{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
	{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
	{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
	{0xaa, 0x02, 0x0008},
	{0xaa, 0x03, 0x0000},
	{0xaa, 0x11, 0x0000},
	{0xaa, 0x12, 0x0089},
	{0xaa, 0x13, 0x0000},
	{0xaa, 0x14, 0x00e9},
	{0xaa, 0x20, 0x0000},
	{0xaa, 0x22, 0x0000},
	{0xaa, 0x0b, 0x0004},
	{0xaa, 0x30, 0x0030},
	{0xaa, 0x31, 0x0030},
	{0xaa, 0x32, 0x0030},
	{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
	{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
	{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
	{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
	{0xa0, 0x10, 0x01ae},
	{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
	{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
	{0xa0, 0x68, ZC3XX_R18D_YTARGET},
	{0xa0, 0x00, 0x01ad},
	{}
};

/* CS2102 sensor: full-size init sequence */
static const struct usb_action cs2102_Initial[] = {	/* 640x480 */
	{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
	{0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
	{0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT},
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
	{0xa0, 0x20, ZC3XX_R080_HBLANKHIGH},
	{0xa0, 0x21, ZC3XX_R081_HBLANKLOW},
	{0xa0, 0x30, ZC3XX_R083_RGAINADDR},
	{0xa0, 0x31, ZC3XX_R084_GGAINADDR},
	{0xa0, 0x32, ZC3XX_R085_BGAINADDR},
	{0xa0, 0x23, ZC3XX_R086_EXPTIMEHIGH},
	{0xa0, 0x24, ZC3XX_R087_EXPTIMEMID},
	{0xa0, 0x25, ZC3XX_R088_EXPTIMELOW},
	{0xa0, 0xb3, ZC3XX_R08B_I2CDEVICEADDR},
	{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},	/* 00 */
	{0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
	{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
	{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
	{0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
	{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
	{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
	{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
	{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
	{0xaa, 0x02, 0x0008},
	{0xaa, 0x03, 0x0000},
	{0xaa, 0x11, 0x0001},
	{0xaa, 0x12, 0x0087},
	{0xaa, 0x13, 0x0001},
	{0xaa, 0x14, 0x00e7},
	{0xaa, 0x20, 0x0000},
	{0xaa, 0x22, 0x0000},
	{0xaa, 0x0b, 0x0004},
	{0xaa, 0x30, 0x0030},
	{0xaa, 0x31, 0x0030},
	{0xaa, 0x32, 0x0030},
	{0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION},
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
	{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
	{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
	{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
	{0xa0, 0x15, 0x01ae},
	{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
	{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
	{0xa0, 0x68, ZC3XX_R18D_YTARGET},
	{0xa0, 0x00, 0x01ad},
	{}
};

/* CS2102: 50 Hz anti-flicker, scaled mode */
static const struct usb_action cs2102_50HZScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0001},
	{0xaa, 0x24, 0x005f},
	{0xaa, 0x25, 0x0090},
	{0xaa, 0x21, 0x00dd},
	{0xa0, 0x02, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0xbf, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x20, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x3a, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x98, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0xdd, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0xe4, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xf0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102: 50 Hz anti-flicker, full-size mode */
static const struct usb_action cs2102_50HZ[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0000},
	{0xaa, 0x24, 0x00af},
	{0xaa, 0x25, 0x00c8},
	{0xaa, 0x21, 0x0068},
	{0xa0, 0x01, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0x5f, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x90, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x1d, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x4c, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x68, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0xe3, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xf0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102: 60 Hz anti-flicker, scaled mode */
static const struct usb_action cs2102_60HZScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0001},
	{0xaa, 0x24, 0x0055},
	{0xaa, 0x25, 0x00cc},
	{0xaa, 0x21, 0x003f},
	{0xa0, 0x02, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0xab, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x98, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x30, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0xd4, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x39, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x70, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xb0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102: 60 Hz anti-flicker, full-size mode */
static const struct usb_action cs2102_60HZ[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0000},
	{0xaa, 0x24, 0x00aa},
	{0xaa, 0x25, 0x00e6},
	{0xaa, 0x21, 0x003f},
	{0xa0, 0x01, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0x55, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0xcc, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x18, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x6a, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x3f, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0xa5, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xf0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102: anti-flicker disabled, scaled mode */
static const struct usb_action cs2102_NoFlikerScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0001},
	{0xaa, 0x24, 0x005f},
	{0xaa, 0x25, 0x0000},
	{0xaa, 0x21, 0x0001},
	{0xa0, 0x02, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0xbf, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x80, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x01, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x40, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xa0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102: anti-flicker disabled, full-size mode */
static const struct usb_action cs2102_NoFliker[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x23, 0x0000},
	{0xaa, 0x24, 0x00af},
	{0xaa, 0x25, 0x0080},
	{0xaa, 0x21, 0x0001},
	{0xa0, 0x01, ZC3XX_R190_EXPOSURELIMITHIGH},
	{0xa0, 0x5f, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x80, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x01, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x40, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xa0, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{}
};

/* CS2102_KOCOM */
static const struct usb_action cs2102K_InitialScale[] = {
	{0xa0, 0x11, ZC3XX_R002_CLOCKSELECT},
	{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
	{0xa0, 0x08, ZC3XX_R010_CMOSSENSORSELECT},
	{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
	{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
	{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
	{0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
	{0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
	{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
	{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
	{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
	{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
	{0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW},
	{0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW},
	{0xa0, 0x55, ZC3XX_R08B_I2CDEVICEADDR},
	{0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT},
	{0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
	{0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
	{0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
	{0xa0, 0x0a,
ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0b, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0c, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x7c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0d, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0xa3, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x03, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0xfb, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x05, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x06, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x03, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x09, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x08, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0e, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0f, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x10, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x11, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x12, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, 
ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x15, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x16, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x17, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x01, 0x01b1}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x4c, ZC3XX_R118_BGAIN}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? 
*/ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x13, ZC3XX_R120_GAMMA00}, /* gamma 4 */ {0xa0, 0x38, ZC3XX_R121_GAMMA01}, {0xa0, 0x59, ZC3XX_R122_GAMMA02}, {0xa0, 0x79, ZC3XX_R123_GAMMA03}, {0xa0, 0x92, ZC3XX_R124_GAMMA04}, {0xa0, 0xa7, ZC3XX_R125_GAMMA05}, {0xa0, 0xb9, ZC3XX_R126_GAMMA06}, {0xa0, 0xc8, ZC3XX_R127_GAMMA07}, {0xa0, 0xd4, ZC3XX_R128_GAMMA08}, {0xa0, 0xdf, ZC3XX_R129_GAMMA09}, {0xa0, 0xe7, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xee, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xf4, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xf9, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xfc, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x26, ZC3XX_R130_GAMMA10}, {0xa0, 0x22, ZC3XX_R131_GAMMA11}, {0xa0, 0x20, ZC3XX_R132_GAMMA12}, {0xa0, 0x1c, ZC3XX_R133_GAMMA13}, {0xa0, 0x16, ZC3XX_R134_GAMMA14}, {0xa0, 0x13, ZC3XX_R135_GAMMA15}, {0xa0, 0x10, ZC3XX_R136_GAMMA16}, {0xa0, 0x0d, ZC3XX_R137_GAMMA17}, {0xa0, 0x0b, ZC3XX_R138_GAMMA18}, {0xa0, 0x09, ZC3XX_R139_GAMMA19}, {0xa0, 0x07, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x06, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x05, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x04, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x03, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x02, ZC3XX_R13F_GAMMA1F}, {0xa0, 0x58, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf4, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf4, ZC3XX_R10D_RGB10}, {0xa0, 0x58, ZC3XX_R10E_RGB11}, {0xa0, 0xf4, ZC3XX_R10F_RGB12}, {0xa0, 0xf4, ZC3XX_R110_RGB20}, {0xa0, 0xf4, ZC3XX_R111_RGB21}, {0xa0, 0x58, ZC3XX_R112_RGB22}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x22, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 
0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x22, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, {0xa0, 0x22, ZC3XX_R0A4_EXPOSURETIMELOW}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xee, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x3a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x0f, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x19, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x1f, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x4c, ZC3XX_R118_BGAIN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x5c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x5c, 
ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {} };
/*
 * cs2102K_Initial: start-up sequence for the CS2102K sensor.  The frame
 * registers written below (R003/R004 = 0x0280, R005/R006 = 0x01e0) set a
 * 640x480 frame.  Each {req, val, idx} entry is one captured USB control
 * transfer; 0xa0 entries write a ZC3xx bridge register.  The repeating
 * R092/R093/R094/R090 quartets are the bridge's indirect i2c handshake to
 * the sensor (address select / set value / write ack / command) -- see the
 * "i2c exchanges" fixme below.  NOTE(review): this is a captured trace;
 * the order and the apparent duplicates are intentional -- do not reorder
 * or deduplicate entries.
 */
static const struct usb_action cs2102K_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0,
ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /*fixme: next sequence = i2c exchanges*/ {0xa0, 0x55, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0a, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0b, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0c, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x7b, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0d, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0xa3, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x03, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0xfb, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x05, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x06, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x03, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x09, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x08, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0e, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01,
ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x0f, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x10, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x11, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x12, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x18, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x15, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x16, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x17, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x01, 0x01b1}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x4c, ZC3XX_R118_BGAIN}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? 
*/ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x13, ZC3XX_R120_GAMMA00}, /* gamma 4 */ {0xa0, 0x38, ZC3XX_R121_GAMMA01}, {0xa0, 0x59, ZC3XX_R122_GAMMA02}, {0xa0, 0x79, ZC3XX_R123_GAMMA03}, {0xa0, 0x92, ZC3XX_R124_GAMMA04}, {0xa0, 0xa7, ZC3XX_R125_GAMMA05}, {0xa0, 0xb9, ZC3XX_R126_GAMMA06}, {0xa0, 0xc8, ZC3XX_R127_GAMMA07}, {0xa0, 0xd4, ZC3XX_R128_GAMMA08}, {0xa0, 0xdf, ZC3XX_R129_GAMMA09}, {0xa0, 0xe7, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xee, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xf4, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xf9, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xfc, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x26, ZC3XX_R130_GAMMA10}, {0xa0, 0x22, ZC3XX_R131_GAMMA11}, {0xa0, 0x20, ZC3XX_R132_GAMMA12}, {0xa0, 0x1c, ZC3XX_R133_GAMMA13}, {0xa0, 0x16, ZC3XX_R134_GAMMA14}, {0xa0, 0x13, ZC3XX_R135_GAMMA15}, {0xa0, 0x10, ZC3XX_R136_GAMMA16}, {0xa0, 0x0d, ZC3XX_R137_GAMMA17}, {0xa0, 0x0b, ZC3XX_R138_GAMMA18}, {0xa0, 0x09, ZC3XX_R139_GAMMA19}, {0xa0, 0x07, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x06, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x05, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x04, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x03, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x02, ZC3XX_R13F_GAMMA1F}, {0xa0, 0x58, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf4, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf4, ZC3XX_R10D_RGB10}, {0xa0, 0x58, ZC3XX_R10E_RGB11}, {0xa0, 0xf4, ZC3XX_R10F_RGB12}, {0xa0, 0xf4, ZC3XX_R110_RGB20}, {0xa0, 0xf4, ZC3XX_R111_RGB21}, {0xa0, 0x58, ZC3XX_R112_RGB22}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x22, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 
0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x22, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, {0xa0, 0x22, ZC3XX_R0A4_EXPOSURETIMELOW}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xee, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x3a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x0f, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x19, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x1f, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x4c, ZC3XX_R118_BGAIN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x5c, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x5c, 
ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x96, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x96, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, /*fixme:what does the next sequence?*/ {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, 
{0xa0, 0xd0, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0xd0, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0a, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x0a, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, 
{0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x44, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x44, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x21, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x7e, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x13, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x7e, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x14, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x18, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x04, 
ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {} };
/*
 * gc0305_Initial: start-up sequence for the GC0305 sensor in 640x480 mode
 * (per the inline comment).  0xa0 entries write bridge registers; 0xaa
 * entries address the sensor (their trailing comments end in ",aa").  The
 * trailing "hh,ll,vv,cc|aa" comments appear to be the raw capture these
 * tables were generated from -- TODO confirm.
 */
static const struct usb_action gc0305_Initial[] = { /* 640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ {0xa0, 0x04, ZC3XX_R002_CLOCKSELECT}, /* 00,02,04,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e6,cc */ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc */ {0xaa, 0x13, 0x0002}, /* 00,13,02,aa */ {0xaa, 0x15, 0x0003}, /* 00,15,03,aa */ {0xaa, 0x01, 0x0000}, /* 00,01,00,aa */ {0xaa, 0x02, 0x0000}, /* 00,02,00,aa */ {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa */ {0xaa, 0x1d, 0x0080}, /* 00,1d,80,aa */ {0xaa, 0x1f, 0x0008}, /* 00,1f,08,aa */ {0xaa, 0x21, 0x0012}, /* 00,21,12,aa */ {0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc */ {0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc */ {0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc */ {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa */ {0xaa, 0x0b, 
0x00b0}, /* 00,0b,b0,aa */ {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa */ {0xaa, 0x0d, 0x00b0}, /* 00,0d,b0,aa */ {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa */ {0xaa, 0x0f, 0x00b0}, /* 00,0f,b0,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x11, 0x00b0}, /* 00,11,b0,aa */ {0xaa, 0x16, 0x0001}, /* 00,16,01,aa */ {0xaa, 0x17, 0x00e6}, /* 00,17,e6,aa */ {0xaa, 0x18, 0x0002}, /* 00,18,02,aa */ {0xaa, 0x19, 0x0086}, /* 00,19,86,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x1b, 0x0020}, /* 00,1b,20,aa */ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x76, ZC3XX_R189_AWBSTATUS}, /* 01,89,76,cc */ {0xa0, 0x09, 0x01ad}, /* 01,ad,09,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc */ {0xa0, 0x85, ZC3XX_R18D_YTARGET}, /* 01,8d,85,cc */ {0xa0, 0x00, 0x011e}, /* 01,1e,00,cc */ {0xa0, 0x52, ZC3XX_R116_RGAIN}, /* 01,16,52,cc */ {0xa0, 0x40, ZC3XX_R117_GGAIN}, /* 01,17,40,cc */ {0xa0, 0x52, ZC3XX_R118_BGAIN}, /* 01,18,52,cc */ {0xa0, 0x03, ZC3XX_R113_RGB03}, /* 01,13,03,cc */ {} };
/*
 * gc0305_InitialScale: same GC0305 bring-up as gc0305_Initial but for the
 * scaled 320x240 mode (per the inline comment).  Differences visible vs the
 * 640x480 table: ZC3XX_R002_CLOCKSELECT = 0x10 (vs 0x04) and window
 * height/width low bytes 0xe8/0x88 (vs 0xe6/0x86), mirrored in the sensor
 * writes to regs 0x17/0x19.
 */
static const struct usb_action gc0305_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, 
ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e8,cc */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc */ {0xaa, 0x13, 0x0000}, /* 00,13,00,aa */ {0xaa, 0x15, 0x0001}, /* 00,15,01,aa */ {0xaa, 0x01, 0x0000}, /* 00,01,00,aa */ {0xaa, 0x02, 0x0000}, /* 00,02,00,aa */ {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa */ {0xaa, 0x1d, 0x0080}, /* 00,1d,80,aa */ {0xaa, 0x1f, 0x0008}, /* 00,1f,08,aa */ {0xaa, 0x21, 0x0012}, /* 00,21,12,aa */ {0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc */ {0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc */ {0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc */ {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa */ {0xaa, 0x0b, 0x00b0}, /* 00,0b,b0,aa */ {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa */ {0xaa, 0x0d, 0x00b0}, /* 00,0d,b0,aa */ {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa */ {0xaa, 0x0f, 0x00b0}, /* 00,0f,b0,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x11, 0x00b0}, /* 00,11,b0,aa */ {0xaa, 0x16, 0x0001}, /* 00,16,01,aa */ {0xaa, 0x17, 0x00e8}, /* 00,17,e8,aa */ {0xaa, 0x18, 0x0002}, /* 00,18,02,aa */ {0xaa, 0x19, 0x0088}, /* 00,19,88,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x1b, 0x0020}, /* 00,1b,20,aa */ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x76, ZC3XX_R189_AWBSTATUS}, /* 01,89,76,cc */ {0xa0, 0x09, 0x01ad}, /* 01,ad,09,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, 
/* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc */ {0xa0, 0x00, 0x011e}, /* 01,1e,00,cc */ {0xa0, 0x52, ZC3XX_R116_RGAIN}, /* 01,16,52,cc */ {0xa0, 0x40, ZC3XX_R117_GGAIN}, /* 01,17,40,cc */ {0xa0, 0x52, ZC3XX_R118_BGAIN}, /* 01,18,52,cc */ {0xa0, 0x03, ZC3XX_R113_RGB03}, /* 01,13,03,cc */ {} };
/*
 * gc0305_50HZ: exposure-limit and anti-flicker settings for 50 Hz mains
 * lighting.  The "win:" comments record alternative values seen under the
 * Windows driver capture.
 */
static const struct usb_action gc0305_50HZ[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0002}, /* 00,83,02,aa */ {0xaa, 0x84, 0x0038}, /* 00,84,38,aa */ /* win: 00,84,ec */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0b,cc */ {0xa0, 0x18, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,18,cc */ /* win: 01,92,10 */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x8e, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,8e,cc */ /* win: 01,97,ec */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,60,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ /* {0xa0, 0x85, ZC3XX_R18D_YTARGET}, * 01,8d,85,cc * * if 640x480 */ {} };
/*
 * gc0305_60HZ: as gc0305_50HZ but tuned for 60 Hz mains; also sets
 * ZC3XX_R18D_YTARGET unconditionally at the end.
 */
static const struct usb_action gc0305_60HZ[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ {0xaa, 0x84, 0x00ec}, /* 00,84,ec,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0b,cc */ {0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW}, /* 
01,92,10,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0xec, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,ec,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,60,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ {0xa0, 0x80, ZC3XX_R18D_YTARGET}, /* 01,8d,80,cc */ {} };
/*
 * gc0305_NoFliker: flicker compensation disabled ("NoFliker" [sic] follows
 * the driver's existing naming).  Uses fixed, short exposure limits and
 * AUTOCORRECTENABLE = 0x03 instead of the 0x42 used by the 50/60 Hz tables.
 */
static const struct usb_action gc0305_NoFliker[] = { {0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc */ {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ {0xaa, 0x84, 0x0020}, /* 00,84,20,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x00, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,00,cc */ {0xa0, 0x48, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,48,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,60,cc */ {0xa0, 0x03, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,03,cc */ {0xa0, 0x80, ZC3XX_R18D_YTARGET}, /* 01,8d,80,cc */ {} };
/*
 * hdcs2020_InitialScale: bring-up sequence for the HDCS2020 sensor, scaled
 * mode (CLOCKSELECT = 0x11 here vs 0x00 in hdcs2020_Initial below).
 * 0xa1 entries presumably read/poll a bridge register between write bursts
 * -- TODO confirm against this driver's usb_action dispatcher.
 */
static const struct usb_action hdcs2020_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 
0x11, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* qtable 0x05 */ {0xa0, 0x08, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xaa, 0x1c, 0x0000}, {0xaa, 0x0a, 0x0001}, {0xaa, 0x0b, 0x0006}, {0xaa, 0x0c, 0x007b}, {0xaa, 0x0d, 0x00a7}, {0xaa, 0x03, 0x00fb}, {0xaa, 0x05, 0x0000}, {0xaa, 0x06, 0x0003}, {0xaa, 0x09, 0x0008}, {0xaa, 0x0f, 0x0018}, /* set sensor gain */ {0xaa, 0x10, 0x0018}, {0xaa, 0x11, 0x0018}, {0xaa, 0x12, 0x0018}, {0xaa, 0x15, 0x004e}, {0xaa, 0x1c, 0x0004}, {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x70, ZC3XX_R18D_YTARGET}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa1, 0x01, 0x0002}, {0xa1, 0x01, 0x0008}, {0xa1, 0x01, 0x0180}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? 
*/ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x13, ZC3XX_R120_GAMMA00}, /* gamma 4 */ {0xa0, 0x38, ZC3XX_R121_GAMMA01}, {0xa0, 0x59, ZC3XX_R122_GAMMA02}, {0xa0, 0x79, ZC3XX_R123_GAMMA03}, {0xa0, 0x92, ZC3XX_R124_GAMMA04}, {0xa0, 0xa7, ZC3XX_R125_GAMMA05}, {0xa0, 0xb9, ZC3XX_R126_GAMMA06}, {0xa0, 0xc8, ZC3XX_R127_GAMMA07}, {0xa0, 0xd4, ZC3XX_R128_GAMMA08}, {0xa0, 0xdf, ZC3XX_R129_GAMMA09}, {0xa0, 0xe7, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xee, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xf4, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xf9, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xfc, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x26, ZC3XX_R130_GAMMA10}, {0xa0, 0x22, ZC3XX_R131_GAMMA11}, {0xa0, 0x20, ZC3XX_R132_GAMMA12}, {0xa0, 0x1c, ZC3XX_R133_GAMMA13}, {0xa0, 0x16, ZC3XX_R134_GAMMA14}, {0xa0, 0x13, ZC3XX_R135_GAMMA15}, {0xa0, 0x10, ZC3XX_R136_GAMMA16}, {0xa0, 0x0d, ZC3XX_R137_GAMMA17}, {0xa0, 0x0b, ZC3XX_R138_GAMMA18}, {0xa0, 0x09, ZC3XX_R139_GAMMA19}, {0xa0, 0x07, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x06, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x05, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x04, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x03, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x02, ZC3XX_R13F_GAMMA1F}, {0xa0, 0x66, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xed, ZC3XX_R10B_RGB01}, {0xa0, 0xed, ZC3XX_R10C_RGB02}, {0xa0, 0xed, ZC3XX_R10D_RGB10}, {0xa0, 0x66, ZC3XX_R10E_RGB11}, {0xa0, 0xed, ZC3XX_R10F_RGB12}, {0xa0, 0xed, ZC3XX_R110_RGB20}, {0xa0, 0xed, ZC3XX_R111_RGB21}, {0xa0, 0x66, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x13, 0x0031}, {0xaa, 0x14, 0x0001}, {0xaa, 0x0e, 0x0004}, {0xaa, 0x19, 0x00cd}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x62, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 
0x3d, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 0x14 */ {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x18, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x2c, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x41, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {} };
/*
 * hdcs2020_Initial: full-mode HDCS2020 bring-up (CLOCKSELECT = 0x00,
 * sensor reg 0x0c = 0x7a vs 0x7b in the scaled table); table body
 * continues below.
 */
static const struct usb_action hdcs2020_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xaa, 0x1c, 0x0000}, {0xaa, 0x0a, 0x0001}, {0xaa, 0x0b, 0x0006}, {0xaa, 0x0c, 0x007a}, {0xaa, 0x0d, 0x00a7}, {0xaa, 0x03, 0x00fb}, {0xaa, 0x05, 0x0000}, {0xaa, 0x06, 0x0003}, {0xaa, 0x09, 0x0008}, {0xaa, 0x0f, 0x0018}, /* original setting */ {0xaa, 0x10, 0x0018}, {0xaa, 0x11, 0x0018}, {0xaa, 0x12, 0x0018}, {0xaa, 0x15, 0x004e}, {0xaa, 0x1c, 0x0004}, {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x70, ZC3XX_R18D_YTARGET}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 
0x08, ZC3XX_R301_EEPROMACCESS}, {0xa1, 0x01, 0x0002}, {0xa1, 0x01, 0x0008}, {0xa1, 0x01, 0x0180}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x13, ZC3XX_R120_GAMMA00}, /* gamma 4 */ {0xa0, 0x38, ZC3XX_R121_GAMMA01}, {0xa0, 0x59, ZC3XX_R122_GAMMA02}, {0xa0, 0x79, ZC3XX_R123_GAMMA03}, {0xa0, 0x92, ZC3XX_R124_GAMMA04}, {0xa0, 0xa7, ZC3XX_R125_GAMMA05}, {0xa0, 0xb9, ZC3XX_R126_GAMMA06}, {0xa0, 0xc8, ZC3XX_R127_GAMMA07}, {0xa0, 0xd4, ZC3XX_R128_GAMMA08}, {0xa0, 0xdf, ZC3XX_R129_GAMMA09}, {0xa0, 0xe7, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xee, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xf4, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xf9, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xfc, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x26, ZC3XX_R130_GAMMA10}, {0xa0, 0x22, ZC3XX_R131_GAMMA11}, {0xa0, 0x20, ZC3XX_R132_GAMMA12}, {0xa0, 0x1c, ZC3XX_R133_GAMMA13}, {0xa0, 0x16, ZC3XX_R134_GAMMA14}, {0xa0, 0x13, ZC3XX_R135_GAMMA15}, {0xa0, 0x10, ZC3XX_R136_GAMMA16}, {0xa0, 0x0d, ZC3XX_R137_GAMMA17}, {0xa0, 0x0b, ZC3XX_R138_GAMMA18}, {0xa0, 0x09, ZC3XX_R139_GAMMA19}, {0xa0, 0x07, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x06, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x05, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x04, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x03, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x02, ZC3XX_R13F_GAMMA1F}, {0xa0, 0x66, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xed, ZC3XX_R10B_RGB01}, {0xa0, 0xed, ZC3XX_R10C_RGB02}, {0xa0, 0xed, ZC3XX_R10D_RGB10}, {0xa0, 0x66, ZC3XX_R10E_RGB11}, {0xa0, 0xed, ZC3XX_R10F_RGB12}, {0xa0, 0xed, ZC3XX_R110_RGB20}, {0xa0, 0xed, ZC3XX_R111_RGB21}, {0xa0, 0x66, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, 
/**** set exposure ***/ {0xaa, 0x13, 0x0031}, {0xaa, 0x14, 0x0001}, {0xaa, 0x0e, 0x0004}, {0xaa, 0x19, 0x00cd}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x62, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x3d, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x18, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x2c, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x41, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action hdcs2020_50HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x13, 0x0018}, /* 00,13,18,aa */ {0xaa, 0x14, 0x0001}, /* 00,14,01,aa */ {0xaa, 0x0e, 0x0005}, /* 00,0e,05,aa */ {0xaa, 0x19, 0x001f}, /* 00,19,1f,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,02,cc */ {0xa0, 0x76, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,76,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x46, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,46,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,0c,cc */ {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,28,cc */ {0xa0, 0x05, ZC3XX_R01D_HSYNC_0}, /* 00,1d,05,cc */ {0xa0, 0x1a, ZC3XX_R01E_HSYNC_1}, /* 00,1e,1a,cc */ {0xa0, 0x2f, ZC3XX_R01F_HSYNC_2}, /* 00,1f,2f,cc */ {} }; static const struct usb_action hdcs2020_60HZ[] = { {0xa0, 0x00, 
ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x13, 0x0031}, /* 00,13,31,aa */ {0xaa, 0x14, 0x0001}, /* 00,14,01,aa */ {0xaa, 0x0e, 0x0004}, /* 00,0e,04,aa */ {0xaa, 0x19, 0x00cd}, /* 00,19,cd,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,02,cc */ {0xa0, 0x62, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,62,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x3d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,3d,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x0c, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,0c,cc */ {0xa0, 0x28, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,28,cc */ {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, /* 00,1d,04,cc */ {0xa0, 0x18, ZC3XX_R01E_HSYNC_1}, /* 00,1e,18,cc */ {0xa0, 0x2c, ZC3XX_R01F_HSYNC_2}, /* 00,1f,2c,cc */ {} }; static const struct usb_action hdcs2020_NoFliker[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x13, 0x0010}, /* 00,13,10,aa */ {0xaa, 0x14, 0x0001}, /* 00,14,01,aa */ {0xaa, 0x0e, 0x0004}, /* 00,0e,04,aa */ {0xaa, 0x19, 0x0000}, /* 00,19,00,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,02,cc */ {0xa0, 0x70, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,70,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0x04, ZC3XX_R01D_HSYNC_0}, /* 00,1d,04,cc */ {0xa0, 0x17, ZC3XX_R01E_HSYNC_1}, /* 00,1e,17,cc */ {0xa0, 0x2a, ZC3XX_R01F_HSYNC_2}, /* 00,1f,2a,cc */ {} }; static const 
/*
 * NOTE(review): HV7131B sensor tables -- init plus 50 Hz / 60 Hz /
 * flicker-free exposure variants, each in a 640x480 ("...") and 320x240
 * ("...Scale") form.  Entry format: {0xa0, val, bridge_reg} writes a ZC3xx
 * bridge register; {0xaa, reg, val16} appears to write a sensor register
 * over the bridge's I2C -- TODO confirm in the command dispatcher.  All
 * values are opaque vendor-trace tuning data; keep order and values intact.
 */
struct usb_action hv7131b_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00 */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xaa, 0x30, 0x002d}, {0xaa, 0x01, 0x0005}, {0xaa, 0x11, 0x0000}, {0xaa, 0x13, 0x0001}, /* {0xaa, 0x13, 0x0000}, */ {0xaa, 0x14, 0x0001}, {0xaa, 0x15, 0x00e8}, {0xaa, 0x16, 0x0002}, {0xaa, 0x17, 0x0086}, /* 00,17,88,aa */ {0xaa, 0x31, 0x0038}, {0xaa, 0x32, 0x0038}, {0xaa, 0x33, 0x0038}, {0xaa, 0x5b, 0x0001}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x68, ZC3XX_R18D_YTARGET}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0xc0, 0x019b}, {0xa0, 0xa0, 0x019c}, {0xa0, 0x02, ZC3XX_R188_MINGAIN}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xaa, 0x02, 0x0090}, /* 00,02,80,aa */ {} }; static const struct usb_action hv7131b_Initial[] = { /* 640x480*/ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00 */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, 
ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xaa, 0x30, 0x002d}, {0xaa, 0x01, 0x0005}, {0xaa, 0x11, 0x0001}, {0xaa, 0x13, 0x0000}, /* {0xaa, 0x13, 0x0001}; */ {0xaa, 0x14, 0x0001}, {0xaa, 0x15, 0x00e6}, {0xaa, 0x16, 0x0002}, {0xaa, 0x17, 0x0086}, {0xaa, 0x31, 0x0038}, {0xaa, 0x32, 0x0038}, {0xaa, 0x33, 0x0038}, {0xaa, 0x5b, 0x0001}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x70, ZC3XX_R18D_YTARGET}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0xc0, 0x019b}, {0xa0, 0xa0, 0x019c}, {0xa0, 0x02, ZC3XX_R188_MINGAIN}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xaa, 0x02, 0x0090}, /* {0xaa, 0x02, 0x0080}, */ {} }; static const struct usb_action hv7131b_50HZ[] = { /* 640x480*/ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */ {0xaa, 0x26, 0x0053}, /* 00,26,53,aa */ {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x0050}, /* 00,21,50,aa */ {0xaa, 0x22, 0x001b}, /* 00,22,1b,aa */ {0xaa, 0x23, 0x00fc}, /* 00,23,fc,aa */ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0x9b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,9b,cc */ {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,80,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0xea, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,ea,cc */ {0xa0, 0x60, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,60,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 
01,8f,18,cc */ {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0x50, ZC3XX_R01E_HSYNC_1}, /* 00,1e,50,cc */ {0xa0, 0x1b, ZC3XX_R01F_HSYNC_2}, /* 00,1f,1b,cc */ {0xa0, 0xfc, ZC3XX_R020_HSYNC_3}, /* 00,20,fc,cc */ {} }; static const struct usb_action hv7131b_50HZScale[] = { /* 320x240 */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */ {0xaa, 0x26, 0x0053}, /* 00,26,53,aa */ {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x0050}, /* 00,21,50,aa */ {0xaa, 0x22, 0x0012}, /* 00,22,12,aa */ {0xaa, 0x23, 0x0080}, /* 00,23,80,aa */ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0x9b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,9b,cc */ {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,80,cc */ {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,01,cc */ {0xa0, 0xd4, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,d4,cc */ {0xa0, 0xc0, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,c0,cc */ {0xa0, 0x07, ZC3XX_R18C_AEFREEZE}, /* 01,8c,07,cc */ {0xa0, 0x0f, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,0f,cc */ {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0x50, ZC3XX_R01E_HSYNC_1}, /* 00,1e,50,cc */ {0xa0, 0x12, ZC3XX_R01F_HSYNC_2}, /* 00,1f,12,cc */ {0xa0, 0x80, ZC3XX_R020_HSYNC_3}, /* 00,20,80,cc */ {} }; static const struct usb_action hv7131b_60HZ[] = { /* 640x480*/ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */ {0xaa, 0x26, 0x00a1}, /* 00,26,a1,aa */ {0xaa, 0x27, 0x0020}, /* 00,27,20,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x0040}, /* 00,21,40,aa */ {0xaa, 0x22, 0x0013}, /* 00,22,13,aa */ {0xaa, 0x23, 0x004c}, /* 00,23,4c,aa */ {0xa0, 0x2f, 
ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0x4d, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,4d,cc */ {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,60,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0xc3, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,c3,cc */ {0xa0, 0x50, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,50,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,18,cc */ {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0x40, ZC3XX_R01E_HSYNC_1}, /* 00,1e,40,cc */ {0xa0, 0x13, ZC3XX_R01F_HSYNC_2}, /* 00,1f,13,cc */ {0xa0, 0x4c, ZC3XX_R020_HSYNC_3}, /* 00,20,4c,cc */ {} }; static const struct usb_action hv7131b_60HZScale[] = { /* 320x240 */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */ {0xaa, 0x26, 0x00a1}, /* 00,26,a1,aa */ {0xaa, 0x27, 0x0020}, /* 00,27,20,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x00a0}, /* 00,21,a0,aa */ {0xaa, 0x22, 0x0016}, /* 00,22,16,aa */ {0xaa, 0x23, 0x0040}, /* 00,23,40,aa */ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0x4d, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,4d,cc */ {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,60,cc */ {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,01,cc */ {0xa0, 0x86, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,86,cc */ {0xa0, 0xa0, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,a0,cc */ {0xa0, 0x07, ZC3XX_R18C_AEFREEZE}, /* 01,8c,07,cc */ {0xa0, 0x0f, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,0f,cc */ {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0xa0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,a0,cc */ {0xa0, 0x16, ZC3XX_R01F_HSYNC_2}, /* 00,1f,16,cc */ {0xa0, 0x40, ZC3XX_R020_HSYNC_3}, /* 00,20,40,cc */ 
{} }; static const struct usb_action hv7131b_NoFliker[] = { /* 640x480*/ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0003}, /* 00,25,03,aa */ {0xaa, 0x26, 0x0000}, /* 00,26,00,aa */ {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x0010}, /* 00,21,10,aa */ {0xaa, 0x22, 0x0000}, /* 00,22,00,aa */ {0xaa, 0x23, 0x0003}, /* 00,23,03,aa */ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,f8,cc */ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,00,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,02,cc */ {0xa0, 0x00, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,00,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0x10, ZC3XX_R01E_HSYNC_1}, /* 00,1e,10,cc */ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, /* 00,1f,00,cc */ {0xa0, 0x03, ZC3XX_R020_HSYNC_3}, /* 00,20,03,cc */ {} }; static const struct usb_action hv7131b_NoFlikerScale[] = { /* 320x240 */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x25, 0x0003}, /* 00,25,03,aa */ {0xaa, 0x26, 0x0000}, /* 00,26,00,aa */ {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */ {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */ {0xaa, 0x21, 0x00a0}, /* 00,21,a0,aa */ {0xaa, 0x22, 0x0016}, /* 00,22,16,aa */ {0xaa, 0x23, 0x0040}, /* 00,23,40,aa */ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */ {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,f8,cc */ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,00,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,02,cc */ {0xa0, 0x00, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,00,cc */ {0xa0, 
0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */ {0xa0, 0xa0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,a0,cc */ {0xa0, 0x16, ZC3XX_R01F_HSYNC_2}, /* 00,1f,16,cc */ {0xa0, 0x40, ZC3XX_R020_HSYNC_3}, /* 00,20,40,cc */ {} }; /* from lPEPI264v.inf (hv7131b!) */ static const struct usb_action hv7131r_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x000c}, {0xaa, 0x11, 0x0000}, {0xaa, 0x13, 0x0000}, {0xaa, 0x14, 0x0001}, {0xaa, 0x15, 0x00e8}, {0xaa, 0x16, 0x0002}, {0xaa, 0x17, 0x0088}, {0xaa, 0x30, 0x000b}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0xc0, 0x019b}, {0xa0, 0xa0, 0x019c}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 
/*
 * NOTE(review): HV7131R sensor tables (per the "from lPEPI264v.inf" comment
 * above this region in the file, these were lifted from a vendor .inf
 * trace).  Init plus 50/60 Hz and flicker-free exposure variants, with
 * "...Scale" denoting the scaled mode.  The {0xdd, 0x00, 0x0200} entry in
 * the init sequences looks like a delay command rather than a register
 * write -- TODO confirm in the driver's dispatcher.  Values are opaque
 * hardware tuning data; do not reorder.
 */
0x08, ZC3XX_R301_EEPROMACCESS}, {} }; static const struct usb_action hv7131r_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x000c}, {0xaa, 0x11, 0x0000}, {0xaa, 0x13, 0x0000}, {0xaa, 0x14, 0x0001}, {0xaa, 0x15, 0x00e6}, {0xaa, 0x16, 0x0002}, {0xaa, 0x17, 0x0086}, {0xaa, 0x30, 0x000b}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0xc0, 0x019b}, {0xa0, 0xa0, 0x019c}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {} }; static const struct usb_action hv7131r_50HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x06, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x68, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xa0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0xea, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x60, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x18, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, 
ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action hv7131r_50HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x0c, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0xd1, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x40, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0xd4, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0xc0, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x18, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action hv7131r_60HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x06, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x1a, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0xc3, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x50, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x18, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action hv7131r_60HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x0c, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x35, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x86, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0xa0, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x18, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, 
ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action hv7131r_NoFliker[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x58, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action hv7131r_NoFlikerScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x04, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0xb0, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, {0xa0, 0x08, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action icm105a_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0c, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, 
ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x00, ZC3XX_R097_WINYSTARTHIGH}, {0xa0, 0x01, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R099_WINXSTARTHIGH}, {0xa0, 0x01, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x01, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x01, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xaa, 0x01, 0x0010}, {0xaa, 0x03, 0x0000}, {0xaa, 0x04, 0x0001}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0001}, {0xaa, 0x04, 0x0011}, {0xaa, 0x05, 0x00a0}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0002}, {0xaa, 0x04, 0x0013}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0003}, {0xaa, 0x04, 0x0015}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0004}, {0xaa, 0x04, 0x0017}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x000d}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0005}, {0xaa, 0x04, 0x0019}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0006}, {0xaa, 0x04, 0x0017}, {0xaa, 0x05, 0x0026}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0007}, {0xaa, 0x04, 0x0019}, {0xaa, 0x05, 0x0022}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0008}, {0xaa, 0x04, 0x0021}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0009}, {0xaa, 0x04, 0x0023}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x000d}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x000a}, {0xaa, 0x04, 0x0025}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x000b}, {0xaa, 0x04, 0x00ec}, {0xaa, 0x05, 0x002e}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, 
{0xaa, 0x03, 0x000c}, {0xaa, 0x04, 0x00fa}, {0xaa, 0x05, 0x002a}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x07, 0x000d}, {0xaa, 0x01, 0x0005}, {0xaa, 0x94, 0x0002}, {0xaa, 0x90, 0x0000}, {0xaa, 0x91, 0x001f}, {0xaa, 0x10, 0x0064}, {0xaa, 0x9b, 0x00f0}, {0xaa, 0x9c, 0x0002}, {0xaa, 0x14, 0x001a}, {0xaa, 0x20, 0x0080}, {0xaa, 0x22, 0x0080}, {0xaa, 0x24, 0x0080}, {0xaa, 0x26, 0x0080}, {0xaa, 0x00, 0x0084}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xaa, 0xa8, 0x00c0}, {0xa1, 0x01, 0x0002}, {0xa1, 0x01, 0x0008}, {0xa1, 0x01, 0x0180}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x52, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf7, ZC3XX_R10B_RGB01}, {0xa0, 0xf7, ZC3XX_R10C_RGB02}, {0xa0, 0xf7, ZC3XX_R10D_RGB10}, {0xa0, 0x52, ZC3XX_R10E_RGB11}, {0xa0, 0xf7, ZC3XX_R10F_RGB12}, {0xa0, 0xf7, ZC3XX_R110_RGB20}, {0xa0, 0xf7, ZC3XX_R111_RGB21}, {0xa0, 0x52, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x0d, 0x0003}, {0xaa, 0x0c, 0x008c}, {0xaa, 0x0e, 0x0095}, {0xaa, 0x0f, 0x0002}, {0xaa, 0x1c, 0x0094}, {0xaa, 0x1d, 0x0002}, {0xaa, 0x20, 0x0080}, {0xaa, 0x22, 0x0080}, {0xaa, 0x24, 0x0080}, {0xaa, 0x26, 0x0080}, {0xaa, 0x00, 0x0084}, {0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH}, {0xa0, 0x94, ZC3XX_R0A4_EXPOSURETIMELOW}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x20, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x84, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, 
{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xe3, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xec, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf5, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0xc0, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action icm105a_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0c, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x00, ZC3XX_R097_WINYSTARTHIGH}, {0xa0, 0x02, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R099_WINXSTARTHIGH}, {0xa0, 0x02, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x02, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x02, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xaa, 0x01, 0x0010}, {0xaa, 0x03, 0x0000}, {0xaa, 0x04, 0x0001}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0001}, {0xaa, 0x04, 0x0011}, {0xaa, 0x05, 0x00a0}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0002}, {0xaa, 0x04, 0x0013}, 
{0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0001}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0003}, {0xaa, 0x04, 0x0015}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0004}, {0xaa, 0x04, 0x0017}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x000d}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0005}, {0xa0, 0x04, ZC3XX_R092_I2CADDRESSSELECT}, {0xa0, 0x19, ZC3XX_R093_I2CSETVALUE}, {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND}, {0xa1, 0x01, 0x0091}, {0xaa, 0x05, 0x0020}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0006}, {0xaa, 0x04, 0x0017}, {0xaa, 0x05, 0x0026}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0007}, {0xaa, 0x04, 0x0019}, {0xaa, 0x05, 0x0022}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0008}, {0xaa, 0x04, 0x0021}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x0009}, {0xaa, 0x04, 0x0023}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x000d}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x000a}, {0xaa, 0x04, 0x0025}, {0xaa, 0x05, 0x00aa}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x000b}, {0xaa, 0x04, 0x00ec}, {0xaa, 0x05, 0x002e}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x03, 0x000c}, {0xaa, 0x04, 0x00fa}, {0xaa, 0x05, 0x002a}, {0xaa, 0x06, 0x0005}, {0xaa, 0x08, 0x0000}, {0xaa, 0x07, 0x000d}, {0xaa, 0x01, 0x0005}, {0xaa, 0x94, 0x0002}, {0xaa, 0x90, 0x0000}, {0xaa, 0x91, 0x0010}, {0xaa, 0x10, 0x0064}, {0xaa, 0x9b, 0x00f0}, {0xaa, 0x9c, 0x0002}, {0xaa, 0x14, 0x001a}, {0xaa, 0x20, 0x0080}, {0xaa, 0x22, 0x0080}, {0xaa, 0x24, 0x0080}, {0xaa, 0x26, 0x0080}, {0xaa, 0x00, 0x0084}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xaa, 0xa8, 0x0080}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, {0xa1, 0x01, 0x0002}, {0xa1, 0x01, 0x0008}, {0xa1, 0x01, 0x0180}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 
clock ? */ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x52, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf7, ZC3XX_R10B_RGB01}, {0xa0, 0xf7, ZC3XX_R10C_RGB02}, {0xa0, 0xf7, ZC3XX_R10D_RGB10}, {0xa0, 0x52, ZC3XX_R10E_RGB11}, {0xa0, 0xf7, ZC3XX_R10F_RGB12}, {0xa0, 0xf7, ZC3XX_R110_RGB20}, {0xa0, 0xf7, ZC3XX_R111_RGB21}, {0xa0, 0x52, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x0d, 0x0003}, {0xaa, 0x0c, 0x0020}, {0xaa, 0x0e, 0x000e}, {0xaa, 0x0f, 0x0002}, {0xaa, 0x1c, 0x000d}, {0xaa, 0x1d, 0x0002}, {0xaa, 0x20, 0x0080}, {0xaa, 0x22, 0x0080}, {0xaa, 0x24, 0x0080}, {0xaa, 0x26, 0x0080}, {0xaa, 0x00, 0x0084}, {0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH}, {0xa0, 0x0d, ZC3XX_R0A4_EXPOSURETIMELOW}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x1a, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x4b, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xc8, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xd8, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xea, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action icm105a_50HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ {0xaa, 0x0c, 0x0020}, /* 00,0c,20,aa */ {0xaa, 0x0e, 0x000e}, /* 00,0e,0e,aa */ {0xaa, 0x0f, 0x0002}, /* 00,0f,02,aa */ 
/* NOTE(review): each usb_action entry is {req, val, idx}. Entries with req
 * 0xa0 write the value to a ZC3xx bridge register (ZC3XX_R* macro); entries
 * with req 0xaa appear to be sensor writes routed through the bridge's I2C
 * interface (cf. the explicit R092/R093/R090 I2C sequence elsewhere in this
 * file) -- confirm against the command dispatcher. The trailing comments are
 * the register trace from the original Windows .inf driver. */
	{0xaa, 0x1c, 0x000d},				/* 00,1c,0d,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x0d, ZC3XX_R0A4_EXPOSURETIMELOW},	/* 00,a4,0d,cc */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* 01,90,00,cc */
	{0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},	/* 01,91,04,cc */
	{0xa0, 0x1a, ZC3XX_R192_EXPOSURELIMITLOW},	/* 01,92,1a,cc */
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* 01,95,00,cc */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},	/* 01,96,00,cc */
	{0xa0, 0x4b, ZC3XX_R197_ANTIFLICKERLOW},	/* 01,97,4b,cc */
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},		/* 01,8c,10,cc */
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},		/* 01,8f,20,cc */
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},	/* 01,a9,10,cc */
	{0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP},	/* 01,aa,12,cc */
	{0xa0, 0xc8, ZC3XX_R01D_HSYNC_0},		/* 00,1d,c8,cc */
	{0xa0, 0xd8, ZC3XX_R01E_HSYNC_1},		/* 00,1e,d8,cc */
	{0xa0, 0xea, ZC3XX_R01F_HSYNC_2},		/* 00,1f,ea,cc */
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},		/* 00,20,ff,cc */
	{}
};

/* ICM105A: 50 Hz anti-flicker sequence -- sets sensor timing plus the
 * bridge's exposure limit / anti-flicker period registers (R190-R197) for
 * mains flicker at 50 Hz.  Counterpart of icm105a_50HZScale above, which
 * presumably serves the downscaled mode -- verify against the mode
 * selection code that picks between the *Scale and plain tables. */
static const struct usb_action icm105a_50HZ[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},		/* 00,19,00,cc */
	{0xaa, 0x0d, 0x0003},				/* 00,0d,03,aa */
	{0xaa, 0x0c, 0x008c},				/* 00,0c,8c,aa */
	{0xaa, 0x0e, 0x0095},				/* 00,0e,95,aa */
	{0xaa, 0x0f, 0x0002},				/* 00,0f,02,aa */
	{0xaa, 0x1c, 0x0094},				/* 00,1c,94,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x94, ZC3XX_R0A4_EXPOSURETIMELOW},	/* 00,a4,94,cc */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* 01,90,00,cc */
	{0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},	/* 01,91,04,cc */
	{0xa0, 0x20, ZC3XX_R192_EXPOSURELIMITLOW},	/* 01,92,20,cc */
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* 01,95,00,cc */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},	/* 01,96,00,cc */
	{0xa0, 0x84, ZC3XX_R197_ANTIFLICKERLOW},	/* 01,97,84,cc */
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},		/* 01,8c,10,cc */
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},		/* 01,8f,20,cc */
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},	/* 01,a9,10,cc */
	{0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP},	/* 01,aa,12,cc */
	{0xa0, 0xe3, ZC3XX_R01D_HSYNC_0},		/* 00,1d,e3,cc */
	{0xa0, 0xec, ZC3XX_R01E_HSYNC_1},		/* 00,1e,ec,cc */
	{0xa0, 0xf5, ZC3XX_R01F_HSYNC_2},		/* 00,1f,f5,cc */
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},		/* 00,20,ff,cc */
	{0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN},	/* 01,a7,00,cc */
	{0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN},		/* 01,a8,c0,cc */
	{}
};

/* ICM105A: 60 Hz anti-flicker sequence for the (presumably downscaled)
 * "Scale" mode -- same register layout as the 50 Hz tables, with timing
 * values tuned for 60 Hz mains. */
static const struct usb_action icm105a_60HZScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},		/* 00,19,00,cc */
	{0xaa, 0x0d, 0x0003},				/* 00,0d,03,aa */
	{0xaa, 0x0c, 0x0004},				/* 00,0c,04,aa */
	{0xaa, 0x0e, 0x000d},				/* 00,0e,0d,aa */
	{0xaa, 0x0f, 0x0002},				/* 00,0f,02,aa */
	{0xaa, 0x1c, 0x0008},				/* 00,1c,08,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x08, ZC3XX_R0A4_EXPOSURETIMELOW},	/* 00,a4,08,cc */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* 01,90,00,cc */
	{0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},	/* 01,91,04,cc */
	{0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW},	/* 01,92,10,cc */
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* 01,95,00,cc */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},	/* 01,96,00,cc */
	{0xa0, 0x41, ZC3XX_R197_ANTIFLICKERLOW},	/* 01,97,41,cc */
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},		/* 01,8c,10,cc */
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},		/* 01,8f,20,cc */
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},	/* 01,a9,10,cc
*/
	{0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP},	/* 01,aa,12,cc */
	{0xa0, 0xc1, ZC3XX_R01D_HSYNC_0},		/* 00,1d,c1,cc */
	{0xa0, 0xd4, ZC3XX_R01E_HSYNC_1},		/* 00,1e,d4,cc */
	{0xa0, 0xe8, ZC3XX_R01F_HSYNC_2},		/* 00,1f,e8,cc */
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},		/* 00,20,ff,cc */
	{}
};

/* ICM105A: 60 Hz anti-flicker sequence (non-"Scale" counterpart of
 * icm105a_60HZScale).  Trailing comments are the original .inf trace. */
static const struct usb_action icm105a_60HZ[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},		/* 00,19,00,cc */
	{0xaa, 0x0d, 0x0003},				/* 00,0d,03,aa */
	{0xaa, 0x0c, 0x0008},				/* 00,0c,08,aa */
	{0xaa, 0x0e, 0x0086},				/* 00,0e,86,aa */
	{0xaa, 0x0f, 0x0002},				/* 00,0f,02,aa */
	{0xaa, 0x1c, 0x0085},				/* 00,1c,85,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x85, ZC3XX_R0A4_EXPOSURETIMELOW},	/* 00,a4,85,cc */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* 01,90,00,cc */
	{0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},	/* 01,91,04,cc */
	{0xa0, 0x08, ZC3XX_R192_EXPOSURELIMITLOW},	/* 01,92,08,cc */
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* 01,95,00,cc */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},	/* 01,96,00,cc */
	{0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW},	/* 01,97,81,cc */
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},		/* 01,8c,10,cc */
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},		/* 01,8f,20,cc */
	{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},	/* 01,a9,10,cc */
	{0xa0, 0x12, ZC3XX_R1AA_DIGITALGAINSTEP},	/* 01,aa,12,cc */
	{0xa0, 0xc2, ZC3XX_R01D_HSYNC_0},		/* 00,1d,c2,cc */
	{0xa0, 0xd6, ZC3XX_R01E_HSYNC_1},		/* 00,1e,d6,cc */
	{0xa0, 0xea, ZC3XX_R01F_HSYNC_2},		/* 00,1f,ea,cc */
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},		/* 00,20,ff,cc */
	{0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN},	/* 01,a7,00,cc */
	{0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN},		/* 01,a8,c0,cc */
	{}
};

/* ICM105A: flicker compensation disabled, "Scale" mode.  Note the digital
 * limit/gain-step registers (R1A9/R1AA) are zeroed here, unlike the 50/60 Hz
 * tables.  ("NoFliker" [sic] spelling is kept -- it is the identifier used
 * throughout this driver.) */
static const struct usb_action icm105a_NoFlikerScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},		/* 00,19,00,cc */
	{0xaa, 0x0d, 0x0003},				/* 00,0d,03,aa */
	{0xaa, 0x0c, 0x0004},				/* 00,0c,04,aa */
	{0xaa, 0x0e, 0x000d},				/* 00,0e,0d,aa */
	{0xaa, 0x0f, 0x0002},				/* 00,0f,02,aa */
	{0xaa, 0x1c, 0x0000},				/* 00,1c,00,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x00, ZC3XX_R0A4_EXPOSURETIMELOW},	/* 00,a4,00,cc */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* 01,90,00,cc */
	{0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},	/* 01,91,04,cc */
	{0xa0, 0x20, ZC3XX_R192_EXPOSURELIMITLOW},	/* 01,92,20,cc */
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* 01,95,00,cc */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},	/* 01,96,00,cc */
	{0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW},	/* 01,97,10,cc */
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},		/* 01,8c,10,cc */
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},		/* 01,8f,20,cc */
	{0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},	/* 01,a9,00,cc */
	{0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},	/* 01,aa,00,cc */
	{0xa0, 0xc1, ZC3XX_R01D_HSYNC_0},		/* 00,1d,c1,cc */
	{0xa0, 0xd4, ZC3XX_R01E_HSYNC_1},		/* 00,1e,d4,cc */
	{0xa0, 0xe8, ZC3XX_R01F_HSYNC_2},		/* 00,1f,e8,cc */
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},		/* 00,20,ff,cc */
	{}
};

/* ICM105A: flicker compensation disabled, non-"Scale" mode. */
static const struct usb_action icm105a_NoFliker[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},		/* 00,19,00,cc */
	{0xaa, 0x0d, 0x0003},				/* 00,0d,03,aa */
	{0xaa, 0x0c, 0x0004},				/* 00,0c,04,aa */
	{0xaa, 0x0e, 0x0081},				/* 00,0e,81,aa */
	{0xaa, 0x0f, 0x0002},				/* 00,0f,02,aa */
	{0xaa, 0x1c, 0x0080},				/* 00,1c,80,aa */
	{0xaa, 0x1d, 0x0002},				/* 00,1d,02,aa */
	{0xaa, 0x20, 0x0080},				/* 00,20,80,aa */
	{0xaa, 0x22, 0x0080},				/* 00,22,80,aa */
	{0xaa, 0x24, 0x0080},				/* 00,24,80,aa */
	{0xaa, 0x26, 0x0080},				/* 00,26,80,aa */
	{0xaa, 0x00, 0x0084},				/* 00,00,84,aa */
	{0xa0, 0x02, ZC3XX_R0A3_EXPOSURETIMEHIGH},	/* 00,a3,02,cc */
	{0xa0, 0x80,
ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,80,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,04,cc */ {0xa0, 0x20, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,20,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0xc1, ZC3XX_R01D_HSYNC_0}, /* 00,1d,c1,cc */ {0xa0, 0xd4, ZC3XX_R01E_HSYNC_1}, /* 00,1e,d4,cc */ {0xa0, 0xe8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,e8,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN}, /* 01,a7,00,cc */ {0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,c0,cc */ {} }; static const struct usb_action mc501cb_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xd8, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d8,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */ {0xa0, 0xde, ZC3XX_R09C_WINHEIGHTLOW}, /* 
00,9c,de,cc */ {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */ {0xa0, 0x33, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,33,cc */ {0xa0, 0x34, ZC3XX_R087_EXPTIMEMID}, /* 00,87,34,cc */ {0xa0, 0x35, ZC3XX_R088_EXPTIMELOW}, /* 00,88,35,cc */ {0xa0, 0xb0, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,b0,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xaa, 0x01, 0x0001}, /* 00,01,01,aa */ {0xaa, 0x01, 0x0003}, /* 00,01,03,aa */ {0xaa, 0x01, 0x0001}, /* 00,01,01,aa */ {0xaa, 0x03, 0x0000}, /* 00,03,00,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x11, 0x0080}, /* 00,11,80,aa */ {0xaa, 0x12, 0x0000}, /* 00,12,00,aa */ {0xaa, 0x13, 0x0000}, /* 00,13,00,aa */ {0xaa, 0x14, 0x0000}, /* 00,14,00,aa */ {0xaa, 0x15, 0x0000}, /* 00,15,00,aa */ {0xaa, 0x16, 0x0000}, /* 00,16,00,aa */ {0xaa, 0x17, 0x0001}, /* 00,17,01,aa */ {0xaa, 0x18, 0x00de}, /* 00,18,de,aa */ {0xaa, 0x19, 0x0002}, /* 00,19,02,aa */ {0xaa, 0x1a, 0x0086}, /* 00,1a,86,aa */ {0xaa, 0x20, 0x00a8}, /* 00,20,a8,aa */ {0xaa, 0x22, 0x0000}, /* 00,22,00,aa */ {0xaa, 0x23, 0x0000}, /* 00,23,00,aa */ {0xaa, 0x24, 0x0000}, /* 00,24,00,aa */ {0xaa, 0x40, 0x0033}, /* 00,40,33,aa */ {0xaa, 0x41, 0x0077}, /* 00,41,77,aa */ {0xaa, 0x42, 0x0053}, /* 00,42,53,aa */ {0xaa, 0x43, 0x00b0}, /* 00,43,b0,aa */ {0xaa, 0x4b, 0x0001}, /* 00,4b,01,aa */ {0xaa, 0x72, 0x0020}, /* 00,72,20,aa */ {0xaa, 0x73, 0x0000}, /* 00,73,00,aa */ {0xaa, 0x80, 0x0000}, /* 00,80,00,aa */ {0xaa, 0x85, 0x0050}, /* 00,85,50,aa */ {0xaa, 0x91, 0x0070}, /* 00,91,70,aa */ {0xaa, 0x92, 0x0072}, /* 00,92,72,aa */ {0xaa, 0x03, 0x0001}, /* 00,03,01,aa */ {0xaa, 0x10, 0x00a0}, /* 00,10,a0,aa */ {0xaa, 0x11, 0x0001}, /* 00,11,01,aa */ {0xaa, 0x30, 0x0000}, /* 00,30,00,aa */ {0xaa, 0x60, 0x0000}, /* 00,60,00,aa */ {0xaa, 0xa0, 0x001a}, /* 00,a0,1a,aa */ {0xaa, 0xa1, 0x0000}, /* 00,a1,00,aa */ {0xaa, 0xa2, 0x003f}, /* 00,a2,3f,aa */ {0xaa, 0xa3, 0x0028}, /* 00,a3,28,aa */ {0xaa, 0xa4, 0x0010}, /* 
00,a4,10,aa */ {0xaa, 0xa5, 0x0020}, /* 00,a5,20,aa */ {0xaa, 0xb1, 0x0044}, /* 00,b1,44,aa */ {0xaa, 0xd0, 0x0001}, /* 00,d0,01,aa */ {0xaa, 0xd1, 0x0085}, /* 00,d1,85,aa */ {0xaa, 0xd2, 0x0080}, /* 00,d2,80,aa */ {0xaa, 0xd3, 0x0080}, /* 00,d3,80,aa */ {0xaa, 0xd4, 0x0080}, /* 00,d4,80,aa */ {0xaa, 0xd5, 0x0080}, /* 00,d5,80,aa */ {0xaa, 0xc0, 0x00c3}, /* 00,c0,c3,aa */ {0xaa, 0xc2, 0x0044}, /* 00,c2,44,aa */ {0xaa, 0xc4, 0x0040}, /* 00,c4,40,aa */ {0xaa, 0xc5, 0x0020}, /* 00,c5,20,aa */ {0xaa, 0xc6, 0x0008}, /* 00,c6,08,aa */ {0xaa, 0x03, 0x0004}, /* 00,03,04,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x40, 0x0030}, /* 00,40,30,aa */ {0xaa, 0x41, 0x0020}, /* 00,41,20,aa */ {0xaa, 0x42, 0x002d}, /* 00,42,2d,aa */ {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x1c, 0x0050}, /* 00,1C,50,aa */ {0xaa, 0x11, 0x0081}, /* 00,11,81,aa */ {0xaa, 0x3b, 0x001d}, /* 00,3b,1D,aa */ {0xaa, 0x3c, 0x004c}, /* 00,3c,4C,aa */ {0xaa, 0x3d, 0x0018}, /* 00,3d,18,aa */ {0xaa, 0x3e, 0x006a}, /* 00,3e,6A,aa */ {0xaa, 0x01, 0x0000}, /* 00,01,00,aa */ {0xaa, 0x52, 0x00ff}, /* 00,52,FF,aa */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ {0xaa, 0x03, 0x0002}, /* 00,03,02,aa */ {0xaa, 0x51, 0x0027}, /* 00,51,27,aa */ {0xaa, 0x52, 0x0020}, /* 00,52,20,aa */ {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x50, 0x0010}, /* 00,50,10,aa */ {0xaa, 0x51, 0x0010}, /* 00,51,10,aa */ {0xaa, 0x54, 0x0010}, /* 00,54,10,aa */ {0xaa, 0x55, 0x0010}, /* 00,55,10,aa */ {0xa0, 0xf0, 0x0199}, /* 01,99,F0,cc */ {0xa0, 0x80, 0x019a}, 
/* 01,9A,80,cc */ {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x001d}, /* 00,36,1D,aa */ {0xaa, 0x37, 0x004c}, /* 00,37,4C,aa */ {0xaa, 0x3b, 0x001d}, /* 00,3B,1D,aa */ {} }; static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d0,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */ {0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,d8,cc */ {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ {0xa0, 0x33, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,33,cc */ {0xa0, 0x34, ZC3XX_R087_EXPTIMEMID}, /* 00,87,34,cc */ {0xa0, 0x35, ZC3XX_R088_EXPTIMELOW}, /* 00,88,35,cc */ {0xa0, 0xb0, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,b0,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xaa, 0x01, 0x0001}, /* 00,01,01,aa */ {0xaa, 0x01, 0x0003}, /* 00,01,03,aa */ {0xaa, 0x01, 0x0001}, /* 00,01,01,aa */ {0xaa, 0x03, 0x0000}, /* 00,03,00,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x11, 0x0080}, /* 00,11,80,aa */ {0xaa, 0x12, 0x0000}, /* 00,12,00,aa */ {0xaa, 0x13, 0x0000}, /* 00,13,00,aa 
*/ {0xaa, 0x14, 0x0000}, /* 00,14,00,aa */ {0xaa, 0x15, 0x0000}, /* 00,15,00,aa */ {0xaa, 0x16, 0x0000}, /* 00,16,00,aa */ {0xaa, 0x17, 0x0001}, /* 00,17,01,aa */ {0xaa, 0x18, 0x00d8}, /* 00,18,d8,aa */ {0xaa, 0x19, 0x0002}, /* 00,19,02,aa */ {0xaa, 0x1a, 0x0088}, /* 00,1a,88,aa */ {0xaa, 0x20, 0x00a8}, /* 00,20,a8,aa */ {0xaa, 0x22, 0x0000}, /* 00,22,00,aa */ {0xaa, 0x23, 0x0000}, /* 00,23,00,aa */ {0xaa, 0x24, 0x0000}, /* 00,24,00,aa */ {0xaa, 0x40, 0x0033}, /* 00,40,33,aa */ {0xaa, 0x41, 0x0077}, /* 00,41,77,aa */ {0xaa, 0x42, 0x0053}, /* 00,42,53,aa */ {0xaa, 0x43, 0x00b0}, /* 00,43,b0,aa */ {0xaa, 0x4b, 0x0001}, /* 00,4b,01,aa */ {0xaa, 0x72, 0x0020}, /* 00,72,20,aa */ {0xaa, 0x73, 0x0000}, /* 00,73,00,aa */ {0xaa, 0x80, 0x0000}, /* 00,80,00,aa */ {0xaa, 0x85, 0x0050}, /* 00,85,50,aa */ {0xaa, 0x91, 0x0070}, /* 00,91,70,aa */ {0xaa, 0x92, 0x0072}, /* 00,92,72,aa */ {0xaa, 0x03, 0x0001}, /* 00,03,01,aa */ {0xaa, 0x10, 0x00a0}, /* 00,10,a0,aa */ {0xaa, 0x11, 0x0001}, /* 00,11,01,aa */ {0xaa, 0x30, 0x0000}, /* 00,30,00,aa */ {0xaa, 0x60, 0x0000}, /* 00,60,00,aa */ {0xaa, 0xa0, 0x001a}, /* 00,a0,1a,aa */ {0xaa, 0xa1, 0x0000}, /* 00,a1,00,aa */ {0xaa, 0xa2, 0x003f}, /* 00,a2,3f,aa */ {0xaa, 0xa3, 0x0028}, /* 00,a3,28,aa */ {0xaa, 0xa4, 0x0010}, /* 00,a4,10,aa */ {0xaa, 0xa5, 0x0020}, /* 00,a5,20,aa */ {0xaa, 0xb1, 0x0044}, /* 00,b1,44,aa */ {0xaa, 0xd0, 0x0001}, /* 00,d0,01,aa */ {0xaa, 0xd1, 0x0085}, /* 00,d1,85,aa */ {0xaa, 0xd2, 0x0080}, /* 00,d2,80,aa */ {0xaa, 0xd3, 0x0080}, /* 00,d3,80,aa */ {0xaa, 0xd4, 0x0080}, /* 00,d4,80,aa */ {0xaa, 0xd5, 0x0080}, /* 00,d5,80,aa */ {0xaa, 0xc0, 0x00c3}, /* 00,c0,c3,aa */ {0xaa, 0xc2, 0x0044}, /* 00,c2,44,aa */ {0xaa, 0xc4, 0x0040}, /* 00,c4,40,aa */ {0xaa, 0xc5, 0x0020}, /* 00,c5,20,aa */ {0xaa, 0xc6, 0x0008}, /* 00,c6,08,aa */ {0xaa, 0x03, 0x0004}, /* 00,03,04,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x40, 0x0030}, /* 00,40,30,aa */ {0xaa, 0x41, 0x0020}, /* 00,41,20,aa */ {0xaa, 0x42, 0x002d}, /* 00,42,2d,aa 
*/ {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x1c, 0x0050}, /* 00,1c,50,aa */ {0xaa, 0x11, 0x0081}, /* 00,11,81,aa */ {0xaa, 0x3b, 0x003a}, /* 00,3b,3A,aa */ {0xaa, 0x3c, 0x0098}, /* 00,3c,98,aa */ {0xaa, 0x3d, 0x0030}, /* 00,3d,30,aa */ {0xaa, 0x3e, 0x00d4}, /* 00,3E,D4,aa */ {0xaa, 0x01, 0x0000}, /* 00,01,00,aa */ {0xaa, 0x52, 0x00ff}, /* 00,52,FF,aa */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ {0xaa, 0x03, 0x0002}, /* 00,03,02,aa */ {0xaa, 0x51, 0x004e}, /* 00,51,4E,aa */ {0xaa, 0x52, 0x0041}, /* 00,52,41,aa */ {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x50, 0x0010}, /* 00,50,10,aa */ {0xaa, 0x51, 0x0010}, /* 00,51,10,aa */ {0xaa, 0x54, 0x0010}, /* 00,54,10,aa */ {0xaa, 0x55, 0x0010}, /* 00,55,10,aa */ {0xa0, 0xf0, 0x0199}, /* 01,99,F0,cc */ {0xa0, 0x80, 0x019a}, /* 01,9A,80,cc */ {} }; static const struct usb_action mc501cb_50HZ[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x001d}, /* 00,36,1D,aa */ {0xaa, 0x37, 0x004c}, /* 00,37,4C,aa */ {0xaa, 0x3b, 0x001d}, /* 00,3B,1D,aa */ {0xaa, 0x3c, 0x004c}, /* 00,3C,4C,aa */ {0xaa, 0x3d, 0x001d}, /* 00,3D,1D,aa */ {0xaa, 0x3e, 0x004c}, /* 00,3E,4C,aa */ {} }; static const struct usb_action mc501cb_50HZScale[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x003a}, /* 00,36,3A,aa */ {0xaa, 0x37, 0x0098}, /* 00,37,98,aa */ {0xaa, 0x3b, 0x003a}, /* 00,3B,3A,aa */ {0xaa, 0x3c, 0x0098}, /* 00,3C,98,aa */ {0xaa, 0x3d, 0x003a}, /* 00,3D,3A,aa 
*/ {0xaa, 0x3e, 0x0098}, /* 00,3E,98,aa */ {} }; static const struct usb_action mc501cb_60HZ[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x0018}, /* 00,36,18,aa */ {0xaa, 0x37, 0x006a}, /* 00,37,6A,aa */ {0xaa, 0x3d, 0x0018}, /* 00,3D,18,aa */ {0xaa, 0x3e, 0x006a}, /* 00,3E,6A,aa */ {0xaa, 0x3b, 0x0018}, /* 00,3B,18,aa */ {0xaa, 0x3c, 0x006a}, /* 00,3C,6A,aa */ {} }; static const struct usb_action mc501cb_60HZScale[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x0030}, /* 00,36,30,aa */ {0xaa, 0x37, 0x00d4}, /* 00,37,D4,aa */ {0xaa, 0x3d, 0x0030}, /* 00,3D,30,aa */ {0xaa, 0x3e, 0x00d4}, /* 00,3E,D4,aa */ {0xaa, 0x3b, 0x0030}, /* 00,3B,30,aa */ {0xaa, 0x3c, 0x00d4}, /* 00,3C,D4,aa */ {} }; static const struct usb_action mc501cb_NoFliker[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x0018}, /* 00,36,18,aa */ {0xaa, 0x37, 0x006a}, /* 00,37,6A,aa */ {0xaa, 0x3d, 0x0018}, /* 00,3D,18,aa */ {0xaa, 0x3e, 0x006a}, /* 00,3E,6A,aa */ {0xaa, 0x3b, 0x0018}, /* 00,3B,18,aa */ {0xaa, 0x3c, 0x006a}, /* 00,3C,6A,aa */ {} }; static const struct usb_action mc501cb_NoFlikerScale[] = { {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ {0xaa, 0x36, 0x0030}, /* 00,36,30,aa */ {0xaa, 0x37, 0x00d4}, /* 00,37,D4,aa */ {0xaa, 0x3d, 0x0030}, /* 00,3D,30,aa */ {0xaa, 0x3e, 0x00d4}, /* 00,3E,D4,aa */ {0xaa, 0x3b, 0x0030}, /* 00,3B,30,aa */ {0xaa, 0x3c, 0x00d4}, /* 00,3C,D4,aa */ {} }; /* from zs211.inf */ static const struct usb_action ov7620_Initial[] = { /* 640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, /* 00,02,40,cc */ {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x06, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,06,cc */ {0xa0, 0x02, ZC3XX_R083_RGAINADDR}, /* 
00,83,02,cc */ {0xa0, 0x01, ZC3XX_R085_BGAINADDR}, /* 00,85,01,cc */ {0xa0, 0x80, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,80,cc */ {0xa0, 0x81, ZC3XX_R087_EXPTIMEMID}, /* 00,87,81,cc */ {0xa0, 0x10, ZC3XX_R088_EXPTIMELOW}, /* 00,88,10,cc */ {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,a1,cc */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xd8, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d8,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xde, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,de,cc */ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */ {0xaa, 0x12, 0x0088}, /* 00,12,88,aa */ {0xaa, 0x12, 0x0048}, /* 00,12,48,aa */ {0xaa, 0x75, 0x008a}, /* 00,75,8a,aa */ {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ {0xaa, 0x04, 0x0000}, /* 00,04,00,aa */ {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ {0xaa, 0x14, 0x0000}, /* 00,14,00,aa */ {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */ {0xaa, 0x17, 0x0018}, /* 00,17,18,aa */ {0xaa, 0x18, 0x00ba}, /* 00,18,ba,aa */ {0xaa, 0x19, 0x0002}, /* 00,19,02,aa */ {0xaa, 0x1a, 0x00f1}, /* 00,1a,f1,aa */ {0xaa, 0x20, 0x0040}, /* 00,20,40,aa */ {0xaa, 0x24, 0x0088}, /* 00,24,88,aa */ {0xaa, 0x25, 0x0078}, /* 00,25,78,aa */ {0xaa, 0x27, 0x00f6}, /* 00,27,f6,aa */ {0xaa, 0x28, 0x00a0}, /* 00,28,a0,aa */ {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */ {0xaa, 0x2a, 0x0083}, /* 00,2a,83,aa */ {0xaa, 0x2b, 0x0096}, /* 00,2b,96,aa */ {0xaa, 0x2d, 0x0005}, /* 00,2d,05,aa */ {0xaa, 0x74, 0x0020}, /* 00,74,20,aa */ {0xaa, 0x61, 0x0068}, /* 00,61,68,aa */ {0xaa, 0x64, 0x0088}, /* 
00,64,88,aa */ {0xaa, 0x00, 0x0000}, /* 00,00,00,aa */ {0xaa, 0x06, 0x0080}, /* 00,06,80,aa */ {0xaa, 0x01, 0x0090}, /* 00,01,90,aa */ {0xaa, 0x02, 0x0030}, /* 00,02,30,aa */ {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,77,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x68, ZC3XX_R116_RGAIN}, /* 01,16,68,cc */ {0xa0, 0x52, ZC3XX_R118_BGAIN}, /* 01,18,52,cc */ {0xa0, 0x40, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,40,cc */ {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,50,cc */ {} }; static const struct usb_action ov7620_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x50, ZC3XX_R002_CLOCKSELECT}, /* 00,02,50,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ /* mx change? 
*/ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x06, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,06,cc */ {0xa0, 0x02, ZC3XX_R083_RGAINADDR}, /* 00,83,02,cc */ {0xa0, 0x01, ZC3XX_R085_BGAINADDR}, /* 00,85,01,cc */ {0xa0, 0x80, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,80,cc */ {0xa0, 0x81, ZC3XX_R087_EXPTIMEMID}, /* 00,87,81,cc */ {0xa0, 0x10, ZC3XX_R088_EXPTIMELOW}, /* 00,88,10,cc */ {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,a1,cc */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d0,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xd6, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,d6,cc */ /* OV7648 00,9c,d8,cc */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ {0xaa, 0x12, 0x0088}, /* 00,12,88,aa */ {0xaa, 0x12, 0x0048}, /* 00,12,48,aa */ {0xaa, 0x75, 0x008a}, /* 00,75,8a,aa */ {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ {0xaa, 0x04, 0x0000}, /* 00,04,00,aa */ {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ {0xaa, 0x14, 0x0000}, /* 00,14,00,aa */ {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */ {0xaa, 0x24, 0x0088}, /* 00,24,88,aa */ {0xaa, 0x25, 0x0078}, /* 00,25,78,aa */ {0xaa, 0x17, 0x0018}, /* 00,17,18,aa */ {0xaa, 0x18, 0x00ba}, /* 00,18,ba,aa */ {0xaa, 0x19, 0x0002}, /* 00,19,02,aa */ {0xaa, 0x1a, 0x00f2}, /* 00,1a,f2,aa */ {0xaa, 0x20, 0x0040}, /* 00,20,40,aa */ {0xaa, 0x27, 0x00f6}, /* 00,27,f6,aa */ {0xaa, 0x28, 0x00a0}, /* 00,28,a0,aa */ {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */ {0xaa, 0x2a, 0x0083}, /* 00,2a,83,aa */ 
{0xaa, 0x2b, 0x0096}, /* 00,2b,96,aa */ {0xaa, 0x2d, 0x0005}, /* 00,2d,05,aa */ {0xaa, 0x74, 0x0020}, /* 00,74,20,aa */ {0xaa, 0x61, 0x0068}, /* 00,61,68,aa */ {0xaa, 0x64, 0x0088}, /* 00,64,88,aa */ {0xaa, 0x00, 0x0000}, /* 00,00,00,aa */ {0xaa, 0x06, 0x0080}, /* 00,06,80,aa */ {0xaa, 0x01, 0x0090}, /* 00,01,90,aa */ {0xaa, 0x02, 0x0030}, /* 00,02,30,aa */ {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,77,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x68, ZC3XX_R116_RGAIN}, /* 01,16,68,cc */ {0xa0, 0x52, ZC3XX_R118_BGAIN}, /* 01,18,52,cc */ {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,50,cc */ {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,50,cc */ {} }; static const struct usb_action ov7620_50HZ[] = { {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ {0xaa, 0x2b, 0x0096}, /* 00,2b,96,aa */ {0xaa, 0x75, 0x008a}, /* 00,75,8a,aa */ {0xaa, 0x2d, 0x0005}, /* 00,2d,05,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,04,cc */ {0xa0, 0x18, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,18,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x83, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,83,cc */ {0xaa, 0x10, 0x0082}, /* 00,10,82,aa */ {0xaa, 0x76, 0x0003}, /* 00,76,03,aa */ /* {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, * 00,02,40,cc * if mode0 (640x480) */ {} }; static const struct usb_action ov7620_60HZ[] = { {0xaa, 0x13, 0x00a3}, /* 
00,13,a3,aa */ /* (bug in zs211.inf) */ {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ {0xaa, 0x2b, 0x0000}, /* 00,2b,00,aa */ {0xaa, 0x75, 0x008a}, /* 00,75,8a,aa */ {0xaa, 0x2d, 0x0005}, /* 00,2d,05,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,04,cc */ {0xa0, 0x18, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,18,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x83, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,83,cc */ {0xaa, 0x10, 0x0020}, /* 00,10,20,aa */ {0xaa, 0x76, 0x0003}, /* 00,76,03,aa */ /* {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, * 00,02,40,cc * if mode0 (640x480) */ /* ?? in gspca v1, it was {0xa0, 0x00, 0x0039}, * 00,00,00,dd * {0xa1, 0x01, 0x0037}, */ {} }; static const struct usb_action ov7620_NoFliker[] = { {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ /* (bug in zs211.inf) */ {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ {0xaa, 0x2b, 0x0000}, /* 00,2b,00,aa */ {0xaa, 0x75, 0x008e}, /* 00,75,8e,aa */ {0xaa, 0x2d, 0x0001}, /* 00,2d,01,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,04,cc */ {0xa0, 0x18, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,18,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,01,cc */ /* {0xa0, 0x44, ZC3XX_R002_CLOCKSELECT}, * 00,02,44,cc * if mode1 (320x240) */ /* ?? 
was {0xa0, 0x00, 0x0039}, * 00,00,00,dd * {0xa1, 0x01, 0x0037}, */ {} }; static const struct usb_action ov7630c_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x06, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x12, 0x0080}, {0xa0, 0x02, ZC3XX_R083_RGAINADDR}, {0xa0, 0x01, ZC3XX_R085_BGAINADDR}, {0xa0, 0x90, ZC3XX_R086_EXPTIMEHIGH}, {0xa0, 0x91, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x10, ZC3XX_R088_EXPTIMELOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xaa, 0x12, 0x0069}, {0xaa, 0x04, 0x0020}, {0xaa, 0x06, 0x0050}, {0xaa, 0x13, 0x0083}, {0xaa, 0x14, 0x0000}, {0xaa, 0x15, 0x0024}, {0xaa, 0x17, 0x0018}, {0xaa, 0x18, 0x00ba}, {0xaa, 0x19, 0x0002}, {0xaa, 0x1a, 0x00f6}, {0xaa, 0x1b, 0x0002}, {0xaa, 0x20, 0x00c2}, {0xaa, 0x24, 0x0060}, {0xaa, 0x25, 0x0040}, {0xaa, 0x26, 0x0030}, {0xaa, 0x27, 0x00ea}, {0xaa, 0x28, 0x00a0}, {0xaa, 0x21, 0x0000}, {0xaa, 0x2a, 0x0081}, {0xaa, 0x2b, 0x0096}, {0xaa, 0x2d, 0x0094}, {0xaa, 0x2f, 0x003d}, {0xaa, 0x30, 0x0024}, {0xaa, 0x60, 0x0000}, {0xaa, 0x61, 0x0040}, {0xaa, 0x68, 0x007c}, {0xaa, 0x6f, 0x0015}, {0xaa, 0x75, 0x0088}, {0xaa, 0x77, 0x00b5}, {0xaa, 0x01, 0x0060}, {0xaa, 0x02, 0x0060}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x04, 
ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x46, ZC3XX_R118_BGAIN}, {0xa0, 0x04, ZC3XX_R113_RGB03}, /* 0x10, */ {0xa1, 0x01, 0x0002}, {0xa0, 0x50, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf8, ZC3XX_R10B_RGB01}, {0xa0, 0xf8, ZC3XX_R10C_RGB02}, {0xa0, 0xf8, ZC3XX_R10D_RGB10}, {0xa0, 0x50, ZC3XX_R10E_RGB11}, {0xa0, 0xf8, ZC3XX_R10F_RGB12}, {0xa0, 0xf8, ZC3XX_R110_RGB20}, {0xa0, 0xf8, ZC3XX_R111_RGB21}, {0xa0, 0x50, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x01, ZC3XX_R120_GAMMA00}, /* gamma 2 ?*/ {0xa0, 0x0c, ZC3XX_R121_GAMMA01}, {0xa0, 0x1f, ZC3XX_R122_GAMMA02}, {0xa0, 0x3a, ZC3XX_R123_GAMMA03}, {0xa0, 0x53, ZC3XX_R124_GAMMA04}, {0xa0, 0x6d, ZC3XX_R125_GAMMA05}, {0xa0, 0x85, ZC3XX_R126_GAMMA06}, {0xa0, 0x9c, ZC3XX_R127_GAMMA07}, {0xa0, 0xb0, ZC3XX_R128_GAMMA08}, {0xa0, 0xc2, ZC3XX_R129_GAMMA09}, {0xa0, 0xd1, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xde, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xe9, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xf2, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xf9, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x05, ZC3XX_R130_GAMMA10}, {0xa0, 0x0f, ZC3XX_R131_GAMMA11}, {0xa0, 0x16, ZC3XX_R132_GAMMA12}, {0xa0, 0x1a, ZC3XX_R133_GAMMA13}, {0xa0, 0x19, ZC3XX_R134_GAMMA14}, {0xa0, 0x19, ZC3XX_R135_GAMMA15}, {0xa0, 0x17, ZC3XX_R136_GAMMA16}, {0xa0, 0x15, ZC3XX_R137_GAMMA17}, {0xa0, 0x12, ZC3XX_R138_GAMMA18}, {0xa0, 0x10, ZC3XX_R139_GAMMA19}, {0xa0, 0x0e, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x0b, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x09, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x08, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x06, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x03, ZC3XX_R13F_GAMMA1F}, {0xa0, 
0x50, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf8, ZC3XX_R10B_RGB01}, {0xa0, 0xf8, ZC3XX_R10C_RGB02}, {0xa0, 0xf8, ZC3XX_R10D_RGB10}, {0xa0, 0x50, ZC3XX_R10E_RGB11}, {0xa0, 0xf8, ZC3XX_R10F_RGB12}, {0xa0, 0xf8, ZC3XX_R110_RGB20}, {0xa0, 0xf8, ZC3XX_R111_RGB21}, {0xa0, 0x50, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xaa, 0x10, 0x001b}, {0xaa, 0x76, 0x0002}, {0xaa, 0x2a, 0x0081}, {0xaa, 0x2b, 0x0000}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x01, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xb8, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x37, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x26, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE}, {0xaa, 0x13, 0x0083}, /* 40 */ {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action ov7630c_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x06, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0xa1, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x12, 0x0080}, {0xa0, 0x02, ZC3XX_R083_RGAINADDR}, {0xa0, 0x01, ZC3XX_R085_BGAINADDR}, {0xa0, 0x90, ZC3XX_R086_EXPTIMEHIGH}, {0xa0, 0x91, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x10, ZC3XX_R088_EXPTIMELOW}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe6, 
ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, {0xaa, 0x12, 0x0069}, /* i2c */ {0xaa, 0x04, 0x0020}, {0xaa, 0x06, 0x0050}, {0xaa, 0x13, 0x00c3}, {0xaa, 0x14, 0x0000}, {0xaa, 0x15, 0x0024}, {0xaa, 0x19, 0x0003}, {0xaa, 0x1a, 0x00f6}, {0xaa, 0x1b, 0x0002}, {0xaa, 0x20, 0x00c2}, {0xaa, 0x24, 0x0060}, {0xaa, 0x25, 0x0040}, {0xaa, 0x26, 0x0030}, {0xaa, 0x27, 0x00ea}, {0xaa, 0x28, 0x00a0}, {0xaa, 0x21, 0x0000}, {0xaa, 0x2a, 0x0081}, {0xaa, 0x2b, 0x0096}, {0xaa, 0x2d, 0x0084}, {0xaa, 0x2f, 0x003d}, {0xaa, 0x30, 0x0024}, {0xaa, 0x60, 0x0000}, {0xaa, 0x61, 0x0040}, {0xaa, 0x68, 0x007c}, {0xaa, 0x6f, 0x0015}, {0xaa, 0x75, 0x0088}, {0xaa, 0x77, 0x00b5}, {0xaa, 0x01, 0x0060}, {0xaa, 0x02, 0x0060}, {0xaa, 0x17, 0x0018}, {0xaa, 0x18, 0x00ba}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x04, ZC3XX_R1A7_CALCGLOBALMEAN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R116_RGAIN}, {0xa0, 0x46, ZC3XX_R118_BGAIN}, {0xa0, 0x04, ZC3XX_R113_RGB03}, {0xa1, 0x01, 0x0002}, {0xa0, 0x4e, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xfe, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf7, ZC3XX_R10D_RGB10}, {0xa0, 0x4d, ZC3XX_R10E_RGB11}, {0xa0, 0xfc, ZC3XX_R10F_RGB12}, {0xa0, 0x00, ZC3XX_R110_RGB20}, {0xa0, 0xf6, ZC3XX_R111_RGB21}, {0xa0, 0x4a, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0008}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? 
*/ {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ {0xa1, 0x01, 0x01c8}, {0xa1, 0x01, 0x01c9}, {0xa1, 0x01, 0x01ca}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ {0xa0, 0x16, ZC3XX_R120_GAMMA00}, /* gamma ~4 */ {0xa0, 0x3a, ZC3XX_R121_GAMMA01}, {0xa0, 0x5b, ZC3XX_R122_GAMMA02}, {0xa0, 0x7c, ZC3XX_R123_GAMMA03}, {0xa0, 0x94, ZC3XX_R124_GAMMA04}, {0xa0, 0xa9, ZC3XX_R125_GAMMA05}, {0xa0, 0xbb, ZC3XX_R126_GAMMA06}, {0xa0, 0xca, ZC3XX_R127_GAMMA07}, {0xa0, 0xd7, ZC3XX_R128_GAMMA08}, {0xa0, 0xe1, ZC3XX_R129_GAMMA09}, {0xa0, 0xea, ZC3XX_R12A_GAMMA0A}, {0xa0, 0xf1, ZC3XX_R12B_GAMMA0B}, {0xa0, 0xf7, ZC3XX_R12C_GAMMA0C}, {0xa0, 0xfc, ZC3XX_R12D_GAMMA0D}, {0xa0, 0xff, ZC3XX_R12E_GAMMA0E}, {0xa0, 0xff, ZC3XX_R12F_GAMMA0F}, {0xa0, 0x20, ZC3XX_R130_GAMMA10}, {0xa0, 0x22, ZC3XX_R131_GAMMA11}, {0xa0, 0x20, ZC3XX_R132_GAMMA12}, {0xa0, 0x1c, ZC3XX_R133_GAMMA13}, {0xa0, 0x16, ZC3XX_R134_GAMMA14}, {0xa0, 0x13, ZC3XX_R135_GAMMA15}, {0xa0, 0x10, ZC3XX_R136_GAMMA16}, {0xa0, 0x0d, ZC3XX_R137_GAMMA17}, {0xa0, 0x0b, ZC3XX_R138_GAMMA18}, {0xa0, 0x09, ZC3XX_R139_GAMMA19}, {0xa0, 0x07, ZC3XX_R13A_GAMMA1A}, {0xa0, 0x06, ZC3XX_R13B_GAMMA1B}, {0xa0, 0x05, ZC3XX_R13C_GAMMA1C}, {0xa0, 0x04, ZC3XX_R13D_GAMMA1D}, {0xa0, 0x00, ZC3XX_R13E_GAMMA1E}, {0xa0, 0x01, ZC3XX_R13F_GAMMA1F}, {0xa0, 0x4e, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xfe, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf7, ZC3XX_R10D_RGB10}, {0xa0, 0x4d, ZC3XX_R10E_RGB11}, {0xa0, 0xfc, ZC3XX_R10F_RGB12}, {0xa0, 0x00, ZC3XX_R110_RGB20}, {0xa0, 0xf6, ZC3XX_R111_RGB21}, {0xa0, 0x4a, ZC3XX_R112_RGB22}, {0xa1, 0x01, 0x0180}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xaa, 0x10, 0x000d}, {0xaa, 0x76, 0x0002}, {0xaa, 0x2a, 0x0081}, {0xaa, 0x2b, 0x0000}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x00, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xd8, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x1b, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 
0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x26, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE}, {0xaa, 0x13, 0x00c3}, {0xa1, 0x01, 0x0180}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action pas106b_Initial_com[] = { /* Sream and Sensor specific */ {0xa1, 0x01, 0x0010}, /* CMOSSensorSelect */ /* System */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* SystemControl */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* SystemControl */ /* Picture size */ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* ClockSelect */ {0xa0, 0x03, 0x003a}, {0xa0, 0x0c, 0x003b}, {0xa0, 0x04, 0x0038}, {} }; static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */ /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* Sream and Sensor specific */ {0xa0, 0x0f, ZC3XX_R010_CMOSSENSORSELECT}, /* Picture size */ {0xa0, 0x00, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0xb0, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x00, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0x90, ZC3XX_R006_FRAMEHEIGHTLOW}, /* System */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* Sream and Sensor specific */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* Sensor Interface */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* Window inside sensor array */ {0xa0, 0x03, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x03, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x28, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x68, ZC3XX_R09E_WINWIDTHLOW}, /* Init the sensor */ {0xaa, 0x02, 0x0004}, {0xaa, 0x08, 0x0000}, {0xaa, 0x09, 0x0005}, {0xaa, 0x0a, 0x0002}, {0xaa, 0x0b, 0x0002}, {0xaa, 0x0c, 0x0005}, {0xaa, 0x0d, 0x0000}, {0xaa, 0x0e, 0x0002}, {0xaa, 0x14, 0x0081}, /* Other registers */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* Frame retreiving */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* Gains */ {0xa0, 
0xa0, ZC3XX_R1A8_DIGITALGAIN}, /* Unknown */ {0xa0, 0x00, 0x01ad}, /* Sharpness */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* Other registers */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* Auto exposure and white balance */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /*Dead pixels */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* EEPROM */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* Other registers */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* Auto exposure and white balance */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /*Dead pixels */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* EEPROM */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x58, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf4, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf4, ZC3XX_R10D_RGB10}, {0xa0, 0x58, ZC3XX_R10E_RGB11}, {0xa0, 0xf4, ZC3XX_R10F_RGB12}, {0xa0, 0xf4, ZC3XX_R110_RGB20}, {0xa0, 0xf4, ZC3XX_R111_RGB21}, {0xa0, 0x58, ZC3XX_R112_RGB22}, /* Auto correction */ {0xa0, 0x03, ZC3XX_R181_WINXSTART}, {0xa0, 0x08, ZC3XX_R182_WINXWIDTH}, {0xa0, 0x16, ZC3XX_R183_WINXCENTER}, {0xa0, 0x03, ZC3XX_R184_WINYSTART}, {0xa0, 0x05, ZC3XX_R185_WINYWIDTH}, {0xa0, 0x14, ZC3XX_R186_WINYCENTER}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, /* Auto exposure and white balance */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xb1, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x87, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* sensor on */ {0xaa, 0x07, 0x00b1}, {0xaa, 0x05, 0x0003}, {0xaa, 0x04, 0x0001}, {0xaa, 0x03, 0x003b}, /* Gains */ {0xa0, 0x20, 
ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x26, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xa0, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, /* Auto correction */ {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, /* AutoCorrectEnable */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* Gains */ {0xa0, 0x40, ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action pas106b_Initial[] = { /* 352x288 */ /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* Sream and Sensor specific */ {0xa0, 0x0f, ZC3XX_R010_CMOSSENSORSELECT}, /* Picture size */ {0xa0, 0x01, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x60, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0x20, ZC3XX_R006_FRAMEHEIGHTLOW}, /* System */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* Sream and Sensor specific */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* Sensor Interface */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* Window inside sensor array */ {0xa0, 0x03, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x03, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0x28, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x68, ZC3XX_R09E_WINWIDTHLOW}, /* Init the sensor */ {0xaa, 0x02, 0x0004}, {0xaa, 0x08, 0x0000}, {0xaa, 0x09, 0x0005}, {0xaa, 0x0a, 0x0002}, {0xaa, 0x0b, 0x0002}, {0xaa, 0x0c, 0x0005}, {0xaa, 0x0d, 0x0000}, {0xaa, 0x0e, 0x0002}, {0xaa, 0x14, 0x0081}, /* Other registers */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* Frame retreiving */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* Gains */ {0xa0, 0xa0, ZC3XX_R1A8_DIGITALGAIN}, /* Unknown */ {0xa0, 0x00, 0x01ad}, /* Sharpness */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* Other registers */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* Auto exposure and white balance */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x80, ZC3XX_R18D_YTARGET}, /*Dead pixels */ {0xa0, 0x08, 
ZC3XX_R250_DEADPIXELSMODE}, /* EEPROM */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* Other registers */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* Auto exposure and white balance */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /*Dead pixels */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* EEPROM */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* JPEG control */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x58, ZC3XX_R10A_RGB00}, /* matrix */ {0xa0, 0xf4, ZC3XX_R10B_RGB01}, {0xa0, 0xf4, ZC3XX_R10C_RGB02}, {0xa0, 0xf4, ZC3XX_R10D_RGB10}, {0xa0, 0x58, ZC3XX_R10E_RGB11}, {0xa0, 0xf4, ZC3XX_R10F_RGB12}, {0xa0, 0xf4, ZC3XX_R110_RGB20}, {0xa0, 0xf4, ZC3XX_R111_RGB21}, {0xa0, 0x58, ZC3XX_R112_RGB22}, /* Auto correction */ {0xa0, 0x03, ZC3XX_R181_WINXSTART}, {0xa0, 0x08, ZC3XX_R182_WINXWIDTH}, {0xa0, 0x16, ZC3XX_R183_WINXCENTER}, {0xa0, 0x03, ZC3XX_R184_WINYSTART}, {0xa0, 0x05, ZC3XX_R185_WINYWIDTH}, {0xa0, 0x14, ZC3XX_R186_WINYCENTER}, {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, /* Auto exposure and white balance */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xb1, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x87, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* sensor on */ {0xaa, 0x07, 0x00b1}, {0xaa, 0x05, 0x0003}, {0xaa, 0x04, 0x0001}, {0xaa, 0x03, 0x003b}, /* Gains */ {0xa0, 0x20, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x26, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xa0, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, /* Auto correction */ {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa1, 0x01, 0x0180}, /* AutoCorrectEnable */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* Gains */ {0xa0, 0x40, 
ZC3XX_R116_RGAIN}, {0xa0, 0x40, ZC3XX_R117_GGAIN}, {0xa0, 0x40, ZC3XX_R118_BGAIN}, {0xa0, 0x00, 0x0007}, /* AutoCorrectEnable */ {0xa0, 0xff, ZC3XX_R018_FRAMELOST}, /* Frame adjust */ {} }; static const struct usb_action pas106b_50HZ[] = { {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x06, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,06,cc */ {0xa0, 0x54, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,54,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x87, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,87,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x30, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,30,cc */ {0xaa, 0x03, 0x0021}, /* 00,03,21,aa */ {0xaa, 0x04, 0x000c}, /* 00,04,0c,aa */ {0xaa, 0x05, 0x0002}, /* 00,05,02,aa */ {0xaa, 0x07, 0x001c}, /* 00,07,1c,aa */ {0xa0, 0x04, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,04,cc */ {} }; static const struct usb_action pas106b_60HZ[] = { {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x06, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,06,cc */ {0xa0, 0x2e, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,2e,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x71, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,71,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x30, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,30,cc */ {0xaa, 0x03, 0x001c}, /* 00,03,1c,aa */ {0xaa, 0x04, 0x0004}, /* 00,04,04,aa */ {0xaa, 0x05, 0x0001}, /* 00,05,01,aa */ {0xaa, 0x07, 0x00c4}, /* 00,07,c4,aa */ {0xa0, 0x04, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,04,cc */ {} }; static const struct usb_action pas106b_NoFliker[] = { {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x06, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,06,cc */ {0xa0, 0x50, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,50,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, 
ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xaa, 0x03, 0x0013}, /* 00,03,13,aa */ {0xaa, 0x04, 0x0000}, /* 00,04,00,aa */ {0xaa, 0x05, 0x0001}, /* 00,05,01,aa */ {0xaa, 0x07, 0x0030}, /* 00,07,30,aa */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {} }; /* from lvWIMv.inf 046d:08a2/:08aa 2007/06/03 */ static const struct usb_action pas202b_Initial[] = { /* 640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x03, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,03,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x03, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,03,cc */ {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */ {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e6,cc */ {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */ {0xaa, 0x02, 0x0002}, /* 00,02,04,aa --> 02 */ {0xaa, 0x07, 0x0006}, /* 00,07,06,aa */ {0xaa, 0x08, 0x0002}, /* 00,08,02,aa */ {0xaa, 0x09, 0x0006}, /* 00,09,06,aa */ {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */ {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */ {0xaa, 0x0c, 0x0006}, {0xaa, 0x0d, 0x0000}, /* 
00,0d,00,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */ {0xaa, 0x13, 0x0063}, /* 00,13,63,aa */ {0xaa, 0x15, 0x0070}, /* 00,15,70,aa */ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x70, ZC3XX_R18D_YTARGET}, /* 01,8d,70,cc */ {} }; static const struct usb_action pas202b_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */ {0xa0, 0x08, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,08,cc */ {0xa0, 0x02, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,02,cc */ {0xa0, 0x08, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,08,cc */ {0xa0, 0x02, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,02,cc */ {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ {0xaa, 0x02, 0x0002}, /* 00,02,02,aa */ {0xaa, 0x07, 0x0006}, /* 00,07,06,aa */ {0xaa, 0x08, 0x0002}, /* 00,08,02,aa */ {0xaa, 0x09, 0x0006}, /* 
00,09,06,aa */ {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */ {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */ {0xaa, 0x0c, 0x0006}, {0xaa, 0x0d, 0x0000}, /* 00,0d,00,aa */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */ {0xaa, 0x13, 0x0063}, /* 00,13,63,aa */ {0xaa, 0x15, 0x0070}, /* 00,15,70,aa */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x70, ZC3XX_R18D_YTARGET}, /* 01,8d,70,cc */ {0xa0, 0xff, ZC3XX_R097_WINYSTARTHIGH}, {0xa0, 0xfe, ZC3XX_R098_WINYSTARTLOW}, {} }; static const struct usb_action pas202b_50HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ {0xaa, 0x21, 0x001b}, {0xaa, 0x03, 0x0044}, /* 00,03,44,aa */ {0xaa, 0x04, 0x0008}, {0xaa, 0x05, 0x001b}, {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x1b, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x4d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,4d,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x44, ZC3XX_R01D_HSYNC_0}, /* 00,1d,44,cc */ {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */ 
{0xa0, 0xeb, ZC3XX_R020_HSYNC_3}, /* 00,20,eb,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; static const struct usb_action pas202b_50HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0004}, {0xaa, 0x21, 0x003d}, {0xaa, 0x03, 0x0041}, /* 00,03,41,aa */ {0xaa, 0x04, 0x0010}, {0xaa, 0x05, 0x003d}, {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x3d, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x9b, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,9b,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x41, ZC3XX_R01D_HSYNC_0}, /* 00,1d,41,cc */ {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; static const struct usb_action pas202b_60HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */ {0xaa, 0x03, 0x0045}, /* 00,03,45,aa */ {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */ {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, 
ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x40, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,40,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x45, ZC3XX_R01D_HSYNC_0}, /* 00,1d,45,cc */ {0xa0, 0x8e, ZC3XX_R01E_HSYNC_1}, /* 00,1e,8e,cc */ {0xa0, 0xc1, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c1,cc */ {0xa0, 0xf5, ZC3XX_R020_HSYNC_3}, /* 00,20,f5,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; static const struct usb_action pas202b_60HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0004}, {0xaa, 0x21, 0x0008}, {0xaa, 0x03, 0x0042}, /* 00,03,42,aa */ {0xaa, 0x04, 0x0010}, {0xaa, 0x05, 0x0008}, {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x08, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,81,cc */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x42, ZC3XX_R01D_HSYNC_0}, /* 00,1d,42,cc */ {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ {0xa0, 0xaf, ZC3XX_R01F_HSYNC_2}, /* 00,1f,af,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, 
ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; static const struct usb_action pas202b_NoFliker[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ {0xaa, 0x21, 0x0006}, {0xaa, 0x03, 0x0040}, /* 00,03,40,aa */ {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */ {0xaa, 0x05, 0x0006}, {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x06, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */ {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */ {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; static const struct usb_action pas202b_NoFlikerScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ {0xaa, 0x20, 0x0004}, {0xaa, 0x21, 0x000c}, {0xaa, 0x03, 0x0040}, /* 00,03,40,aa */ {0xaa, 0x04, 0x0010}, {0xaa, 0x05, 0x000c}, {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x0c, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ 
{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x02, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,02,cc */ {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */ {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */ {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */ {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */ {} }; /* mt9v111 (mi0360soc) and pb0330 from vm30x.inf 0ac8:301b 07/02/13 */ static const struct usb_action mt9v111_1_Initial[] = { /* 640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0001}, {0xaa, 0x06, 0x0000}, {0xaa, 0x08, 0x0483}, {0xaa, 0x01, 0x0004}, {0xaa, 0x08, 0x0006}, {0xaa, 0x02, 0x0011}, {0xaa, 0x03, 0x01e5}, /*jfm: was 01e7*/ {0xaa, 0x04, 0x0285}, /*jfm: was 0287*/ {0xaa, 0x07, 0x3002}, {0xaa, 0x20, 0x5100}, {0xaa, 0x35, 0x507f}, {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xaa, 0x2b, 0x007f}, {0xaa, 0x2c, 0x007f}, /*jfm: was 0030*/ {0xaa, 0x2d, 0x007f}, /*jfm: was 0030*/ {0xaa, 0x2e, 0x007f}, /*jfm: was 
0030*/ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x09, 0x01ad}, /*jfm: was 00*/ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x6c, ZC3XX_R18D_YTARGET}, {0xa0, 0x61, ZC3XX_R116_RGAIN}, {0xa0, 0x65, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action mt9v111_1_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0001}, {0xaa, 0x06, 0x0000}, {0xaa, 0x08, 0x0483}, {0xaa, 0x01, 0x0004}, {0xaa, 0x08, 0x0006}, {0xaa, 0x02, 0x0011}, {0xaa, 0x03, 0x01e7}, {0xaa, 0x04, 0x0287}, {0xaa, 0x07, 0x3002}, {0xaa, 0x20, 0x5100}, {0xaa, 0x35, 0x007f}, /*jfm: was 0050*/ {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xaa, 0x2b, 0x007f}, /*jfm: was 28*/ {0xaa, 0x2c, 0x007f}, /*jfm: was 30*/ {0xaa, 0x2d, 0x007f}, /*jfm: was 30*/ {0xaa, 0x2e, 0x007f}, /*jfm: was 28*/ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, 
ZC3XX_R189_AWBSTATUS}, {0xa0, 0x09, 0x01ad}, /*jfm: was 00*/ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x6c, ZC3XX_R18D_YTARGET}, {0xa0, 0x61, ZC3XX_R116_RGAIN}, {0xa0, 0x65, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action mt9v111_1_AE50HZ[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0562}, {0xbb, 0x01, 0x09aa}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x9b, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x47, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_1_AE50HZScale[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0509}, {0xbb, 0x01, 0x0934}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action 
mt9v111_1_AE60HZ[] = {	/* 60 Hz anti-flicker exposure table */
	{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},	/* AE/AWB off while reprogramming */
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xaa, 0x05, 0x003d},	/* sensor horizontal blanking */
	{0xaa, 0x09, 0x016e},	/* sensor shutter width */
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* exposure limit = 0x0007dd */
	{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0xdd, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* anti-flicker period = 0x00003d */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x3d, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x62, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x90, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xc8, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
	{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},	/* re-enable auto correction */
	{}	/* end-of-table sentinel */
};

/* 60 Hz anti-flicker, scaled-mode variant */
static const struct usb_action mt9v111_1_AE60HZScale[] = {
	{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},	/* AE/AWB off while reprogramming */
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xbb, 0x00, 0x0509},
	{0xbb, 0x01, 0x0983},
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* exposure limit = 0x00078f */
	{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0x8f, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* anti-flicker period = 0x000081 */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xff, ZC3XX_R020_HSYNC_3},
	{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},	/* re-enable auto correction */
	{}	/* end-of-table sentinel */
};

/* flicker filter disabled ("NoFliker" [sic] - spelling kept for API consistency) */
static const struct usb_action mt9v111_1_AENoFliker[] = {
	{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},	/* AE/AWB off while reprogramming */
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xbb, 0x00, 0x0509},
	{0xbb, 0x01, 0x0960},
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* exposure limit = 0x0007f0 */
	{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* minimal anti-flicker period */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x04,
ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x09, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x40, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xe0, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_1_AENoFlikerScale[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0534}, {0xbb, 0x02, 0x0960}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x34, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xe0, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; /* from usbvm303.inf 0ac8:303b 07/03/25 (3 - tas5130c) */ static const struct usb_action mt9v111_3_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xdd, 0x00, 
0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0001}, /* select IFP/SOC registers */ {0xaa, 0x06, 0x0000}, /* operating mode control */ {0xaa, 0x08, 0x0483}, /* output format control */ /* H red first, V red or blue first, * raw Bayer, auto flicker */ {0xaa, 0x01, 0x0004}, /* select sensor core registers */ {0xaa, 0x08, 0x0006}, /* row start */ {0xaa, 0x02, 0x0011}, /* column start */ {0xaa, 0x03, 0x01e5}, /* window height - 1 */ {0xaa, 0x04, 0x0285}, /* window width - 1 */ {0xaa, 0x07, 0x3002}, /* output control */ {0xaa, 0x20, 0x1100}, /* read mode: bits 8 & 12 (?) */ {0xaa, 0x35, 0x007f}, /* global gain */ {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xaa, 0x2b, 0x007f}, /* green1 gain */ {0xaa, 0x2c, 0x007f}, /* blue gain */ {0xaa, 0x2d, 0x007f}, /* red gain */ {0xaa, 0x2e, 0x007f}, /* green2 gain */ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x80, ZC3XX_R18D_YTARGET}, {0xa0, 0x61, ZC3XX_R116_RGAIN}, {0xa0, 0x65, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action mt9v111_3_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 
0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0001}, {0xaa, 0x06, 0x0000}, {0xaa, 0x08, 0x0483}, {0xaa, 0x01, 0x0004}, {0xaa, 0x08, 0x0006}, {0xaa, 0x02, 0x0011}, {0xaa, 0x03, 0x01e7}, {0xaa, 0x04, 0x0287}, {0xaa, 0x07, 0x3002}, {0xaa, 0x20, 0x1100}, {0xaa, 0x35, 0x007f}, {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xaa, 0x2b, 0x007f}, {0xaa, 0x2c, 0x007f}, {0xaa, 0x2d, 0x007f}, {0xaa, 0x2e, 0x007f}, {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x80, ZC3XX_R18D_YTARGET}, {0xa0, 0x61, ZC3XX_R116_RGAIN}, {0xa0, 0x65, ZC3XX_R118_BGAIN}, {} }; static const struct usb_action mt9v111_3_AE50HZ[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0009}, /* horizontal blanking */ {0xaa, 0x09, 0x01ce}, /* shutter width */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_3_AE50HZScale[] = { {0xa0, 
0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0009}, {0xaa, 0x09, 0x01ce}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_3_AE60HZ[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0009}, {0xaa, 0x09, 0x0083}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x8f, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_3_AE60HZScale[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0009}, {0xaa, 0x09, 0x0083}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x8f, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, 
ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_3_AENoFliker[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0034}, {0xaa, 0x09, 0x0260}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x34, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xe0, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action mt9v111_3_AENoFlikerScale[] = { {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE}, {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xaa, 0x05, 0x0034}, {0xaa, 0x09, 0x0260}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x34, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xe0, ZC3XX_R020_HSYNC_3}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, {} }; static const struct usb_action pb0330_Initial[] = { /* 
640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00 */ {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0006}, {0xaa, 0x02, 0x0011}, {0xaa, 0x03, 0x01e5}, /*jfm: was 1e7*/ {0xaa, 0x04, 0x0285}, /*jfm: was 0287*/ {0xaa, 0x06, 0x0003}, {0xaa, 0x07, 0x3002}, {0xaa, 0x20, 0x1100}, {0xaa, 0x2f, 0xf7b0}, {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x34, 0x0100}, {0xaa, 0x35, 0x0060}, {0xaa, 0x3d, 0x068f}, {0xaa, 0x40, 0x01e0}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x09, 0x01ad}, /*jfm: was 00 */ {0xa0, 0x15, 0x01ae}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /*jfm: was 6c*/ {} }; static const struct usb_action pb0330_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00 */ {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 
0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, {0xdd, 0x00, 0x0200}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xaa, 0x01, 0x0006}, {0xaa, 0x02, 0x0011}, {0xaa, 0x03, 0x01e7}, {0xaa, 0x04, 0x0287}, {0xaa, 0x06, 0x0003}, {0xaa, 0x07, 0x3002}, {0xaa, 0x20, 0x1100}, {0xaa, 0x2f, 0xf7b0}, {0xaa, 0x30, 0x0005}, {0xaa, 0x31, 0x0000}, {0xaa, 0x34, 0x0100}, {0xaa, 0x35, 0x0060}, {0xaa, 0x3d, 0x068f}, {0xaa, 0x40, 0x01e0}, {0xaa, 0x58, 0x0078}, {0xaa, 0x62, 0x0411}, {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x09, 0x01ad}, {0xa0, 0x15, 0x01ae}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /*jfm: was 6c*/ {} }; static const struct usb_action pb0330_50HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x055c}, {0xbb, 0x01, 0x09aa}, {0xbb, 0x00, 0x1001}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xc4, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x47, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1a, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x5c, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action pb0330_50HZScale[] = { {0xa0, 0x00, 
ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0566}, {0xbb, 0x02, 0x09b2}, {0xbb, 0x00, 0x1002}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x8c, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x8a, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1a, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0}, {0xa0, 0xf0, ZC3XX_R01E_HSYNC_1}, {0xa0, 0xf8, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action pb0330_60HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0535}, {0xbb, 0x01, 0x0974}, {0xbb, 0x00, 0x1001}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xfe, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x3e, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1a, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0x35, ZC3XX_R01D_HSYNC_0}, {0xa0, 0x50, ZC3XX_R01E_HSYNC_1}, {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, {0xa0, 0xd0, ZC3XX_R020_HSYNC_3}, {} }; static const struct usb_action pb0330_60HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, {0xbb, 0x00, 0x0535}, {0xbb, 0x02, 0x096c}, {0xbb, 0x00, 0x1002}, {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xc0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x7c, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x1a, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, 
{0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x35, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x50, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0x90, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xd0, ZC3XX_R020_HSYNC_3},
	{}	/* end-of-table sentinel */
};

/* flicker filter disabled ("NoFliker" [sic] - spelling kept for API consistency) */
static const struct usb_action pb0330_NoFliker[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xbb, 0x00, 0x0509},
	{0xbb, 0x02, 0x0940},
	{0xbb, 0x00, 0x1002},
	{0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* exposure limit = 0x0007f0 */
	{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* minimal anti-flicker period */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x09, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x40, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0x90, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xe0, ZC3XX_R020_HSYNC_3},
	{}	/* end-of-table sentinel */
};

/* flicker filter disabled, scaled-mode variant */
static const struct usb_action pb0330_NoFlikerScale[] = {
	{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
	{0xbb, 0x00, 0x0535},
	{0xbb, 0x01, 0x0980},
	{0xbb, 0x00, 0x1001},
	{0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
	{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},	/* exposure limit = 0x0007f0 */
	{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
	{0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW},
	{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},	/* minimal anti-flicker period */
	{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
	{0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW},
	{0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
	{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
	{0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
	{0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
	{0xa0, 0x35, ZC3XX_R01D_HSYNC_0},
	{0xa0, 0x60, ZC3XX_R01E_HSYNC_1},
	{0xa0, 0x90, ZC3XX_R01F_HSYNC_2},
	{0xa0, 0xe0, ZC3XX_R020_HSYNC_3},
	{}	/* end-of-table sentinel */
};

/* from oem9.inf */
static const struct usb_action po2030_Initial[] = {	/* 640x480 */
	{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},		/* 00,00,01,cc */
	{0xa0, 0x04, ZC3XX_R002_CLOCKSELECT},		/* 00,02,04,cc */
	{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},	/* 00,10,01,cc */
	{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},	/*
00,01,01,cc */ {0xa0, 0x04, ZC3XX_R080_HBLANKHIGH}, /* 00,80,04,cc */ {0xa0, 0x05, ZC3XX_R081_HBLANKLOW}, /* 00,81,05,cc */ {0xa0, 0x16, ZC3XX_R083_RGAINADDR}, /* 00,83,16,cc */ {0xa0, 0x18, ZC3XX_R085_BGAINADDR}, /* 00,85,18,cc */ {0xa0, 0x1a, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,1a,cc */ {0xa0, 0x1b, ZC3XX_R087_EXPTIMEMID}, /* 00,87,1b,cc */ {0xa0, 0x1c, ZC3XX_R088_EXPTIMELOW}, /* 00,88,1c,cc */ {0xa0, 0xee, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,ee,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e6,cc */ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */ {0xaa, 0x09, 0x00ce}, /* 00,09,ce,aa */ {0xaa, 0x0b, 0x0005}, /* 00,0b,05,aa */ {0xaa, 0x0d, 0x0054}, /* 00,0d,54,aa */ {0xaa, 0x0f, 0x00eb}, /* 00,0f,eb,aa */ {0xaa, 0x87, 0x0000}, /* 00,87,00,aa */ {0xaa, 0x88, 0x0004}, /* 00,88,04,aa */ {0xaa, 0x89, 0x0000}, /* 00,89,00,aa */ {0xaa, 0x8a, 0x0005}, /* 00,8a,05,aa */ {0xaa, 0x13, 0x0003}, /* 00,13,03,aa */ {0xaa, 0x16, 0x0040}, /* 00,16,40,aa */ {0xaa, 0x18, 0x0040}, /* 00,18,40,aa */ {0xaa, 0x1d, 0x0002}, /* 00,1d,02,aa */ {0xaa, 0x29, 0x00e8}, /* 00,29,e8,aa */ {0xaa, 0x45, 0x0045}, /* 00,45,45,aa */ {0xaa, 0x50, 0x00ed}, /* 00,50,ed,aa */ {0xaa, 0x51, 0x0025}, /* 00,51,25,aa */ {0xaa, 0x52, 0x0042}, /* 00,52,42,aa */ {0xaa, 
0x53, 0x002f}, /* 00,53,2f,aa */ {0xaa, 0x79, 0x0025}, /* 00,79,25,aa */ {0xaa, 0x7b, 0x0000}, /* 00,7b,00,aa */ {0xaa, 0x7e, 0x0025}, /* 00,7e,25,aa */ {0xaa, 0x7f, 0x0025}, /* 00,7f,25,aa */ {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */ {0xaa, 0x33, 0x0036}, /* 00,33,36,aa */ {0xaa, 0x36, 0x0060}, /* 00,36,60,aa */ {0xaa, 0x37, 0x0008}, /* 00,37,08,aa */ {0xaa, 0x3b, 0x0031}, /* 00,3b,31,aa */ {0xaa, 0x44, 0x000f}, /* 00,44,0f,aa */ {0xaa, 0x58, 0x0002}, /* 00,58,02,aa */ {0xaa, 0x66, 0x00c0}, /* 00,66,c0,aa */ {0xaa, 0x67, 0x0044}, /* 00,67,44,aa */ {0xaa, 0x6b, 0x00a0}, /* 00,6b,a0,aa */ {0xaa, 0x6c, 0x0054}, /* 00,6c,54,aa */ {0xaa, 0xd6, 0x0007}, /* 00,d6,07,aa */ {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,f7,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x7a, ZC3XX_R116_RGAIN}, /* 01,16,7a,cc */ {0xa0, 0x4a, ZC3XX_R118_BGAIN}, /* 01,18,4a,cc */ {} }; /* from oem9.inf */ static const struct usb_action po2030_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ {0xa0, 0x04, ZC3XX_R080_HBLANKHIGH}, /* 00,80,04,cc */ {0xa0, 0x05, ZC3XX_R081_HBLANKLOW}, /* 00,81,05,cc */ {0xa0, 0x16, ZC3XX_R083_RGAINADDR}, /* 00,83,16,cc */ {0xa0, 0x18, ZC3XX_R085_BGAINADDR}, /* 00,85,18,cc */ {0xa0, 0x1a, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,1a,cc */ {0xa0, 0x1b, ZC3XX_R087_EXPTIMEMID}, /* 00,87,1b,cc */ {0xa0, 0x1c, ZC3XX_R088_EXPTIMELOW}, /* 00,88,1c,cc */ {0xa0, 
0xee, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,ee,cc */ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc */ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e8,cc */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ {0xaa, 0x09, 0x00cc}, /* 00,09,cc,aa */ {0xaa, 0x0b, 0x0005}, /* 00,0b,05,aa */ {0xaa, 0x0d, 0x0058}, /* 00,0d,58,aa */ {0xaa, 0x0f, 0x00ed}, /* 00,0f,ed,aa */ {0xaa, 0x87, 0x0000}, /* 00,87,00,aa */ {0xaa, 0x88, 0x0004}, /* 00,88,04,aa */ {0xaa, 0x89, 0x0000}, /* 00,89,00,aa */ {0xaa, 0x8a, 0x0005}, /* 00,8a,05,aa */ {0xaa, 0x13, 0x0003}, /* 00,13,03,aa */ {0xaa, 0x16, 0x0040}, /* 00,16,40,aa */ {0xaa, 0x18, 0x0040}, /* 00,18,40,aa */ {0xaa, 0x1d, 0x0002}, /* 00,1d,02,aa */ {0xaa, 0x29, 0x00e8}, /* 00,29,e8,aa */ {0xaa, 0x45, 0x0045}, /* 00,45,45,aa */ {0xaa, 0x50, 0x00ed}, /* 00,50,ed,aa */ {0xaa, 0x51, 0x0025}, /* 00,51,25,aa */ {0xaa, 0x52, 0x0042}, /* 00,52,42,aa */ {0xaa, 0x53, 0x002f}, /* 00,53,2f,aa */ {0xaa, 0x79, 0x0025}, /* 00,79,25,aa */ {0xaa, 0x7b, 0x0000}, /* 00,7b,00,aa */ {0xaa, 0x7e, 0x0025}, /* 00,7e,25,aa */ {0xaa, 0x7f, 0x0025}, /* 00,7f,25,aa */ {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */ {0xaa, 0x33, 0x0036}, /* 00,33,36,aa */ {0xaa, 0x36, 0x0060}, /* 00,36,60,aa */ {0xaa, 0x37, 0x0008}, /* 00,37,08,aa */ {0xaa, 0x3b, 0x0031}, /* 00,3b,31,aa */ {0xaa, 0x44, 
0x000f}, /* 00,44,0f,aa */ {0xaa, 0x58, 0x0002}, /* 00,58,02,aa */ {0xaa, 0x66, 0x00c0}, /* 00,66,c0,aa */ {0xaa, 0x67, 0x0044}, /* 00,67,44,aa */ {0xaa, 0x6b, 0x00a0}, /* 00,6b,a0,aa */ {0xaa, 0x6c, 0x0054}, /* 00,6c,54,aa */ {0xaa, 0xd6, 0x0007}, /* 00,d6,07,aa */ {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,f7,cc */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ {0xa0, 0x7a, ZC3XX_R116_RGAIN}, /* 01,16,7a,cc */ {0xa0, 0x4a, ZC3XX_R118_BGAIN}, /* 01,18,4a,cc */ {} }; static const struct usb_action po2030_50HZ[] = { {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ {0xaa, 0x1a, 0x0001}, /* 00,1a,01,aa */ {0xaa, 0x1b, 0x000a}, /* 00,1b,0a,aa */ {0xaa, 0x1c, 0x00b0}, /* 00,1c,b0,aa */ {0xa0, 0x05, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,05,cc */ {0xa0, 0x35, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,35,cc */ {0xa0, 0x70, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,70,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x85, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,85,cc */ {0xa0, 0x58, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,58,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,18,cc */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc */ {0xa0, 0x22, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,22,cc */ {0xa0, 0x88, ZC3XX_R18D_YTARGET}, /* 01,8d,88,cc */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ {} }; static const struct usb_action po2030_60HZ[] = { {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ 
{0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ {0xaa, 0x1b, 0x00de}, /* 00,1b,de,aa */ {0xaa, 0x1c, 0x0040}, /* 00,1c,40,aa */ {0xa0, 0x08, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,08,cc */ {0xa0, 0xae, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,ae,cc */ {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,80,cc */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x6f, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,6f,cc */ {0xa0, 0x20, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,20,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,18,cc */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc */ {0xa0, 0x22, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,22,cc */ {0xa0, 0x88, ZC3XX_R18D_YTARGET}, /* 01,8d,88,cc */ /* win: 01,8d,80 */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc */ {} }; static const struct usb_action po2030_NoFliker[] = { {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ {0xaa, 0x8d, 0x000d}, /* 00,8d,0d,aa */ {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ {0xaa, 0x1b, 0x0002}, /* 00,1b,02,aa */ {0xaa, 0x1c, 0x0078}, /* 00,1c,78,aa */ {0xaa, 0x46, 0x0000}, /* 00,46,00,aa */ {0xaa, 0x15, 0x0000}, /* 00,15,00,aa */ {} }; static const struct usb_action tas5130c_InitialScale[] = { /* 320x240 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x50, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x02, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x00, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x04, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x0f, 
ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x04, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x0f, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x06, ZC3XX_R08D_COMPABILITYMODE}, {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x70, ZC3XX_R18D_YTARGET}, {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN}, {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL}, {} }; static const struct usb_action tas5130c_Initial[] = { /* 640x480 */ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, {0xa0, 0x02, ZC3XX_R010_CMOSSENSORSELECT}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x00, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, {0xa0, 0x05, ZC3XX_R098_WINYSTARTLOW}, {0xa0, 0x0f, ZC3XX_R09A_WINXSTARTLOW}, {0xa0, 0x05, ZC3XX_R11A_FIRSTYLOW}, {0xa0, 0x0f, ZC3XX_R11C_FIRSTXLOW}, {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, {0xa0, 0x06, ZC3XX_R08D_COMPABILITYMODE}, {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, {0xa0, 0x70, ZC3XX_R18D_YTARGET}, {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, {0xa0, 0x00, 0x01ad}, {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 
{0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN}, {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL}, {} }; static const struct usb_action tas5130c_50HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0063}, /* 00,a4,63,aa */ {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x63, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,63,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xfe, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x47, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,47,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x08, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xd3, ZC3XX_R01D_HSYNC_0}, /* 00,1d,d3,cc */ {0xa0, 0xda, ZC3XX_R01E_HSYNC_1}, /* 00,1e,da,cc */ {0xa0, 0xea, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ea,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x03, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,03,cc */ {0xa0, 0x4c, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action tas5130c_50HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0077}, /* 00,a4,77,aa */ {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x77, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,77,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xd0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x7d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,7d,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x08, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, 
ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xf0, ZC3XX_R01D_HSYNC_0}, /* 00,1d,f0,cc */ {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1}, /* 00,1e,f4,cc */ {0xa0, 0xf8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,f8,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x03, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,03,cc */ {0xa0, 0xc0, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action tas5130c_60HZ[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0036}, /* 00,a4,36,aa */ {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x36, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,36,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x54, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x3e, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,3e,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x08, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xca, ZC3XX_R01D_HSYNC_0}, /* 00,1d,ca,cc */ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,d0,cc */ {0xa0, 0xe0, ZC3XX_R01F_HSYNC_2}, /* 00,1f,e0,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x03, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,03,cc */ {0xa0, 0x28, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action tas5130c_60HZScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0077}, /* 00,a4,77,aa */ {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x77, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,77,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x09, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x47, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, 
ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ {0xa0, 0x7d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,7d,cc */ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x08, ZC3XX_R1A9_DIGITALLIMITDIFF}, {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, {0xa0, 0xc8, ZC3XX_R01D_HSYNC_0}, /* 00,1d,c8,cc */ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,d0,cc */ {0xa0, 0xe0, ZC3XX_R01F_HSYNC_2}, /* 00,1f,e0,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x03, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,03,cc */ {0xa0, 0x20, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action tas5130c_NoFliker[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0040}, /* 00,a4,40,aa */ {0xa0, 0x01, ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x40, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,40,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0xa0, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0xbc, ZC3XX_R01D_HSYNC_0}, /* 00,1d,bc,cc */ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,d0,cc */ {0xa0, 0xe0, ZC3XX_R01F_HSYNC_2}, /* 00,1f,e0,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x02, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,02,cc */ {0xa0, 0xf0, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action tas5130c_NoFlikerScale[] = { {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ {0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */ {0xaa, 0xa4, 0x0090}, /* 00,a4,90,aa */ {0xa0, 0x01, 
ZC3XX_R0A3_EXPOSURETIMEHIGH}, /* 00,a3,01,cc */ {0xa0, 0x90, ZC3XX_R0A4_EXPOSURETIMELOW}, /* 00,a4,90,cc */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ {0xa0, 0x0a, ZC3XX_R191_EXPOSURELIMITMID}, {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW}, {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ {0xa0, 0xbc, ZC3XX_R01D_HSYNC_0}, /* 00,1d,bc,cc */ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,d0,cc */ {0xa0, 0xe0, ZC3XX_R01F_HSYNC_2}, /* 00,1f,e0,cc */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ {0xa0, 0x02, ZC3XX_R09F_MAXXHIGH}, /* 00,9f,02,cc */ {0xa0, 0xf0, ZC3XX_R0A0_MAXXLOW}, {0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN}, {} }; static const struct usb_action gc0303_InitialScale[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */ {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc, * 0<->10 */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc, */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc, */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc, */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc, */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc, */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc, */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc, */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc, */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc, */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc, */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc, */ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e6,cc, * 6<->8 */ {0xa0, 
0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc, * 6<->8 */ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, /* 00,87,10,cc, */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */ {0xaa, 0x1b, 0x0024}, /* 00,1b,24,aa, */ {0xdd, 0x00, 0x0080}, /* 00,00,80,dd, */ {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */ {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */ {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */ /*?? {0xaa, 0x01, 0x0000}, */ {0xaa, 0x01, 0x0000}, {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */ {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */ {0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc, */ {0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc, */ {0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc, */ {0xaa, 0x05, 0x0010}, /* 00,05,10,aa, */ {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa, */ {0xaa, 0x0b, 0x00a0}, /* 00,0b,a0,aa, */ {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa, */ {0xaa, 0x0d, 0x00a0}, /* 00,0d,a0,aa, */ {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa, */ {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */ {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */ /*?? 
{0xa0, 0x00, 0x0039}, {0xa1, 0x01, 0x0037}, */ {0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */ {0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa, (e6 -> e8) */ {0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */ {0xaa, 0x19, 0x0088}, /* 00,19,86,aa, */ {0xaa, 0x20, 0x0020}, /* 00,20,20,aa, */ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc, */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc, */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc, */ {0xa0, 0x76, ZC3XX_R189_AWBSTATUS}, /* 01,89,76,cc, */ {0xa0, 0x09, 0x01ad}, /* 01,ad,09,cc, */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc, */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc, */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc, */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc, */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc, */ {0xa0, 0x61, ZC3XX_R116_RGAIN}, /* 01,16,61,cc, */ {0xa0, 0x65, ZC3XX_R118_BGAIN}, /* 01,18,65,cc */ {} }; static const struct usb_action gc0303_Initial[] = { {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */ {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */ {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc, */ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc, */ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc, */ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc, */ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc, */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc, */ {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc, */ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc, */ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc, */ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,00,cc, */ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc, */ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,00,cc, */ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e8,cc, * 
8<->6 */ {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc, * 8<->6 */ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, /* 00,87,10,cc, */ {0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */ {0xaa, 0x1b, 0x0024}, /* 00,1b,24,aa, */ {0xdd, 0x00, 0x0080}, /* 00,00,80,dd, */ {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */ {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */ {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */ /*?? {0xaa, 0x01, 0x0000}, */ {0xaa, 0x01, 0x0000}, {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */ {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */ {0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc, */ {0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc, */ {0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc, */ {0xaa, 0x05, 0x0010}, /* 00,05,10,aa, */ {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa, */ {0xaa, 0x0b, 0x00a0}, /* 00,0b,a0,aa, */ {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa, */ {0xaa, 0x0d, 0x00a0}, /* 00,0d,a0,aa, */ {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa, */ {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */ {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */ {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */ /*?? 
{0xa0, 0x00, 0x0039}, {0xa1, 0x01, 0x0037}, */ {0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */ {0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa (e6 -> e8) */ {0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */ {0xaa, 0x19, 0x0088}, /* 00,19,88,aa, */ {0xaa, 0x20, 0x0020}, /* 00,20,20,aa, */ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc, */ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc, */ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc, */ {0xa0, 0x76, ZC3XX_R189_AWBSTATUS}, /* 01,89,76,cc, */ {0xa0, 0x09, 0x01ad}, /* 01,ad,09,cc, */ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc, */ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc, */ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc, */ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc, */ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc, */ {0xa0, 0x61, ZC3XX_R116_RGAIN}, /* 01,16,61,cc, */ {0xa0, 0x65, ZC3XX_R118_BGAIN}, /* 01,18,65,cc */ {} }; static const struct usb_action gc0303_50HZScale[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0001}, /* 00,83,01,aa */ {0xaa, 0x84, 0x00aa}, /* 00,84,aa,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */ {0xa0, 0x06, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0d,cc, */ {0xa0, 0xa8, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,50,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */ {0xa0, 0x8e, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,47,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc, */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */ {0xa0, 0x42, 
ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */ {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */ {} }; static const struct usb_action gc0303_50HZ[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0003}, /* 00,83,03,aa */ {0xaa, 0x84, 0x0054}, /* 00,84,54,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */ {0xa0, 0x0d, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0d,cc, */ {0xa0, 0x50, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,50,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */ {0xa0, 0x8e, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,8e,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc, */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */ {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */ {} }; static const struct usb_action gc0303_60HZScale[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0001}, /* 00,83,01,aa */ {0xaa, 0x84, 0x0062}, /* 00,84,62,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */ {0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,05,cc, */ {0xa0, 0x88, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,88,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */ {0xa0, 0x3b, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,3b,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc, */ {0xa0, 0x24, 
ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */ {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */ {} }; static const struct usb_action gc0303_60HZ[] = { {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0002}, /* 00,83,02,aa */ {0xaa, 0x84, 0x00c4}, /* 00,84,c4,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */ {0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,1,0b,cc, */ {0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,2,10,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,5,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,6,00,cc, */ {0xa0, 0x76, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,7,76,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,f,15,cc, */ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,9,10,cc, */ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,a,24,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,0,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,d,58,cc, */ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */ {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,d,78,cc */ {} }; static const struct usb_action gc0303_NoFlikerScale[] = { {0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */ {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ {0xaa, 0x84, 0x0020}, /* 00,84,20,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,0,00,cc, */ {0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,05,cc, */ {0xa0, 0x88, ZC3XX_R192_EXPOSURELIMITLOW}, /* 
01,92,88,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */ {0xa0, 0x03, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,03,cc */ {} }; static const struct usb_action gc0303_NoFliker[] = { {0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */ {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ {0xaa, 0x84, 0x0020}, /* 00,84,20,aa */ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */ {0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0b,cc, */ {0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,10,cc, */ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */ {0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc, */ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */ {0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */ {0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */ {0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */ {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */ {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */ {0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */ {0xa0, 0x03, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,03,cc */ {} }; static u8 reg_r_i(struct gspca_dev *gspca_dev, u16 index) { int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0xa1, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, /* value */ index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { 
		/* reg_r_i (continued): the usb_control_msg() above failed -
		 * log it, latch the error in usb_err (which makes all later
		 * register/i2c helpers no-ops) and return 0 as a harmless
		 * value */
		err("reg_r_i err %d", ret);
		gspca_dev->usb_err = ret;
		return 0;
	}
	return gspca_dev->usb_buf[0];
}

/* Read one bridge register, with USB trace logging.
 * Returns the register value, or 0 if a USB error is (or becomes) pending. */
static u8 reg_r(struct gspca_dev *gspca_dev,
		u16 index)
{
	u8 ret;

	ret = reg_r_i(gspca_dev, index);
	PDEBUG(D_USBI, "reg r [%04x] -> %02x", index, ret);
	return ret;
}

/* Write one bridge register (no trace logging).
 * Skipped entirely when a previous USB error is pending; on a new failure
 * the error is latched in usb_err so subsequent transfers are skipped too. */
static void reg_w_i(struct gspca_dev *gspca_dev,
			u8 value,
			u16 index)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0xa0,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0,
			500);
	if (ret < 0) {
		err("reg_w_i err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* Write one bridge register, with USB trace logging. */
static void reg_w(struct gspca_dev *gspca_dev,
			u8 value,
			u16 index)
{
	PDEBUG(D_USBO, "reg w [%04x] = %02x", index, value);
	reg_w_i(gspca_dev, value, index);
}

/* Read a 16-bit value from the sensor over the bridge's i2c engine.
 * Protocol (from the register writes below): load the sensor register
 * address into 0x92, issue the read command (0x02 -> 0x90), wait, check
 * the status byte in 0x91, then fetch low byte (0x95) and high byte (0x96).
 * Returns 0 immediately if a USB error is pending. */
static u16 i2c_read(struct gspca_dev *gspca_dev,
			u8 reg)
{
	u8 retbyte;
	u16 retval;

	if (gspca_dev->usb_err < 0)
		return 0;
	reg_w_i(gspca_dev, reg, 0x0092);
	reg_w_i(gspca_dev, 0x02, 0x0090);		/* <- read command */
	msleep(20);
	retbyte = reg_r_i(gspca_dev, 0x0091);		/* read status */
	if (retbyte != 0x00)
		err("i2c_r status error %02x", retbyte);
	retval = reg_r_i(gspca_dev, 0x0095);		/* read Lowbyte */
	retval |= reg_r_i(gspca_dev, 0x0096) << 8;	/* read Hightbyte */
	PDEBUG(D_USBI, "i2c r [%02x] -> %04x (%02x)",
			reg, retval, retbyte);
	return retval;
}

/* Write a 16-bit value (valH:valL) to a sensor register over the bridge's
 * i2c engine: address -> 0x92, low byte -> 0x93, high byte -> 0x94, then
 * the write command (0x01 -> 0x90); the status byte from 0x91 is returned.
 * Returns 0 immediately if a USB error is pending. */
static u8 i2c_write(struct gspca_dev *gspca_dev,
			u8 reg,
			u8 valL,
			u8 valH)
{
	u8 retbyte;

	if (gspca_dev->usb_err < 0)
		return 0;
	reg_w_i(gspca_dev, reg, 0x92);
	reg_w_i(gspca_dev, valL, 0x93);
	reg_w_i(gspca_dev, valH, 0x94);
	reg_w_i(gspca_dev, 0x01, 0x90);		/* <- write command */
	msleep(1);
	retbyte = reg_r_i(gspca_dev, 0x0091);		/* read status */
	if (retbyte != 0x00)
		err("i2c_w status error %02x", retbyte);
	PDEBUG(D_USBO, "i2c w [%02x] = %02x%02x (%02x)",
			reg, valH, valL, retbyte);
	return retbyte;
}

/* Play one of the usb_action sequences above until its {} terminator.
 * req codes: 0xa0 = bridge register write, 0xa1 = bridge register read
 * (value discarded, read for side effect/trace), 0xaa and 0xbb = sensor
 * i2c writes with different field packing, anything else (0xdd in the
 * tables) = delay of 'idx' milliseconds. */
static void usb_exchange(struct gspca_dev *gspca_dev,
			const struct usb_action *action)
{
	while (action->req) {
		switch (action->req) {
		case 0xa0:	/* write register */
			reg_w(gspca_dev,
				action->val, action->idx);
			break;
		case 0xa1:	/* read status */
			reg_r(gspca_dev, action->idx);
			break;
		case 0xaa:
			i2c_write(gspca_dev,
				action->val,		/* reg */
				action->idx & 0xff,	/* valL */
				action->idx >> 8);	/* valH */
			break;
		case 0xbb:
			i2c_write(gspca_dev,
				action->idx >> 8,	/* reg */
				action->idx & 0xff,	/* valL */
				action->val);		/* valH */
			break;
		default:
/* case 0xdd: * delay */
			msleep(action->idx);
			break;
		}
		action++;
		msleep(1);
	}
}

/* Load the 3x3 color correction matrix (bridge regs 0x010a..0x0112)
 * appropriate for the current sensor.  Sensors mapped to NULL in
 * matrix_tb keep whatever matrix their init sequence already loaded. */
static void setmatrix(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i;
	const u8 *matrix;
	static const u8 adcm2700_matrix[9] =
/*	{0x66, 0xed, 0xed, 0xed, 0x66, 0xed, 0xed, 0xed, 0x66}; */
/*ms-win*/
		{0x74, 0xed, 0xed, 0xed, 0x74, 0xed, 0xed, 0xed, 0x74};
	static const u8 gc0305_matrix[9] =
		{0x50, 0xf8, 0xf8, 0xf8, 0x50, 0xf8, 0xf8, 0xf8, 0x50};
	static const u8 ov7620_matrix[9] =
		{0x58, 0xf4, 0xf4, 0xf4, 0x58, 0xf4, 0xf4, 0xf4, 0x58};
	static const u8 pas202b_matrix[9] =
		{0x4c, 0xf5, 0xff, 0xf9, 0x51, 0xf5, 0xfb, 0xed, 0x5f};
	static const u8 po2030_matrix[9] =
		{0x60, 0xf0, 0xf0, 0xf0, 0x60, 0xf0, 0xf0, 0xf0, 0x60};
	static const u8 tas5130c_matrix[9] =
		{0x68, 0xec, 0xec, 0xec, 0x68, 0xec, 0xec, 0xec, 0x68};
	static const u8 gc0303_matrix[9] =
		{0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b};
	/* per-sensor matrix lookup; several sensors share a matrix */
	static const u8 *matrix_tb[SENSOR_MAX] = {
		[SENSOR_ADCM2700] =	adcm2700_matrix,
		[SENSOR_CS2102] =	ov7620_matrix,
		[SENSOR_CS2102K] =	NULL,
		[SENSOR_GC0303] =	gc0303_matrix,
		[SENSOR_GC0305] =	gc0305_matrix,
		[SENSOR_HDCS2020] =	NULL,
		[SENSOR_HV7131B] =	NULL,
		[SENSOR_HV7131R] =	po2030_matrix,
		[SENSOR_ICM105A] =	po2030_matrix,
		[SENSOR_MC501CB] =	NULL,
		[SENSOR_MT9V111_1] =	gc0305_matrix,
		[SENSOR_MT9V111_3] =	gc0305_matrix,
		[SENSOR_OV7620] =	ov7620_matrix,
		[SENSOR_OV7630C] =	NULL,
		[SENSOR_PAS106] =	NULL,
		[SENSOR_PAS202B] =	pas202b_matrix,
		[SENSOR_PB0330] =	gc0305_matrix,
		[SENSOR_PO2030] =	po2030_matrix,
		[SENSOR_TAS5130C] =	tas5130c_matrix,
	};

	matrix = matrix_tb[sd->sensor];
	if (matrix == NULL)
		return;		/* matrix already loaded
 */
	for (i = 0; i < ARRAY_SIZE(ov7620_matrix); i++)
		reg_w(gspca_dev, matrix[i], 0x010a + i);
}

/* Apply the SHARPNESS control: two-register write (0x01c6 / 0x01cb) from
 * a 4-level lookup table.  The three intermediate reads mirror the
 * register access sequence seen in vendor traces — presumably required
 * by the bridge; TODO confirm whether they can be dropped. */
static void setsharpness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int sharpness;
	static const u8 sharpness_tb[][2] = {
		{0x02, 0x03},
		{0x04, 0x07},
		{0x08, 0x0f},
		{0x10, 0x1e}
	};

	sharpness = sd->ctrls[SHARPNESS].val;
	reg_w(gspca_dev, sharpness_tb[sharpness][0], 0x01c6);
	reg_r(gspca_dev, 0x01c8);
	reg_r(gspca_dev, 0x01c9);
	reg_r(gspca_dev, 0x01ca);
	reg_w(gspca_dev, sharpness_tb[sharpness][1], 0x01cb);
}

/* Build and load the 16-entry gamma curve (regs 0x0120..0x012f) and its
 * gradient table (0x0130..0x013f) from the GAMMA, CONTRAST and BRIGHTNESS
 * controls.  A base curve is picked by gamma level, then each point is
 * offset by brightness/contrast via the per-point delta tables. */
static void setcontrast(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	const u8 *Tgamma;
	int g, i, brightness, contrast, adj, gp1, gp2;
	u8 gr[16];
	static const u8 delta_b[16] =		/* delta for brightness */
		{0x50, 0x38, 0x2d, 0x28, 0x24, 0x21, 0x1e, 0x1d,
		 0x1d, 0x1b, 0x1b, 0x1b, 0x19, 0x18, 0x18, 0x18};
	static const u8 delta_c[16] =		/* delta for contrast */
		{0x2c, 0x1a, 0x12, 0x0c, 0x0a, 0x06, 0x06, 0x06,
		 0x04, 0x06, 0x04, 0x04, 0x03, 0x03, 0x02, 0x02};
	/* 6 base gamma curves, selected by GAMMA control value 1..6 */
	static const u8 gamma_tb[6][16] = {
		{0x00, 0x00, 0x03, 0x0d, 0x1b, 0x2e, 0x45, 0x5f,
		 0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff},
		{0x01, 0x0c, 0x1f, 0x3a, 0x53, 0x6d, 0x85, 0x9c,
		 0xb0, 0xc2, 0xd1, 0xde, 0xe9, 0xf2, 0xf9, 0xff},
		{0x04, 0x16, 0x30, 0x4e, 0x68, 0x81, 0x98, 0xac,
		 0xbe, 0xcd, 0xda, 0xe4, 0xed, 0xf5, 0xfb, 0xff},
		{0x13, 0x38, 0x59, 0x79, 0x92, 0xa7, 0xb9, 0xc8,
		 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff},
		{0x20, 0x4b, 0x6e, 0x8d, 0xa3, 0xb5, 0xc5, 0xd2,
		 0xdc, 0xe5, 0xec, 0xf2, 0xf6, 0xfa, 0xfd, 0xff},
		{0x24, 0x44, 0x64, 0x84, 0x9d, 0xb2, 0xc4, 0xd3,
		 0xe0, 0xeb, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff},
	};

	Tgamma = gamma_tb[sd->ctrls[GAMMA].val - 1];

	contrast = ((int) sd->ctrls[CONTRAST].val - 128);	/* -128 / 127 */
	brightness = ((int) sd->ctrls[BRIGHTNESS].val - 128);	/* -128 / 92 */
	adj = 0;
	gp1 = gp2 = 0;
	for (i = 0; i < 16; i++) {
		g = Tgamma[i] + delta_b[i] * brightness / 256
				- delta_c[i] * contrast / 256
				- adj / 2;
		if (g > 0xff)
			g = 0xff;
		else if (g < 0)
			g =
0;
		reg_w(gspca_dev, g, 0x0120 + i);	/* gamma */
		/* 'adj' drifts by one per point, bending the curve in the
		 * direction of the contrast setting */
		if (contrast > 0)
			adj--;
		else if (contrast < 0)
			adj++;
		/* gradient = half the difference between the points two
		 * entries apart (first entry handled specially) */
		if (i > 1)
			gr[i - 1] = (g - gp2) / 2;
		else if (i != 0)
			gr[0] = gp1 == 0 ? 0 : (g - gp1);
		gp2 = gp1;
		gp1 = g;
	}
	gr[15] = (0xff - gp2) / 2;
	for (i = 0; i < 16; i++)
		reg_w(gspca_dev, gr[i], 0x0130 + i);	/* gradient */
}

/* Read back the current exposure from sensor regs 0x25..0x27 into the
 * EXPOSURE control; the shifts reassemble the value split by
 * setexposure() below (0x25 = bits above 9, 0x26 = bits 1..8,
 * 0x27 top bit = bit 0). */
static void getexposure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->ctrls[EXPOSURE].val = (i2c_read(gspca_dev, 0x25) << 9)
		| (i2c_read(gspca_dev, 0x26) << 1)
		| (i2c_read(gspca_dev, 0x27) >> 7);
}

/* Write the EXPOSURE control value to sensor regs 0x25..0x27,
 * split exactly as getexposure() reassembles it. */
static void setexposure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int val;

	val = sd->ctrls[EXPOSURE].val;
	i2c_write(gspca_dev, 0x25, val >> 9, 0x00);
	i2c_write(gspca_dev, 0x26, val >> 1, 0x00);
	i2c_write(gspca_dev, 0x27, val << 7, 0x00);
}

/* Program the JPEG quantization selector (reg 0x0008) and the related
 * frame registers 0x0007/0x0018.  A number of sensors are skipped
 * entirely (their init sequences handle this).  QUANT_VAL is a
 * compile-time constant, hence the preprocessor selection of 'frxt'. */
static void setquality(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 frxt;

	switch (sd->sensor) {
	case SENSOR_ADCM2700:
	case SENSOR_GC0305:
	case SENSOR_HV7131B:
	case SENSOR_HV7131R:
	case SENSOR_OV7620:
	case SENSOR_PAS202B:
	case SENSOR_PO2030:
		return;
	}
/*fixme: is it really 0008 0007 0018 for all other sensors? */
	reg_w(gspca_dev, QUANT_VAL, 0x0008);
	frxt = 0x30;
	reg_w(gspca_dev, frxt, 0x0007);
#if QUANT_VAL == 0 || QUANT_VAL == 1 || QUANT_VAL == 2
	frxt = 0xff;
#elif QUANT_VAL == 3
	frxt = 0xf0;
#elif QUANT_VAL == 4
	frxt = 0xe0;
#else
	frxt = 0x20;
#endif
	reg_w(gspca_dev, frxt, 0x0018);
}

/* Matches the sensor's internal frame rate to the lighting frequency.
 * Valid frequencies are:
 *	50Hz, for European and Asian lighting (default)
 *	60Hz, for American lighting
 *	0 = No Flicker (for outdoor usage)
 */
static void setlightfreq(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i, mode;
	const struct usb_action *zc3_freq;
	/* per-sensor sequences, indexed by freq*2 (+1 for the 320x240
	 * mode); entries: {NoFliker, NoFlikerScale,
	 *		    50HZ, 50HZScale, 60HZ, 60HZScale} */
	static const struct usb_action *freq_tb[SENSOR_MAX][6] = {
	[SENSOR_ADCM2700] =
		{adcm2700_NoFliker, adcm2700_NoFliker,
		 adcm2700_50HZ, adcm2700_50HZ,
		 adcm2700_60HZ, adcm2700_60HZ},
	[SENSOR_CS2102] =
		{cs2102_NoFliker, cs2102_NoFlikerScale,
		 cs2102_50HZ, cs2102_50HZScale,
		 cs2102_60HZ, cs2102_60HZScale},
	[SENSOR_CS2102K] =
		{cs2102_NoFliker, cs2102_NoFlikerScale,
		 NULL, NULL, /* currently disabled */
		 NULL, NULL},
	[SENSOR_GC0303] =
		{gc0303_NoFliker, gc0303_NoFlikerScale,
		 gc0303_50HZ, gc0303_50HZScale,
		 gc0303_60HZ, gc0303_60HZScale},
	[SENSOR_GC0305] =
		{gc0305_NoFliker, gc0305_NoFliker,
		 gc0305_50HZ, gc0305_50HZ,
		 gc0305_60HZ, gc0305_60HZ},
	[SENSOR_HDCS2020] =
		{hdcs2020_NoFliker, hdcs2020_NoFliker,
		 hdcs2020_50HZ, hdcs2020_50HZ,
		 hdcs2020_60HZ, hdcs2020_60HZ},
	[SENSOR_HV7131B] =
		{hv7131b_NoFliker, hv7131b_NoFlikerScale,
		 hv7131b_50HZ, hv7131b_50HZScale,
		 hv7131b_60HZ, hv7131b_60HZScale},
	[SENSOR_HV7131R] =
		{hv7131r_NoFliker, hv7131r_NoFlikerScale,
		 hv7131r_50HZ, hv7131r_50HZScale,
		 hv7131r_60HZ, hv7131r_60HZScale},
	[SENSOR_ICM105A] =
		{icm105a_NoFliker, icm105a_NoFlikerScale,
		 icm105a_50HZ, icm105a_50HZScale,
		 icm105a_60HZ, icm105a_60HZScale},
	[SENSOR_MC501CB] =
		{mc501cb_NoFliker, mc501cb_NoFlikerScale,
		 mc501cb_50HZ, mc501cb_50HZScale,
		 mc501cb_60HZ, mc501cb_60HZScale},
	[SENSOR_MT9V111_1] =
		{mt9v111_1_AENoFliker, mt9v111_1_AENoFlikerScale,
		 mt9v111_1_AE50HZ, mt9v111_1_AE50HZScale,
		 mt9v111_1_AE60HZ, mt9v111_1_AE60HZScale},
	[SENSOR_MT9V111_3] =
		{mt9v111_3_AENoFliker, mt9v111_3_AENoFlikerScale,
		 mt9v111_3_AE50HZ, mt9v111_3_AE50HZScale,
		 mt9v111_3_AE60HZ, mt9v111_3_AE60HZScale},
	[SENSOR_OV7620] =
		{ov7620_NoFliker, ov7620_NoFliker,
		 ov7620_50HZ, ov7620_50HZ,
		 ov7620_60HZ, ov7620_60HZ},
	[SENSOR_OV7630C] =
		{NULL, NULL,
		 NULL, NULL,
		 NULL, NULL},
	[SENSOR_PAS106] =
		{pas106b_NoFliker, pas106b_NoFliker,
		 pas106b_50HZ, pas106b_50HZ,
		 pas106b_60HZ, pas106b_60HZ},
	[SENSOR_PAS202B] =
		{pas202b_NoFliker, pas202b_NoFlikerScale,
		 pas202b_50HZ, pas202b_50HZScale,
		 pas202b_60HZ, pas202b_60HZScale},
	[SENSOR_PB0330] =
		{pb0330_NoFliker, pb0330_NoFlikerScale,
		 pb0330_50HZ, pb0330_50HZScale,
		 pb0330_60HZ, pb0330_60HZScale},
	[SENSOR_PO2030] =
		{po2030_NoFliker, po2030_NoFliker,
		 po2030_50HZ, po2030_50HZ,
		 po2030_60HZ, po2030_60HZ},
	[SENSOR_TAS5130C] =
		{tas5130c_NoFliker, tas5130c_NoFlikerScale,
		 tas5130c_50HZ, tas5130c_50HZScale,
		 tas5130c_60HZ, tas5130c_60HZScale},
	};

	i = sd->ctrls[LIGHTFREQ].val * 2;
	mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
	if (mode)
		i++;			/* 320x240 */
	zc3_freq = freq_tb[sd->sensor][i];
	if (zc3_freq == NULL)
		return;
	usb_exchange(gspca_dev, zc3_freq);
	/* a few sensors need extra register tweaks after the sequence */
	switch (sd->sensor) {
	case SENSOR_GC0305:
		if (mode			/* if 320x240 */
		    && sd->ctrls[LIGHTFREQ].val == 1)	/* and 50Hz */
			reg_w(gspca_dev, 0x85, 0x018d);
					/* win: 0x80, 0x018d */
		break;
	case SENSOR_OV7620:
		if (!mode) {			/* if 640x480 */
			if (sd->ctrls[LIGHTFREQ].val != 0)	/* and filter */
				reg_w(gspca_dev, 0x40, 0x0002);
			else
				reg_w(gspca_dev, 0x44, 0x0002);
		}
		break;
	case SENSOR_PAS202B:
		reg_w(gspca_dev, 0x00, 0x01a7);
		break;
	}
}

/* Apply the AUTOGAIN control to bridge reg 0x0180
 * (0x42 = autocorrect on, 0x02 = off). */
static void setautogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 autoval;

	if (sd->ctrls[AUTOGAIN].val)
		autoval = 0x42;
	else
		autoval = 0x02;
	reg_w(gspca_dev, autoval, 0x0180);
}

/* Reset the bridge and write a per-sensor group of registers
 * (0x0038..0x003b) whose meaning is not documented — values taken from
 * vendor traces. */
static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
{
	reg_w(gspca_dev, 0x01, 0x0000);		/* bridge reset */
	switch (sensor) {
	case SENSOR_PAS106:
		reg_w(gspca_dev, 0x03, 0x003a);
		reg_w(gspca_dev, 0x0c, 0x003b);
		reg_w(gspca_dev, 0x08, 0x0038);
		break;
	case SENSOR_ADCM2700:
	case SENSOR_GC0305:
	case SENSOR_OV7620:
	case SENSOR_MT9V111_1:
	case SENSOR_MT9V111_3:
	case SENSOR_PB0330:
	case SENSOR_PO2030:
		reg_w(gspca_dev, 0x0d, 0x003a);
		reg_w(gspca_dev, 0x02, 0x003b);
		reg_w(gspca_dev, 0x00, 0x0038);
break; case SENSOR_HV7131R: case SENSOR_PAS202B: reg_w(gspca_dev, 0x03, 0x003b); reg_w(gspca_dev, 0x0c, 0x003a); reg_w(gspca_dev, 0x0b, 0x0039); if (sensor == SENSOR_PAS202B) reg_w(gspca_dev, 0x0b, 0x0038); break; } } /* start probe 2 wires */ static void start_2wr_probe(struct gspca_dev *gspca_dev, int sensor) { reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, sensor, 0x0010); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0x03, 0x0012); reg_w(gspca_dev, 0x01, 0x0012); /* msleep(2); */ } static int sif_probe(struct gspca_dev *gspca_dev) { u16 checkword; start_2wr_probe(gspca_dev, 0x0f); /* PAS106 */ reg_w(gspca_dev, 0x08, 0x008d); msleep(150); checkword = ((i2c_read(gspca_dev, 0x00) & 0x0f) << 4) | ((i2c_read(gspca_dev, 0x01) & 0xf0) >> 4); PDEBUG(D_PROBE, "probe sif 0x%04x", checkword); if (checkword == 0x0007) { send_unknown(gspca_dev, SENSOR_PAS106); return 0x0f; /* PAS106 */ } return -1; } static int vga_2wr_probe(struct gspca_dev *gspca_dev) { u16 retword; start_2wr_probe(gspca_dev, 0x00); /* HV7131B */ i2c_write(gspca_dev, 0x01, 0xaa, 0x00); retword = i2c_read(gspca_dev, 0x01); if (retword != 0) return 0x00; /* HV7131B */ start_2wr_probe(gspca_dev, 0x04); /* CS2102 */ i2c_write(gspca_dev, 0x01, 0xaa, 0x00); retword = i2c_read(gspca_dev, 0x01); if (retword != 0) return 0x04; /* CS2102 */ start_2wr_probe(gspca_dev, 0x06); /* OmniVision */ reg_w(gspca_dev, 0x08, 0x008d); i2c_write(gspca_dev, 0x11, 0xaa, 0x00); retword = i2c_read(gspca_dev, 0x11); if (retword != 0) { /* (should have returned 0xaa) --> Omnivision? 
*/ /* reg_r 0x10 -> 0x06 --> */ goto ov_check; } start_2wr_probe(gspca_dev, 0x08); /* HDCS2020 */ i2c_write(gspca_dev, 0x1c, 0x00, 0x00); i2c_write(gspca_dev, 0x15, 0xaa, 0x00); retword = i2c_read(gspca_dev, 0x15); if (retword != 0) return 0x08; /* HDCS2020 */ start_2wr_probe(gspca_dev, 0x0a); /* PB0330 */ i2c_write(gspca_dev, 0x07, 0xaa, 0xaa); retword = i2c_read(gspca_dev, 0x07); if (retword != 0) return 0x0a; /* PB0330 */ retword = i2c_read(gspca_dev, 0x03); if (retword != 0) return 0x0a; /* PB0330 ?? */ retword = i2c_read(gspca_dev, 0x04); if (retword != 0) return 0x0a; /* PB0330 ?? */ start_2wr_probe(gspca_dev, 0x0c); /* ICM105A */ i2c_write(gspca_dev, 0x01, 0x11, 0x00); retword = i2c_read(gspca_dev, 0x01); if (retword != 0) return 0x0c; /* ICM105A */ start_2wr_probe(gspca_dev, 0x0e); /* PAS202BCB */ reg_w(gspca_dev, 0x08, 0x008d); i2c_write(gspca_dev, 0x03, 0xaa, 0x00); msleep(50); retword = i2c_read(gspca_dev, 0x03); if (retword != 0) { send_unknown(gspca_dev, SENSOR_PAS202B); return 0x0e; /* PAS202BCB */ } start_2wr_probe(gspca_dev, 0x02); /* TAS5130C */ i2c_write(gspca_dev, 0x01, 0xaa, 0x00); retword = i2c_read(gspca_dev, 0x01); if (retword != 0) return 0x02; /* TAS5130C */ ov_check: reg_r(gspca_dev, 0x0010); /* ?? 
*/ reg_r(gspca_dev, 0x0010); reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0x06, 0x0010); /* OmniVision */ reg_w(gspca_dev, 0xa1, 0x008b); reg_w(gspca_dev, 0x08, 0x008d); msleep(500); reg_w(gspca_dev, 0x01, 0x0012); i2c_write(gspca_dev, 0x12, 0x80, 0x00); /* sensor reset */ retword = i2c_read(gspca_dev, 0x0a) << 8; retword |= i2c_read(gspca_dev, 0x0b); PDEBUG(D_PROBE, "probe 2wr ov vga 0x%04x", retword); switch (retword) { case 0x7631: /* OV7630C */ reg_w(gspca_dev, 0x06, 0x0010); break; case 0x7620: /* OV7620 */ case 0x7648: /* OV7648 */ break; default: return -1; /* not OmniVision */ } return retword; } struct sensor_by_chipset_revision { u16 revision; u8 internal_sensor_id; }; static const struct sensor_by_chipset_revision chipset_revision_sensor[] = { {0xc000, 0x12}, /* TAS5130C */ {0xc001, 0x13}, /* MT9V111 */ {0xe001, 0x13}, {0x8001, 0x13}, {0x8000, 0x14}, /* CS2102K */ {0x8400, 0x15}, /* MT9V111 */ {0xe400, 0x15}, }; static int vga_3wr_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; u16 retword; /*fixme: lack of 8b=b3 (11,12)-> 10, 8b=e0 (14,15,16)-> 12 found in gspcav1*/ reg_w(gspca_dev, 0x02, 0x0010); reg_r(gspca_dev, 0x0010); reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, 0x00, 0x0010); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0x91, 0x008b); reg_w(gspca_dev, 0x03, 0x0012); reg_w(gspca_dev, 0x01, 0x0012); reg_w(gspca_dev, 0x05, 0x0012); retword = i2c_read(gspca_dev, 0x14); if (retword != 0) return 0x11; /* HV7131R */ retword = i2c_read(gspca_dev, 0x15); if (retword != 0) return 0x11; /* HV7131R */ retword = i2c_read(gspca_dev, 0x16); if (retword != 0) return 0x11; /* HV7131R */ reg_w(gspca_dev, 0x02, 0x0010); retword = reg_r(gspca_dev, 0x000b) << 8; retword |= reg_r(gspca_dev, 0x000a); PDEBUG(D_PROBE, "probe 3wr vga 1 0x%04x", retword); reg_r(gspca_dev, 0x0010); if ((retword & 0xff00) == 0x6400) return 0x02; /* TAS5130C */ for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); 
i++) { if (chipset_revision_sensor[i].revision == retword) { sd->chip_revision = retword; send_unknown(gspca_dev, SENSOR_PB0330); return chipset_revision_sensor[i].internal_sensor_id; } } reg_w(gspca_dev, 0x01, 0x0000); /* check PB0330 */ reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0xdd, 0x008b); reg_w(gspca_dev, 0x0a, 0x0010); reg_w(gspca_dev, 0x03, 0x0012); reg_w(gspca_dev, 0x01, 0x0012); retword = i2c_read(gspca_dev, 0x00); if (retword != 0) { PDEBUG(D_PROBE, "probe 3wr vga type 0a"); return 0x0a; /* PB0330 */ } /* probe gc0303 / gc0305 */ reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0x98, 0x008b); reg_w(gspca_dev, 0x01, 0x0010); reg_w(gspca_dev, 0x03, 0x0012); msleep(2); reg_w(gspca_dev, 0x01, 0x0012); retword = i2c_read(gspca_dev, 0x00); if (retword != 0) { PDEBUG(D_PROBE, "probe 3wr vga type %02x", retword); if (retword == 0x0011) /* gc0303 */ return 0x0303; if (retword == 0x0029) /* gc0305 */ send_unknown(gspca_dev, SENSOR_GC0305); return retword; } reg_w(gspca_dev, 0x01, 0x0000); /* check OmniVision */ reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0xa1, 0x008b); reg_w(gspca_dev, 0x08, 0x008d); reg_w(gspca_dev, 0x06, 0x0010); reg_w(gspca_dev, 0x01, 0x0012); reg_w(gspca_dev, 0x05, 0x0012); if (i2c_read(gspca_dev, 0x1c) == 0x007f /* OV7610 - manufacturer ID */ && i2c_read(gspca_dev, 0x1d) == 0x00a2) { send_unknown(gspca_dev, SENSOR_OV7620); return 0x06; /* OmniVision confirm ? 
*/ } reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, 0x00, 0x0002); reg_w(gspca_dev, 0x01, 0x0010); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0xee, 0x008b); reg_w(gspca_dev, 0x03, 0x0012); reg_w(gspca_dev, 0x01, 0x0012); reg_w(gspca_dev, 0x05, 0x0012); retword = i2c_read(gspca_dev, 0x00) << 8; /* ID 0 */ retword |= i2c_read(gspca_dev, 0x01); /* ID 1 */ PDEBUG(D_PROBE, "probe 3wr vga 2 0x%04x", retword); if (retword == 0x2030) { #ifdef GSPCA_DEBUG u8 retbyte; retbyte = i2c_read(gspca_dev, 0x02); /* revision number */ PDEBUG(D_PROBE, "sensor PO2030 rev 0x%02x", retbyte); #endif send_unknown(gspca_dev, SENSOR_PO2030); return retword; } reg_w(gspca_dev, 0x01, 0x0000); reg_w(gspca_dev, 0x0a, 0x0010); reg_w(gspca_dev, 0xd3, 0x008b); reg_w(gspca_dev, 0x01, 0x0001); reg_w(gspca_dev, 0x03, 0x0012); reg_w(gspca_dev, 0x01, 0x0012); reg_w(gspca_dev, 0x05, 0x0012); reg_w(gspca_dev, 0xd3, 0x008b); retword = i2c_read(gspca_dev, 0x01); if (retword != 0) { PDEBUG(D_PROBE, "probe 3wr vga type 0a ? 
ret: %04x", retword); return 0x16; /* adcm2700 (6100/6200) */ } return -1; } static int zcxx_probeSensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int sensor; switch (sd->sensor) { case SENSOR_MC501CB: return -1; /* don't probe */ case SENSOR_GC0303: /* may probe but with no write in reg 0x0010 */ return -1; /* don't probe */ case SENSOR_PAS106: sensor = sif_probe(gspca_dev); if (sensor >= 0) return sensor; break; } sensor = vga_2wr_probe(gspca_dev); if (sensor >= 0) return sensor; return vga_3wr_probe(gspca_dev); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; if (id->idProduct == 0x301b) sd->bridge = BRIDGE_ZC301; else sd->bridge = BRIDGE_ZC303; /* define some sensors from the vendor/product */ sd->sensor = id->driver_info; gspca_dev->cam.ctrls = sd->ctrls; sd->quality = QUALITY_DEF; /* if USB 1.1, let some bandwidth for the audio device */ if (gspca_dev->audio && gspca_dev->dev->speed < USB_SPEED_HIGH) gspca_dev->nbalt--; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; int sensor; static const u8 gamma[SENSOR_MAX] = { [SENSOR_ADCM2700] = 4, [SENSOR_CS2102] = 4, [SENSOR_CS2102K] = 5, [SENSOR_GC0303] = 3, [SENSOR_GC0305] = 4, [SENSOR_HDCS2020] = 4, [SENSOR_HV7131B] = 4, [SENSOR_HV7131R] = 4, [SENSOR_ICM105A] = 4, [SENSOR_MC501CB] = 4, [SENSOR_MT9V111_1] = 4, [SENSOR_MT9V111_3] = 4, [SENSOR_OV7620] = 3, [SENSOR_OV7630C] = 4, [SENSOR_PAS106] = 4, [SENSOR_PAS202B] = 4, [SENSOR_PB0330] = 4, [SENSOR_PO2030] = 4, [SENSOR_TAS5130C] = 3, }; static const u8 mode_tb[SENSOR_MAX] = { [SENSOR_ADCM2700] = 2, [SENSOR_CS2102] = 1, [SENSOR_CS2102K] = 1, [SENSOR_GC0303] = 1, [SENSOR_GC0305] = 1, [SENSOR_HDCS2020] = 1, [SENSOR_HV7131B] = 1, [SENSOR_HV7131R] = 1, [SENSOR_ICM105A] = 1, [SENSOR_MC501CB] = 2, 
[SENSOR_MT9V111_1] = 1, [SENSOR_MT9V111_3] = 1, [SENSOR_OV7620] = 2, [SENSOR_OV7630C] = 1, [SENSOR_PAS106] = 0, [SENSOR_PAS202B] = 1, [SENSOR_PB0330] = 1, [SENSOR_PO2030] = 1, [SENSOR_TAS5130C] = 1, }; sensor = zcxx_probeSensor(gspca_dev); if (sensor >= 0) PDEBUG(D_PROBE, "probe sensor -> %04x", sensor); if ((unsigned) force_sensor < SENSOR_MAX) { sd->sensor = force_sensor; PDEBUG(D_PROBE, "sensor forced to %d", force_sensor); } else { switch (sensor) { case -1: switch (sd->sensor) { case SENSOR_MC501CB: PDEBUG(D_PROBE, "Sensor MC501CB"); break; case SENSOR_GC0303: PDEBUG(D_PROBE, "Sensor GC0303"); break; default: warn("Unknown sensor - set to TAS5130C"); sd->sensor = SENSOR_TAS5130C; } break; case 0: /* check the sensor type */ sensor = i2c_read(gspca_dev, 0x00); PDEBUG(D_PROBE, "Sensor hv7131 type %d", sensor); switch (sensor) { case 0: /* hv7131b */ case 1: /* hv7131e */ PDEBUG(D_PROBE, "Find Sensor HV7131B"); sd->sensor = SENSOR_HV7131B; break; default: /* case 2: * hv7131r */ PDEBUG(D_PROBE, "Find Sensor HV7131R"); sd->sensor = SENSOR_HV7131R; break; } break; case 0x02: PDEBUG(D_PROBE, "Sensor TAS5130C"); sd->sensor = SENSOR_TAS5130C; break; case 0x04: PDEBUG(D_PROBE, "Find Sensor CS2102"); sd->sensor = SENSOR_CS2102; break; case 0x08: PDEBUG(D_PROBE, "Find Sensor HDCS2020"); sd->sensor = SENSOR_HDCS2020; break; case 0x0a: PDEBUG(D_PROBE, "Find Sensor PB0330. Chip revision %x", sd->chip_revision); sd->sensor = SENSOR_PB0330; break; case 0x0c: PDEBUG(D_PROBE, "Find Sensor ICM105A"); sd->sensor = SENSOR_ICM105A; break; case 0x0e: PDEBUG(D_PROBE, "Find Sensor PAS202B"); sd->sensor = SENSOR_PAS202B; /* sd->sharpness = 1; */ break; case 0x0f: PDEBUG(D_PROBE, "Find Sensor PAS106"); sd->sensor = SENSOR_PAS106; break; case 0x10: case 0x12: PDEBUG(D_PROBE, "Find Sensor TAS5130C"); sd->sensor = SENSOR_TAS5130C; break; case 0x11: PDEBUG(D_PROBE, "Find Sensor HV7131R"); sd->sensor = SENSOR_HV7131R; break; case 0x13: case 0x15: PDEBUG(D_PROBE, "Sensor MT9V111. 
Chip revision %04x", sd->chip_revision); sd->sensor = sd->bridge == BRIDGE_ZC301 ? SENSOR_MT9V111_1 : SENSOR_MT9V111_3; break; case 0x14: PDEBUG(D_PROBE, "Find Sensor CS2102K?. Chip revision %x", sd->chip_revision); sd->sensor = SENSOR_CS2102K; break; case 0x16: PDEBUG(D_PROBE, "Find Sensor ADCM2700"); sd->sensor = SENSOR_ADCM2700; break; case 0x29: PDEBUG(D_PROBE, "Find Sensor GC0305"); sd->sensor = SENSOR_GC0305; break; case 0x0303: PDEBUG(D_PROBE, "Sensor GC0303"); sd->sensor = SENSOR_GC0303; break; case 0x2030: PDEBUG(D_PROBE, "Find Sensor PO2030"); sd->sensor = SENSOR_PO2030; sd->ctrls[SHARPNESS].def = 0; /* from win traces */ break; case 0x7620: PDEBUG(D_PROBE, "Find Sensor OV7620"); sd->sensor = SENSOR_OV7620; break; case 0x7631: PDEBUG(D_PROBE, "Find Sensor OV7630C"); sd->sensor = SENSOR_OV7630C; break; case 0x7648: PDEBUG(D_PROBE, "Find Sensor OV7648"); sd->sensor = SENSOR_OV7620; /* same sensor (?) */ break; default: err("Unknown sensor %04x", sensor); return -EINVAL; } } if (sensor < 0x20) { if (sensor == -1 || sensor == 0x10 || sensor == 0x12) reg_w(gspca_dev, 0x02, 0x0010); reg_r(gspca_dev, 0x0010); } cam = &gspca_dev->cam; switch (mode_tb[sd->sensor]) { case 0: cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); break; case 1: cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); break; default: /* case 2: */ cam->cam_mode = broken_vga_mode; cam->nmodes = ARRAY_SIZE(broken_vga_mode); break; } sd->ctrls[GAMMA].def = gamma[sd->sensor]; switch (sd->sensor) { case SENSOR_HV7131R: break; case SENSOR_OV7630C: gspca_dev->ctrl_dis = (1 << LIGHTFREQ) | (1 << EXPOSURE); break; default: gspca_dev->ctrl_dis = (1 << EXPOSURE); break; } #if AUTOGAIN_DEF if (sd->ctrls[AUTOGAIN].val) gspca_dev->ctrl_inac = (1 << EXPOSURE); #endif /* switch off the led */ reg_w(gspca_dev, 0x01, 0x0000); return gspca_dev->usb_err; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; static const struct usb_action 
*init_tb[SENSOR_MAX][2] = { [SENSOR_ADCM2700] = {adcm2700_Initial, adcm2700_InitialScale}, [SENSOR_CS2102] = {cs2102_Initial, cs2102_InitialScale}, [SENSOR_CS2102K] = {cs2102K_Initial, cs2102K_InitialScale}, [SENSOR_GC0303] = {gc0303_Initial, gc0303_InitialScale}, [SENSOR_GC0305] = {gc0305_Initial, gc0305_InitialScale}, [SENSOR_HDCS2020] = {hdcs2020_Initial, hdcs2020_InitialScale}, [SENSOR_HV7131B] = {hv7131b_Initial, hv7131b_InitialScale}, [SENSOR_HV7131R] = {hv7131r_Initial, hv7131r_InitialScale}, [SENSOR_ICM105A] = {icm105a_Initial, icm105a_InitialScale}, [SENSOR_MC501CB] = {mc501cb_Initial, mc501cb_InitialScale}, [SENSOR_MT9V111_1] = {mt9v111_1_Initial, mt9v111_1_InitialScale}, [SENSOR_MT9V111_3] = {mt9v111_3_Initial, mt9v111_3_InitialScale}, [SENSOR_OV7620] = {ov7620_Initial, ov7620_InitialScale}, [SENSOR_OV7630C] = {ov7630c_Initial, ov7630c_InitialScale}, [SENSOR_PAS106] = {pas106b_Initial, pas106b_InitialScale}, [SENSOR_PAS202B] = {pas202b_Initial, pas202b_InitialScale}, [SENSOR_PB0330] = {pb0330_Initial, pb0330_InitialScale}, [SENSOR_PO2030] = {po2030_Initial, po2030_InitialScale}, [SENSOR_TAS5130C] = {tas5130c_Initial, tas5130c_InitialScale}, }; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; switch (sd->sensor) { case SENSOR_HV7131R: zcxx_probeSensor(gspca_dev); break; case SENSOR_PAS106: usb_exchange(gspca_dev, pas106b_Initial_com); break; } usb_exchange(gspca_dev, init_tb[sd->sensor][mode]); switch (sd->sensor) { case SENSOR_ADCM2700: case SENSOR_GC0305: case SENSOR_OV7620: case SENSOR_PO2030: case SENSOR_TAS5130C: case SENSOR_GC0303: /* msleep(100); * ?? 
*/ reg_r(gspca_dev, 0x0002); /* --> 0x40 */ reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ reg_w(gspca_dev, 0x15, 0x01ae); if (sd->sensor == SENSOR_TAS5130C) break; reg_w(gspca_dev, 0x0d, 0x003a); reg_w(gspca_dev, 0x02, 0x003b); reg_w(gspca_dev, 0x00, 0x0038); break; case SENSOR_HV7131R: case SENSOR_PAS202B: reg_w(gspca_dev, 0x03, 0x003b); reg_w(gspca_dev, 0x0c, 0x003a); reg_w(gspca_dev, 0x0b, 0x0039); if (sd->sensor == SENSOR_HV7131R) reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN); break; } setmatrix(gspca_dev); switch (sd->sensor) { case SENSOR_ADCM2700: case SENSOR_OV7620: reg_r(gspca_dev, 0x0008); reg_w(gspca_dev, 0x00, 0x0008); break; case SENSOR_PAS202B: case SENSOR_GC0305: case SENSOR_HV7131R: case SENSOR_TAS5130C: reg_r(gspca_dev, 0x0008); /* fall thru */ case SENSOR_PO2030: reg_w(gspca_dev, 0x03, 0x0008); break; } setsharpness(gspca_dev); /* set the gamma tables when not set */ switch (sd->sensor) { case SENSOR_CS2102K: /* gamma set in xxx_Initial */ case SENSOR_HDCS2020: case SENSOR_OV7630C: break; default: setcontrast(gspca_dev); break; } setmatrix(gspca_dev); /* one more time? 
*/ switch (sd->sensor) { case SENSOR_OV7620: case SENSOR_PAS202B: reg_r(gspca_dev, 0x0180); /* from win */ reg_w(gspca_dev, 0x00, 0x0180); break; default: setquality(gspca_dev); break; } setlightfreq(gspca_dev); switch (sd->sensor) { case SENSOR_ADCM2700: reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ reg_w(gspca_dev, 0x15, 0x01ae); reg_w(gspca_dev, 0x02, 0x0180); /* ms-win + */ reg_w(gspca_dev, 0x40, 0x0117); break; case SENSOR_HV7131R: if (!sd->ctrls[AUTOGAIN].val) setexposure(gspca_dev); reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN); break; case SENSOR_GC0305: case SENSOR_TAS5130C: reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ reg_w(gspca_dev, 0x15, 0x01ae); /* fall thru */ case SENSOR_PAS202B: case SENSOR_PO2030: /* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */ reg_r(gspca_dev, 0x0180); break; case SENSOR_OV7620: reg_w(gspca_dev, 0x09, 0x01ad); reg_w(gspca_dev, 0x15, 0x01ae); i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */ i2c_write(gspca_dev, 0x13, 0xa3, 0x00); /*fixme: returned value to send? 
*/ reg_w(gspca_dev, 0x40, 0x0117); reg_r(gspca_dev, 0x0180); break; } setautogain(gspca_dev); switch (sd->sensor) { case SENSOR_PO2030: msleep(50); reg_w(gspca_dev, 0x00, 0x0007); /* (from win traces) */ reg_w(gspca_dev, 0x02, ZC3XX_R008_CLOCKSETTING); break; } return gspca_dev->usb_err; } /* called on streamoff with alt 0 and on disconnect */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!gspca_dev->present) return; send_unknown(gspca_dev, sd->sensor); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* check the JPEG end of frame */ if (len >= 3 && data[len - 3] == 0xff && data[len - 2] == 0xd9) { /*fixme: what does the last byte mean?*/ gspca_frame_add(gspca_dev, LAST_PACKET, data, len - 1); return; } /* check the JPEG start of a frame */ if (data[0] == 0xff && data[1] == 0xd8) { /* put the JPEG header in the new frame */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* remove the webcam's header: * ff d8 ff fe 00 0e 00 00 ss ss 00 01 ww ww hh hh pp pp * - 'ss ss' is the frame sequence number (BE) * - 'ww ww' and 'hh hh' are the window dimensions (BE) * - 'pp pp' is the packet sequence number (BE) */ data += 18; len -= 18; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->ctrls[AUTOGAIN].val = val; if (val) { gspca_dev->ctrl_inac |= (1 << EXPOSURE); } else { gspca_dev->ctrl_inac &= ~(1 << EXPOSURE); if (gspca_dev->streaming) getexposure(gspca_dev); } if (gspca_dev->streaming) setautogain(gspca_dev); return gspca_dev->usb_err; } static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: switch (menu->index) { case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ strcpy((char *) menu->name, "NoFliker"); return 0; case 1: /* 
V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ strcpy((char *) menu->name, "50 Hz"); return 0; case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ strcpy((char *) menu->name, "60 Hz"); return 0; } break; } return -EINVAL; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (jcomp->quality < QUALITY_MIN) sd->quality = QUALITY_MIN; else if (jcomp->quality > QUALITY_MAX) sd->quality = QUALITY_MAX; else sd->quality = jcomp->quality; if (gspca_dev->streaming) jpeg_set_qual(sd->jpeg_hdr, sd->quality); return gspca_dev->usb_err; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = sd->quality; jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrput packet length */ { if (len == 8 && data[4] == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); } return 0; } #endif static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .querymenu = sd_querymenu, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) .int_pkt_scan = sd_int_pkt_scan, #endif }; static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x041e)}, {USB_DEVICE(0x041e, 0x4017)}, {USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x041e, 0x401e)}, {USB_DEVICE(0x041e, 0x401f)}, {USB_DEVICE(0x041e, 0x4022)}, {USB_DEVICE(0x041e, 0x4029)}, 
{USB_DEVICE(0x041e, 0x4034), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x041e, 0x4035), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x041e, 0x4036)}, {USB_DEVICE(0x041e, 0x403a)}, {USB_DEVICE(0x041e, 0x4051), .driver_info = SENSOR_GC0303}, {USB_DEVICE(0x041e, 0x4053), .driver_info = SENSOR_GC0303}, {USB_DEVICE(0x0458, 0x7007)}, {USB_DEVICE(0x0458, 0x700c)}, {USB_DEVICE(0x0458, 0x700f)}, {USB_DEVICE(0x0461, 0x0a00)}, {USB_DEVICE(0x046d, 0x089d), .driver_info = SENSOR_MC501CB}, {USB_DEVICE(0x046d, 0x08a0)}, {USB_DEVICE(0x046d, 0x08a1)}, {USB_DEVICE(0x046d, 0x08a2)}, {USB_DEVICE(0x046d, 0x08a3)}, {USB_DEVICE(0x046d, 0x08a6)}, {USB_DEVICE(0x046d, 0x08a7)}, {USB_DEVICE(0x046d, 0x08a9)}, {USB_DEVICE(0x046d, 0x08aa)}, {USB_DEVICE(0x046d, 0x08ac)}, {USB_DEVICE(0x046d, 0x08ad)}, {USB_DEVICE(0x046d, 0x08ae)}, {USB_DEVICE(0x046d, 0x08af)}, {USB_DEVICE(0x046d, 0x08b9)}, {USB_DEVICE(0x046d, 0x08d7)}, {USB_DEVICE(0x046d, 0x08d8)}, {USB_DEVICE(0x046d, 0x08d9)}, {USB_DEVICE(0x046d, 0x08da)}, {USB_DEVICE(0x046d, 0x08dd), .driver_info = SENSOR_MC501CB}, {USB_DEVICE(0x0471, 0x0325), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x0471, 0x0326), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x0471, 0x032d), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x0471, 0x032e), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x055f, 0xc005)}, {USB_DEVICE(0x055f, 0xd003)}, {USB_DEVICE(0x055f, 0xd004)}, {USB_DEVICE(0x0698, 0x2003)}, {USB_DEVICE(0x0ac8, 0x0301), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x0ac8, 0x0302), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x0ac8, 0x301b)}, {USB_DEVICE(0x0ac8, 0x303b)}, {USB_DEVICE(0x0ac8, 0x305b)}, {USB_DEVICE(0x0ac8, 0x307b)}, {USB_DEVICE(0x10fd, 0x0128)}, {USB_DEVICE(0x10fd, 0x804d)}, {USB_DEVICE(0x10fd, 0x8050)}, {} /* end of entry */ }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } /* 
USB driver */ static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; static int __init sd_mod_init(void) { return usb_register(&sd_driver); } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); } module_init(sd_mod_init); module_exit(sd_mod_exit); module_param(force_sensor, int, 0644); MODULE_PARM_DESC(force_sensor, "Force sensor. Only for experts!!!");
gpl-2.0
OnePlusOSS/android_kernel_oneplus_msm8994
scripts/pnmtologo.c
2769
12198
/* * Convert a logo in ASCII PNM format to C source suitable for inclusion in * the Linux kernel * * (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org> * * -------------------------------------------------------------------------- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ #include <ctype.h> #include <errno.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> static const char *programname; static const char *filename; static const char *logoname = "linux_logo"; static const char *outputname; static FILE *out; #define LINUX_LOGO_MONO 1 /* monochrome black/white */ #define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */ #define LINUX_LOGO_CLUT224 3 /* 224 colors */ #define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */ static const char *logo_types[LINUX_LOGO_GRAY256+1] = { [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO", [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16", [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224", [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256" }; #define MAX_LINUX_LOGO_COLORS 224 struct color { unsigned char red; unsigned char green; unsigned char blue; }; static const struct color clut_vga16[16] = { { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xaa }, { 0x00, 0xaa, 0x00 }, { 0x00, 0xaa, 0xaa }, { 0xaa, 0x00, 0x00 }, { 0xaa, 0x00, 0xaa }, { 0xaa, 0x55, 0x00 }, { 0xaa, 0xaa, 0xaa }, { 0x55, 0x55, 0x55 }, { 0x55, 0x55, 0xff }, { 0x55, 0xff, 0x55 }, { 0x55, 0xff, 0xff }, { 0xff, 0x55, 0x55 }, { 0xff, 0x55, 0xff }, { 0xff, 0xff, 0x55 }, { 0xff, 0xff, 0xff }, }; static int logo_type = LINUX_LOGO_CLUT224; static unsigned int logo_width; static unsigned int logo_height; static struct color **logo_data; static struct color logo_clut[MAX_LINUX_LOGO_COLORS]; static unsigned int logo_clutsize; static int is_plain_pbm = 0; static void die(const char *fmt, ...) 
__attribute__ ((noreturn)) __attribute ((format (printf, 1, 2))); static void usage(void) __attribute ((noreturn)); static unsigned int get_number(FILE *fp) { int c, val; /* Skip leading whitespace */ do { c = fgetc(fp); if (c == EOF) die("%s: end of file\n", filename); if (c == '#') { /* Ignore comments 'till end of line */ do { c = fgetc(fp); if (c == EOF) die("%s: end of file\n", filename); } while (c != '\n'); } } while (isspace(c)); /* Parse decimal number */ val = 0; while (isdigit(c)) { val = 10*val+c-'0'; /* some PBM are 'broken'; GiMP for example exports a PBM without space * between the digits. This is Ok cause we know a PBM can only have a '1' * or a '0' for the digit. */ if (is_plain_pbm) break; c = fgetc(fp); if (c == EOF) die("%s: end of file\n", filename); } return val; } static unsigned int get_number255(FILE *fp, unsigned int maxval) { unsigned int val = get_number(fp); return (255*val+maxval/2)/maxval; } static void read_image(void) { FILE *fp; unsigned int i, j; int magic; unsigned int maxval; /* open image file */ fp = fopen(filename, "r"); if (!fp) die("Cannot open file %s: %s\n", filename, strerror(errno)); /* check file type and read file header */ magic = fgetc(fp); if (magic != 'P') die("%s is not a PNM file\n", filename); magic = fgetc(fp); switch (magic) { case '1': case '2': case '3': /* Plain PBM/PGM/PPM */ break; case '4': case '5': case '6': /* Binary PBM/PGM/PPM */ die("%s: Binary PNM is not supported\n" "Use pnmnoraw(1) to convert it to ASCII PNM\n", filename); default: die("%s is not a PNM file\n", filename); } logo_width = get_number(fp); logo_height = get_number(fp); /* allocate image data */ logo_data = (struct color **)malloc(logo_height*sizeof(struct color *)); if (!logo_data) die("%s\n", strerror(errno)); for (i = 0; i < logo_height; i++) { logo_data[i] = malloc(logo_width*sizeof(struct color)); if (!logo_data[i]) die("%s\n", strerror(errno)); } /* read image data */ switch (magic) { case '1': /* Plain PBM */ is_plain_pbm = 
1; for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) logo_data[i][j].red = logo_data[i][j].green = logo_data[i][j].blue = 255*(1-get_number(fp)); break; case '2': /* Plain PGM */ maxval = get_number(fp); for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) logo_data[i][j].red = logo_data[i][j].green = logo_data[i][j].blue = get_number255(fp, maxval); break; case '3': /* Plain PPM */ maxval = get_number(fp); for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) { logo_data[i][j].red = get_number255(fp, maxval); logo_data[i][j].green = get_number255(fp, maxval); logo_data[i][j].blue = get_number255(fp, maxval); } break; } /* close file */ fclose(fp); } static inline int is_black(struct color c) { return c.red == 0 && c.green == 0 && c.blue == 0; } static inline int is_white(struct color c) { return c.red == 255 && c.green == 255 && c.blue == 255; } static inline int is_gray(struct color c) { return c.red == c.green && c.red == c.blue; } static inline int is_equal(struct color c1, struct color c2) { return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue; } static void write_header(void) { /* open logo file */ if (outputname) { out = fopen(outputname, "w"); if (!out) die("Cannot create file %s: %s\n", outputname, strerror(errno)); } else { out = stdout; } fputs("/*\n", out); fputs(" * DO NOT EDIT THIS FILE!\n", out); fputs(" *\n", out); fprintf(out, " * It was automatically generated from %s\n", filename); fputs(" *\n", out); fprintf(out, " * Linux logo %s\n", logoname); fputs(" */\n\n", out); fputs("#include <linux/linux_logo.h>\n\n", out); fprintf(out, "static unsigned char %s_data[] __initdata = {\n", logoname); } static void write_footer(void) { fputs("\n};\n\n", out); fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); fprintf(out, "\t.width\t\t= %d,\n", logo_width); fprintf(out, "\t.height\t\t= %d,\n", logo_height); if 
(logo_type == LINUX_LOGO_CLUT224) { fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize); fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname); } fprintf(out, "\t.data\t\t= %s_data\n", logoname); fputs("};\n\n", out); /* close logo file */ if (outputname) fclose(out); } static int write_hex_cnt; static void write_hex(unsigned char byte) { if (write_hex_cnt % 12) fprintf(out, ", 0x%02x", byte); else if (write_hex_cnt) fprintf(out, ",\n\t0x%02x", byte); else fprintf(out, "\t0x%02x", byte); write_hex_cnt++; } static void write_logo_mono(void) { unsigned int i, j; unsigned char val, bit; /* validate image */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j])) die("Image must be monochrome\n"); /* write file header */ write_header(); /* write logo data */ for (i = 0; i < logo_height; i++) { for (j = 0; j < logo_width;) { for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1) if (logo_data[i][j].red) val |= bit; write_hex(val); } } /* write logo structure and file footer */ write_footer(); } static void write_logo_vga16(void) { unsigned int i, j, k; unsigned char val; /* validate image */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) { for (k = 0; k < 16; k++) if (is_equal(logo_data[i][j], clut_vga16[k])) break; if (k == 16) die("Image must use the 16 console colors only\n" "Use ppmquant(1) -map clut_vga16.ppm to reduce the number " "of colors\n"); } /* write file header */ write_header(); /* write logo data */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) { for (k = 0; k < 16; k++) if (is_equal(logo_data[i][j], clut_vga16[k])) break; val = k<<4; if (++j < logo_width) { for (k = 0; k < 16; k++) if (is_equal(logo_data[i][j], clut_vga16[k])) break; val |= k; } write_hex(val); } /* write logo structure and file footer */ write_footer(); } static void write_logo_clut224(void) { unsigned int i, j, k; /* validate image */ for (i = 0; i < logo_height; 
i++) for (j = 0; j < logo_width; j++) { for (k = 0; k < logo_clutsize; k++) if (is_equal(logo_data[i][j], logo_clut[k])) break; if (k == logo_clutsize) { if (logo_clutsize == MAX_LINUX_LOGO_COLORS) die("Image has more than %d colors\n" "Use ppmquant(1) to reduce the number of colors\n", MAX_LINUX_LOGO_COLORS); logo_clut[logo_clutsize++] = logo_data[i][j]; } } /* write file header */ write_header(); /* write logo data */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) { for (k = 0; k < logo_clutsize; k++) if (is_equal(logo_data[i][j], logo_clut[k])) break; write_hex(k+32); } fputs("\n};\n\n", out); /* write logo clut */ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", logoname); write_hex_cnt = 0; for (i = 0; i < logo_clutsize; i++) { write_hex(logo_clut[i].red); write_hex(logo_clut[i].green); write_hex(logo_clut[i].blue); } /* write logo structure and file footer */ write_footer(); } static void write_logo_gray256(void) { unsigned int i, j; /* validate image */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) if (!is_gray(logo_data[i][j])) die("Image must be grayscale\n"); /* write file header */ write_header(); /* write logo data */ for (i = 0; i < logo_height; i++) for (j = 0; j < logo_width; j++) write_hex(logo_data[i][j].red); /* write logo structure and file footer */ write_footer(); } static void die(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); exit(1); } static void usage(void) { die("\n" "Usage: %s [options] <filename>\n" "\n" "Valid options:\n" " -h : display this usage information\n" " -n <name> : specify logo name (default: linux_logo)\n" " -o <output> : output to file <output> instead of stdout\n" " -t <type> : specify logo type, one of\n" " mono : monochrome black/white\n" " vga16 : 16 colors VGA text palette\n" " clut224 : 224 colors (default)\n" " gray256 : 256 levels grayscale\n" "\n", programname); } int main(int argc, char *argv[]) { int opt; programname = argv[0]; opterr = 0; while (1) { opt = getopt(argc, argv, "hn:o:t:"); if (opt == -1) break; switch (opt) { case 'h': usage(); break; case 'n': logoname = optarg; break; case 'o': outputname = optarg; break; case 't': if (!strcmp(optarg, "mono")) logo_type = LINUX_LOGO_MONO; else if (!strcmp(optarg, "vga16")) logo_type = LINUX_LOGO_VGA16; else if (!strcmp(optarg, "clut224")) logo_type = LINUX_LOGO_CLUT224; else if (!strcmp(optarg, "gray256")) logo_type = LINUX_LOGO_GRAY256; else usage(); break; default: usage(); break; } } if (optind != argc-1) usage(); filename = argv[optind]; read_image(); switch (logo_type) { case LINUX_LOGO_MONO: write_logo_mono(); break; case LINUX_LOGO_VGA16: write_logo_vga16(); break; case LINUX_LOGO_CLUT224: write_logo_clut224(); break; case LINUX_LOGO_GRAY256: write_logo_gray256(); break; } exit(0); }
gpl-2.0
roalex/sgs3-kernel
drivers/edac/i82875p_edac.c
3025
14886
/* * Intel D82875P Memory Controller kernel module * (C) 2003 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Thayne Harbaugh * Contributors: * Wang Zhenyu at intel.com * * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ * * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include "edac_core.h" #define I82875P_REVISION " Ver: 2.0.2" #define EDAC_MOD_STR "i82875p_edac" #define i82875p_printk(level, fmt, arg...) \ edac_printk(level, "i82875p", fmt, ##arg) #define i82875p_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82875_0 #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 #endif /* PCI_DEVICE_ID_INTEL_82875_0 */ #ifndef PCI_DEVICE_ID_INTEL_82875_6 #define PCI_DEVICE_ID_INTEL_82875_6 0x257e #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ /* four csrows in dual channel, eight in single channel */ #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ #define I82875P_EAP 0x58 /* Error Address Pointer (32b) * * 31:12 block address * 11:0 reserved */ #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b) * * 7:0 DRAM ECC Syndrome */ #define I82875P_DES 0x5d /* DRAM Error Status (8b) * * 7:1 reserved * 0 Error channel 0/1 */ #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b) * * 15:10 reserved * 9 non-DRAM lock error (ndlock) * 8 Sftwr Generated SMI * 7 ECC UE * 6 reserved * 5 MCH detects unimplemented cycle * 4 AGP access outside GA * 3 Invalid AGP access * 2 Invalid GA translation table * 1 Unsupported AGP command * 0 ECC CE */ #define I82875P_ERRCMD 0xca /* Error Command (16b) * * 15:10 reserved * 9 SERR on non-DRAM lock * 8 SERR on ECC UE * 7 SERR on ECC CE * 6 target abort on high 
exception * 5 detect unimplemented cyc * 4 AGP access outside of GA * 3 SERR on invalid AGP access * 2 invalid translation table * 1 SERR on unsupported AGP command * 0 reserved */ /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) * * 15:10 reserved * 9 fast back-to-back - ro 0 * 8 SERR enable - ro 0 * 7 addr/data stepping - ro 0 * 6 parity err enable - ro 0 * 5 VGA palette snoop - ro 0 * 4 mem wr & invalidate - ro 0 * 3 special cycle - ro 0 * 2 bus master - ro 0 * 1 mem access dev6 - 0(dis),1(en) * 0 IO access dev3 - 0(dis),1(en) */ #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b) * * 31:12 mem base addr [31:12] * 11:4 address mask - ro 0 * 3 prefetchable - ro 0(non),1(pre) * 2:1 mem type - ro 0 * 0 mem space - ro 0 */ /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */ #define I82875P_DRB_SHIFT 26 /* 64MiB grain */ #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8) * * 7 reserved * 6:0 64MiB row boundary addr */ #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8) * * 7 reserved * 6:4 row attr row 1 * 3 reserved * 2:0 row attr row 0 * * 000 = 4KiB * 001 = 8KiB * 010 = 16KiB * 011 = 32KiB */ #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b) * * 31:30 reserved * 29 init complete * 28:23 reserved * 22:21 nr chan 00=1,01=2 * 20 reserved * 19:18 Data Integ Mode 00=none,01=ecc * 17:11 reserved * 10:8 refresh mode * 7 reserved * 6:4 mode select * 3:2 reserved * 1:0 DRAM type 01=DDR */ enum i82875p_chips { I82875P = 0, }; struct i82875p_pvt { struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window; }; struct i82875p_dev_info { const char *ctl_name; }; struct i82875p_error_info { u16 errsts; u32 eap; u8 des; u8 derrsyn; u16 errsts2; }; static const struct i82875p_dev_info i82875p_devs[] = { [I82875P] = { .ctl_name = "i82875p"}, }; static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has * already registered driver */ static 
struct edac_pci_ctl_info *i82875p_pci; static void i82875p_get_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info) { struct pci_dev *pdev; pdev = to_pci_dev(mci->dev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); if (!(info->errsts & 0x0081)) return; pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); /* * If the error is the same then we can for both reads then * the first set of reads is valid. If there is a change then * there is a CE no info and the second set of reads is valid * and should be UE info. */ if ((info->errsts ^ info->errsts2) & 0x0081) { pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); } pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); } static int i82875p_process_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info, int handle_errors) { int row, multi_chan; multi_chan = mci->csrows[0].nr_channels - 1; if (!(info->errsts & 0x0081)) return 0; if (!handle_errors) return 1; if ((info->errsts ^ info->errsts2) & 0x0081) { edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); info->errsts = info->errsts2; } info->eap >>= PAGE_SHIFT; row = edac_mc_find_csrow_by_page(mci, info->eap); if (info->errsts & 0x0080) edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); else edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, multi_chan ? 
(info->des & 0x1) : 0, "i82875p CE"); return 1; } static void i82875p_check(struct mem_ctl_info *mci) { struct i82875p_error_info info; debugf1("MC%d: %s()\n", mci->mc_idx, __func__); i82875p_get_error_info(mci, &info); i82875p_process_error_info(mci, &info, 1); } /* Return 0 on success or 1 on failure. */ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) { struct pci_dev *dev; void __iomem *window; int err; *ovrfl_pdev = NULL; *ovrfl_window = NULL; dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); if (dev == NULL) { /* Intel tells BIOS developers to hide device 6 which * configures the overflow device access containing * the DRBs - this is where we expose device 6. * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm */ pci_write_bits8(pdev, 0xf4, 0x2, 0x2); dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); if (dev == NULL) return 1; err = pci_bus_add_device(dev); if (err) { i82875p_printk(KERN_ERR, "%s(): pci_bus_add_device() Failed\n", __func__); } pci_bus_assign_resources(dev->bus); } *ovrfl_pdev = dev; if (pci_enable_device(dev)) { i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " "device\n", __func__); return 1; } if (pci_request_regions(dev, pci_name(dev))) { #ifdef CORRECT_BIOS goto fail0; #endif } /* cache is irrelevant for PCI bus reads/writes */ window = pci_ioremap_bar(dev, 0); if (window == NULL) { i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", __func__); goto fail1; } *ovrfl_window = window; return 0; fail1: pci_release_regions(dev); #ifdef CORRECT_BIOS fail0: pci_disable_device(dev); #endif /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ return 1; } /* Return 1 if dual channel mode is active. Else return 0. 
*/ static inline int dual_channel_active(u32 drc) { return (drc >> 21) & 0x1; } static void i82875p_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, void __iomem * ovrfl_window, u32 drc) { struct csrow_info *csrow; unsigned long last_cumul_size; u8 value; u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ u32 cumul_size; int index; drc_ddim = (drc >> 18) & 0x1; last_cumul_size = 0; /* The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will * contain the total memory contained in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { csrow = &mci->csrows[index]; value = readb(ovrfl_window + I82875P_DRB + index); cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ csrow->mtype = MEM_DDR; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = drc_ddim ? 
EDAC_SECDED : EDAC_NONE; } } static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; struct mem_ctl_info *mci; struct i82875p_pvt *pvt; struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window; u32 drc; u32 nr_chans; struct i82875p_error_info discard; debugf0("%s()\n", __func__); ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) return -ENODEV; drc = readl(ovrfl_window + I82875P_DRC); nr_chans = dual_channel_active(drc) + 1; mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), nr_chans, 0); if (!mci) { rc = -ENOMEM; goto fail0; } /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */ kobject_get(&mci->edac_mci_kobj); debugf3("%s(): init mci\n", __func__); mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_UNKNOWN; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82875P_REVISION; mci->ctl_name = i82875p_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i82875p_check; mci->ctl_page_to_phys = NULL; debugf3("%s(): init pvt\n", __func__); pvt = (struct i82875p_pvt *)mci->pvt_info; pvt->ovrfl_pdev = ovrfl_pdev; pvt->ovrfl_window = ovrfl_window; i82875p_init_csrows(mci, pdev, ovrfl_window, drc); i82875p_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail1; } /* allocating generic PCI control info */ i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!i82875p_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; fail1: kobject_put(&mci->edac_mci_kobj); edac_mc_free(mci); fail0: iounmap(ovrfl_window); pci_release_regions(ovrfl_pdev); pci_disable_device(ovrfl_pdev); /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ return rc; } /* returns count (>= 0), or negative on error */ static int __devinit i82875p_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; debugf0("%s()\n", __func__); i82875p_printk(KERN_INFO, "i82875p init one\n"); if (pci_enable_device(pdev) < 0) return -EIO; rc = i82875p_probe1(pdev, ent->driver_data); if (mci_pdev == NULL) mci_pdev = pci_dev_get(pdev); return rc; } static void __devexit i82875p_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i82875p_pvt *pvt = NULL; debugf0("%s()\n", __func__); if (i82875p_pci) edac_pci_release_generic_ctl(i82875p_pci); if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; pvt = (struct i82875p_pvt *)mci->pvt_info; if (pvt->ovrfl_window) iounmap(pvt->ovrfl_window); if (pvt->ovrfl_pdev) { #ifdef CORRECT_BIOS pci_release_regions(pvt->ovrfl_pdev); #endif /*CORRECT_BIOS */ pci_disable_device(pvt->ovrfl_pdev); pci_dev_put(pvt->ovrfl_pdev); } edac_mc_free(mci); } static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { { PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82875P}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); static struct pci_driver i82875p_driver = { .name = EDAC_MOD_STR, .probe = i82875p_init_one, .remove = __devexit_p(i82875p_remove_one), .id_table = i82875p_pci_tbl, }; static int __init i82875p_init(void) { int pci_rc; debugf3("%s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i82875p_driver); if (pci_rc < 0) goto fail0; if (mci_pdev == NULL) { mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_0, NULL); if (!mci_pdev) { debugf0("875p pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); if (pci_rc < 0) { debugf0("875p init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i82875p_driver); fail0: if (mci_pdev != NULL) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i82875p_exit(void) { debugf3("%s()\n", __func__); i82875p_remove_one(mci_pdev); pci_dev_put(mci_pdev); pci_unregister_driver(&i82875p_driver); } module_init(i82875p_init); module_exit(i82875p_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
csolg/hi35xx-buildroot
linux/linux-3.0.y/drivers/scsi/gdth.c
3281
178873
/************************************************************************ * Linux driver for * * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers * * Intel Corporation: Storage RAID Controllers * * * * gdth.c * * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner * * Copyright (C) 2002-04 Intel Corporation * * Copyright (C) 2003-06 Adaptec Inc. * * <achim_leubner@adaptec.com> * * * * Additions/Fixes: * * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> * * Johannes Dinner <johannes_dinner@adaptec.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published * * by the Free Software Foundation; either version 2 of the License, * * or (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this kernel; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * * Linux kernel 2.6.x supported * * * ************************************************************************/ /* All GDT Disk Array Controllers are fully supported by this driver. * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete * list of all controller types. * * If you have one or more GDT3000/3020 EISA controllers with * controller BIOS disabled, you have to set the IRQ values with the * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are * the IRQ values for the EISA controllers. 
* * After the optional list of IRQ values, other possible * command line options are: * disable:Y disable driver * disable:N enable driver * reserve_mode:0 reserve no drives for the raw service * reserve_mode:1 reserve all not init., removable drives * reserve_mode:2 reserve all not init. drives * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with * h- controller no., b- channel no., * t- target ID, l- LUN * reverse_scan:Y reverse scan order for PCI controllers * reverse_scan:N scan PCI controllers like BIOS * max_ids:x x - target ID count per channel (1..MAXID) * rescan:Y rescan all channels/IDs * rescan:N use all devices found until now * hdr_channel:x x - number of virtual bus for host drives * shared_access:Y disable driver reserve/release protocol to * access a shared resource from several nodes, * appropriate controller firmware required * shared_access:N enable driver reserve/release protocol * probe_eisa_isa:Y scan for EISA/ISA controllers * probe_eisa_isa:N do not scan for EISA/ISA controllers * force_dma32:Y use only 32 bit DMA mode * force_dma32:N use 64 bit DMA mode, if supported * * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N, * max_ids:127,rescan:N,hdr_channel:0, * shared_access:Y,probe_eisa_isa:N,force_dma32:N". * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y". * * When loading the gdth driver as a module, the same options are available. * You can set the IRQs with "IRQ=...". However, the syntax to specify the * options changes slightly. You must replace all ',' between options * with ' ' and all ':' with '=' and you must use * '1' in place of 'Y' and '0' in place of 'N'. * * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0 * max_ids=127 rescan=0 hdr_channel=0 shared_access=0 * probe_eisa_isa=0 force_dma32=0" * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1". 
*/ /* The meaning of the Scsi_Pointer members in this driver is as follows: * ptr: Chaining * this_residual: unused * buffer: unused * dma_handle: unused * buffers_residual: unused * Status: unused * Message: unused * have_data_in: unused * sent_command: unused * phase: unused */ /* interrupt coalescing */ /* #define INT_COAL */ /* statistics */ #define GDTH_STATISTICS #include <linux/module.h> #include <linux/version.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/proc_fs.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> #ifdef GDTH_RTC #include <linux/mc146818rtc.h> #endif #include <linux/reboot.h> #include <asm/dma.h> #include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "gdth.h" static DEFINE_MUTEX(gdth_mutex); static void gdth_delay(int milliseconds); static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs); static irqreturn_t gdth_interrupt(int irq, void *dev_id); static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int gdth_from_wait, int* pIndex); static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, Scsi_Cmnd *scp); static int gdth_async_event(gdth_ha_str *ha); static void gdth_log_event(gdth_evt_data *dvr, char *buffer); static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority); static void gdth_next(gdth_ha_str *ha); static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b); static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, u16 idx, gdth_evt_data *evt); static int 
gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); static void gdth_readapp_event(gdth_ha_str *ha, u8 application, gdth_evt_str *estr); static void gdth_clear_events(void); static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, char *buffer, u16 count); static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive); static void gdth_enable_int(gdth_ha_str *ha); static int gdth_test_busy(gdth_ha_str *ha); static int gdth_get_cmd_index(gdth_ha_str *ha); static void gdth_release_event(gdth_ha_str *ha); static int gdth_wait(gdth_ha_str *ha, int index,u32 time); static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode, u32 p1, u64 p2,u64 p3); static int gdth_search_drives(gdth_ha_str *ha); static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive); static const char *gdth_ctr_name(gdth_ha_str *ha); static int gdth_open(struct inode *inode, struct file *filep); static int gdth_close(struct inode *inode, struct file *filep); static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); static void gdth_flush(gdth_ha_str *ha); static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, struct gdth_cmndinfo *cmndinfo); static void gdth_scsi_done(struct scsi_cmnd *scp); #ifdef DEBUG_GDTH static u8 DebugState = DEBUG_GDTH; #ifdef __SERIAL__ #define MAX_SERBUF 160 static void ser_init(void); static void ser_puts(char *str); static void ser_putc(char c); static int ser_printk(const char *fmt, ...); static char strbuf[MAX_SERBUF+1]; #ifdef __COM2__ #define COM_BASE 0x2f8 #else #define COM_BASE 0x3f8 #endif static void ser_init() { unsigned port=COM_BASE; outb(0x80,port+3); outb(0,port+1); /* 19200 Baud, if 9600: outb(12,port) */ outb(6, port); outb(3,port+3); outb(0,port+1); /* ser_putc('I'); ser_putc(' '); */ } static void ser_puts(char *str) { 
char *ptr; ser_init(); for (ptr=str;*ptr;++ptr) ser_putc(*ptr); } static void ser_putc(char c) { unsigned port=COM_BASE; while ((inb(port+5) & 0x20)==0); outb(c,port); if (c==0x0a) { while ((inb(port+5) & 0x20)==0); outb(0x0d,port); } } static int ser_printk(const char *fmt, ...) { va_list args; int i; va_start(args,fmt); i = vsprintf(strbuf,fmt,args); ser_puts(strbuf); va_end(args); return i; } #define TRACE(a) {if (DebugState==1) {ser_printk a;}} #define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}} #define TRACE3(a) {if (DebugState!=0) {ser_printk a;}} #else /* !__SERIAL__ */ #define TRACE(a) {if (DebugState==1) {printk a;}} #define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}} #define TRACE3(a) {if (DebugState!=0) {printk a;}} #endif #else /* !DEBUG */ #define TRACE(a) #define TRACE2(a) #define TRACE3(a) #endif #ifdef GDTH_STATISTICS static u32 max_rq=0, max_index=0, max_sg=0; #ifdef INT_COAL static u32 max_int_coal=0; #endif static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0; static struct timer_list gdth_timer; #endif #define PTR2USHORT(a) (u16)(unsigned long)(a) #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b) #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t)) #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? 
(b-1):(b)) #ifdef CONFIG_ISA static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */ #endif #if defined(CONFIG_EISA) || defined(CONFIG_ISA) static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */ #endif static u8 gdth_polling; /* polling if TRUE */ static int gdth_ctr_count = 0; /* controller count */ static LIST_HEAD(gdth_instances); /* controller list */ static u8 gdth_write_through = FALSE; /* write through */ static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */ static int elastidx; static int eoldidx; static int major; #define DIN 1 /* IN data direction */ #define DOU 2 /* OUT data direction */ #define DNO DIN /* no data transfer */ #define DUN DIN /* unknown data direction */ static u8 gdth_direction_tab[0x100] = { DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU, DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN, DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU, DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN, DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN }; /* LILO and modprobe/insmod parameters */ /* IRQ list for GDT3000/3020 EISA controllers */ static int irq[MAXHA] __initdata = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; /* 
disable driver flag */ static int disable __initdata = 0; /* reserve flag */ static int reserve_mode = 1; /* reserve list */ static int reserve_list[MAX_RES_ARGS] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; /* scan order for PCI controllers */ static int reverse_scan = 0; /* virtual channel for the host drives */ static int hdr_channel = 0; /* max. IDs per channel */ static int max_ids = MAXID; /* rescan all IDs */ static int rescan = 0; /* shared access */ static int shared_access = 1; /* enable support for EISA and ISA controllers */ static int probe_eisa_isa = 0; /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */ static int force_dma32 = 0; /* parameters for modprobe/insmod */ module_param_array(irq, int, NULL, 0); module_param(disable, int, 0); module_param(reserve_mode, int, 0); module_param_array(reserve_list, int, NULL, 0); module_param(reverse_scan, int, 0); module_param(hdr_channel, int, 0); module_param(max_ids, int, 0); module_param(rescan, int, 0); module_param(shared_access, int, 0); module_param(probe_eisa_isa, int, 0); module_param(force_dma32, int, 0); MODULE_AUTHOR("Achim Leubner"); MODULE_LICENSE("GPL"); /* ioctl interface */ static const struct file_operations gdth_fops = { .unlocked_ioctl = gdth_unlocked_ioctl, .open = gdth_open, .release = gdth_close, .llseek = noop_llseek, }; #include "gdth_proc.h" #include "gdth_proc.c" static gdth_ha_str *gdth_find_ha(int hanum) { gdth_ha_str *ha; list_for_each_entry(ha, &gdth_instances, list) if (hanum == ha->hanum) return ha; return NULL; } static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) { struct gdth_cmndinfo *priv = NULL; unsigned long flags; int i; spin_lock_irqsave(&ha->smp_lock, flags); for (i=0; i<GDTH_MAXCMDS; ++i) { if (ha->cmndinfo[i].index == 0) { priv = &ha->cmndinfo[i]; memset(priv, 0, sizeof(*priv)); 
priv->index = i+1; break; } } spin_unlock_irqrestore(&ha->smp_lock, flags); return priv; } static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv) { BUG_ON(!priv); priv->index = 0; } static void gdth_delay(int milliseconds) { if (milliseconds == 0) { udelay(1); } else { mdelay(milliseconds); } } static void gdth_scsi_done(struct scsi_cmnd *scp) { struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); int internal_command = cmndinfo->internal_command; TRACE2(("gdth_scsi_done()\n")); gdth_put_cmndinfo(cmndinfo); scp->host_scribble = NULL; if (internal_command) complete((struct completion *)scp->request); else scp->scsi_done(scp); } int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, int timeout, u32 *info) { gdth_ha_str *ha = shost_priv(sdev->host); Scsi_Cmnd *scp; struct gdth_cmndinfo cmndinfo; DECLARE_COMPLETION_ONSTACK(wait); int rval; scp = kzalloc(sizeof(*scp), GFP_KERNEL); if (!scp) return -ENOMEM; scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!scp->sense_buffer) { kfree(scp); return -ENOMEM; } scp->device = sdev; memset(&cmndinfo, 0, sizeof(cmndinfo)); /* use request field to save the ptr. to completion struct. 
     */
    /* Tail of __gdth_execute(): the on-stack completion is smuggled to the
     * completion path via scp->request (cast abuse; presumably the done
     * handler casts it back -- confirm against the interrupt code). */
    scp->request = (struct request *)&wait;
    scp->cmd_len = 12;
    scp->cmnd = cmnd;
    cmndinfo.priority = IOCTL_PRI;
    cmndinfo.internal_cmd_str = gdtcmd;
    cmndinfo.internal_command = 1;

    TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
    __gdth_queuecommand(ha, scp, &cmndinfo);

    /* block until the interrupt handler completes the internal command */
    wait_for_completion(&wait);

    rval = cmndinfo.status;
    if (info)
        *info = cmndinfo.info;
    kfree(scp->sense_buffer);
    kfree(scp);
    return rval;
}

/* Public wrapper for __gdth_execute(): allocates a temporary scsi_device on
 * the host, runs the internal GDT command synchronously, then frees the
 * device again.  Returns the firmware status code from __gdth_execute(). */
int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
                 int timeout, u32 *info)
{
    struct scsi_device *sdev = scsi_get_host_dev(shost);
    int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);

    scsi_free_host_dev(sdev);
    return rval;
}

/* Pick a fake CHS geometry for a capacity of 'size' sectors: try the
 * smallest heads/sectors pair (HEADS*SECS) first and fall back to the
 * medium and big layouts whenever the cylinder count would exceed MAXCYLS. */
static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
{
    *cyls = size /HEADS/SECS;
    if (*cyls <= MAXCYLS) {
        *heads = HEADS;
        *secs = SECS;
    } else {                                        /* too high for 64*32 */
        *cyls = size /MEDHEADS/MEDSECS;
        if (*cyls <= MAXCYLS) {
            *heads = MEDHEADS;
            *secs = MEDSECS;
        } else {                                    /* too high for 127*63 */
            *cyls = size /BIGHEADS/BIGSECS;
            *heads = BIGHEADS;
            *secs = BIGSECS;
        }
    }
}

/* controller search and initialization functions */

#ifdef CONFIG_EISA
/* Probe an EISA slot address for a GDT3000-family board by reading its ID
 * register.  Returns 1 when a supported board is found, else 0. */
static int __init gdth_search_eisa(u16 eisa_adr)
{
    u32 id;

    TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
    id = inl(eisa_adr+ID0REG);
    if (id == GDT3A_ID || id == GDT3B_ID) {         /* GDT3000A or GDT3000B */
        /* A/B variants must also report EISA configuration in EISAREG */
        if ((inb(eisa_adr+EISAREG) & 8) == 0)
            return 0;                               /* not EISA configured */
        return 1;
    }
    if (id == GDT3_ID)                              /* GDT3000 */
        return 1;

    return 0;
}
#endif /* CONFIG_EISA */

#ifdef CONFIG_ISA
/* Probe an ISA BIOS address for a GDT2000 by mapping its ID dword.
 * Returns 1 if the GDT2_ID signature is present, else 0. */
static int __init gdth_search_isa(u32 bios_adr)
{
    void __iomem *addr;
    u32 id;

    TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
        id = readl(addr);
        iounmap(addr);
        if (id == GDT2_ID)                          /* GDT2000 */
            return 1;
    }
    return 0;
}
#endif /* CONFIG_ISA */

#ifdef CONFIG_PCI

/* Filter for the wildcard Vortex PCI match below: accept only the device-id
 * ranges that correspond to real GDT RAID controllers. */
static bool gdth_search_vortex(u16 device)
{
    if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
        return true;
    if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
        device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
        return true;
    if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
        device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
        return true;
    return false;
}

static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
static int gdth_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void gdth_pci_remove_one(struct pci_dev *pdev);
static void gdth_remove_one(gdth_ha_str *ha);

/* Vortex only makes RAID controllers.
 * We do not really want to specify all 550 ids here, so wildcard match.
 */
static const struct pci_device_id gdthtable[] = {
    { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
    { }                                             /* terminate list */
};
MODULE_DEVICE_TABLE(pci, gdthtable);

static struct pci_driver gdth_pci_driver = {
    .name       = "gdth",
    .id_table   = gdthtable,
    .probe      = gdth_pci_init_one,
    .remove     = gdth_pci_remove_one,
};

/* PCI hot-removal: unlink the adapter from the global list, tear it down
 * and disable the PCI device. */
static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
{
    gdth_ha_str *ha = pci_get_drvdata(pdev);

    pci_set_drvdata(pdev, NULL);

    list_del(&ha->list);
    gdth_remove_one(ha);

    pci_disable_device(pdev);
}

/* PCI probe entry point: validate the device id, collect the DPMEM/IO
 * resources appropriate for the board family, then hand off to
 * gdth_pci_probe_one().  Returns 0 on success or a negative errno. */
static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
    u16 vendor = pdev->vendor;
    u16 device = pdev->device;
    unsigned long base0, base1, base2;
    int rc;
    gdth_pci_str gdth_pcistr;
    gdth_ha_str *ha = NULL;

    TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
           gdth_ctr_count, vendor, device));

    memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));

    /* reject Vortex ids caught by the wildcard that are not GDT boards */
    if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
        return -ENODEV;

    rc = pci_enable_device(pdev);
    if (rc)
        return rc;

    if (gdth_ctr_count >= MAXHA)
        return -EBUSY;

    /* GDT PCI controller found, resources are already in pdev */
    gdth_pcistr.pdev = pdev;
    base0 = pci_resource_flags(pdev, 0);
    base1 = pci_resource_flags(pdev, 1);
    base2 = pci_resource_flags(pdev, 2);
    if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B ||  /* GDT6000/B */
        device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
        /* these families expose DPMEM in BAR 0 only */
        if (!(base0 & IORESOURCE_MEM))
            return -ENODEV;
        gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
    } else {                                        /* GDT6110, GDT6120, .. */
        /* PLX-bridged boards: DPMEM in BAR 2, PLX registers via BAR 1 I/O */
        if (!(base0 & IORESOURCE_MEM) ||
            !(base2 & IORESOURCE_MEM) ||
            !(base1 & IORESOURCE_IO))
            return -ENODEV;
        gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
        gdth_pcistr.io = pci_resource_start(pdev, 1);
    }
    TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
            gdth_pcistr.pdev->bus->number,
            PCI_SLOT(gdth_pcistr.pdev->devfn),
            gdth_pcistr.irq,
            gdth_pcistr.dpmem));

    rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
    if (rc)
        return rc;

    return 0;
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_EISA
/* Initialize an EISA GDT3000-family board: run the doorbell/mailbox DEINIT
 * handshake, verify the firmware protocol version and determine the IRQ
 * (queried from firmware on GDT3000, from EISA config otherwise).
 * Returns 1 on success, 0 on any initialization failure. */
static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
{
    u32 retries,id;
    u8 prot_ver,eisacf,i,irq_found;

    TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));

    /* disable board interrupts, deinitialize services */
    outb(0xff,eisa_adr+EDOORREG);
    outb(0x00,eisa_adr+EDENABREG);
    outb(0x00,eisa_adr+EINTENABREG);

    outb(0xff,eisa_adr+LDOORREG);
    retries = INIT_RETRIES;
    gdth_delay(20);
    /* poll the event doorbell until firmware acknowledges DEINIT (0xff) */
    while (inb(eisa_adr+EDOORREG) != 0xff) {
        if (--retries == 0) {
            printk("GDT-EISA: Initialization error (DEINIT failed)\n");
            return 0;
        }
        gdth_delay(1);
        TRACE2(("wait for DEINIT: retries=%d\n",retries));
    }
    prot_ver = inb(eisa_adr+MAILBOXREG);
    outb(0xff,eisa_adr+EDOORREG);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-EISA: Illegal protocol version\n");
        return 0;
    }
    ha->bmic = eisa_adr;
    ha->brd_phys = (u32)eisa_adr >> 12;

    /* clear the 16-byte mailbox before talking to firmware */
    outl(0,eisa_adr+MAILBOXREG);
    outl(0,eisa_adr+MAILBOXREG+4);
    outl(0,eisa_adr+MAILBOXREG+8);
    outl(0,eisa_adr+MAILBOXREG+12);

    /* detect IRQ */
    if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
        /* ask the GDT3000 firmware for its IRQ via mailbox command 0xfe */
        outl(1,eisa_adr+MAILBOXREG+8);
        outb(0xfe,eisa_adr+LDOORREG);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (inb(eisa_adr+EDOORREG) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-EISA: Initialization error (get IRQ failed)\n");
                return 0;
            }
            gdth_delay(1);
        }
        ha->irq = inb(eisa_adr+MAILBOXREG);
        outb(0xff,eisa_adr+EDOORREG);
        TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
        /* check the result */
        if (ha->irq == 0) {
            /* firmware could not report an IRQ: fall back to the first
             * plausible value from the "irq=" command line array */
            TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
            for (i = 0, irq_found = FALSE;
                 i < MAXHA && irq[i] != 0xff; ++i) {
                if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
                    irq_found = TRUE;
                    break;
                }
            }
            if (irq_found) {
                ha->irq = irq[i];
                irq[i] = 0;                 /* consume the table entry */
                printk("GDT-EISA: Can not detect controller IRQ,\n");
                printk("Use IRQ setting from command line (IRQ = %d)\n",
                       ha->irq);
            } else {
                printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
                printk("the controller BIOS or use command line parameters\n");
                return 0;
            }
        }
    } else {
        /* GDT3000A/B: IRQ encoded in the EISA config register */
        eisacf = inb(eisa_adr+EISAREG) & 7;
        if (eisacf > 4)                             /* level triggered */
            eisacf -= 4;
        ha->irq = gdth_irq_tab[eisacf];
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
    }

    ha->dma64_support = 0;
    return 1;
}
#endif /* CONFIG_EISA */

#ifdef CONFIG_ISA
/* Initialize an ISA GDT2000: map its dual-ported RAM, read DRQ/IRQ
 * configuration from the board, run the DEINIT handshake and verify the
 * firmware protocol version.  Returns 1 on success, 0 on failure. */
static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
{
    register gdt2_dpram_str __iomem *dp2_ptr;
    int i;
    u8 irq_drq,prot_ver;
    u32 retries;

    TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));

    ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
    if (ha->brd == NULL) {
        printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
        return 0;
    }
    dp2_ptr = ha->brd;
    writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
    /* reset interface area */
    memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
    if (readl(&dp2_ptr->u) != 0) {
        printk("GDT-ISA: Initialization error (DPMEM write error)\n");
        iounmap(ha->brd);
        return 0;
    }

    /* disable board interrupts, read DRQ and IRQ */
    writeb(0xff, &dp2_ptr->io.irqdel);
    writeb(0x00, &dp2_ptr->io.irqen);
    writeb(0x00, &dp2_ptr->u.ic.S_Status);
    writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);

    /* DRQ: index of the first clear bit in the low 3 bits of io.rq */
    irq_drq = readb(&dp2_ptr->io.rq);
    for (i=0; i<3; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->drq = gdth_drq_tab[i];

    /* IRQ: same scheme on the upper bits of io.rq */
    irq_drq = readb(&dp2_ptr->io.rq) >> 3;
    for (i=1; i<5; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->irq = gdth_irq_tab[i];

    /* deinitialize services */
    writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
    writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
    writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error (DEINIT failed)\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
    writeb(0, &dp2_ptr->u.ic.Status);
    writeb(0xff, &dp2_ptr->io.irqdel);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-ISA: Illegal protocol version\n");
        iounmap(ha->brd);
        return 0;
    }

    ha->oem_id = OEM_ID_ICP;
    ha->type = GDT_ISA;
    ha->ic_all_size = sizeof(dp2_ptr->u);
    ha->stype= GDT2_ID;
    ha->brd_phys = bios_adr >> 4;

    /* special request to controller BIOS */
    /* (continuation of gdth_init_isa) issue BIOS command 0xfe with
     * parameter block {0,0,1,0} and wait for the 0xfe acknowledge */
    writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
    writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
    writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
    writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
    writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
    writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    writeb(0, &dp2_ptr->u.ic.Status);
    writeb(0xff, &dp2_ptr->io.irqdel);

    ha->dma64_support = 0;                  /* ISA boards: 32-bit DMA only */
    return 1;
}
#endif /* CONFIG_ISA */

#ifdef CONFIG_PCI
/* Initialize a PCI GDT controller.  Three board families are handled:
 *   - GDT6000/B           (DPMEM interface, gdt6_dpram_str layout)
 *   - GDT6110..GDT6555    (PLX bridge, gdt6c_dpram_str layout)
 *   - MPR / i960-based    (gdt6m_dpram_str layout, may support 64-bit DMA)
 * Each branch maps DPMEM (relocating it if the BIOS-assigned address is
 * shadowed), runs the DEINIT doorbell handshake, checks the protocol
 * version and issues the "special command" to the controller BIOS.
 * Returns 1 on success, 0 on failure. */
static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
                                   gdth_ha_str *ha)
{
    register gdt6_dpram_str __iomem *dp6_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    u32 retries;
    u8 prot_ver;
    u16 command;
    int i, found = FALSE;

    TRACE(("gdth_init_pci()\n"));

    if (pdev->vendor == PCI_VENDOR_ID_INTEL)
        ha->oem_id = OEM_ID_INTEL;
    else
        ha->oem_id = OEM_ID_ICP;
    ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
    ha->stype = (u32)pdev->device;
    ha->irq = pdev->irq;
    ha->pdev = pdev;

    if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
        TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }
        /* check and reset interface area */
        dp6_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6_ptr->u);
        if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
            /* DPMEM not readable back: scan the C8000-E8000 window for a
             * free 16K slot and reprogram BAR 0 to point there */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_old() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6_ptr->u);
                if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
        if (readl(&dp6_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        writeb(0xff, &dp6_ptr->io.irqdel);
        writeb(0x00, &dp6_ptr->io.irqen);
        writeb(0x00, &dp6_ptr->u.ic.S_Status);
        writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCI;
        ha->ic_all_size = sizeof(dp6_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);

        ha->dma64_support = 0;

    } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
        ha->plx = (gdt6c_plx_regs *)pcistr->io;
        TRACE2(("init_pci_new() dpmem %lx irq %d\n",
                pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            /* NOTE(review): ha->brd is NULL here, so this iounmap() is a
             * no-op at best -- looks like copy/paste residue */
            iounmap(ha->brd);
            return 0;
        }
        /* check and reset interface area */
        dp6c_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6c_ptr->u);
        if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
            /* shadowed DPMEM: relocate via BAR 2, same scan as above */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_plx() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
                ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6c_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6c_ptr->u);
                if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
        if (readl(&dp6c_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        outb(0x00,PTR2USHORT(&ha->plx->control1));
        outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
        writeb(0x00, &dp6c_ptr->u.ic.S_Status);
        writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6c_ptr->u.ic.Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCINEW;
        ha->ic_all_size = sizeof(dp6c_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6c_ptr->u.ic.S_Status);

        ha->dma64_support = 0;

    } else {                                            /* MPR */
        TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }

        /* manipulate config. space to enable DPMEM, start RP controller */
        pci_read_config_word(pdev, PCI_COMMAND, &command);
        command |= 6;                   /* enable memory space + bus master */
        pci_write_config_word(pdev, PCI_COMMAND, command);
        /* NOTE(review): assigning to pci_resource_start() only compiles
         * because it was an lvalue macro in old kernels -- do not imitate */
        if (pci_resource_start(pdev, 8) == 1UL)
            pci_resource_start(pdev, 8) = 0UL;
        i = 0xFEFF0001UL;
        pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
        gdth_delay(1);
        pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
                               pci_resource_start(pdev, 8));

        dp6m_ptr = ha->brd;

        /* Ensure that it is safe to access the non HW portions of DPMEM.
         * Aditional check needed for Xscale based RAID controllers */
        while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
            gdth_delay(1);

        /* check and reset interface area */
        writel(DPMEM_MAGIC, &dp6m_ptr->u);
        if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
            /* shadowed DPMEM: relocate via BAR 0, same scan as the
             * GDT6000 branch */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6m_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6m_ptr->u);
                if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));

        /* disable board interrupts, deinit services */
        writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
               &dp6m_ptr->i960r.edoor_en_reg);
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        writeb(0x00, &dp6m_ptr->u.ic.S_Status);
        writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCIMPR;
        ha->ic_all_size = sizeof(dp6m_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6m_ptr->u.ic.S_Status);

        /* read FW version to detect 64-bit DMA support */
        writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver < 0x2b)            /* FW < x.43: no 64-bit DMA support */
            ha->dma64_support = 0;
        else
            ha->dma64_support = 1;
    }

    return 1;
}
#endif /* CONFIG_PCI */

/* controller protocol functions */

/* Unmask the controller's interrupt sources, dispatching on board family.
 * Holds smp_lock while touching the interrupt registers. */
static void __devinit gdth_enable_int(gdth_ha_str *ha)
{
    unsigned long flags;
    gdt2_dpram_str __iomem *dp2_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt6m_dpram_str __iomem *dp6m_ptr;

    TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
    spin_lock_irqsave(&ha->smp_lock, flags);

    if (ha->type == GDT_EISA) {
        outb(0xff, ha->bmic + EDOORREG);
        outb(0xff, ha->bmic + EDENABREG);
        outb(0x01, ha->bmic + EINTENABREG);
    } else if (ha->type == GDT_ISA) {
        dp2_ptr = ha->brd;
        writeb(1, &dp2_ptr->io.irqdel);
        writeb(0, &dp2_ptr->u.ic.Cmd_Index);
        writeb(1, &dp2_ptr->io.irqen);
    } else if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        writeb(1, &dp6_ptr->io.irqdel);
        writeb(0, &dp6_ptr->u.ic.Cmd_Index);
        writeb(1, &dp6_ptr->io.irqen);
    } else if (ha->type == GDT_PCINEW) {
        outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
        outb(0x03, PTR2USHORT(&ha->plx->control1));
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        /* clearing bit 2 re-enables the event doorbell interrupt */
        writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
               &dp6m_ptr->i960r.edoor_en_reg);
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

/* return IStatus if interrupt was from this card else 0 */
static u8 gdth_get_status(gdth_ha_str *ha)
{
    u8 IStatus = 0;

    TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));

    if (ha->type == GDT_EISA)
        IStatus = inb((u16)ha->bmic + EDOORREG);
    else if (ha->type == GDT_ISA)
        IStatus = readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
    else if (ha->type == GDT_PCI)
        IStatus = readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
    else if (ha->type == GDT_PCINEW)
        IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
    else if (ha->type == GDT_PCIMPR)
        IStatus = readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);

    return IStatus;
}

/* Read the semaphore-0 register for this board type; bit 0 set means the
 * controller is still busy with the previous command. */
static int gdth_test_busy(gdth_ha_str *ha)
{
    register int gdtsema0 = 0;

    TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));

    if (ha->type == GDT_EISA)
        gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
    else if (ha->type == GDT_ISA)
        gdtsema0 = (int)readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    else if (ha->type == GDT_PCI)
        gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    else if (ha->type == GDT_PCINEW)
        gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
    else if (ha->type == GDT_PCIMPR)
        gdtsema0 = (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);

    return (gdtsema0 & 1);
}

/* Claim a free slot in cmd_tab for the command staged in ha->pccb.
 * Returns the 2-based command index, or 0 when the table is full (index
 * values 0 and 1 appear to be reserved by the firmware protocol). */
static int gdth_get_cmd_index(gdth_ha_str *ha)
{
    int i;

    TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));

    for (i=0; i<GDTH_MAXCMDS; ++i) {
        if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
            ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
            ha->cmd_tab[i].service = ha->pccb->Service;
            ha->pccb->CommandIndex = (u32)i+2;
            return (i+2);
        }
    }
    return 0;
}

/* Set semaphore 0 (mark the controller busy) for this board type. */
static void gdth_set_sema0(gdth_ha_str *ha)
{
    TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));

    if (ha->type == GDT_EISA) {
        outb(1, ha->bmic + SEMA0REG);
    } else if (ha->type == GDT_ISA) {
        writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    } else if (ha->type == GDT_PCI) {
        writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->sema0_reg));
    } else if (ha->type == GDT_PCIMPR) {
        writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
    }
}

/* Copy the staged command (ha->pccb) into the board's DPMEM command area
 * and record its offset/service id in the communication queue.  EISA boards
 * have no DPMEM, so the function only bumps the command counter there. */
static void gdth_copy_command(gdth_ha_str *ha)
{
    register gdth_cmd_str *cmd_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt2_dpram_str __iomem *dp2_ptr;
    u16 cp_count,dp_offset,cmd_no;

    TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));

    cp_count = ha->cmd_len;
    dp_offset= ha->cmd_offs_dpmem;
    cmd_no = ha->cmd_cnt;
    cmd_ptr = ha->pccb;

    ++ha->cmd_cnt;
    if (ha->type == GDT_EISA)
        return;                                 /* no DPMEM, no copy */

    /* set cpcount dword aligned */
    if (cp_count & 3)
        cp_count += (4 - (cp_count & 3));

    ha->cmd_offs_dpmem += cp_count;

    /* set offset and service, copy command to DPMEM */
    if (ha->type == GDT_ISA) {
        dp2_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCINEW) {
        dp6c_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        /* (continuation of gdth_copy_command, MPR branch) */
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    }
}

/* Ring the board's "local doorbell" to tell firmware a new command has been
 * placed in DPMEM.  On EISA, GDT_INIT additionally publishes the DMA buffer
 * address via the mailbox. */
static void gdth_release_event(gdth_ha_str *ha)
{
    TRACE(("gdth_release_event() hanum %d\n", ha->hanum));

#ifdef GDTH_STATISTICS
    {
        /* track the high-water mark of in-flight commands */
        u32 i,j;
        for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
            if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                ++i;
        }
        if (max_index < i) {
            max_index = i;
            TRACE3(("GDT: max_index = %d\n",(u16)i));
        }
    }
#endif

    if (ha->pccb->OpCode == GDT_INIT)
        ha->pccb->Service |= 0x80;

    if (ha->type == GDT_EISA) {
        if (ha->pccb->OpCode == GDT_INIT)       /* store DMA buffer */
            outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
        outb(ha->pccb->Service, ha->bmic + LDOORREG);
    } else if (ha->type == GDT_ISA) {
        writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCI) {
        writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
    } else if (ha->type == GDT_PCIMPR) {
        writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
    }
}

/* Busy-wait (polling the interrupt handler directly) until the command with
 * the given index completes or 'time' milliseconds elapse.  Returns TRUE if
 * the answer was found.  Afterwards spins until the board is idle again. */
static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
{
    int answer_found = FALSE;
    int wait_index = 0;

    TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));

    if (index == 0)
        return 1;                               /* no wait required */

    do {
        /* poll-mode call into the ISR; wait_index reports completions */
        __gdth_interrupt(ha, true, &wait_index);
        if (wait_index == index) {
            answer_found = TRUE;
            break;
        }
        gdth_delay(1);
    } while (--time);

    while (gdth_test_busy(ha))
        gdth_delay(0);

    return (answer_found);
}

/* Build and synchronously execute an internal (non-SCSI-midlayer) firmware
 * command for the given service.  p1..p3 are service/opcode-specific
 * parameters marshalled into the matching union member.  Retries while the
 * controller reports S_BSY.  Returns 1 on S_OK, 0 on any failure. */
static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
                             u32 p1, u64 p2, u64 p3)
{
    register gdth_cmd_str *cmd_ptr;
    int retries,index;

    TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));

    cmd_ptr = ha->pccb;
    memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));

    /* make command */
    for (retries = INIT_RETRIES;;) {
        cmd_ptr->Service = service;
        cmd_ptr->RequestBuffer = INTERNAL_CMND;
        if (!(index=gdth_get_cmd_index(ha))) {
            TRACE(("GDT: No free command index found\n"));
            return 0;
        }
        gdth_set_sema0(ha);
        cmd_ptr->OpCode = opcode;
        cmd_ptr->BoardNode = LOCALBOARD;
        if (service == CACHESERVICE) {
            if (opcode == GDT_IOCTL) {
                cmd_ptr->u.ioctl.subfunc = p1;
                cmd_ptr->u.ioctl.channel = (u32)p2;
                cmd_ptr->u.ioctl.param_size = (u16)p3;
                cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
            } else {
                /* 64-bit capable firmware uses the wide cache layout */
                if (ha->cache_feat & GDT_64BIT) {
                    cmd_ptr->u.cache64.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache64.BlockNo = p2;
                } else {
                    cmd_ptr->u.cache.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache.BlockNo = (u32)p2;
                }
            }
        } else if (service == SCSIRAWSERVICE) {
            /* p3 packs target in the low byte and lun in the next byte */
            if (ha->raw_feat & GDT_64BIT) {
                cmd_ptr->u.raw64.direction = p1;
                cmd_ptr->u.raw64.bus = (u8)p2;
                cmd_ptr->u.raw64.target = (u8)p3;
                cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
            } else {
                cmd_ptr->u.raw.direction = p1;
                cmd_ptr->u.raw.bus = (u8)p2;
                cmd_ptr->u.raw.target = (u8)p3;
                cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
            }
        } else if (service == SCREENSERVICE) {
            if (opcode == GDT_REALTIME) {
                /* RTC values are passed as three raw dwords */
                *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
                *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
                *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
            }
        }
        ha->cmd_len = sizeof(gdth_cmd_str);
        ha->cmd_offs_dpmem = 0;
        ha->cmd_cnt = 0;
        gdth_copy_command(ha);
        gdth_release_event(ha);
        gdth_delay(20);
        if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
            printk("GDT: Initialization error (timeout service %d)\n",service);
            return 0;
        }
        if (ha->status != S_BSY || --retries == 0)
            break;
        gdth_delay(1);
    }

    return (ha->status != S_OK ?
0:1); } /* search for devices */ static int __devinit gdth_search_drives(gdth_ha_str *ha) { u16 cdev_cnt, i; int ok; u32 bus_no, drv_cnt, drv_no, j; gdth_getch_str *chn; gdth_drlist_str *drl; gdth_iochan_str *ioc; gdth_raw_iochan_str *iocr; gdth_arcdl_str *alst; gdth_alist_str *alst2; gdth_oem_str_ioctl *oemstr; #ifdef INT_COAL gdth_perf_modes *pmod; #endif #ifdef GDTH_RTC u8 rtc[12]; unsigned long flags; #endif TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); ok = 0; /* initialize controller services, at first: screen service */ ha->screen_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0); if (ok) ha->screen_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error screen service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n")); #ifdef GDTH_RTC /* read realtime clock info, send to controller */ /* 1. wait for the falling edge of update flag */ spin_lock_irqsave(&rtc_lock, flags); for (j = 0; j < 1000000; ++j) if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) break; for (j = 0; j < 1000000; ++j) if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) break; /* 2. read info */ do { for (j = 0; j < 12; ++j) rtc[j] = CMOS_READ(j); } while (rtc[0] != CMOS_READ(0)); spin_unlock_irqrestore(&rtc_lock, flags); TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0], *(u32 *)&rtc[4], *(u32 *)&rtc[8])); /* 3. 
send to controller firmware */ gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0], *(u32 *)&rtc[4], *(u32 *)&rtc[8]); #endif /* unfreeze all IOs */ gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0); /* initialize cache service */ ha->cache_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS, 0, 0); if (ok) ha->cache_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error cache service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); cdev_cnt = (u16)ha->info; ha->fw_vers = ha->service; #ifdef INT_COAL if (ha->type == GDT_PCIMPR) { /* set perf. modes */ pmod = (gdth_perf_modes *)ha->pscratch; pmod->version = 1; pmod->st_mode = 1; /* enable one status buffer */ *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; pmod->st_buff_indx1 = COALINDEX; pmod->st_buff_addr2 = 0; pmod->st_buff_u_addr2 = 0; pmod->st_buff_indx2 = 0; pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS; pmod->cmd_mode = 0; // disable all cmd buffers pmod->cmd_buff_addr1 = 0; pmod->cmd_buff_u_addr1 = 0; pmod->cmd_buff_indx1 = 0; pmod->cmd_buff_addr2 = 0; pmod->cmd_buff_u_addr2 = 0; pmod->cmd_buff_indx2 = 0; pmod->cmd_buff_size = 0; pmod->reserved1 = 0; pmod->reserved2 = 0; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES, INVALID_CHANNEL,sizeof(gdth_perf_modes))) { printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum); } } #endif /* detect number of buses - try new IOCTL */ iocr = (gdth_raw_iochan_str *)ha->pscratch; iocr->hdr.version = 0xffffffff; iocr->hdr.list_entries = MAXBUS; iocr->hdr.first_chan = 0; iocr->hdr.last_chan = MAXBUS-1; iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC, 
INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) { TRACE2(("IOCHAN_RAW_DESC supported!\n")); ha->bus_cnt = iocr->hdr.chan_count; for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { if (iocr->list[bus_no].proc_id < MAXID) ha->bus_id[bus_no] = iocr->list[bus_no].proc_id; else ha->bus_id[bus_no] = 0xff; } } else { /* old method */ chn = (gdth_getch_str *)ha->pscratch; for (bus_no = 0; bus_no < MAXBUS; ++bus_no) { chn->channel_no = bus_no; if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_CHAN_CNT | L_CTRL_PATTERN, IO_CHANNEL | INVALID_CHANNEL, sizeof(gdth_getch_str))) { if (bus_no == 0) { printk("GDT-HA %d: Error detecting channel count (0x%x)\n", ha->hanum, ha->status); return 0; } break; } if (chn->siop_id < MAXID) ha->bus_id[bus_no] = chn->siop_id; else ha->bus_id[bus_no] = 0xff; } ha->bus_cnt = (u8)bus_no; } TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); /* read cache configuration */ if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO, INVALID_CHANNEL,sizeof(gdth_cinfo_str))) { printk("GDT-HA %d: Initialization error cache service (code %d)\n", ha->hanum, ha->status); return 0; } ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar; TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n", ha->cpar.version,ha->cpar.state,ha->cpar.strategy, ha->cpar.write_back,ha->cpar.block_size)); /* read board info and features */ ha->more_proc = FALSE; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO, INVALID_CHANNEL,sizeof(gdth_binfo_str))) { memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch, sizeof(gdth_binfo_str)); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES, INVALID_CHANNEL,sizeof(gdth_bfeat_str))) { TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n")); ha->bfeat = *(gdth_bfeat_str *)ha->pscratch; ha->more_proc = TRUE; } } else { TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n")); strcpy(ha->binfo.type_string, gdth_ctr_name(ha)); } TRACE2(("Controller name: %s\n",ha->binfo.type_string)); /* read 
more informations */ if (ha->more_proc) { /* physical drives, channel addresses */ ioc = (gdth_iochan_str *)ha->pscratch; ioc->hdr.version = 0xffffffff; ioc->hdr.list_entries = MAXBUS; ioc->hdr.first_chan = 0; ioc->hdr.last_chan = MAXBUS-1; ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC, INVALID_CHANNEL,sizeof(gdth_iochan_str))) { for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { ha->raw[bus_no].address = ioc->list[bus_no].address; ha->raw[bus_no].local_no = ioc->list[bus_no].local_no; } } else { for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { ha->raw[bus_no].address = IO_CHANNEL; ha->raw[bus_no].local_no = bus_no; } } for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { chn = (gdth_getch_str *)ha->pscratch; chn->channel_no = ha->raw[bus_no].local_no; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_CHAN_CNT | L_CTRL_PATTERN, ha->raw[bus_no].address | INVALID_CHANNEL, sizeof(gdth_getch_str))) { ha->raw[bus_no].pdev_cnt = chn->drive_cnt; TRACE2(("Channel %d: %d phys. drives\n", bus_no,chn->drive_cnt)); } if (ha->raw[bus_no].pdev_cnt > 0) { drl = (gdth_drlist_str *)ha->pscratch; drl->sc_no = ha->raw[bus_no].local_no; drl->sc_cnt = ha->raw[bus_no].pdev_cnt; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_DR_LIST | L_CTRL_PATTERN, ha->raw[bus_no].address | INVALID_CHANNEL, sizeof(gdth_drlist_str))) { for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j) ha->raw[bus_no].id_list[j] = drl->sc_list[j]; } else { ha->raw[bus_no].pdev_cnt = 0; } } } /* logical drives */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT, INVALID_CHANNEL,sizeof(u32))) { drv_cnt = *(u32 *)ha->pscratch; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST, INVALID_CHANNEL,drv_cnt * sizeof(u32))) { for (j = 0; j < drv_cnt; ++j) { drv_no = ((u32 *)ha->pscratch)[j]; if (drv_no < MAX_LDRIVES) { ha->hdr[drv_no].is_logdrv = TRUE; TRACE2(("Drive %d is log. 
drive\n",drv_no)); } } } alst = (gdth_arcdl_str *)ha->pscratch; alst->entries_avail = MAX_LDRIVES; alst->first_entry = 0; alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, ARRAY_DRV_LIST2 | LA_CTRL_PATTERN, INVALID_CHANNEL, sizeof(gdth_arcdl_str) + (alst->entries_avail-1) * sizeof(gdth_alist_str))) { for (j = 0; j < alst->entries_init; ++j) { ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd; ha->hdr[j].is_master = alst->list[j].is_master; ha->hdr[j].is_parity = alst->list[j].is_parity; ha->hdr[j].is_hotfix = alst->list[j].is_hotfix; ha->hdr[j].master_no = alst->list[j].cd_handle; } } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, ARRAY_DRV_LIST | LA_CTRL_PATTERN, 0, 35 * sizeof(gdth_alist_str))) { for (j = 0; j < 35; ++j) { alst2 = &((gdth_alist_str *)ha->pscratch)[j]; ha->hdr[j].is_arraydrv = alst2->is_arrayd; ha->hdr[j].is_master = alst2->is_master; ha->hdr[j].is_parity = alst2->is_parity; ha->hdr[j].is_hotfix = alst2->is_hotfix; ha->hdr[j].master_no = alst2->cd_handle; } } } } /* initialize raw service */ ha->raw_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0); if (ok) ha->raw_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error raw service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n")); /* set/get features raw service (scatter/gather) */ if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER, 0, 0)) { TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n")); if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", ha->info)); ha->raw_feat |= (u16)ha->info; } } /* set/get features cache service (equal to raw service) */ if (gdth_internal_cmd(ha, CACHESERVICE, 
GDT_SET_FEAT, 0, SCATTER_GATHER,0)) { TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n")); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", ha->info)); ha->cache_feat |= (u16)ha->info; } } /* reserve drives for raw service */ if (reserve_mode != 0) { gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL, reserve_mode == 1 ? 1 : 3, 0, 0); TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n", ha->status)); } for (i = 0; i < MAX_RES_ARGS; i += 4) { if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt && reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) { TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n", reserve_list[i], reserve_list[i+1], reserve_list[i+2], reserve_list[i+3])); if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0, reserve_list[i+1], reserve_list[i+2] | (reserve_list[i+3] << 8))) { printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n", ha->hanum, ha->status); } } } /* Determine OEM string using IOCTL */ oemstr = (gdth_oem_str_ioctl *)ha->pscratch; oemstr->params.ctl_version = 0x01; oemstr->params.buffer_size = sizeof(oemstr->text); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL, sizeof(gdth_oem_str_ioctl))) { TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n")); printk("GDT-HA %d: Vendor: %s Name: %s\n", ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string); /* Save the Host Drive inquiry data */ strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id, sizeof(ha->oem_name)); } else { /* Old method, based on PCI ID */ TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n")); printk("GDT-HA %d: Name: %s\n", ha->hanum, ha->binfo.type_string); if (ha->oem_id == OEM_ID_INTEL) strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name)); else strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name)); } /* scanning for host drives */ 
    /* scan all configured host drives */
    for (i = 0; i < cdev_cnt; ++i)
        gdth_analyse_hdrive(ha, i);

    TRACE(("gdth_search_drives() OK\n"));
    return 1;
}

/* Query the controller's cache service about one host drive and fill in
 * the matching ha->hdr[] slot: presence, capacity, CHS geometry, device
 * type, cluster info and read/write attributes.
 * Returns 1 on success, 0 if the drive index is out of range or the
 * GDT_INFO firmware command fails.
 */
static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
{
    u32 drv_cyls;
    int drv_hds, drv_secs;

    TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n",
           ha->hanum, hdrive));
    if (hdrive >= MAX_HDRIVES)
        return 0;

    if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
        return 0;
    ha->hdr[hdrive].present = TRUE;
    ha->hdr[hdrive].size = ha->info;

    /* evaluate mapping (sectors per head, heads per cylinder) */
    ha->hdr[hdrive].size &= ~SECS32;
    if (ha->info2 == 0) {
        gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
    } else {
        /* firmware supplied the geometry in info2: heads in the low
         * byte, sectors per track in the next byte */
        drv_hds = ha->info2 & 0xff;
        drv_secs = (ha->info2 >> 8) & 0xff;
        drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
    }
    ha->hdr[hdrive].heads = (u8)drv_hds;
    ha->hdr[hdrive].secs = (u8)drv_secs;
    /* round size down to a whole number of cylinders */
    ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;

    if (ha->cache_feat & GDT_64BIT) {
        /* 64-bit capable cache service: GDT_X_INFO returns the upper
         * 32 bits of the capacity in info2 */
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
            && ha->info2 != 0) {
            ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
        }
    }
    TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
            hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));

    /* get information about the device */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].devtype = (u16)ha->info;
    }

    /* cluster info */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
                hdrive,ha->info));
        if (!shared_access)
            ha->hdr[hdrive].cluster_type = (u8)ha->info;
    }

    /* R/W attributes */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].rw_attribs = (u8)ha->info;
    }

    return 1;
}

/* command queueing/sending functions */

/* Insert a SCSI command into the adapter's request queue, ordered by
 * priority (0 = highest, 0xff = lowest).  The queue is a singly linked
 * list threaded through scp->SCp.ptr and protected by ha->smp_lock.
 */
static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
{
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    register Scsi_Cmnd *pscp;
    register Scsi_Cmnd *nscp;
    unsigned long flags;

    TRACE(("gdth_putq() priority %d\n",priority));
    spin_lock_irqsave(&ha->smp_lock, flags);

    if (!cmndinfo->internal_command)
        cmndinfo->priority = priority;

    if (ha->req_first==NULL) {
        ha->req_first = scp;                    /* queue was empty */
        scp->SCp.ptr = NULL;
    } else {                                    /* queue not empty */
        pscp = ha->req_first;
        nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
        /* priority: 0-highest,..,0xff-lowest */
        while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
            pscp = nscp;
            nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
        }
        /* link scp between pscp and nscp */
        pscp->SCp.ptr = (char *)scp;
        scp->SCp.ptr = (char *)nscp;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

#ifdef GDTH_STATISTICS
    /* NOTE: 'flags' is reused below as a plain counter to record the
     * maximum queue depth seen so far (after the lock is dropped) */
    flags = 0;
    for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
        ++flags;
    if (max_rq < flags) {
        max_rq = flags;
        TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
    }
#endif
}

/* Walk the request queue and hand as many queued commands as possible
 * to the controller (cache, raw or internal service); commands that
 * cannot be mapped to a service are completed with an error inline.
 */
static void gdth_next(gdth_ha_str *ha)
{
    register Scsi_Cmnd *pscp;
    register Scsi_Cmnd *nscp;
    u8 b, t, l, firsttime;
    u8 this_cmd, next_cmd;
    unsigned long flags = 0;
    int cmd_index;

    TRACE(("gdth_next() hanum %d\n", ha->hanum));
    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
    this_cmd = firsttime = TRUE;
    next_cmd = gdth_polling ?
FALSE:TRUE; cmd_index = 0; for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) { struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp); if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr) pscp = (Scsi_Cmnd *)pscp->SCp.ptr; if (!nscp_cmndinfo->internal_command) { b = nscp->device->channel; t = nscp->device->id; l = nscp->device->lun; if (nscp_cmndinfo->priority >= DEFAULT_PRI) { if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) || (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) continue; } } else b = t = l = 0; if (firsttime) { if (gdth_test_busy(ha)) { /* controller busy ? */ TRACE(("gdth_next() controller %d busy !\n", ha->hanum)); if (!gdth_polling) { spin_unlock_irqrestore(&ha->smp_lock, flags); return; } while (gdth_test_busy(ha)) gdth_delay(1); } firsttime = FALSE; } if (!nscp_cmndinfo->internal_command) { if (nscp_cmndinfo->phase == -1) { nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */ if (nscp->cmnd[0] == TEST_UNIT_READY) { TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n", b, t, l)); /* TEST_UNIT_READY -> set scan mode */ if ((ha->scan_mode & 0x0f) == 0) { if (b == 0 && t == 0 && l == 0) { ha->scan_mode |= 1; TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); } } else if ((ha->scan_mode & 0x0f) == 1) { if (b == 0 && ((t == 0 && l == 1) || (t == 1 && l == 0))) { nscp_cmndinfo->OpCode = GDT_SCAN_START; nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 
1:0) << 8) | SCSIRAWSERVICE; ha->scan_mode = 0x12; TRACE2(("Scan mode: 0x%x (SCAN_START)\n", ha->scan_mode)); } else { ha->scan_mode &= 0x10; TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); } } else if (ha->scan_mode == 0x12) { if (b == ha->bus_cnt && t == ha->tid_cnt-1) { nscp_cmndinfo->phase = SCSIRAWSERVICE; nscp_cmndinfo->OpCode = GDT_SCAN_END; ha->scan_mode &= 0x10; TRACE2(("Scan mode: 0x%x (SCAN_END)\n", ha->scan_mode)); } } } if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY && nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE && (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) { /* always GDT_CLUST_INFO! */ nscp_cmndinfo->OpCode = GDT_CLUST_INFO; } } } if (nscp_cmndinfo->OpCode != -1) { if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) { if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t))) this_cmd = FALSE; next_cmd = FALSE; } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) { if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b)))) this_cmd = FALSE; next_cmd = FALSE; } else { memset((char*)nscp->sense_buffer,0,16); nscp->sense_buffer[0] = 0x70; nscp->sense_buffer[2] = NOT_READY; nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } } else if (gdth_cmnd_priv(nscp)->internal_command) { if (!(cmd_index=gdth_special_cmd(ha, nscp))) this_cmd = FALSE; next_cmd = FALSE; } else if (b != ha->virt_bus) { if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW || !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b)))) this_cmd = FALSE; else ha->raw[BUS_L2P(ha,b)].io_cnt[t]++; } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) { TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n", nscp->cmnd[0], b, t, l)); nscp->result = DID_BAD_TARGET << 16; if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else { switch (nscp->cmnd[0]) { case TEST_UNIT_READY: case INQUIRY: case 
REQUEST_SENSE: case READ_CAPACITY: case VERIFY: case START_STOP: case MODE_SENSE: case SERVICE_ACTION_IN: TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) { /* return UNIT_ATTENTION */ TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n", nscp->cmnd[0], t)); ha->hdr[t].media_changed = FALSE; memset((char*)nscp->sense_buffer,0,16); nscp->sense_buffer[0] = 0x70; nscp->sense_buffer[2] = UNIT_ATTENTION; nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else if (gdth_internal_cache_cmd(ha, nscp)) gdth_scsi_done(nscp); break; case ALLOW_MEDIUM_REMOVAL: TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) { TRACE(("Prevent r. nonremov. drive->do nothing\n")); nscp->result = DID_OK << 16; nscp->sense_buffer[0] = 0; if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else { nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0; TRACE(("Prevent/allow r. %d rem. drive %d\n", nscp->cmnd[4],nscp->cmnd[3])); if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t))) this_cmd = FALSE; } break; case RESERVE: case RELEASE: TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ? 
"RESERVE" : "RELEASE")); if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t))) this_cmd = FALSE; break; case READ_6: case WRITE_6: case READ_10: case WRITE_10: case READ_16: case WRITE_16: if (ha->hdr[t].media_changed) { /* return UNIT_ATTENTION */ TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n", nscp->cmnd[0], t)); ha->hdr[t].media_changed = FALSE; memset((char*)nscp->sense_buffer,0,16); nscp->sense_buffer[0] = 0x70; nscp->sense_buffer[2] = UNIT_ATTENTION; nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t))) this_cmd = FALSE; break; default: TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n", ha->hanum, nscp->cmnd[0]); nscp->result = DID_ABORT << 16; if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); break; } } if (!this_cmd) break; if (nscp == ha->req_first) ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr; else pscp->SCp.ptr = nscp->SCp.ptr; if (!next_cmd) break; } if (ha->cmd_cnt > 0) { gdth_release_event(ha); } if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); if (gdth_polling && ha->cmd_cnt > 0) { if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT)) printk("GDT-HA %d: Command %d timed out !\n", ha->hanum, cmd_index); } } /* * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's * buffers, kmap_atomic() as needed. 
 */
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
                                    char *buffer, u16 count)
{
    u16 cpcount,i, max_sg = scsi_sg_count(scp);
    u16 cpsum,cpnow;
    struct scatterlist *sl;
    char *address;

    /* never copy more than the command's data buffer can hold */
    cpcount = min_t(u16, count, scsi_bufflen(scp));

    if (cpcount) {
        cpsum=0;
        scsi_for_each_sg(scp, sl, max_sg, i) {
            unsigned long flags;
            cpnow = (u16)sl->length;
            TRACE(("copy_internal() now %d sum %d count %d %d\n",
                   cpnow, cpsum, cpcount, scsi_bufflen(scp)));
            /* clamp the last chunk so the running total never
             * exceeds cpcount */
            if (cpsum+cpnow > cpcount)
                cpnow = cpcount - cpsum;
            cpsum += cpnow;
            if (!sg_page(sl)) {
                printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
                       ha->hanum);
                return;
            }
            /* map the sg page with interrupts off, copy, then flush
             * the dcache before unmapping */
            local_irq_save(flags);
            address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
            memcpy(address, buffer, cpnow);
            flush_dcache_page(sg_page(sl));
            kunmap_atomic(address, KM_BIO_SRC_IRQ);
            local_irq_restore(flags);
            if (cpsum == cpcount)
                break;
            buffer += cpnow;
        }
    } else if (count) {
        printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
               ha->hanum);
        WARN_ON(1);
    }
}

/* Emulate simple SCSI commands (INQUIRY, REQUEST SENSE, MODE SENSE,
 * READ CAPACITY, ...) for host drives in the driver, without sending
 * anything to the firmware.  The reply data is copied into the
 * command's buffers via gdth_copy_internal_data().
 */
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
{
    u8 t;
    gdth_inq_data inq;
    gdth_rdcap_data rdc;
    gdth_sense_data sd;
    gdth_modep_data mpd;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);

    t = scp->device->id;
    TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
           scp->cmnd[0],t));

    scp->result = DID_OK << 16;
    scp->sense_buffer[0] = 0;

    switch (scp->cmnd[0]) {
      case TEST_UNIT_READY:
      case VERIFY:
      case START_STOP:
        TRACE2(("Test/Verify/Start hdrive %d\n",t));
        break;

      case INQUIRY:
        TRACE2(("Inquiry hdrive %d devtype %d\n",
                t,ha->hdr[t].devtype));
        /* NOTE(review): unlike mpd below, inq is not zero-initialized
         * and only some fields are assigned here; the remaining bytes
         * are uninitialized stack data that gets copied out -- consider
         * memset(&inq, 0, sizeof(inq)) first.  Same applies to sd and
         * rdc in the cases below. */
        inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
        /* you can here set all disks to removable, if you want to do
           a flush using the ALLOW_MEDIUM_REMOVAL command */
        inq.modif_rmb = 0x00;
        if ((ha->hdr[t].devtype & 1) ||
            (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
            inq.modif_rmb = 0x80;
        inq.version = 2;
        inq.resp_aenc = 2;
        inq.add_length= 32;
        strcpy(inq.vendor,ha->oem_name);
        sprintf(inq.product,"Host Drive #%02d",t);
        strcpy(inq.revision," ");
        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
        break;

      case REQUEST_SENSE:
        TRACE2(("Request sense hdrive %d\n",t));
        sd.errorcode = 0x70;
        sd.segno = 0x00;
        sd.key = NO_SENSE;
        sd.info = 0;
        sd.add_length= 0;
        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
        break;

      case MODE_SENSE:
        TRACE2(("Mode sense hdrive %d\n",t));
        memset((char*)&mpd,0,sizeof(gdth_modep_data));
        mpd.hd.data_length = sizeof(gdth_modep_data);
        mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
        mpd.hd.bd_length = sizeof(mpd.bd);
        /* block length is encoded big-endian, byte by byte */
        mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
        mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
        mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
        gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
        break;

      case READ_CAPACITY:
        TRACE2(("Read capacity hdrive %d\n",t));
        /* drives beyond 32 bits of blocks report 0xffffffff so the
         * midlayer falls back to READ CAPACITY (16) */
        if (ha->hdr[t].size > (u64)0xffffffff)
            rdc.last_block_no = 0xffffffff;
        else
            rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
        rdc.block_length = cpu_to_be32(SECTOR_SIZE);
        gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
        break;

      case SERVICE_ACTION_IN:
        /* only READ CAPACITY (16), and only on 64-bit capable fw */
        if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
            (ha->cache_feat & GDT_64BIT)) {
            gdth_rdcap16_data rdc16;

            TRACE2(("Read capacity (16) hdrive %d\n",t));
            rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
            rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
            gdth_copy_internal_data(ha, scp, (char*)&rdc16,
                                    sizeof(gdth_rdcap16_data));
        } else {
            scp->result = DID_ABORT << 16;
        }
        break;

      default:
        TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
        break;
    }

    if
(!cmndinfo->wait_for_completion) cmndinfo->wait_for_completion++; else return 1; return 0; } static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive) { register gdth_cmd_str *cmdp; struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); u32 cnt, blockcnt; u64 no, blockno; int i, cmd_index, read_write, sgcnt, mode64; cmdp = ha->pccb; TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n", scp->cmnd[0],scp->cmd_len,hdrive)); if (ha->type==GDT_EISA && ha->cmd_cnt>0) return 0; mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE; /* test for READ_16, WRITE_16 if !mode64 ? --- not required, should not occur due to error return on READ_CAPACITY_16 */ cmdp->Service = CACHESERVICE; cmdp->RequestBuffer = scp; /* search free command index */ if (!(cmd_index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } /* if it's the first command, set command semaphore */ if (ha->cmd_cnt == 0) gdth_set_sema0(ha); /* fill command */ read_write = 0; if (cmndinfo->OpCode != -1) cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */ else if (scp->cmnd[0] == RESERVE) cmdp->OpCode = GDT_RESERVE_DRV; else if (scp->cmnd[0] == RELEASE) cmdp->OpCode = GDT_RELEASE_DRV; else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { if (scp->cmnd[4] & 1) /* prevent ? */ cmdp->OpCode = GDT_MOUNT; else if (scp->cmnd[3] & 1) /* removable drive ? 
*/ cmdp->OpCode = GDT_UNMOUNT; else cmdp->OpCode = GDT_FLUSH; } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 || scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16 ) { read_write = 1; if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) && (ha->cache_feat & GDT_WR_THROUGH))) cmdp->OpCode = GDT_WRITE_THR; else cmdp->OpCode = GDT_WRITE; } else { read_write = 2; cmdp->OpCode = GDT_READ; } cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.cache64.DeviceNo = hdrive; cmdp->u.cache64.BlockNo = 1; cmdp->u.cache64.sg_canz = 0; } else { cmdp->u.cache.DeviceNo = hdrive; cmdp->u.cache.BlockNo = 1; cmdp->u.cache.sg_canz = 0; } if (read_write) { if (scp->cmd_len == 16) { memcpy(&no, &scp->cmnd[2], sizeof(u64)); blockno = be64_to_cpu(no); memcpy(&cnt, &scp->cmnd[10], sizeof(u32)); blockcnt = be32_to_cpu(cnt); } else if (scp->cmd_len == 10) { memcpy(&no, &scp->cmnd[2], sizeof(u32)); blockno = be32_to_cpu(no); memcpy(&cnt, &scp->cmnd[7], sizeof(u16)); blockcnt = be16_to_cpu(cnt); } else { memcpy(&no, &scp->cmnd[0], sizeof(u32)); blockno = be32_to_cpu(no) & 0x001fffffUL; blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4]; } if (mode64) { cmdp->u.cache64.BlockNo = blockno; cmdp->u.cache64.BlockCnt = blockcnt; } else { cmdp->u.cache.BlockNo = (u32)blockno; cmdp->u.cache.BlockCnt = blockcnt; } if (scsi_bufflen(scp)) { cmndinfo->dma_dir = (read_write == 1 ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), cmndinfo->dma_dir); if (mode64) { struct scatterlist *sl; cmdp->u.cache64.DestAddr= (u64)-1; cmdp->u.cache64.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff) ha->dma64_cnt++; else ha->dma32_cnt++; #endif cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl); } } else { struct scatterlist *sl; cmdp->u.cache.DestAddr= 0xffffffff; cmdp->u.cache.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS ha->dma32_cnt++; #endif cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl); } } #ifdef GDTH_STATISTICS if (max_sg < (u32)sgcnt) { max_sg = (u32)sgcnt; TRACE3(("GDT: max_sg = %d\n",max_sg)); } #endif } } /* evaluate command size, check space */ if (mode64) { TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz, cmdp->u.cache64.sg_lst[0].sg_ptr, cmdp->u.cache64.sg_lst[0].sg_len)); TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt)); ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str); } else { TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz, cmdp->u.cache.sg_lst[0].sg_ptr, cmdp->u.cache.sg_lst[0].sg_len)); TRACE(("cache cmd: cmd %d blockno. 
%d, blockcnt %d\n", cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt)); ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str); } if (ha->cmd_len & 3) ha->cmd_len += (4 - (ha->cmd_len & 3)); if (ha->cmd_cnt > 0) { if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) > ha->ic_all_size) { TRACE2(("gdth_fill_cache() DPMEM overflow\n")); ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND; return 0; } } /* copy command */ gdth_copy_command(ha); return cmd_index; } static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b) { register gdth_cmd_str *cmdp; u16 i; dma_addr_t sense_paddr; int cmd_index, sgcnt, mode64; u8 t,l; struct page *page; unsigned long offset; struct gdth_cmndinfo *cmndinfo; t = scp->device->id; l = scp->device->lun; cmdp = ha->pccb; TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n", scp->cmnd[0],b,t,l)); if (ha->type==GDT_EISA && ha->cmd_cnt>0) return 0; mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE; cmdp->Service = SCSIRAWSERVICE; cmdp->RequestBuffer = scp; /* search free command index */ if (!(cmd_index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } /* if it's the first command, set command semaphore */ if (ha->cmd_cnt == 0) gdth_set_sema0(ha); cmndinfo = gdth_cmnd_priv(scp); /* fill command */ if (cmndinfo->OpCode != -1) { cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. 
*/ cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.raw64.direction = (cmndinfo->phase >> 8); TRACE2(("special raw cmd 0x%x param 0x%x\n", cmdp->OpCode, cmdp->u.raw64.direction)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst); } else { cmdp->u.raw.direction = (cmndinfo->phase >> 8); TRACE2(("special raw cmd 0x%x param 0x%x\n", cmdp->OpCode, cmdp->u.raw.direction)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst); } } else { page = virt_to_page(scp->sense_buffer); offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK; sense_paddr = pci_map_page(ha->pdev,page,offset, 16,PCI_DMA_FROMDEVICE); cmndinfo->sense_paddr = sense_paddr; cmdp->OpCode = GDT_WRITE; /* always */ cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.raw64.reserved = 0; cmdp->u.raw64.mdisc_time = 0; cmdp->u.raw64.mcon_time = 0; cmdp->u.raw64.clen = scp->cmd_len; cmdp->u.raw64.target = t; cmdp->u.raw64.lun = l; cmdp->u.raw64.bus = b; cmdp->u.raw64.priority = 0; cmdp->u.raw64.sdlen = scsi_bufflen(scp); cmdp->u.raw64.sense_len = 16; cmdp->u.raw64.sense_data = sense_paddr; cmdp->u.raw64.direction = gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; memcpy(cmdp->u.raw64.cmd,scp->cmnd,16); cmdp->u.raw64.sg_ranz = 0; } else { cmdp->u.raw.reserved = 0; cmdp->u.raw.mdisc_time = 0; cmdp->u.raw.mcon_time = 0; cmdp->u.raw.clen = scp->cmd_len; cmdp->u.raw.target = t; cmdp->u.raw.lun = l; cmdp->u.raw.bus = b; cmdp->u.raw.priority = 0; cmdp->u.raw.link_p = 0; cmdp->u.raw.sdlen = scsi_bufflen(scp); cmdp->u.raw.sense_len = 16; cmdp->u.raw.sense_data = sense_paddr; cmdp->u.raw.direction = gdth_direction_tab[scp->cmnd[0]]==DOU ? 
GDTH_DATA_OUT:GDTH_DATA_IN; memcpy(cmdp->u.raw.cmd,scp->cmnd,12); cmdp->u.raw.sg_ranz = 0; } if (scsi_bufflen(scp)) { cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL; sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), cmndinfo->dma_dir); if (mode64) { struct scatterlist *sl; cmdp->u.raw64.sdata = (u64)-1; cmdp->u.raw64.sg_ranz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff) ha->dma64_cnt++; else ha->dma32_cnt++; #endif cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl); } } else { struct scatterlist *sl; cmdp->u.raw.sdata = 0xffffffff; cmdp->u.raw.sg_ranz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS ha->dma32_cnt++; #endif cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl); } } #ifdef GDTH_STATISTICS if (max_sg < sgcnt) { max_sg = sgcnt; TRACE3(("GDT: max_sg = %d\n",sgcnt)); } #endif } if (mode64) { TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz, cmdp->u.raw64.sg_lst[0].sg_ptr, cmdp->u.raw64.sg_lst[0].sg_len)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str); } else { TRACE(("raw cmd: addr. 
%x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz, cmdp->u.raw.sg_lst[0].sg_ptr, cmdp->u.raw.sg_lst[0].sg_len)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str); } } /* check space */ if (ha->cmd_len & 3) ha->cmd_len += (4 - (ha->cmd_len & 3)); if (ha->cmd_cnt > 0) { if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) > ha->ic_all_size) { TRACE2(("gdth_fill_raw() DPMEM overflow\n")); ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND; return 0; } } /* copy command */ gdth_copy_command(ha); return cmd_index; } static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) { register gdth_cmd_str *cmdp; struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); int cmd_index; cmdp= ha->pccb; TRACE2(("gdth_special_cmd(): ")); if (ha->type==GDT_EISA && ha->cmd_cnt>0) return 0; *cmdp = *cmndinfo->internal_cmd_str; cmdp->RequestBuffer = scp; /* search free command index */ if (!(cmd_index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } /* if it's the first command, set command semaphore */ if (ha->cmd_cnt == 0) gdth_set_sema0(ha); /* evaluate command size, check space */ if (cmdp->OpCode == GDT_IOCTL) { TRACE2(("IOCTL\n")); ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64); } else if (cmdp->Service == CACHESERVICE) { TRACE2(("cache command %d\n",cmdp->OpCode)); if (ha->cache_feat & GDT_64BIT) ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str); else ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str); } else if (cmdp->Service == SCSIRAWSERVICE) { TRACE2(("raw command %d\n",cmdp->OpCode)); if (ha->raw_feat & GDT_64BIT) ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str); else ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str); } if (ha->cmd_len & 3) ha->cmd_len += (4 - (ha->cmd_len & 3)); if (ha->cmd_cnt > 0) { if 
        ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}

/* Controller event handling functions */

/* Record a controller/driver event in the global ring buffer 'ebuffer'.
 * A repeat of the newest entry only bumps its same_count/last_stamp;
 * otherwise a new entry is taken, advancing elastidx (and eoldidx when
 * the ring wraps onto itself).  The caller must serialize access --
 * no GDTH_LOCK_HA() is taken here.
 * Returns the entry used, or NULL if source == 0.
 */
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
                                      u16 idx, gdth_evt_data *evt)
{
    gdth_evt_str *e;
    struct timeval tv;

    /* no GDTH_LOCK_HA() ! */
    TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
    if (source == 0)                        /* no source -> no event */
        return NULL;

    if (ebuffer[elastidx].event_source == source &&
        ebuffer[elastidx].event_idx == idx &&
        ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
            !memcmp((char *)&ebuffer[elastidx].event_data.eu,
                    (char *)&evt->eu, evt->size)) ||
         (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
            !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
                    (char *)&evt->event_string)))) {
        /* identical to the newest entry: just update the counters */
        e = &ebuffer[elastidx];
        do_gettimeofday(&tv);
        e->last_stamp = tv.tv_sec;
        ++e->same_count;
    } else {
        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
            ++elastidx;
            if (elastidx == MAX_EVENTS)
                elastidx = 0;
            if (elastidx == eoldidx) {              /* reached mark ? */
                ++eoldidx;
                if (eoldidx == MAX_EVENTS)
                    eoldidx = 0;
            }
        }
        e = &ebuffer[elastidx];
        e->event_source = source;
        e->event_idx = idx;
        do_gettimeofday(&tv);
        e->first_stamp = e->last_stamp = tv.tv_sec;
        e->same_count = 1;
        e->event_data = *evt;
        e->application = 0;
    }
    return e;
}

/* Read one event from the ring buffer into *estr.
 * handle == -1 starts at the oldest entry; otherwise handle is the
 * index to read.  Returns the handle of the next event, or -1 when the
 * newest entry has been read.  estr->event_source is left 0 if the
 * slot is empty or the handle is out of range.
 */
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    unsigned long flags;

    TRACE2(("gdth_read_event() handle %d\n", handle));
    spin_lock_irqsave(&ha->smp_lock, flags);

    if (handle == -1)
        eindex = eoldidx;
    else
        eindex = handle;
    estr->event_source = 0;

    if (eindex < 0 || eindex >= MAX_EVENTS) {
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        return eindex;
    }
    e = &ebuffer[eindex];
    if (e->event_source != 0) {
        if (eindex != elastidx) {
            if (++eindex == MAX_EVENTS)
                eindex = 0;
        } else {
            eindex = -1;
        }
        memcpy(estr, e, sizeof(gdth_evt_str));
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
    return eindex;
}

/* Find the oldest event this application has not seen yet, mark it as
 * seen (by setting the application's bit) and copy it to *estr.  If no
 * unseen event exists, estr->event_source is set to 0.
 */
static void gdth_readapp_event(gdth_ha_str *ha,
                               u8 application, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    unsigned long flags;
    u8 found = FALSE;

    TRACE2(("gdth_readapp_event() app. %d\n", application));
    spin_lock_irqsave(&ha->smp_lock, flags);
    eindex = eoldidx;
    for (;;) {
        e = &ebuffer[eindex];
        if (e->event_source == 0)
            break;
        if ((e->application & application) == 0) {
            e->application |= application;
            found = TRUE;
            break;
        }
        if (eindex == elastidx)
            break;
        if (++eindex == MAX_EVENTS)
            eindex = 0;
    }
    if (found)
        memcpy(estr, e, sizeof(gdth_evt_str));
    else
        estr->event_source = 0;
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

/* Reset the event ring buffer to empty. */
static void gdth_clear_events(void)
{
    TRACE(("gdth_clear_events()"));

    eoldidx = elastidx = 0;
    ebuffer[0].event_source = 0;
}

/* SCSI interface functions */

/* Common interrupt handler body; also called from gdth_wait() when
 * polling (gdth_from_wait != 0, completed index returned in *pIndex).
 */
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
                                    int gdth_from_wait, int* pIndex)
{
    gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt2_dpram_str __iomem *dp2_ptr;
    Scsi_Cmnd *scp;
    int rval, i;
    u8 IStatus;
    u16 Service;
    unsigned long flags = 0;
#ifdef INT_COAL
    int coalesced = FALSE;
    int next = FALSE;
    gdth_coal_status *pcs = NULL;
    int act_int_coal = 0;
#endif

    TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));

    /* if polling and not from gdth_wait() -> return */
    if (gdth_polling) {
        if (!gdth_from_wait) {
            return IRQ_HANDLED;
        }
    }

    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    /* search controller */
    IStatus = gdth_get_status(ha);
    if (IStatus == 0) {
        /* spurious interrupt */
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

#ifdef GDTH_STATISTICS
    ++act_ints;
#endif

#ifdef INT_COAL
    /* See if the fw is returning coalesced status */
    if (IStatus == COALINDEX) {
        /* Coalesced status.
Setup the initial status buffer pointer and flags */ pcs = ha->coal_stat; coalesced = TRUE; next = TRUE; } do { if (coalesced) { /* For coalesced requests all status information is found in the status buffer */ IStatus = (u8)(pcs->status & 0xff); } #endif if (ha->type == GDT_EISA) { if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = inw(ha->bmic + MAILBOXREG+8); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = inl(ha->bmic + MAILBOXREG+12); ha->service = inw(ha->bmic + MAILBOXREG+10); ha->info2 = inl(ha->bmic + MAILBOXREG+4); outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */ outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */ } else if (ha->type == GDT_ISA) { dp2_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = readw(&dp2_ptr->u.ic.Status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = readl(&dp2_ptr->u.ic.Info[0]); ha->service = readw(&dp2_ptr->u.ic.Service); ha->info2 = readl(&dp2_ptr->u.ic.Info[1]); writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */ writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */ writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */ } else if (ha->type == GDT_PCI) { dp6_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = readw(&dp6_ptr->u.ic.Status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = readl(&dp6_ptr->u.ic.Info[0]); ha->service = readw(&dp6_ptr->u.ic.Service); ha->info2 = readl(&dp6_ptr->u.ic.Info[1]); writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */ writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */ writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */ } else if (ha->type == GDT_PCINEW) { if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = 
inw(PTR2USHORT(&ha->plx->status)); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else ha->status = S_OK; ha->info = inl(PTR2USHORT(&ha->plx->info[0])); ha->service = inw(PTR2USHORT(&ha->plx->service)); ha->info2 = inl(PTR2USHORT(&ha->plx->info[1])); outb(0xff, PTR2USHORT(&ha->plx->edoor_reg)); outb(0x00, PTR2USHORT(&ha->plx->sema1_reg)); } else if (ha->type == GDT_PCIMPR) { dp6m_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; #ifdef INT_COAL if (coalesced) ha->status = pcs->ext_status & 0xffff; else #endif ha->status = readw(&dp6m_ptr->i960r.status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; #ifdef INT_COAL /* get information */ if (coalesced) { ha->info = pcs->info0; ha->info2 = pcs->info1; ha->service = (pcs->ext_status >> 16) & 0xffff; } else #endif { ha->info = readl(&dp6m_ptr->i960r.info[0]); ha->service = readw(&dp6m_ptr->i960r.service); ha->info2 = readl(&dp6m_ptr->i960r.info[1]); } /* event string */ if (IStatus == ASYNCINDEX) { if (ha->service != SCREENSERVICE && (ha->fw_vers & 0xff) >= 0x1a) { ha->dvr.severity = readb (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity); for (i = 0; i < 256; ++i) { ha->dvr.event_string[i] = readb (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]); if (ha->dvr.event_string[i] == 0) break; } } } #ifdef INT_COAL /* Make sure that non coalesced interrupts get cleared before being handled by gdth_async_event/gdth_sync_event */ if (!coalesced) #endif { writeb(0xff, &dp6m_ptr->i960r.edoor_reg); writeb(0, &dp6m_ptr->i960r.sema1_reg); } } else { TRACE2(("gdth_interrupt() unknown controller type\n")); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } TRACE(("gdth_interrupt() index %d stat %d info %d\n", IStatus,ha->status,ha->info)); if (gdth_from_wait) { *pIndex = (int)IStatus; } if (IStatus == ASYNCINDEX) { TRACE2(("gdth_interrupt() async. 
event\n")); gdth_async_event(ha); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); gdth_next(ha); return IRQ_HANDLED; } if (IStatus == SPEZINDEX) { TRACE2(("Service unknown or not initialized !\n")); ha->dvr.size = sizeof(ha->dvr.eu.driver); ha->dvr.eu.driver.ionode = ha->hanum; gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } scp = ha->cmd_tab[IStatus-2].cmnd; Service = ha->cmd_tab[IStatus-2].service; ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND; if (scp == UNUSED_CMND) { TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus)); ha->dvr.size = sizeof(ha->dvr.eu.driver); ha->dvr.eu.driver.ionode = ha->hanum; ha->dvr.eu.driver.index = IStatus; gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } if (scp == INTERNAL_CMND) { TRACE(("gdth_interrupt() answer to internal command\n")); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } TRACE(("gdth_interrupt() sync. 
status\n")); rval = gdth_sync_event(ha,Service,IStatus,scp); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); if (rval == 2) { gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority); } else if (rval == 1) { gdth_scsi_done(scp); } #ifdef INT_COAL if (coalesced) { /* go to the next status in the status buffer */ ++pcs; #ifdef GDTH_STATISTICS ++act_int_coal; if (act_int_coal > max_int_coal) { max_int_coal = act_int_coal; printk("GDT: max_int_coal = %d\n",(u16)max_int_coal); } #endif /* see if there is another status */ if (pcs->status == 0) /* Stop the coalesce loop */ next = FALSE; } } while (next); /* coalescing only for new GDT_PCIMPR controllers available */ if (ha->type == GDT_PCIMPR && coalesced) { writeb(0xff, &dp6m_ptr->i960r.edoor_reg); writeb(0, &dp6m_ptr->i960r.sema1_reg); } #endif gdth_next(ha); return IRQ_HANDLED; } static irqreturn_t gdth_interrupt(int irq, void *dev_id) { gdth_ha_str *ha = dev_id; return __gdth_interrupt(ha, false, NULL); } static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, Scsi_Cmnd *scp) { gdth_msg_str *msg; gdth_cmd_str *cmdp; u8 b, t; struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); cmdp = ha->pccb; TRACE(("gdth_sync_event() serv %d status %d\n", service,ha->status)); if (service == SCREENSERVICE) { msg = ha->pmsg; TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n", msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen)); if (msg->msg_len > MSGLEN+1) msg->msg_len = MSGLEN+1; if (msg->msg_len) if (!(msg->msg_answer && msg->msg_ext)) { msg->msg_text[msg->msg_len] = '\0'; printk("%s",msg->msg_text); } if (msg->msg_ext && !msg->msg_answer) { while (gdth_test_busy(ha)) gdth_delay(0); cmdp->Service = SCREENSERVICE; cmdp->RequestBuffer = SCREEN_CMND; gdth_get_cmd_index(ha); gdth_set_sema0(ha); cmdp->OpCode = GDT_READ; cmdp->BoardNode = LOCALBOARD; cmdp->u.screen.reserved = 0; cmdp->u.screen.su.msg.msg_handle= msg->msg_handle; cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; ha->cmd_offs_dpmem = 0; ha->cmd_len 
= GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            gdth_release_event(ha);
            return 0;
        }

        if (msg->msg_answer && msg->msg_alen) {
            /* default answers (getchar() not possible) */
            if (msg->msg_alen == 1) {
                msg->msg_alen = 0;
                msg->msg_len = 1;
                msg->msg_text[0] = 0;
            } else {
                msg->msg_alen -= 2;
                msg->msg_len = 2;
                msg->msg_text[0] = 1;
                msg->msg_text[1] = 0;
            }
            msg->msg_ext    = 0;
            msg->msg_answer = 0;
            /* send the canned answer back via a screen WRITE command */
            while (gdth_test_busy(ha))
                gdth_delay(0);
            cmdp->Service       = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            gdth_get_cmd_index(ha);
            gdth_set_sema0(ha);
            cmdp->OpCode        = GDT_WRITE;
            cmdp->BoardNode     = LOCALBOARD;
            cmdp->u.screen.reserved  = 0;
            cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
            cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            gdth_release_event(ha);
            return 0;
        }
        printk("\n");

    } else {
        b = scp->device->channel;
        t = scp->device->id;
        if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
            ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
        }
        /* cache or raw service */
        if (ha->status == S_BSY) {
            TRACE2(("Controller busy -> retry !\n"));
            if (cmndinfo->OpCode == GDT_MOUNT)
                cmndinfo->OpCode = GDT_CLUST_INFO;
            /* retry */
            return 2;
        }
        if (scsi_bufflen(scp))
            pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
                         cmndinfo->dma_dir);

        if (cmndinfo->sense_paddr)
            pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16,
                                                           PCI_DMA_FROMDEVICE);

        if (ha->status == S_OK) {
            cmndinfo->status = S_OK;
            cmndinfo->info = ha->info;
            if (cmndinfo->OpCode != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
                        cmndinfo->OpCode));
                /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
                if (cmndinfo->OpCode == GDT_CLUST_INFO) {
                    ha->hdr[t].cluster_type = (u8)ha->info;
                    if (!(ha->hdr[t].cluster_type &
                        CLUSTER_MOUNTED)) {
                        /* NOT MOUNTED -> MOUNT */
                        cmndinfo->OpCode = GDT_MOUNT;
                        if (ha->hdr[t].cluster_type &
                            CLUSTER_RESERVED) {
                            /* cluster drive RESERVED (on the other node) */
                            cmndinfo->phase = -2;      /* reservation conflict */
                        }
                    } else {
                        cmndinfo->OpCode = -1;
                    }
                } else {
                    if (cmndinfo->OpCode == GDT_MOUNT) {
                        ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
                        ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    }
                    cmndinfo->OpCode = -1;
                }
                /* retry */
                cmndinfo->priority = HIGH_PRI;
                return 2;
            } else {
                /* RESERVE/RELEASE ? */
                if (scp->cmnd[0] == RESERVE) {
                    ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
                } else if (scp->cmnd[0] == RELEASE) {
                    ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                }
                scp->result = DID_OK << 16;
                scp->sense_buffer[0] = 0;
            }
        } else {
            cmndinfo->status = ha->status;
            cmndinfo->info   = ha->info;

            if (cmndinfo->OpCode != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
                        cmndinfo->OpCode, ha->status));
                if (cmndinfo->OpCode == GDT_SCAN_START ||
                    cmndinfo->OpCode == GDT_SCAN_END) {
                    cmndinfo->OpCode = -1;
                    /* retry */
                    cmndinfo->priority = HIGH_PRI;
                    return 2;
                }
                /* fabricate a NOT READY sense for failed special commands */
                memset((char*)scp->sense_buffer,0,16);
                scp->sense_buffer[0] = 0x70;
                scp->sense_buffer[2] = NOT_READY;
                scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
            } else if (service == CACHESERVICE) {
                if (ha->status == S_CACHE_UNKNOWN &&
                    (ha->hdr[t].cluster_type &
                     CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
                    /* bus reset -> force GDT_CLUST_INFO */
                    ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                }
                memset((char*)scp->sense_buffer,0,16);
                if (ha->status == (u16)S_CACHE_RESERV) {
                    scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
                } else {
                    scp->sense_buffer[0] = 0x70;
                    scp->sense_buffer[2] = NOT_READY;
                    scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                }
                if
(!cmndinfo->internal_command) {
                    /* record the failure in the driver event buffer */
                    ha->dvr.size = sizeof(ha->dvr.eu.sync);
                    ha->dvr.eu.sync.ionode  = ha->hanum;
                    ha->dvr.eu.sync.service = service;
                    ha->dvr.eu.sync.status  = ha->status;
                    ha->dvr.eu.sync.info    = ha->info;
                    ha->dvr.eu.sync.hostdrive = t;
                    if (ha->status >= 0x8000)
                        gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
                    else
                        gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
                }
            } else {
                /* sense buffer filled from controller firmware (DMA) */
                if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
                    scp->result = DID_BAD_TARGET << 16;
                } else {
                    scp->result = (DID_OK << 16) | ha->info;
                }
            }
        }
        if (!cmndinfo->wait_for_completion)
            cmndinfo->wait_for_completion++;
        else
            return 1;
    }

    return 0;
}

/*
 * Async event message table for the cache service.  Each entry is a
 * binary descriptor string followed by a printf format: descriptor
 * byte 0 is the descriptor length, then (offset, size) pairs locating
 * the format arguments inside the event data stream (parsed by
 * gdth_log_event()).
 */
static char *async_cache_tab[] = {
/* 0*/  "\011\000\002\002\002\004\002\006\004"
        "GDT HA %u, service %u, async. status %u/%lu unknown",
/* 1*/  "\011\000\002\002\002\004\002\006\004"
        "GDT HA %u, service %u, async. status %u/%lu unknown",
/* 2*/  "\005\000\002\006\004"
        "GDT HA %u, Host Drive %lu not ready",
/* 3*/  "\005\000\002\006\004"
        "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
/* 4*/  "\005\000\002\006\004"
        "GDT HA %u, mirror update on Host Drive %lu failed",
/* 5*/  "\005\000\002\006\004"
        "GDT HA %u, Mirror Drive %lu failed",
/* 6*/  "\005\000\002\006\004"
        "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
/* 7*/  "\005\000\002\006\004"
        "GDT HA %u, Host Drive %lu write protected",
/* 8*/  "\005\000\002\006\004"
        "GDT HA %u, media changed in Host Drive %lu",
/* 9*/  "\005\000\002\006\004"
        "GDT HA %u, Host Drive %lu is offline",
/*10*/  "\005\000\002\006\004"
        "GDT HA %u, media change of Mirror Drive %lu",
/*11*/  "\005\000\002\006\004"
        "GDT HA %u, Mirror Drive %lu is write protected",
/*12*/  "\005\000\002\006\004"
        "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
/*13*/  "\007\000\002\006\002\010\002"
        "GDT HA %u, Array Drive %u: Cache Drive %u failed",
/*14*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: FAIL state entered",
/*15*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: error",
/*16*/  "\007\000\002\006\002\010\002"
        "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
/*17*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity build failed",
/*18*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive rebuild failed",
/*19*/  "\005\000\002\010\002"
        "GDT HA %u, Test of Hot Fix %u failed",
/*20*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive build finished successfully",
/*21*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
/*22*/  "\007\000\002\006\002\010\002"
        "GDT HA %u, Array Drive %u: Hot Fix %u activated",
/*23*/  "\005\000\002\006\002"
        "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
/*24*/  "\005\000\002\010\002"
        "GDT HA %u, mirror update on Cache Drive %u completed",
/*25*/  "\005\000\002\010\002"
        "GDT HA %u, mirror update on Cache Drive %lu failed",
/*26*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive rebuild started",
/*27*/  "\005\000\002\012\001"
        "GDT HA %u, Fault bus %u: SHELF OK detected",
/*28*/  "\005\000\002\012\001"
        "GDT HA %u, Fault bus %u: SHELF not OK detected",
/*29*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
/*30*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: new disk detected",
/*31*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: old disk detected",
/*32*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
/*33*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
/*34*/  "\011\000\002\012\001\013\001\006\004"
        "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
/*35*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: disk write protected",
/*36*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: disk not available",
/*37*/  "\007\000\002\012\001\006\004"
        "GDT HA %u, Fault bus %u: swap detected (%lu)",
/*38*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
/*39*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
/*40*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
/*41*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
/*42*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive build started",
/*43*/  "\003\000\002"
        "GDT HA %u, DRAM parity error detected",
/*44*/  "\005\000\002\006\002"
        "GDT HA %u, Mirror Drive %u: update started",
/*45*/  "\007\000\002\006\002\010\002"
        "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
/*46*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
/*47*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
/*48*/  "\005\000\002\006\002"
        "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
/*49*/  "\005\000\002\006\002"
        "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
/*50*/  "\007\000\002\012\001\013\001"
        "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
/*51*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: expand started",
/*52*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: expand finished successfully",
/*53*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: expand failed",
/*54*/  "\003\000\002"
        "GDT HA %u, CPU temperature critical",
/*55*/  "\003\000\002"
        "GDT HA %u, CPU temperature OK",
/*56*/  "\005\000\002\006\004"
        "GDT HA %u, Host drive %lu created",
/*57*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: expand restarted",
/*58*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: expand stopped",
/*59*/  "\005\000\002\010\002"
        "GDT HA %u, Mirror Drive %u: drive build quited",
/*60*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity build quited",
/*61*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: drive rebuild quited",
/*62*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity verify started",
/*63*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity verify done",
/*64*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity verify failed",
/*65*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity error detected",
/*66*/  "\005\000\002\006\002"
        "GDT HA %u, Array Drive %u: parity verify quited",
/*67*/  "\005\000\002\006\002"
        "GDT HA %u, Host Drive %u reserved",
/*68*/  "\005\000\002\006\002"
        "GDT HA %u, Host Drive %u mounted and released",
/*69*/  "\005\000\002\006\002"
        "GDT HA %u, Host Drive %u released",
/*70*/  "\003\000\002"
        "GDT HA %u, DRAM error detected and corrected with ECC",
/*71*/  "\003\000\002"
        "GDT HA %u, Uncorrectable DRAM error detected with ECC",
/*72*/  "\011\000\002\012\001\013\001\014\001"
        "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
/*73*/  "\005\000\002\006\002"
        "GDT HA %u, Host drive %u resetted locally",
/*74*/  "\005\000\002\006\002"
        "GDT HA %u, Host drive %u resetted remotely",
/*75*/  "\003\000\002"
        "GDT HA %u, async. status 75 unknown",
};

/*
 * Handle an asynchronous (unsolicited) controller event: either a
 * screen-service message request (start a screen READ) or a service
 * event that is stored in the event buffer and logged.
 */
static int gdth_async_event(gdth_ha_str *ha)
{
    gdth_cmd_str *cmdp;
    int cmd_index;

    cmdp= ha->pccb;
    TRACE2(("gdth_async_event() ha %d serv %d\n",
            ha->hanum, ha->service));

    if (ha->service == SCREENSERVICE) {
        if (ha->status == MSG_REQUEST) {
            while (gdth_test_busy(ha))
                gdth_delay(0);
            cmdp->Service       = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            cmd_index = gdth_get_cmd_index(ha);
            gdth_set_sema0(ha);
            cmdp->OpCode        = GDT_READ;
            cmdp->BoardNode     = LOCALBOARD;
            cmdp->u.screen.reserved  = 0;
            cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
            cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            if (ha->type == GDT_EISA)
                printk("[EISA slot %d] ",(u16)ha->brd_phys);
            else if (ha->type == GDT_ISA)
                printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
            else
                printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
                       (u16)((ha->brd_phys>>3)&0x1f));
            gdth_release_event(ha);
        }

    } else {
        if (ha->type == GDT_PCIMPR &&
            (ha->fw_vers & 0xff) >= 0x1a) {
            /* newer firmware already delivered the event text */
            ha->dvr.size = 0;
            ha->dvr.eu.async.ionode = ha->hanum;
            ha->dvr.eu.async.status  = ha->status;
            /* severity and event_string already set! */
        } else {
            ha->dvr.size = sizeof(ha->dvr.eu.async);
            ha->dvr.eu.async.ionode   = ha->hanum;
            ha->dvr.eu.async.service = ha->service;
            ha->dvr.eu.async.status  = ha->status;
            ha->dvr.eu.async.info    = ha->info;
            *(u32 *)ha->dvr.eu.async.scsi_coord  = ha->info2;
        }
        gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
        gdth_log_event( &ha->dvr, NULL );

        /* new host drive from expand?
*/
        if (ha->service == CACHESERVICE && ha->status == 56) {
            TRACE2(("gdth_async_event(): new host drive %d created\n",
                    (u16)ha->info));
            /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
        }
    }
    return 1;
}

/*
 * Format/log an event.  If buffer is NULL the text goes to printk,
 * otherwise it is sprintf'ed into buffer.  For cache-service events
 * the async_cache_tab descriptor string is decoded: descriptor byte 0
 * is its length, followed by (offset, size) pairs that pull the format
 * arguments out of the raw event stream into a stack frame that is
 * passed wholesale to printk/sprintf.
 */
static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
{
    gdth_stackframe stack;
    char *f = NULL;
    int i,j;

    TRACE2(("gdth_log_event()\n"));

    if (dvr->size == 0) {
        /* firmware-supplied event string, no formatting needed */
        if (buffer == NULL) {
            printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
        } else {
            sprintf(buffer,"Adapter %d: %s\n",
                dvr->eu.async.ionode,dvr->event_string);
        }
    } else if (dvr->eu.async.service == CACHESERVICE &&
        INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
        TRACE2(("GDT: Async. event cache service, event no.: %d\n",
                dvr->eu.async.status));

        f = async_cache_tab[dvr->eu.async.status];

        /* i: parameter to push, j: stack element to fill */
        for (j=0,i=1; i < f[0]; i+=2) {
            switch (f[i+1]) {
              case 4:
                stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
                break;
              case 2:
                stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
                break;
              case 1:
                stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
                break;
              default:
                break;
            }
        }

        if (buffer == NULL) {
            /* format string starts right after the descriptor bytes */
            printk(&f[(int)f[0]],stack);
            printk("\n");
        } else {
            sprintf(buffer,&f[(int)f[0]],stack);
        }

    } else {
        if (buffer == NULL) {
            printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
                   dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        } else {
            sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
                    dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        }
    }
}

#ifdef GDTH_STATISTICS
static u8	gdth_timer_running;

/* Periodic statistics timer: samples outstanding commands and the
   request queue on the first registered adapter every 30 seconds. */
static void gdth_timeout(unsigned long data)
{
    u32 i;
    Scsi_Cmnd *nscp;
    gdth_ha_str *ha;
    unsigned long flags;

    if(unlikely(list_empty(&gdth_instances))) {
        gdth_timer_running = 0;
        return;
    }

    ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
    spin_lock_irqsave(&ha->smp_lock, flags);

    for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
        if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
            ++act_stats;

    for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
        ++act_rq;

    TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
            act_ints, act_ios, act_stats, act_rq));
    act_ints = act_ios = 0;

    gdth_timer.expires = jiffies + 30 * HZ;
    add_timer(&gdth_timer);
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

/* Arm the statistics timer once; subsequent calls are no-ops. */
static void gdth_timer_init(void)
{
	if (gdth_timer_running)
		return;
	gdth_timer_running = 1;
	TRACE2(("gdth_detect(): Initializing timer !\n"));
	gdth_timer.expires	= jiffies + HZ;
	gdth_timer.data		= 0L;
	gdth_timer.function	= gdth_timeout;
	add_timer(&gdth_timer);
}
#else
static inline void gdth_timer_init(void)
{
}
#endif

/*
 * Parse the "gdth=" option string: optional leading IRQ numbers come
 * in via ints[], then "name:value" pairs set module parameters.
 */
static void __init internal_setup(char *str,int *ints)
{
    int i, argc;
    char *cur_str, *argv;

    TRACE2(("internal_setup() str %s ints[0] %d\n",
            str ? str:"NULL", ints ?
ints[0]:0)); /* read irq[] from ints[] */ if (ints) { argc = ints[0]; if (argc > 0) { if (argc > MAXHA) argc = MAXHA; for (i = 0; i < argc; ++i) irq[i] = ints[i+1]; } } /* analyse string */ argv = str; while (argv && (cur_str = strchr(argv, ':'))) { int val = 0, c = *++cur_str; if (c == 'n' || c == 'N') val = 0; else if (c == 'y' || c == 'Y') val = 1; else val = (int)simple_strtoul(cur_str, NULL, 0); if (!strncmp(argv, "disable:", 8)) disable = val; else if (!strncmp(argv, "reserve_mode:", 13)) reserve_mode = val; else if (!strncmp(argv, "reverse_scan:", 13)) reverse_scan = val; else if (!strncmp(argv, "hdr_channel:", 12)) hdr_channel = val; else if (!strncmp(argv, "max_ids:", 8)) max_ids = val; else if (!strncmp(argv, "rescan:", 7)) rescan = val; else if (!strncmp(argv, "shared_access:", 14)) shared_access = val; else if (!strncmp(argv, "probe_eisa_isa:", 15)) probe_eisa_isa = val; else if (!strncmp(argv, "reserve_list:", 13)) { reserve_list[0] = val; for (i = 1; i < MAX_RES_ARGS; i++) { cur_str = strchr(cur_str, ','); if (!cur_str) break; if (!isdigit((int)*++cur_str)) { --cur_str; break; } reserve_list[i] = (int)simple_strtoul(cur_str, NULL, 0); } if (!cur_str) break; argv = ++cur_str; continue; } if ((argv = strchr(argv, ','))) ++argv; } } int __init option_setup(char *str) { int ints[MAXHA]; char *cur = str; int i = 1; TRACE2(("option_setup() str %s\n", str ? 
str:"NULL")); while (cur && isdigit(*cur) && i < MAXHA) { ints[i++] = simple_strtoul(cur, NULL, 0); if ((cur = strchr(cur, ',')) != NULL) cur++; } ints[0] = i - 1; internal_setup(cur, ints); return 1; } static const char *gdth_ctr_name(gdth_ha_str *ha) { TRACE2(("gdth_ctr_name()\n")); if (ha->type == GDT_EISA) { switch (ha->stype) { case GDT3_ID: return("GDT3000/3020"); case GDT3A_ID: return("GDT3000A/3020A/3050A"); case GDT3B_ID: return("GDT3000B/3010A"); } } else if (ha->type == GDT_ISA) { return("GDT2000/2020"); } else if (ha->type == GDT_PCI) { switch (ha->pdev->device) { case PCI_DEVICE_ID_VORTEX_GDT60x0: return("GDT6000/6020/6050"); case PCI_DEVICE_ID_VORTEX_GDT6000B: return("GDT6000B/6010"); } } /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */ return(""); } static const char *gdth_info(struct Scsi_Host *shp) { gdth_ha_str *ha = shost_priv(shp); TRACE2(("gdth_info()\n")); return ((const char *)ha->binfo.type_string); } static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp) { gdth_ha_str *ha = shost_priv(scp->device->host); struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); u8 b, t; unsigned long flags; enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__)); b = scp->device->channel; t = scp->device->id; /* * We don't really honor the command timeout, but we try to * honor 6 times of the actual command timeout! So reset the * timer if this is less than 6th timeout on this command! 
*/ if (++cmndinfo->timeout_count < 6) retval = BLK_EH_RESET_TIMER; /* Reset the timeout if it is locked IO */ spin_lock_irqsave(&ha->smp_lock, flags); if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) || (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) { TRACE2(("%s(): locked IO, reset timeout\n", __func__)); retval = BLK_EH_RESET_TIMER; } spin_unlock_irqrestore(&ha->smp_lock, flags); return retval; } static int gdth_eh_bus_reset(Scsi_Cmnd *scp) { gdth_ha_str *ha = shost_priv(scp->device->host); int i; unsigned long flags; Scsi_Cmnd *cmnd; u8 b; TRACE2(("gdth_eh_bus_reset()\n")); b = scp->device->channel; /* clear command tab */ spin_lock_irqsave(&ha->smp_lock, flags); for (i = 0; i < GDTH_MAXCMDS; ++i) { cmnd = ha->cmd_tab[i].cmnd; if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b) ha->cmd_tab[i].cmnd = UNUSED_CMND; } spin_unlock_irqrestore(&ha->smp_lock, flags); if (b == ha->virt_bus) { /* host drives */ for (i = 0; i < MAX_HDRIVES; ++i) { if (ha->hdr[i].present) { spin_lock_irqsave(&ha->smp_lock, flags); gdth_polling = TRUE; while (gdth_test_busy(ha)) gdth_delay(0); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_RESET, i, 0, 0)) ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED; gdth_polling = FALSE; spin_unlock_irqrestore(&ha->smp_lock, flags); } } } else { /* raw devices */ spin_lock_irqsave(&ha->smp_lock, flags); for (i = 0; i < MAXID; ++i) ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0; gdth_polling = TRUE; while (gdth_test_busy(ha)) gdth_delay(0); gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS, BUS_L2P(ha,b), 0, 0); gdth_polling = FALSE; spin_unlock_irqrestore(&ha->smp_lock, flags); } return SUCCESS; } static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) { u8 b, t; gdth_ha_str *ha = shost_priv(sdev->host); struct scsi_device *sd; unsigned capacity; sd = sdev; capacity = cap; b = sd->channel; t = sd->id; TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t)); if (b != ha->virt_bus || 
ha->hdr[t].heads == 0) { /* raw device or host drive without mapping information */ TRACE2(("Evaluate mapping\n")); gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]); } else { ip[0] = ha->hdr[t].heads; ip[1] = ha->hdr[t].secs; ip[2] = capacity / ip[0] / ip[1]; } TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n", ip[0],ip[1],ip[2])); return 0; } static int gdth_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { gdth_ha_str *ha = shost_priv(scp->device->host); struct gdth_cmndinfo *cmndinfo; TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0])); cmndinfo = gdth_get_cmndinfo(ha); BUG_ON(!cmndinfo); scp->scsi_done = done; cmndinfo->timeout_count = 0; cmndinfo->priority = DEFAULT_PRI; return __gdth_queuecommand(ha, scp, cmndinfo); } static DEF_SCSI_QCMD(gdth_queuecommand) static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, struct gdth_cmndinfo *cmndinfo) { scp->host_scribble = (unsigned char *)cmndinfo; cmndinfo->wait_for_completion = 1; cmndinfo->phase = -1; cmndinfo->OpCode = -1; #ifdef GDTH_STATISTICS ++act_ios; #endif gdth_putq(ha, scp, cmndinfo->priority); gdth_next(ha); return 0; } static int gdth_open(struct inode *inode, struct file *filep) { gdth_ha_str *ha; mutex_lock(&gdth_mutex); list_for_each_entry(ha, &gdth_instances, list) { if (!ha->sdev) ha->sdev = scsi_get_host_dev(ha->shost); } mutex_unlock(&gdth_mutex); TRACE(("gdth_open()\n")); return 0; } static int gdth_close(struct inode *inode, struct file *filep) { TRACE(("gdth_close()\n")); return 0; } static int ioc_event(void __user *arg) { gdth_ioctl_event evt; gdth_ha_str *ha; unsigned long flags; if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event))) return -EFAULT; ha = gdth_find_ha(evt.ionode); if (!ha) return -EFAULT; if (evt.erase == 0xff) { if (evt.event.event_source == ES_TEST) evt.event.event_data.size=sizeof(evt.event.event_data.eu.test); else if (evt.event.event_source == ES_DRIVER) 
evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver); else if (evt.event.event_source == ES_SYNC) evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync); else evt.event.event_data.size=sizeof(evt.event.event_data.eu.async); spin_lock_irqsave(&ha->smp_lock, flags); gdth_store_event(ha, evt.event.event_source, evt.event.event_idx, &evt.event.event_data); spin_unlock_irqrestore(&ha->smp_lock, flags); } else if (evt.erase == 0xfe) { gdth_clear_events(); } else if (evt.erase == 0) { evt.handle = gdth_read_event(ha, evt.handle, &evt.event); } else { gdth_readapp_event(ha, evt.erase, &evt.event); } if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event))) return -EFAULT; return 0; } static int ioc_lockdrv(void __user *arg) { gdth_ioctl_lockdrv ldrv; u8 i, j; unsigned long flags; gdth_ha_str *ha; if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) return -EFAULT; ha = gdth_find_ha(ldrv.ionode); if (!ha) return -EFAULT; for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) { j = ldrv.drives[i]; if (j >= MAX_HDRIVES || !ha->hdr[j].present) continue; if (ldrv.lock) { spin_lock_irqsave(&ha->smp_lock, flags); ha->hdr[j].lock = 1; spin_unlock_irqrestore(&ha->smp_lock, flags); gdth_wait_completion(ha, ha->bus_cnt, j); } else { spin_lock_irqsave(&ha->smp_lock, flags); ha->hdr[j].lock = 0; spin_unlock_irqrestore(&ha->smp_lock, flags); gdth_next(ha); } } return 0; } static int ioc_resetdrv(void __user *arg, char *cmnd) { gdth_ioctl_reset res; gdth_cmd_str cmd; gdth_ha_str *ha; int rval; if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) || res.number >= MAX_HDRIVES) return -EFAULT; ha = gdth_find_ha(res.ionode); if (!ha) return -EFAULT; if (!ha->hdr[res.number].present) return 0; memset(&cmd, 0, sizeof(gdth_cmd_str)); cmd.Service = CACHESERVICE; cmd.OpCode = GDT_CLUST_RESET; if (ha->cache_feat & GDT_64BIT) cmd.u.cache64.DeviceNo = res.number; else cmd.u.cache.DeviceNo = res.number; rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL); if (rval < 0) 
return rval; res.status = rval; if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset))) return -EFAULT; return 0; } static int ioc_general(void __user *arg, char *cmnd) { gdth_ioctl_general gen; char *buf = NULL; u64 paddr; gdth_ha_str *ha; int rval; if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general))) return -EFAULT; ha = gdth_find_ha(gen.ionode); if (!ha) return -EFAULT; if (gen.data_len > INT_MAX) return -EINVAL; if (gen.sense_len > INT_MAX) return -EINVAL; if (gen.data_len + gen.sense_len > INT_MAX) return -EINVAL; if (gen.data_len + gen.sense_len != 0) { if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, FALSE, &paddr))) return -EFAULT; if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general), gen.data_len + gen.sense_len)) { gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); return -EFAULT; } if (gen.command.OpCode == GDT_IOCTL) { gen.command.u.ioctl.p_param = paddr; } else if (gen.command.Service == CACHESERVICE) { if (ha->cache_feat & GDT_64BIT) { /* copy elements from 32-bit IOCTL structure */ gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt; gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo; gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; /* addresses */ if (ha->cache_feat & SCATTER_GATHER) { gen.command.u.cache64.DestAddr = (u64)-1; gen.command.u.cache64.sg_canz = 1; gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; gen.command.u.cache64.sg_lst[1].sg_len = 0; } else { gen.command.u.cache64.DestAddr = paddr; gen.command.u.cache64.sg_canz = 0; } } else { if (ha->cache_feat & SCATTER_GATHER) { gen.command.u.cache.DestAddr = 0xffffffff; gen.command.u.cache.sg_canz = 1; gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr; gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; gen.command.u.cache.sg_lst[1].sg_len = 0; } else { gen.command.u.cache.DestAddr = paddr; gen.command.u.cache.sg_canz = 0; } } } else if (gen.command.Service == 
SCSIRAWSERVICE) { if (ha->raw_feat & GDT_64BIT) { /* copy elements from 32-bit IOCTL structure */ char cmd[16]; gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len; gen.command.u.raw64.bus = gen.command.u.raw.bus; gen.command.u.raw64.lun = gen.command.u.raw.lun; gen.command.u.raw64.target = gen.command.u.raw.target; memcpy(cmd, gen.command.u.raw.cmd, 16); memcpy(gen.command.u.raw64.cmd, cmd, 16); gen.command.u.raw64.clen = gen.command.u.raw.clen; gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen; gen.command.u.raw64.direction = gen.command.u.raw.direction; /* addresses */ if (ha->raw_feat & SCATTER_GATHER) { gen.command.u.raw64.sdata = (u64)-1; gen.command.u.raw64.sg_ranz = 1; gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; gen.command.u.raw64.sg_lst[1].sg_len = 0; } else { gen.command.u.raw64.sdata = paddr; gen.command.u.raw64.sg_ranz = 0; } gen.command.u.raw64.sense_data = paddr + gen.data_len; } else { if (ha->raw_feat & SCATTER_GATHER) { gen.command.u.raw.sdata = 0xffffffff; gen.command.u.raw.sg_ranz = 1; gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr; gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; gen.command.u.raw.sg_lst[1].sg_len = 0; } else { gen.command.u.raw.sdata = paddr; gen.command.u.raw.sg_ranz = 0; } gen.command.u.raw.sense_data = (u32)paddr + gen.data_len; } } else { gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); return -EFAULT; } } rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); if (rval < 0) { gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); return rval; } gen.status = rval; if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, gen.data_len + gen.sense_len)) { gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); return -EFAULT; } if (copy_to_user(arg, &gen, sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) { gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); return -EFAULT; } gdth_ioctl_free(ha, 
gen.data_len+gen.sense_len, buf, paddr); return 0; } static int ioc_hdrlist(void __user *arg, char *cmnd) { gdth_ioctl_rescan *rsc; gdth_cmd_str *cmd; gdth_ha_str *ha; u8 i; int rc = -ENOMEM; u32 cluster_type = 0; rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!rsc || !cmd) goto free_fail; if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) || (NULL == (ha = gdth_find_ha(rsc->ionode)))) { rc = -EFAULT; goto free_fail; } memset(cmd, 0, sizeof(gdth_cmd_str)); for (i = 0; i < MAX_HDRIVES; ++i) { if (!ha->hdr[i].present) { rsc->hdr_list[i].bus = 0xff; continue; } rsc->hdr_list[i].bus = ha->virt_bus; rsc->hdr_list[i].target = i; rsc->hdr_list[i].lun = 0; rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) { cmd->Service = CACHESERVICE; cmd->OpCode = GDT_CLUST_INFO; if (ha->cache_feat & GDT_64BIT) cmd->u.cache64.DeviceNo = i; else cmd->u.cache.DeviceNo = i; if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK) rsc->hdr_list[i].cluster_type = cluster_type; } } if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan))) rc = -EFAULT; else rc = 0; free_fail: kfree(rsc); kfree(cmd); return rc; } static int ioc_rescan(void __user *arg, char *cmnd) { gdth_ioctl_rescan *rsc; gdth_cmd_str *cmd; u16 i, status, hdr_cnt; u32 info; int cyls, hds, secs; int rc = -ENOMEM; unsigned long flags; gdth_ha_str *ha; rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd || !rsc) goto free_fail; if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) || (NULL == (ha = gdth_find_ha(rsc->ionode)))) { rc = -EFAULT; goto free_fail; } memset(cmd, 0, sizeof(gdth_cmd_str)); if (rsc->flag == 0) { /* old method: re-init. 
cache service */ cmd->Service = CACHESERVICE; if (ha->cache_feat & GDT_64BIT) { cmd->OpCode = GDT_X_INIT_HOST; cmd->u.cache64.DeviceNo = LINUX_OS; } else { cmd->OpCode = GDT_INIT; cmd->u.cache.DeviceNo = LINUX_OS; } status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); i = 0; hdr_cnt = (status == S_OK ? (u16)info : 0); } else { i = rsc->hdr_no; hdr_cnt = i + 1; } for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) { cmd->Service = CACHESERVICE; cmd->OpCode = GDT_INFO; if (ha->cache_feat & GDT_64BIT) cmd->u.cache64.DeviceNo = i; else cmd->u.cache.DeviceNo = i; status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); spin_lock_irqsave(&ha->smp_lock, flags); rsc->hdr_list[i].bus = ha->virt_bus; rsc->hdr_list[i].target = i; rsc->hdr_list[i].lun = 0; if (status != S_OK) { ha->hdr[i].present = FALSE; } else { ha->hdr[i].present = TRUE; ha->hdr[i].size = info; /* evaluate mapping */ ha->hdr[i].size &= ~SECS32; gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs); ha->hdr[i].heads = hds; ha->hdr[i].secs = secs; /* round size */ ha->hdr[i].size = cyls * hds * secs; } spin_unlock_irqrestore(&ha->smp_lock, flags); if (status != S_OK) continue; /* extended info, if GDT_64BIT, for drives > 2 TB */ /* but we need ha->info2, not yet stored in scp->SCp */ /* devtype, cluster info, R/W attribs */ cmd->Service = CACHESERVICE; cmd->OpCode = GDT_DEVTYPE; if (ha->cache_feat & GDT_64BIT) cmd->u.cache64.DeviceNo = i; else cmd->u.cache.DeviceNo = i; status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); spin_lock_irqsave(&ha->smp_lock, flags); ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0); spin_unlock_irqrestore(&ha->smp_lock, flags); cmd->Service = CACHESERVICE; cmd->OpCode = GDT_CLUST_INFO; if (ha->cache_feat & GDT_64BIT) cmd->u.cache64.DeviceNo = i; else cmd->u.cache.DeviceNo = i; status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); spin_lock_irqsave(&ha->smp_lock, flags); ha->hdr[i].cluster_type = ((status == S_OK && !shared_access) ? 
(u16)info : 0); spin_unlock_irqrestore(&ha->smp_lock, flags); rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; cmd->Service = CACHESERVICE; cmd->OpCode = GDT_RW_ATTRIBS; if (ha->cache_feat & GDT_64BIT) cmd->u.cache64.DeviceNo = i; else cmd->u.cache.DeviceNo = i; status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); spin_lock_irqsave(&ha->smp_lock, flags); ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0); spin_unlock_irqrestore(&ha->smp_lock, flags); } if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan))) rc = -EFAULT; else rc = 0; free_fail: kfree(rsc); kfree(cmd); return rc; } static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { gdth_ha_str *ha; Scsi_Cmnd *scp; unsigned long flags; char cmnd[MAX_COMMAND_SIZE]; void __user *argp = (void __user *)arg; memset(cmnd, 0xff, 12); TRACE(("gdth_ioctl() cmd 0x%x\n", cmd)); switch (cmd) { case GDTIOCTL_CTRCNT: { int cnt = gdth_ctr_count; if (put_user(cnt, (int __user *)argp)) return -EFAULT; break; } case GDTIOCTL_DRVERS: { int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION; if (put_user(ver, (int __user *)argp)) return -EFAULT; break; } case GDTIOCTL_OSVERS: { gdth_ioctl_osvers osv; osv.version = (u8)(LINUX_VERSION_CODE >> 16); osv.subversion = (u8)(LINUX_VERSION_CODE >> 8); osv.revision = (u16)(LINUX_VERSION_CODE & 0xff); if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) return -EFAULT; break; } case GDTIOCTL_CTRTYPE: { gdth_ioctl_ctrtype ctrt; if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) || (NULL == (ha = gdth_find_ha(ctrt.ionode)))) return -EFAULT; if (ha->type == GDT_ISA || ha->type == GDT_EISA) { ctrt.type = (u8)((ha->stype>>20) - 0x10); } else { if (ha->type != GDT_PCIMPR) { ctrt.type = (u8)((ha->stype<<4) + 6); } else { ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 
0xfd : 0xfe); if (ha->stype >= 0x300) ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device; else ctrt.ext_type = 0x6000 | ha->stype; } ctrt.device_id = ha->pdev->device; ctrt.sub_device_id = ha->pdev->subsystem_device; } ctrt.info = ha->brd_phys; ctrt.oem_id = ha->oem_id; if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype))) return -EFAULT; break; } case GDTIOCTL_GENERAL: return ioc_general(argp, cmnd); case GDTIOCTL_EVENT: return ioc_event(argp); case GDTIOCTL_LOCKDRV: return ioc_lockdrv(argp); case GDTIOCTL_LOCKCHN: { gdth_ioctl_lockchn lchn; u8 i, j; if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || (NULL == (ha = gdth_find_ha(lchn.ionode)))) return -EFAULT; i = lchn.channel; if (i < ha->bus_cnt) { if (lchn.lock) { spin_lock_irqsave(&ha->smp_lock, flags); ha->raw[i].lock = 1; spin_unlock_irqrestore(&ha->smp_lock, flags); for (j = 0; j < ha->tid_cnt; ++j) gdth_wait_completion(ha, i, j); } else { spin_lock_irqsave(&ha->smp_lock, flags); ha->raw[i].lock = 0; spin_unlock_irqrestore(&ha->smp_lock, flags); for (j = 0; j < ha->tid_cnt; ++j) gdth_next(ha); } } break; } case GDTIOCTL_RESCAN: return ioc_rescan(argp, cmnd); case GDTIOCTL_HDRLIST: return ioc_hdrlist(argp, cmnd); case GDTIOCTL_RESET_BUS: { gdth_ioctl_reset res; int rval; if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) || (NULL == (ha = gdth_find_ha(res.ionode)))) return -EFAULT; scp = kzalloc(sizeof(*scp), GFP_KERNEL); if (!scp) return -ENOMEM; scp->device = ha->sdev; scp->cmd_len = 12; scp->device->channel = res.number; rval = gdth_eh_bus_reset(scp); res.status = (rval == SUCCESS ? 
S_OK : S_GENERR); kfree(scp); if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset))) return -EFAULT; break; } case GDTIOCTL_RESET_DRV: return ioc_resetdrv(argp, cmnd); default: break; } return 0; } static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&gdth_mutex); ret = gdth_ioctl(file, cmd, arg); mutex_unlock(&gdth_mutex); return ret; } /* flush routine */ static void gdth_flush(gdth_ha_str *ha) { int i; gdth_cmd_str gdtcmd; char cmnd[MAX_COMMAND_SIZE]; memset(cmnd, 0xff, MAX_COMMAND_SIZE); TRACE2(("gdth_flush() hanum %d\n", ha->hanum)); for (i = 0; i < MAX_HDRIVES; ++i) { if (ha->hdr[i].present) { gdtcmd.BoardNode = LOCALBOARD; gdtcmd.Service = CACHESERVICE; gdtcmd.OpCode = GDT_FLUSH; if (ha->cache_feat & GDT_64BIT) { gdtcmd.u.cache64.DeviceNo = i; gdtcmd.u.cache64.BlockNo = 1; gdtcmd.u.cache64.sg_canz = 0; } else { gdtcmd.u.cache.DeviceNo = i; gdtcmd.u.cache.BlockNo = 1; gdtcmd.u.cache.sg_canz = 0; } TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i)); gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL); } } } /* configure lun */ static int gdth_slave_configure(struct scsi_device *sdev) { scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); sdev->skip_ms_page_3f = 1; sdev->skip_ms_page_8 = 1; return 0; } static struct scsi_host_template gdth_template = { .name = "GDT SCSI Disk Array Controller", .info = gdth_info, .queuecommand = gdth_queuecommand, .eh_bus_reset_handler = gdth_eh_bus_reset, .slave_configure = gdth_slave_configure, .bios_param = gdth_bios_param, .proc_info = gdth_proc_info, .eh_timed_out = gdth_timed_out, .proc_name = "gdth", .can_queue = GDTH_MAXCMDS, .this_id = -1, .sg_tablesize = GDTH_MAXSG, .cmd_per_lun = GDTH_MAXC_P_L, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; #ifdef CONFIG_ISA static int __init gdth_isa_probe_one(u32 isa_bios) { struct Scsi_Host *shp; gdth_ha_str *ha; dma_addr_t scratch_dma_handle = 0; int error, i; if (!gdth_search_isa(isa_bios)) 
return -ENXIO; shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); if (!shp) return -ENOMEM; ha = shost_priv(shp); error = -ENODEV; if (!gdth_init_isa(isa_bios,ha)) goto out_host_put; /* controller found and initialized */ printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n", isa_bios, ha->irq, ha->drq); error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha); if (error) { printk("GDT-ISA: Unable to allocate IRQ\n"); goto out_host_put; } error = request_dma(ha->drq, "gdth"); if (error) { printk("GDT-ISA: Unable to allocate DMA channel\n"); goto out_free_irq; } set_dma_mode(ha->drq,DMA_MODE_CASCADE); enable_dma(ha->drq); shp->unchecked_isa_dma = 1; shp->irq = ha->irq; shp->dma_channel = ha->drq; ha->hanum = gdth_ctr_count++; ha->shost = shp; ha->pccb = &ha->cmdext; ha->ccb_phys = 0L; ha->pdev = NULL; error = -ENOMEM; ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, &scratch_dma_handle); if (!ha->pscratch) goto out_dec_counters; ha->scratch_phys = scratch_dma_handle; ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), &scratch_dma_handle); if (!ha->pmsg) goto out_free_pscratch; ha->msg_phys = scratch_dma_handle; #ifdef INT_COAL ha->coal_stat = pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, &scratch_dma_handle); if (!ha->coal_stat) goto out_free_pmsg; ha->coal_stat_phys = scratch_dma_handle; #endif ha->scratch_busy = FALSE; ha->req_first = NULL; ha->tid_cnt = MAX_HDRIVES; if (max_ids > 0 && max_ids < ha->tid_cnt) ha->tid_cnt = max_ids; for (i = 0; i < GDTH_MAXCMDS; ++i) ha->cmd_tab[i].cmnd = UNUSED_CMND; ha->scan_mode = rescan ? 
0x10 : 0; error = -ENODEV; if (!gdth_search_drives(ha)) { printk("GDT-ISA: Error during device scan\n"); goto out_free_coal_stat; } if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) hdr_channel = ha->bus_cnt; ha->virt_bus = hdr_channel; if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) shp->max_cmd_len = 16; shp->max_id = ha->tid_cnt; shp->max_lun = MAXLUN; shp->max_channel = ha->bus_cnt; spin_lock_init(&ha->smp_lock); gdth_enable_int(ha); error = scsi_add_host(shp, NULL); if (error) goto out_free_coal_stat; list_add_tail(&ha->list, &gdth_instances); gdth_timer_init(); scsi_scan_host(shp); return 0; out_free_coal_stat: #ifdef INT_COAL pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys); out_free_pmsg: #endif pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); out_free_pscratch: pci_free_consistent(ha->pdev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); out_dec_counters: gdth_ctr_count--; out_free_irq: free_irq(ha->irq, ha); out_host_put: scsi_host_put(shp); return error; } #endif /* CONFIG_ISA */ #ifdef CONFIG_EISA static int __init gdth_eisa_probe_one(u16 eisa_slot) { struct Scsi_Host *shp; gdth_ha_str *ha; dma_addr_t scratch_dma_handle = 0; int error, i; if (!gdth_search_eisa(eisa_slot)) return -ENXIO; shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); if (!shp) return -ENOMEM; ha = shost_priv(shp); error = -ENODEV; if (!gdth_init_eisa(eisa_slot,ha)) goto out_host_put; /* controller found and initialized */ printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n", eisa_slot >> 12, ha->irq); error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha); if (error) { printk("GDT-EISA: Unable to allocate IRQ\n"); goto out_host_put; } shp->unchecked_isa_dma = 0; shp->irq = ha->irq; shp->dma_channel = 0xff; ha->hanum = gdth_ctr_count++; ha->shost = shp; TRACE2(("EISA detect Bus 0: hanum %d\n", ha->hanum)); ha->pccb = &ha->cmdext; ha->ccb_phys = 0L; error = 
-ENOMEM; ha->pdev = NULL; ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, &scratch_dma_handle); if (!ha->pscratch) goto out_free_irq; ha->scratch_phys = scratch_dma_handle; ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), &scratch_dma_handle); if (!ha->pmsg) goto out_free_pscratch; ha->msg_phys = scratch_dma_handle; #ifdef INT_COAL ha->coal_stat = pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, &scratch_dma_handle); if (!ha->coal_stat) goto out_free_pmsg; ha->coal_stat_phys = scratch_dma_handle; #endif ha->ccb_phys = pci_map_single(ha->pdev,ha->pccb, sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL); if (!ha->ccb_phys) goto out_free_coal_stat; ha->scratch_busy = FALSE; ha->req_first = NULL; ha->tid_cnt = MAX_HDRIVES; if (max_ids > 0 && max_ids < ha->tid_cnt) ha->tid_cnt = max_ids; for (i = 0; i < GDTH_MAXCMDS; ++i) ha->cmd_tab[i].cmnd = UNUSED_CMND; ha->scan_mode = rescan ? 0x10 : 0; if (!gdth_search_drives(ha)) { printk("GDT-EISA: Error during device scan\n"); error = -ENODEV; goto out_free_ccb_phys; } if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) hdr_channel = ha->bus_cnt; ha->virt_bus = hdr_channel; if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) shp->max_cmd_len = 16; shp->max_id = ha->tid_cnt; shp->max_lun = MAXLUN; shp->max_channel = ha->bus_cnt; spin_lock_init(&ha->smp_lock); gdth_enable_int(ha); error = scsi_add_host(shp, NULL); if (error) goto out_free_ccb_phys; list_add_tail(&ha->list, &gdth_instances); gdth_timer_init(); scsi_scan_host(shp); return 0; out_free_ccb_phys: pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL); out_free_coal_stat: #ifdef INT_COAL pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys); out_free_pmsg: #endif pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); out_free_pscratch: pci_free_consistent(ha->pdev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); 
out_free_irq: free_irq(ha->irq, ha); gdth_ctr_count--; out_host_put: scsi_host_put(shp); return error; } #endif /* CONFIG_EISA */ #ifdef CONFIG_PCI static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) { struct Scsi_Host *shp; gdth_ha_str *ha; dma_addr_t scratch_dma_handle = 0; int error, i; struct pci_dev *pdev = pcistr->pdev; *ha_out = NULL; shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); if (!shp) return -ENOMEM; ha = shost_priv(shp); error = -ENODEV; if (!gdth_init_pci(pdev, pcistr, ha)) goto out_host_put; /* controller found and initialized */ printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n", pdev->bus->number, PCI_SLOT(pdev->devfn), ha->irq); error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED|IRQF_SHARED, "gdth", ha); if (error) { printk("GDT-PCI: Unable to allocate IRQ\n"); goto out_host_put; } shp->unchecked_isa_dma = 0; shp->irq = ha->irq; shp->dma_channel = 0xff; ha->hanum = gdth_ctr_count++; ha->shost = shp; ha->pccb = &ha->cmdext; ha->ccb_phys = 0L; error = -ENOMEM; ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, &scratch_dma_handle); if (!ha->pscratch) goto out_free_irq; ha->scratch_phys = scratch_dma_handle; ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), &scratch_dma_handle); if (!ha->pmsg) goto out_free_pscratch; ha->msg_phys = scratch_dma_handle; #ifdef INT_COAL ha->coal_stat = pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, &scratch_dma_handle); if (!ha->coal_stat) goto out_free_pmsg; ha->coal_stat_phys = scratch_dma_handle; #endif ha->scratch_busy = FALSE; ha->req_first = NULL; ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES; if (max_ids > 0 && max_ids < ha->tid_cnt) ha->tid_cnt = max_ids; for (i = 0; i < GDTH_MAXCMDS; ++i) ha->cmd_tab[i].cmnd = UNUSED_CMND; ha->scan_mode = rescan ? 
0x10 : 0; error = -ENODEV; if (!gdth_search_drives(ha)) { printk("GDT-PCI %d: Error during device scan\n", ha->hanum); goto out_free_coal_stat; } if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) hdr_channel = ha->bus_cnt; ha->virt_bus = hdr_channel; /* 64-bit DMA only supported from FW >= x.43 */ if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) || !ha->dma64_support) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "GDT-PCI %d: " "Unable to set 32-bit DMA\n", ha->hanum); goto out_free_coal_stat; } } else { shp->max_cmd_len = 16; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum); } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "GDT-PCI %d: " "Unable to set 64/32-bit DMA\n", ha->hanum); goto out_free_coal_stat; } } shp->max_id = ha->tid_cnt; shp->max_lun = MAXLUN; shp->max_channel = ha->bus_cnt; spin_lock_init(&ha->smp_lock); gdth_enable_int(ha); error = scsi_add_host(shp, &pdev->dev); if (error) goto out_free_coal_stat; list_add_tail(&ha->list, &gdth_instances); pci_set_drvdata(ha->pdev, ha); gdth_timer_init(); scsi_scan_host(shp); *ha_out = ha; return 0; out_free_coal_stat: #ifdef INT_COAL pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys); out_free_pmsg: #endif pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); out_free_pscratch: pci_free_consistent(ha->pdev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); out_free_irq: free_irq(ha->irq, ha); gdth_ctr_count--; out_host_put: scsi_host_put(shp); return error; } #endif /* CONFIG_PCI */ static void gdth_remove_one(gdth_ha_str *ha) { struct Scsi_Host *shp = ha->shost; TRACE2(("gdth_remove_one()\n")); scsi_remove_host(shp); gdth_flush(ha); if (ha->sdev) { scsi_free_host_dev(ha->sdev); ha->sdev = NULL; } if (shp->irq) free_irq(shp->irq,ha); #ifdef CONFIG_ISA if (shp->dma_channel != 0xff) free_dma(shp->dma_channel); #endif 
#ifdef INT_COAL if (ha->coal_stat) pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys); #endif if (ha->pscratch) pci_free_consistent(ha->pdev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); if (ha->pmsg) pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); if (ha->ccb_phys) pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL); scsi_host_put(shp); } static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf) { gdth_ha_str *ha; TRACE2(("gdth_halt() event %d\n", (int)event)); if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) return NOTIFY_DONE; list_for_each_entry(ha, &gdth_instances, list) gdth_flush(ha); return NOTIFY_OK; } static struct notifier_block gdth_notifier = { gdth_halt, NULL, 0 }; static int __init gdth_init(void) { if (disable) { printk("GDT-HA: Controller driver disabled from" " command line !\n"); return 0; } printk("GDT-HA: Storage RAID Controller Driver. 
Version: %s\n", GDTH_VERSION_STR); /* initializations */ gdth_polling = TRUE; gdth_clear_events(); init_timer(&gdth_timer); /* As default we do not probe for EISA or ISA controllers */ if (probe_eisa_isa) { /* scanning for controllers, at first: ISA controller */ #ifdef CONFIG_ISA u32 isa_bios; for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; isa_bios += 0x8000UL) gdth_isa_probe_one(isa_bios); #endif #ifdef CONFIG_EISA { u16 eisa_slot; for (eisa_slot = 0x1000; eisa_slot <= 0x8000; eisa_slot += 0x1000) gdth_eisa_probe_one(eisa_slot); } #endif } #ifdef CONFIG_PCI /* scanning for PCI controllers */ if (pci_register_driver(&gdth_pci_driver)) { gdth_ha_str *ha; list_for_each_entry(ha, &gdth_instances, list) gdth_remove_one(ha); return -ENODEV; } #endif /* CONFIG_PCI */ TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count)); major = register_chrdev(0,"gdth", &gdth_fops); register_reboot_notifier(&gdth_notifier); gdth_polling = FALSE; return 0; } static void __exit gdth_exit(void) { gdth_ha_str *ha; unregister_chrdev(major, "gdth"); unregister_reboot_notifier(&gdth_notifier); #ifdef GDTH_STATISTICS del_timer_sync(&gdth_timer); #endif #ifdef CONFIG_PCI pci_unregister_driver(&gdth_pci_driver); #endif list_for_each_entry(ha, &gdth_instances, list) gdth_remove_one(ha); } module_init(gdth_init); module_exit(gdth_exit); #ifndef MODULE __setup("gdth=", option_setup); #endif
gpl-2.0
Tesla-Redux-Devices/android_kernel_samsung_lt02ltespr
arch/mips/mm/sc-r5k.c
4561
2183
/*
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
 * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
 *
 * R5000 secondary (S-)cache support.  The S-cache is physically
 * indexed and write-through; the hardware can only invalidate it
 * one SC_PAGE-sized region at a time via the Page_Invalidate_S
 * cache op, never at line granularity.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>

/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;

#define SC_LINE 32
#define SC_PAGE (128*SC_LINE)

/* Invalidate the whole secondary cache, one SC_PAGE region at a time. */
static inline void blast_r5000_scache(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + scache_size;

	while (start < end) {
		cache_op(R5K_Page_Invalidate_S, start);
		start += SC_PAGE;
	}
}

/*
 * Invalidate the S-cache range covering [addr, addr + size).
 * Falls back to a full blast when the range spans the whole cache.
 * Used for both bc_wback_inv and bc_inv: the cache is write-through,
 * so invalidation is all that is ever required.
 */
static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (size >= scache_size) {
		blast_r5000_scache();
		return;
	}

	/* On the R5000 secondary cache we cannot
	 * invalidate less than a page at a time.
	 * The secondary cache is physically indexed, write-through.
	 */
	a = addr & ~(SC_PAGE - 1);
	end = (addr + size - 1) & ~(SC_PAGE - 1);
	while (a <= end) {
		cache_op(R5K_Page_Invalidate_S, a);
		a += SC_PAGE;
	}
}

/* Enable the S-cache and start from a clean (fully invalidated) state. */
static void r5k_sc_enable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	set_c0_config(R5K_CONF_SE);
	blast_r5000_scache();
	local_irq_restore(flags);
}

/* Invalidate everything, then disable the S-cache. */
static void r5k_sc_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_r5000_scache();
	clear_c0_config(R5K_CONF_SE);
	local_irq_restore(flags);
}

/*
 * Probe for an R5000 secondary cache.  Returns 1 and records the size
 * in scache_size when present, 0 otherwise.  Per the R5000 Config
 * register, a *set* CONF_SC bit means "no S-cache".
 */
static inline int __init r5k_sc_probe(void)
{
	unsigned long config = read_c0_config();

	if (config & CONF_SC)
		return 0;

	scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);

	/* %lu: scache_size is unsigned long (was mismatched %ld) */
	printk(KERN_INFO "R5000 SCACHE size %lukB, linesize 32 bytes.\n",
	       scache_size >> 10);

	return 1;
}

static struct bcache_ops r5k_sc_ops = {
	.bc_enable = r5k_sc_enable,
	.bc_disable = r5k_sc_disable,
	.bc_wback_inv = r5k_dma_cache_inv_sc,
	.bc_inv = r5k_dma_cache_inv_sc
};

void __cpuinit r5k_sc_init(void)
{
	if (r5k_sc_probe()) {
		r5k_sc_enable();
		bcops = &r5k_sc_ops;
	}
}
gpl-2.0
chirayudesai/laughing-cyril
scripts/kconfig/lxdialog/textbox.c
4817
9144
/*
 * textbox.c -- implements the text box
 *
 * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
 * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dialog.h"

static void back_lines(int n);
static void print_page(WINDOW * win, int height, int width);
static void print_line(WINDOW * win, int row, int width);
static char *get_line(void);
static void print_position(WINDOW * win);

/*
 * Pager state shared by all helpers below.  'buf' is the full text,
 * 'page' points at the first character of the currently displayed
 * page and is advanced/rewound as the user scrolls.  'begin_reached'
 * and 'end_reached' track whether the view is at either extreme of
 * the text; 'page_length' is the number of lines on the current page.
 * NOTE: get_line()/back_lines() mutate this state, so the call order
 * inside dialog_textbox() is significant.
 */
static int hscroll;
static int begin_reached, end_reached, page_length;
static const char *buf;
static const char *page;

/*
 * refresh window content
 */
static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw,
			     int cur_y, int cur_x)
{
	print_page(box, boxh, boxw);
	print_position(dialog);
	wmove(dialog, cur_y, cur_x);	/* Restore cursor position */
	wrefresh(dialog);
}

/*
 * Display text from a file in a dialog box.
 *
 * title:          window title (may be scrolled text's file name)
 * tbuf:           NUL-terminated text to page through
 * initial_height/initial_width: 0 means size to the screen
 *
 * Returns 0 on explicit exit, the terminating key otherwise, or
 * -ERRDISPLAYTOOSMALL when the terminal is under 8x8.
 */
int dialog_textbox(const char *title, const char *tbuf,
		   int initial_height, int initial_width)
{
	int i, x, y, cur_x, cur_y, key = 0;
	int height, width, boxh, boxw;
	int passed_end;
	WINDOW *dialog, *box;

	begin_reached = 1;
	end_reached = 0;
	page_length = 0;
	hscroll = 0;
	buf = tbuf;
	page = buf;	/* page is pointer to start of page to be displayed */

do_resize:
	getmaxyx(stdscr, height, width);
	if (height < 8 || width < 8)
		return -ERRDISPLAYTOOSMALL;
	if (initial_height != 0)
		height = initial_height;
	else
		if (height > 4)
			height -= 4;
		else
			height = 0;
	if (initial_width != 0)
		width = initial_width;
	else
		if (width > 5)
			width -= 5;
		else
			width = 0;

	/* center dialog box on screen */
	x = (COLS - width) / 2;
	y = (LINES - height) / 2;

	draw_shadow(stdscr, y, x, height, width);

	dialog = newwin(height, width, y, x);
	keypad(dialog, TRUE);

	/* Create window for box region, used for scrolling text */
	boxh = height - 4;
	boxw = width - 2;
	box = subwin(dialog, boxh, boxw, y + 1, x + 1);
	wattrset(box, dlg.dialog.atr);
	wbkgdset(box, dlg.dialog.atr & A_COLOR);

	keypad(box, TRUE);

	/* register the new window, along with its borders */
	draw_box(dialog, 0, 0, height, width,
		 dlg.dialog.atr, dlg.border.atr);

	wattrset(dialog, dlg.border.atr);
	mvwaddch(dialog, height - 3, 0, ACS_LTEE);
	for (i = 0; i < width - 2; i++)
		waddch(dialog, ACS_HLINE);
	wattrset(dialog, dlg.dialog.atr);
	wbkgdset(dialog, dlg.dialog.atr & A_COLOR);
	waddch(dialog, ACS_RTEE);

	print_title(dialog, title, width);

	print_button(dialog, gettext(" Exit "), height - 2, width / 2 - 4, TRUE);
	wnoutrefresh(dialog);
	getyx(dialog, cur_y, cur_x);	/* Save cursor position */

	/* Print first page of text */
	attr_clear(box, boxh, boxw, dlg.dialog.atr);
	refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);

	while ((key != KEY_ESC) && (key != '\n')) {
		key = wgetch(dialog);
		switch (key) {
		case 'E':	/* Exit */
		case 'e':
		case 'X':
		case 'x':
			delwin(box);
			delwin(dialog);
			return 0;
		case 'g':	/* First page */
		case KEY_HOME:
			if (!begin_reached) {
				begin_reached = 1;
				page = buf;
				refresh_text_box(dialog, box, boxh, boxw,
						 cur_y, cur_x);
			}
			break;
		case 'G':	/* Last page */
		case KEY_END:
			end_reached = 1;
			/* point to last char in buf */
			page = buf + strlen(buf);
			back_lines(boxh);
			refresh_text_box(dialog, box, boxh, boxw,
					 cur_y, cur_x);
			break;
		case 'K':	/* Previous line */
		case 'k':
		case KEY_UP:
			if (!begin_reached) {
				back_lines(page_length + 1);

				/* We don't call print_page() here but use
				 * scrolling to ensure faster screen update.
				 * However, 'end_reached' and 'page_length'
				 * should still be updated, and 'page' should
				 * point to start of next page. This is done
				 * by calling get_line() in the following
				 * 'for' loop. */
				scrollok(box, TRUE);
				wscrl(box, -1);	/* Scroll box region down one line */
				scrollok(box, FALSE);
				page_length = 0;
				passed_end = 0;
				for (i = 0; i < boxh; i++) {
					if (!i) {
						/* print first line of page */
						print_line(box, 0, boxw);
						wnoutrefresh(box);
					} else
						/* Called to update 'end_reached' and 'page' */
						get_line();
					if (!passed_end)
						page_length++;
					if (end_reached && !passed_end)
						passed_end = 1;
				}

				print_position(dialog);
				wmove(dialog, cur_y, cur_x);	/* Restore cursor position */
				wrefresh(dialog);
			}
			break;
		case 'B':	/* Previous page */
		case 'b':
		case KEY_PPAGE:
			if (begin_reached)
				break;
			/* page_length lines back to top of current page,
			 * then boxh more for the previous page */
			back_lines(page_length + boxh);
			refresh_text_box(dialog, box, boxh, boxw,
					 cur_y, cur_x);
			break;
		case 'J':	/* Next line */
		case 'j':
		case KEY_DOWN:
			if (!end_reached) {
				begin_reached = 0;
				scrollok(box, TRUE);
				scroll(box);	/* Scroll box region up one line */
				scrollok(box, FALSE);
				print_line(box, boxh - 1, boxw);
				wnoutrefresh(box);
				print_position(dialog);
				wmove(dialog, cur_y, cur_x);	/* Restore cursor position */
				wrefresh(dialog);
			}
			break;
		case KEY_NPAGE:	/* Next page */
		case ' ':
			if (end_reached)
				break;

			begin_reached = 0;
			refresh_text_box(dialog, box, boxh, boxw,
					 cur_y, cur_x);
			break;
		case '0':	/* Beginning of line */
		case 'H':	/* Scroll left */
		case 'h':
		case KEY_LEFT:
			if (hscroll <= 0)
				break;

			if (key == '0')
				hscroll = 0;
			else
				hscroll--;
			/* Reprint current page to scroll horizontally */
			back_lines(page_length);
			refresh_text_box(dialog, box, boxh, boxw,
					 cur_y, cur_x);
			break;
		case 'L':	/* Scroll right */
		case 'l':
		case KEY_RIGHT:
			if (hscroll >= MAX_LEN)
				break;

			hscroll++;
			/* Reprint current page to scroll horizontally */
			back_lines(page_length);
			refresh_text_box(dialog, box, boxh, boxw,
					 cur_y, cur_x);
			break;
		case KEY_ESC:
			key = on_key_esc(dialog);
			break;
		case KEY_RESIZE:
			back_lines(height);
			delwin(box);
			delwin(dialog);
			on_key_resize();
			goto do_resize;
		}
	}
	delwin(box);
	delwin(dialog);
	return key;		/* ESC pressed */
}

/*
 * Go back 'n' lines in text. Called by dialog_textbox().
 * 'page' will be updated to point to the desired line in 'buf'.
 */
static void back_lines(int n)
{
	int i;

	begin_reached = 0;
	/* Go back 'n' lines */
	for (i = 0; i < n; i++) {
		/* At EOF with end_reached set, the first step back merely
		 * clears the flag without consuming a line. */
		if (*page == '\0') {
			if (end_reached) {
				end_reached = 0;
				continue;
			}
		}
		if (page == buf) {
			begin_reached = 1;
			return;
		}
		page--;
		/* Skip back over the previous line to the '\n' before it,
		 * then step forward to its first character. */
		do {
			if (page == buf) {
				begin_reached = 1;
				return;
			}
			page--;
		} while (*page != '\n');
		page++;
	}
}

/*
 * Print a new page of text. Called by dialog_textbox().
 */
static void print_page(WINDOW * win, int height, int width)
{
	int i, passed_end = 0;

	/* page_length counts only lines up to (and including) the one on
	 * which end_reached first became set */
	page_length = 0;
	for (i = 0; i < height; i++) {
		print_line(win, i, width);
		if (!passed_end)
			page_length++;
		if (end_reached && !passed_end)
			passed_end = 1;
	}
	wnoutrefresh(win);
}

/*
 * Print a new line of text. Called by dialog_textbox() and print_page().
 */
static void print_line(WINDOW * win, int row, int width)
{
	char *line;

	line = get_line();
	line += MIN(strlen(line), hscroll);	/* Scroll horizontally */
	wmove(win, row, 0);	/* move cursor to correct line */
	waddch(win, ' ');
	waddnstr(win, line, MIN(strlen(line), width - 2));

	/* Clear 'residue' of previous line */
#if OLD_NCURSES
	{
		int x = getcurx(win);
		int i;
		for (i = 0; i < width - x; i++)
			waddch(win, ' ');
	}
#else
	wclrtoeol(win);
#endif
}

/*
 * Return current line of text. Called by dialog_textbox() and print_line().
 * 'page' should point to start of current line before calling, and will be
 * updated to point to start of next line.
 */
static char *get_line(void)
{
	int i = 0;
	/* static: returned pointer stays valid until the next call */
	static char line[MAX_LEN + 1];

	end_reached = 0;
	while (*page != '\n') {
		if (*page == '\0') {
			if (!end_reached) {
				end_reached = 1;
				break;
			}
		} else if (i < MAX_LEN)
			line[i++] = *(page++);
		else {
			/* Truncate lines longer than MAX_LEN characters */
			if (i == MAX_LEN)
				line[i++] = '\0';
			page++;
		}
	}
	if (i <= MAX_LEN)
		line[i] = '\0';
	if (!end_reached)
		page++;	/* move pass '\n' */

	return line;
}

/*
 * Print current position
 */
static void print_position(WINDOW * win)
{
	int percent;

	wattrset(win, dlg.position_indicator.atr);
	wbkgdset(win, dlg.position_indicator.atr & A_COLOR);
	/* NOTE(review): divides by strlen(buf) — presumably callers never
	 * pass an empty buffer; confirm against callers. */
	percent = (page - buf) * 100 / strlen(buf);
	wmove(win, getmaxy(win) - 3, getmaxx(win) - 9);
	wprintw(win, "(%3d%%)", percent);
}
gpl-2.0
kbc-developers/android_kernel_samsung_klte
drivers/net/ethernet/xircom/xirc2ps_cs.c
5073
53524
/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03) * Xircom CreditCard Ethernet Adapter IIps driver * Xircom Realport 10/100 (RE-100) driver * * This driver supports various Xircom CreditCard Ethernet adapters * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56, * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100. * * 2000-09-24 <psheer@icon.co.za> The Xircom CE3B-100 may not * autodetect the media properly. In this case use the * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options * to force the media type. * * Written originally by Werner Koch based on David Hinds' skeleton of the * PCMCIA driver. * * Copyright (c) 1997,1998 Werner Koch (dd9jn) * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * It is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * * ALTERNATIVELY, this driver may be distributed under the terms of * the following license, in which case the provisions of this license * are required INSTEAD OF the GNU General Public License. (This clause * is necessary due to a potential bad interaction between the GPL and * the restrictions contained in a BSD-style copyright.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/bitops.h> #include <linux/mii.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <asm/io.h> #include <asm/uaccess.h> #ifndef MANFID_COMPAQ #define MANFID_COMPAQ 0x0138 #define MANFID_COMPAQ2 0x0183 /* is this correct? 
*/ #endif #include <pcmcia/ds.h> /* Time in jiffies before concluding Tx hung */ #define TX_TIMEOUT ((400*HZ)/1000) /**************** * Some constants used to access the hardware */ /* Register offsets and value constans */ #define XIRCREG_CR 0 /* Command register (wr) */ enum xirc_cr { TransmitPacket = 0x01, SoftReset = 0x02, EnableIntr = 0x04, ForceIntr = 0x08, ClearTxFIFO = 0x10, ClearRxOvrun = 0x20, RestartTx = 0x40 }; #define XIRCREG_ESR 0 /* Ethernet status register (rd) */ enum xirc_esr { FullPktRcvd = 0x01, /* full packet in receive buffer */ PktRejected = 0x04, /* a packet has been rejected */ TxPktPend = 0x08, /* TX Packet Pending */ IncorPolarity = 0x10, MediaSelect = 0x20 /* set if TP, clear if AUI */ }; #define XIRCREG_PR 1 /* Page Register select */ #define XIRCREG_EDP 4 /* Ethernet Data Port Register */ #define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */ enum xirc_isr { TxBufOvr = 0x01, /* TX Buffer Overflow */ PktTxed = 0x02, /* Packet Transmitted */ MACIntr = 0x04, /* MAC Interrupt occurred */ TxResGrant = 0x08, /* Tx Reservation Granted */ RxFullPkt = 0x20, /* Rx Full Packet */ RxPktRej = 0x40, /* Rx Packet Rejected */ ForcedIntr= 0x80 /* Forced Interrupt */ }; #define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/ #define XIRCREG1_IMR1 13 #define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/ #define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/ #define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */ #define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */ enum xirc_rsr { PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */ BrdcstPkt = 0x02, /* set if it is a broadcast packet */ PktTooLong = 0x04, /* set if packet length > 1518 */ AlignErr = 0x10, /* incorrect CRC and last octet not complete */ CRCErr = 0x20, /* incorrect CRC and last octet is complete */ PktRxOk = 0x80 /* received ok */ }; #define XIRCREG0_PTR 13 /* packets transmitted register (rd) 
*/ #define XIRCREG0_RBC 14 /* receive byte count regsister (rd) */ #define XIRCREG1_ECR 14 /* ethernet configurationn register */ enum xirc_ecr { FullDuplex = 0x04, /* enable full duplex mode */ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */ DisablePolCor = 0x10,/* disable auto polarity correction */ DisableLinkPulse = 0x20, /* disable link pulse generation */ DisableAutoTx = 0x40, /* disable auto-transmit */ }; #define XIRCREG2_RBS 8 /* receive buffer start register */ #define XIRCREG2_LED 10 /* LED Configuration register */ /* values for the leds: Bits 2-0 for led 1 * 0 disabled Bits 5-3 for led 2 * 1 collision * 2 noncollision * 3 link_detected * 4 incor_polarity * 5 jabber * 6 auto_assertion * 7 rx_tx_activity */ #define XIRCREG2_MSR 12 /* Mohawk specific register */ #define XIRCREG4_GPR0 8 /* General Purpose Register 0 */ #define XIRCREG4_GPR1 9 /* General Purpose Register 1 */ #define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/ #define XIRCREG4_BOV 10 /* Bonding Version Register */ #define XIRCREG4_LMA 12 /* Local Memory Address Register */ #define XIRCREG4_LMD 14 /* Local Memory Data Port */ /* MAC register can only by accessed with 8 bit operations */ #define XIRCREG40_CMD0 8 /* Command Register (wr) */ enum xirc_cmd { /* Commands */ Transmit = 0x01, EnableRecv = 0x04, DisableRecv = 0x08, Abort = 0x10, Online = 0x20, IntrAck = 0x40, Offline = 0x80 }; #define XIRCREG5_RHSA0 10 /* Rx Host Start Address */ #define XIRCREG40_RXST0 9 /* Receive Status Register */ #define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */ #define XIRCREG40_TXST1 12 /* Transmit Status Register 10 */ #define XIRCREG40_RMASK0 13 /* Receive Mask Register */ #define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */ #define XIRCREG40_TMASK1 15 /* Transmit Mask Register 0 */ #define XIRCREG42_SWC0 8 /* Software Configuration 0 */ #define XIRCREG42_SWC1 9 /* Software Configuration 1 */ #define XIRCREG42_BOC 10 /* Back-Off Configuration */ #define 
XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */ #define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */ #define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */ #define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */ #define XIRCREG45_REV 15 /* Revision Register (rd) */ #define XIRCREG50_IA 8 /* Individual Address (8-13) */ static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; /* card types */ #define XIR_UNKNOWN 0 /* unknown: not supported */ #define XIR_CE 1 /* (prodid 1) different hardware: not supported */ #define XIR_CE2 2 /* (prodid 2) */ #define XIR_CE3 3 /* (prodid 3) */ #define XIR_CEM 4 /* (prodid 1) different hardware: not supported */ #define XIR_CEM2 5 /* (prodid 2) */ #define XIR_CEM3 6 /* (prodid 3) */ #define XIR_CEM33 7 /* (prodid 4) */ #define XIR_CEM56M 8 /* (prodid 5) */ #define XIR_CEM56 9 /* (prodid 6) */ #define XIR_CM28 10 /* (prodid 3) modem only: not supported here */ #define XIR_CM33 11 /* (prodid 4) modem only: not supported here */ #define XIR_CM56 12 /* (prodid 5) modem only: not supported here */ #define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */ #define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */ /*====================================================================*/ /* Module parameters */ MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver"); MODULE_LICENSE("Dual MPL/GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) INT_MODULE_PARM(if_port, 0); INT_MODULE_PARM(full_duplex, 0); INT_MODULE_PARM(do_sound, 1); INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */ /*====================================================================*/ /* We do not process more than these number of bytes during one * interrupt. (Of course we receive complete packets, so this is not * an exact value). * Something between 2000..22000; first value gives best interrupt latency, * the second enables the usage of the complete on-chip buffer. 
We use the * high value as the initial value. */ static unsigned maxrx_bytes = 22000; /* MII management prototypes */ static void mii_idle(unsigned int ioaddr); static void mii_putbit(unsigned int ioaddr, unsigned data); static int mii_getbit(unsigned int ioaddr); static void mii_wbits(unsigned int ioaddr, unsigned data, int len); static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg); static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len); static int has_ce2_string(struct pcmcia_device * link); static int xirc2ps_config(struct pcmcia_device * link); static void xirc2ps_release(struct pcmcia_device * link); static void xirc2ps_detach(struct pcmcia_device *p_dev); static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); typedef struct local_info_t { struct net_device *dev; struct pcmcia_device *p_dev; int card_type; int probe_port; int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */ int mohawk; /* a CE3 type card */ int dingo; /* a CEM56 type card */ int new_mii; /* has full 10baseT/100baseT MII */ int modem; /* is a multi function card (i.e with a modem) */ void __iomem *dingo_ccr; /* only used for CEM56 cards */ unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; struct work_struct tx_timeout_task; } local_info_t; /**************** * Some more prototypes */ static netdev_tx_t do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void xirc_tx_timeout(struct net_device *dev); static void xirc2ps_tx_timeout_task(struct work_struct *work); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static int set_card_type(struct pcmcia_device *link); static int do_config(struct net_device *dev, struct ifmap *map); static int do_open(struct net_device *dev); static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static void 
hardreset(struct net_device *dev); static void do_reset(struct net_device *dev, int full); static int init_mii(struct net_device *dev); static void do_powerdown(struct net_device *dev); static int do_stop(struct net_device *dev); /*=============== Helper functions =========================*/ #define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) #define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) #define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) #define PutByte(reg,value) outb((value), ioaddr+(reg)) #define PutWord(reg,value) outw((value), ioaddr+(reg)) /*====== Functions used for debugging =================================*/ #if 0 /* reading regs may change system status */ static void PrintRegisters(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; if (pc_debug > 1) { int i, page; printk(KERN_DEBUG pr_fmt("Register common: ")); for (i = 0; i < 8; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); for (page = 0; page <= 8; page++) { printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); SelectPage(page); for (i = 8; i < 16; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); } for (page=0x40 ; page <= 0x5f; page++) { if (page == 0x43 || (page >= 0x46 && page <= 0x4f) || (page >= 0x51 && page <=0x5e)) continue; printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); SelectPage(page); for (i = 8; i < 16; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); } } } #endif /* 0 */ /*============== MII Management functions ===============*/ /**************** * Turn around for read */ static void mii_idle(unsigned int ioaddr) { PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */ udelay(1); PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */ udelay(1); } /**************** * Write a bit to MDI/O */ static void mii_putbit(unsigned int ioaddr, unsigned data) { #if 1 if (data) { PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */ udelay(1); PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */ udelay(1); } else { PutByte(XIRCREG2_GPR2, 0x0c|0|0); 
/* clear MDIO */ udelay(1); PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */ udelay(1); } #else if (data) { PutWord(XIRCREG2_GPR2-1, 0x0e0e); udelay(1); PutWord(XIRCREG2_GPR2-1, 0x0f0f); udelay(1); } else { PutWord(XIRCREG2_GPR2-1, 0x0c0c); udelay(1); PutWord(XIRCREG2_GPR2-1, 0x0d0d); udelay(1); } #endif } /**************** * Get a bit from MDI/O */ static int mii_getbit(unsigned int ioaddr) { unsigned d; PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */ udelay(1); d = GetByte(XIRCREG2_GPR2); /* read MDIO */ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */ udelay(1); return d & 0x20; /* read MDIO */ } static void mii_wbits(unsigned int ioaddr, unsigned data, int len) { unsigned m = 1 << (len-1); for (; m; m >>= 1) mii_putbit(ioaddr, data & m); } static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg) { int i; unsigned data=0, m; SelectPage(2); for (i=0; i < 32; i++) /* 32 bit preamble */ mii_putbit(ioaddr, 1); mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */ mii_idle(ioaddr); /* turn around */ mii_getbit(ioaddr); for (m = 1<<15; m; m >>= 1) if (mii_getbit(ioaddr)) data |= m; mii_idle(ioaddr); return data; } static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len) { int i; SelectPage(2); for (i=0; i < 32; i++) /* 32 bit preamble */ mii_putbit(ioaddr, 1); mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */ mii_putbit(ioaddr, 1); /* turn around */ mii_putbit(ioaddr, 0); mii_wbits(ioaddr, data, len); /* And write the data */ mii_idle(ioaddr); } /*============= Main bulk of functions =========================*/ static const struct net_device_ops netdev_ops = { .ndo_open = do_open, .ndo_stop = do_stop, .ndo_start_xmit = do_start_xmit, 
.ndo_tx_timeout = xirc_tx_timeout, .ndo_set_config = do_config, .ndo_do_ioctl = do_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int xirc2ps_probe(struct pcmcia_device *link) { struct net_device *dev; local_info_t *local; dev_dbg(&link->dev, "attach()\n"); /* Allocate the device structure */ dev = alloc_etherdev(sizeof(local_info_t)); if (!dev) return -ENOMEM; local = netdev_priv(dev); local->dev = dev; local->p_dev = link; link->priv = dev; /* General socket configuration */ link->config_index = 1; /* Fill in card specific entries */ dev->netdev_ops = &netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); return xirc2ps_config(link); } /* xirc2ps_attach */ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "detach\n"); unregister_netdev(dev); xirc2ps_release(link); free_netdev(dev); } /* xirc2ps_detach */ /**************** * Detect the type of the card. 
s is the buffer with the data of tuple 0x20 * Returns: 0 := not supported * mediaid=11 and prodid=47 * Media-Id bits: * Ethernet 0x01 * Tokenring 0x02 * Arcnet 0x04 * Wireless 0x08 * Modem 0x10 * GSM only 0x20 * Prod-Id bits: * Pocket 0x10 * External 0x20 * Creditcard 0x40 * Cardbus 0x80 * */ static int set_card_type(struct pcmcia_device *link) { struct net_device *dev = link->priv; local_info_t *local = netdev_priv(dev); u8 *buf; unsigned int cisrev, mediaid, prodid; size_t len; len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf); if (len < 5) { dev_err(&link->dev, "invalid CIS -- sorry\n"); return 0; } cisrev = buf[2]; mediaid = buf[3]; prodid = buf[4]; dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n", cisrev, mediaid, prodid); local->mohawk = 0; local->dingo = 0; local->modem = 0; local->card_type = XIR_UNKNOWN; if (!(prodid & 0x40)) { pr_notice("Oops: Not a creditcard\n"); return 0; } if (!(mediaid & 0x01)) { pr_notice("Not an Ethernet card\n"); return 0; } if (mediaid & 0x10) { local->modem = 1; switch(prodid & 15) { case 1: local->card_type = XIR_CEM ; break; case 2: local->card_type = XIR_CEM2 ; break; case 3: local->card_type = XIR_CEM3 ; break; case 4: local->card_type = XIR_CEM33 ; break; case 5: local->card_type = XIR_CEM56M; local->mohawk = 1; break; case 6: case 7: /* 7 is the RealPort 10/56 */ local->card_type = XIR_CEM56 ; local->mohawk = 1; local->dingo = 1; break; } } else { switch(prodid & 15) { case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ; break; case 2: local->card_type = XIR_CE2; break; case 3: local->card_type = XIR_CE3; local->mohawk = 1; break; } } if (local->card_type == XIR_CE || local->card_type == XIR_CEM) { pr_notice("Sorry, this is an old CE card\n"); return 0; } if (local->card_type == XIR_UNKNOWN) pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid); return 1; } /**************** * There are some CE2 cards out which claim to be a CE card. 
* This function looks for a "CE2" in the 3rd version field. * Returns: true if this is a CE2 */ static int has_ce2_string(struct pcmcia_device * p_dev) { if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2")) return 1; return 0; } static int xirc2ps_config_modem(struct pcmcia_device *p_dev, void *priv_data) { unsigned int ioaddr; if ((p_dev->resource[0]->start & 0xf) == 8) return -ENODEV; p_dev->resource[0]->end = 16; p_dev->resource[1]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 10; p_dev->resource[1]->start = p_dev->resource[0]->start; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { p_dev->resource[0]->start = ioaddr; if (!pcmcia_request_io(p_dev)) return 0; } return -ENODEV; } static int xirc2ps_config_check(struct pcmcia_device *p_dev, void *priv_data) { int *pass = priv_data; resource_size_t tmp = p_dev->resource[1]->start; tmp += (*pass ? (p_dev->config_index & 0x20 ? -24 : 8) : (p_dev->config_index & 0x20 ? 
8 : -24)); if ((p_dev->resource[0]->start & 0xf) == 8) return -ENODEV; p_dev->resource[0]->end = 18; p_dev->resource[1]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 10; p_dev->resource[1]->start = p_dev->resource[0]->start; p_dev->resource[0]->start = tmp; return pcmcia_request_io(p_dev); } static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, tuple_t *tuple, void *priv) { struct net_device *dev = priv; int i; if (tuple->TupleDataLen != 13) return -EINVAL; if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) || (tuple->TupleData[2] != 6)) return -EINVAL; /* another try (James Lehmer's CE2 version 4.1)*/ for (i = 2; i < 6; i++) dev->dev_addr[i] = tuple->TupleData[i+2]; return 0; }; static int xirc2ps_config(struct pcmcia_device * link) { struct net_device *dev = link->priv; local_info_t *local = netdev_priv(dev); unsigned int ioaddr; int err; u8 *buf; size_t len; local->dingo_ccr = NULL; dev_dbg(&link->dev, "config\n"); /* Is this a valid card */ if (link->has_manf_id == 0) { pr_notice("manfid not found in CIS\n"); goto failure; } switch (link->manf_id) { case MANFID_XIRCOM: local->manf_str = "Xircom"; break; case MANFID_ACCTON: local->manf_str = "Accton"; break; case MANFID_COMPAQ: case MANFID_COMPAQ2: local->manf_str = "Compaq"; break; case MANFID_INTEL: local->manf_str = "Intel"; break; case MANFID_TOSHIBA: local->manf_str = "Toshiba"; break; default: pr_notice("Unknown Card Manufacturer ID: 0x%04x\n", (unsigned)link->manf_id); goto failure; } dev_dbg(&link->dev, "found %s card\n", local->manf_str); if (!set_card_type(link)) { pr_notice("this card is not supported\n"); goto failure; } /* get the ethernet address from the CIS */ err = pcmcia_get_mac_from_cis(link, dev); /* not found: try to get the node-id from tuple 0x89 */ if (err) { len = pcmcia_get_tuple(link, 0x89, 
&buf); /* data layout looks like tuple 0x22 */ if (buf && len == 8) { if (*buf == CISTPL_FUNCE_LAN_NODE_ID) { int i; for (i = 2; i < 6; i++) dev->dev_addr[i] = buf[i+2]; } else err = -1; } kfree(buf); } if (err) err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); if (err) { pr_notice("node-id not found in CIS\n"); goto failure; } if (local->modem) { int pass; link->config_flags |= CONF_AUTO_SET_IO; if (local->dingo) { /* Take the Modem IO port from the CIS and scan for a free * Ethernet port */ if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL)) goto port_found; } else { /* We do 2 passes here: The first one uses the regular mapping and * the second tries again, thereby considering that the 32 ports are * mirrored every 32 bytes. Actually we use a mirrored port for * the Mako if (on the first pass) the COR bit 5 is set. */ for (pass=0; pass < 2; pass++) if (!pcmcia_loop_config(link, xirc2ps_config_check, &pass)) goto port_found; /* if special option: * try to configure as Ethernet only. * .... */ } pr_notice("no ports available\n"); } else { link->io_lines = 10; link->resource[0]->end = 16; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { link->resource[0]->start = ioaddr; if (!(err = pcmcia_request_io(link))) goto port_found; } link->resource[0]->start = 0; /* let CS decide */ if ((err = pcmcia_request_io(link))) goto config_error; } port_found: if (err) goto config_error; /**************** * Now allocate an interrupt line. Note that this does not * actually assign a handler to the interrupt. 
*/ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt))) goto config_error; link->config_flags |= CONF_ENABLE_IRQ; if (do_sound) link->config_flags |= CONF_ENABLE_SPKR; if ((err = pcmcia_enable_device(link))) goto config_error; if (local->dingo) { /* Reset the modem's BAR to the correct value * This is necessary because in the RequestConfiguration call, * the base address of the ethernet port (BasePort1) is written * to the BAR registers of the modem. */ err = pcmcia_write_config_byte(link, CISREG_IOBASE_0, (u8) link->resource[1]->start & 0xff); if (err) goto config_error; err = pcmcia_write_config_byte(link, CISREG_IOBASE_1, (link->resource[1]->start >> 8) & 0xff); if (err) goto config_error; /* There is no config entry for the Ethernet part which * is at 0x0800. So we allocate a window into the attribute * memory and write direct to the CIS registers */ link->resource[2]->flags = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; link->resource[2]->start = link->resource[2]->end = 0; if ((err = pcmcia_request_window(link, link->resource[2], 0))) goto config_error; local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800; if ((err = pcmcia_map_mem_page(link, link->resource[2], 0))) goto config_error; /* Setup the CCRs; there are no infos in the CIS about the Ethernet * part. 
*/ writeb(0x47, local->dingo_ccr + CISREG_COR); ioaddr = link->resource[0]->start; writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0); writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1); #if 0 { u_char tmp; pr_info("ECOR:"); for (i=0; i < 7; i++) { tmp = readb(local->dingo_ccr + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); pr_info("DCOR:"); for (i=0; i < 4; i++) { tmp = readb(local->dingo_ccr + 0x20 + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); pr_info("SCOR:"); for (i=0; i < 10; i++) { tmp = readb(local->dingo_ccr + 0x40 + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); } #endif writeb(0x01, local->dingo_ccr + 0x20); writeb(0x0c, local->dingo_ccr + 0x22); writeb(0x00, local->dingo_ccr + 0x24); writeb(0x00, local->dingo_ccr + 0x26); writeb(0x00, local->dingo_ccr + 0x28); } /* The if_port symbol can be set when the module is loaded */ local->probe_port=0; if (!if_port) { local->probe_port = dev->if_port = 1; } else if ((if_port >= 1 && if_port <= 2) || (local->mohawk && if_port==4)) dev->if_port = if_port; else pr_notice("invalid if_port requested\n"); /* we can now register the device with the net subsystem */ dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (local->dingo) do_reset(dev, 1); /* a kludge to make the cem56 work */ SET_NETDEV_DEV(dev, &link->dev); if ((err=register_netdev(dev))) { pr_notice("register_netdev() failed\n"); goto config_error; } /* give some infos about the hardware */ netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n", local->manf_str, (u_long)dev->base_addr, (int)dev->irq, dev->dev_addr); return 0; config_error: xirc2ps_release(link); return -ENODEV; failure: return -ENODEV; } /* xirc2ps_config */ static void xirc2ps_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "release\n"); if (link->resource[2]->end) { struct net_device *dev = link->priv; local_info_t *local = netdev_priv(dev); if (local->dingo) iounmap(local->dingo_ccr - 0x0800); } pcmcia_disable_device(link); } /* 
xirc2ps_release */ /*====================================================================*/ static int xirc2ps_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { netif_device_detach(dev); do_powerdown(dev); } return 0; } static int xirc2ps_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { do_reset(dev,1); netif_device_attach(dev); } return 0; } /*====================================================================*/ /**************** * This is the Interrupt service route. */ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; local_info_t *lp = netdev_priv(dev); unsigned int ioaddr; u_char saved_page; unsigned bytes_rcvd; unsigned int_status, eth_status, rx_status, tx_status; unsigned rsr, pktlen; ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days * is this something to worry about? * -- on a laptop? */ if (!netif_device_present(dev)) return IRQ_HANDLED; ioaddr = dev->base_addr; if (lp->mohawk) { /* must disable the interrupt */ PutByte(XIRCREG_CR, 0); } pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); saved_page = GetByte(XIRCREG_PR); /* Read the ISR to see whats the cause for the interrupt. 
* This also clears the interrupt flags on CE2 cards */ int_status = GetByte(XIRCREG_ISR); bytes_rcvd = 0; loop_entry: if (int_status == 0xff) { /* card may be ejected */ pr_debug("%s: interrupt %d for dead card\n", dev->name, irq); goto leave; } eth_status = GetByte(XIRCREG_ESR); SelectPage(0x40); rx_status = GetByte(XIRCREG40_RXST0); PutByte(XIRCREG40_RXST0, (~rx_status & 0xff)); tx_status = GetByte(XIRCREG40_TXST0); tx_status |= GetByte(XIRCREG40_TXST1) << 8; PutByte(XIRCREG40_TXST0, 0); PutByte(XIRCREG40_TXST1, 0); pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", dev->name, int_status, eth_status, rx_status, tx_status); /***** receive section ******/ SelectPage(0); while (eth_status & FullPktRcvd) { rsr = GetByte(XIRCREG0_RSR); if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) { /* too many bytes received during this int, drop the rest of the * packets */ dev->stats.rx_dropped++; pr_debug("%s: RX drop, too much done\n", dev->name); } else if (rsr & PktRxOk) { struct sk_buff *skb; pktlen = GetWord(XIRCREG0_RBC); bytes_rcvd += pktlen; pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen); /* 1 extra so we can use insw */ skb = netdev_alloc_skb(dev, pktlen + 3); if (!skb) { pr_notice("low memory, packet dropped (size=%u)\n", pktlen); dev->stats.rx_dropped++; } else { /* okay get the packet */ skb_reserve(skb, 2); if (lp->silicon == 0 ) { /* work around a hardware bug */ unsigned rhsa; /* receive start address */ SelectPage(5); rhsa = GetWord(XIRCREG5_RHSA0); SelectPage(0); rhsa += 3; /* skip control infos */ if (rhsa >= 0x8000) rhsa = 0; if (rhsa + pktlen > 0x8000) { unsigned i; u_char *buf = skb_put(skb, pktlen); for (i=0; i < pktlen ; i++, rhsa++) { buf[i] = GetByte(XIRCREG_EDP); if (rhsa == 0x8000) { rhsa = 0; i--; } } } else { insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen), (pktlen+1)>>1); } } #if 0 else if (lp->mohawk) { /* To use this 32 bit access we should use * a manual optimized loop * Also the words are swapped, we can get more * performance 
by using 32 bit access and swapping * the words in a register. Will need this for cardbus * * Note: don't forget to change the ALLOC_SKB to .. +3 */ unsigned i; u_long *p = skb_put(skb, pktlen); register u_long a; unsigned int edpreg = ioaddr+XIRCREG_EDP-2; for (i=0; i < len ; i += 4, p++) { a = inl(edpreg); __asm__("rorl $16,%0\n\t" :"=q" (a) : "0" (a)); *p = a; } } #endif else { insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen), (pktlen+1)>>1); } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pktlen; if (!(rsr & PhyPkt)) dev->stats.multicast++; } } else { /* bad packet */ pr_debug("rsr=%#02x\n", rsr); } if (rsr & PktTooLong) { dev->stats.rx_frame_errors++; pr_debug("%s: Packet too long\n", dev->name); } if (rsr & CRCErr) { dev->stats.rx_crc_errors++; pr_debug("%s: CRC error\n", dev->name); } if (rsr & AlignErr) { dev->stats.rx_fifo_errors++; /* okay ? */ pr_debug("%s: Alignment error\n", dev->name); } /* clear the received/dropped/error packet */ PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */ /* get the new ethernet status */ eth_status = GetByte(XIRCREG_ESR); } if (rx_status & 0x10) { /* Receive overrun */ dev->stats.rx_over_errors++; PutByte(XIRCREG_CR, ClearRxOvrun); pr_debug("receive overrun cleared\n"); } /***** transmit section ******/ if (int_status & PktTxed) { unsigned n, nn; n = lp->last_ptr_value; nn = GetByte(XIRCREG0_PTR); lp->last_ptr_value = nn; if (nn < n) /* rollover */ dev->stats.tx_packets += 256 - n; else if (n == nn) { /* happens sometimes - don't know why */ pr_debug("PTR not changed?\n"); } else dev->stats.tx_packets += lp->last_ptr_value - n; netif_wake_queue(dev); } if (tx_status & 0x0002) { /* Execessive collissions */ pr_debug("tx restarted due to execssive collissions\n"); PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ } if (tx_status & 0x0040) dev->stats.tx_aborted_errors++; /* recalculate our work chunk so that we limit the duration of this * ISR 
to about 1/10 of a second. * Calculate only if we received a reasonable amount of bytes. */ if (bytes_rcvd > 1000) { u_long duration = jiffies - start_ticks; if (duration >= HZ/10) { /* if more than about 1/10 second */ maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration; if (maxrx_bytes < 2000) maxrx_bytes = 2000; else if (maxrx_bytes > 22000) maxrx_bytes = 22000; pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n", maxrx_bytes, bytes_rcvd, duration); } else if (!duration && maxrx_bytes < 22000) { /* now much faster */ maxrx_bytes += 2000; if (maxrx_bytes > 22000) maxrx_bytes = 22000; pr_debug("set maxrx=%u\n", maxrx_bytes); } } leave: if (lockup_hack) { if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0) goto loop_entry; } SelectPage(saved_page); PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */ /* Instead of dropping packets during a receive, we could * force an interrupt with this command: * PutByte(XIRCREG_CR, EnableIntr|ForceIntr); */ return IRQ_HANDLED; } /* xirc2ps_interrupt */ /*====================================================================*/ static void xirc2ps_tx_timeout_task(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, tx_timeout_task); struct net_device *dev = local->dev; /* reset the card */ do_reset(dev,1); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } static void xirc_tx_timeout(struct net_device *dev) { local_info_t *lp = netdev_priv(dev); dev->stats.tx_errors++; netdev_notice(dev, "transmit timed out\n"); schedule_work(&lp->tx_timeout_task); } static netdev_tx_t do_start_xmit(struct sk_buff *skb, struct net_device *dev) { local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int okay; unsigned freespace; unsigned pktlen = skb->len; pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n", skb, dev, pktlen); /* adjust the packet length to min. required * and hope that the buffer is large enough * to provide some random data. 
* fixme: For Mohawk we can change this by sending * a larger packetlen than we actually have; the chip will * pad this in his buffer with random bytes */ if (pktlen < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; pktlen = ETH_ZLEN; } netif_stop_queue(dev); SelectPage(0); PutWord(XIRCREG0_TRS, (u_short)pktlen+2); freespace = GetWord(XIRCREG0_TSO); okay = freespace & 0x8000; freespace &= 0x7fff; /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ okay = pktlen +2 < freespace; pr_debug("%s: avail. tx space=%u%s\n", dev->name, freespace, okay ? " (okay)":" (not enough)"); if (!okay) { /* not enough space */ return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ } /* send the packet */ PutWord(XIRCREG_EDP, (u_short)pktlen); outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1); if (pktlen & 1) PutByte(XIRCREG_EDP, skb->data[pktlen-1]); if (lp->mohawk) PutByte(XIRCREG_CR, TransmitPacket|EnableIntr); dev_kfree_skb (skb); dev->stats.tx_bytes += pktlen; netif_start_queue(dev); return NETDEV_TX_OK; } struct set_address_info { int reg_nr; int page_nr; int mohawk; unsigned int ioaddr; }; static void set_address(struct set_address_info *sa_info, char *addr) { unsigned int ioaddr = sa_info->ioaddr; int i; for (i = 0; i < 6; i++) { if (sa_info->reg_nr > 15) { sa_info->reg_nr = 8; sa_info->page_nr++; SelectPage(sa_info->page_nr); } if (sa_info->mohawk) PutByte(sa_info->reg_nr++, addr[5 - i]); else PutByte(sa_info->reg_nr++, addr[i]); } } /**************** * Set all addresses: This first one is the individual address, * the next 9 addresses are taken from the multicast list and * the rest is filled with the individual address. */ static void set_addresses(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; local_info_t *lp = netdev_priv(dev); struct netdev_hw_addr *ha; struct set_address_info sa_info; int i; /* * Setup the info structure so that by first set_address call it will do * SelectPage with the right page number. 
Hence these ones here. */ sa_info.reg_nr = 15 + 1; sa_info.page_nr = 0x50 - 1; sa_info.mohawk = lp->mohawk; sa_info.ioaddr = ioaddr; set_address(&sa_info, dev->dev_addr); i = 0; netdev_for_each_mc_addr(ha, dev) { if (i++ == 9) break; set_address(&sa_info, ha->addr); } while (i++ < 9) set_address(&sa_info, dev->dev_addr); SelectPage(0); } /**************** * Set or clear the multicast filter for this adaptor. * We can filter up to 9 addresses, if more are requested we set * multicast promiscuous mode. */ static void set_multicast_list(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; unsigned value; SelectPage(0x42); value = GetByte(XIRCREG42_SWC1) & 0xC0; if (dev->flags & IFF_PROMISC) { /* snoop */ PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) { PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ } else if (!netdev_mc_empty(dev)) { /* the chip can filter 9 addresses perfectly */ PutByte(XIRCREG42_SWC1, value | 0x01); SelectPage(0x40); PutByte(XIRCREG40_CMD0, Offline); set_addresses(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } else { /* standard usage */ PutByte(XIRCREG42_SWC1, value | 0x00); } SelectPage(0); } static int do_config(struct net_device *dev, struct ifmap *map) { local_info_t *local = netdev_priv(dev); pr_debug("do_config(%p)\n", dev); if (map->port != 255 && map->port != dev->if_port) { if (map->port > 4) return -EINVAL; if (!map->port) { local->probe_port = 1; dev->if_port = 1; } else { local->probe_port = 0; dev->if_port = map->port; } netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]); do_reset(dev,1); /* not the fine way :-) */ } return 0; } /**************** * Open the driver */ static int do_open(struct net_device *dev) { local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_open(%p)\n", dev); /* Check that the PCMCIA card is still here. 
*/ /* Physical device present signature. */ if (!pcmcia_dev_present(link)) return -ENODEV; /* okay */ link->open++; netif_start_queue(dev); do_reset(dev,1); return 0; } static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { local_info_t *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; struct mii_ioctl_data *data = if_mii(rq); pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", dev->name, rq->ifr_ifrn.ifrn_name, cmd, data->phy_id, data->reg_num, data->val_in, data->val_out); if (!local->mohawk) return -EOPNOTSUPP; switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = 0; /* we have only this address */ /* fall through */ case SIOCGMIIREG: /* Read the specified MII register. */ data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); break; case SIOCSMIIREG: /* Write the specified MII register */ mii_wr(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in, 16); break; default: return -EOPNOTSUPP; } return 0; } static void hardreset(struct net_device *dev) { local_info_t *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; SelectPage(4); udelay(1); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ msleep(40); /* wait 40 msec */ if (local->mohawk) PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */ else PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */ msleep(20); /* wait 20 msec */ } static void do_reset(struct net_device *dev, int full) { local_info_t *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned value; pr_debug("%s: do_reset(%p,%d)\n", dev? 
dev->name:"eth?", dev, full); hardreset(dev); PutByte(XIRCREG_CR, SoftReset); /* set */ msleep(20); /* wait 20 msec */ PutByte(XIRCREG_CR, 0); /* clear */ msleep(40); /* wait 40 msec */ if (local->mohawk) { SelectPage(4); /* set pin GP1 and GP2 to output (0x0c) * set GP1 to low to power up the ML6692 (0x00) * set GP2 to high to power up the 10Mhz chip (0x02) */ PutByte(XIRCREG4_GPR0, 0x0e); } /* give the circuits some time to power up */ msleep(500); /* about 500ms */ local->last_ptr_value = 0; local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4 : (GetByte(XIRCREG4_BOV) & 0x30) >> 4; if (local->probe_port) { if (!local->mohawk) { SelectPage(4); PutByte(XIRCREG4_GPR0, 4); local->probe_port = 0; } } else if (dev->if_port == 2) { /* enable 10Base2 */ SelectPage(0x42); PutByte(XIRCREG42_SWC1, 0xC0); } else { /* enable 10BaseT */ SelectPage(0x42); PutByte(XIRCREG42_SWC1, 0x80); } msleep(40); /* wait 40 msec to let it complete */ #if 0 { SelectPage(0); value = GetByte(XIRCREG_ESR); /* read the ESR */ pr_debug("%s: ESR is: %#02x\n", dev->name, value); } #endif /* setup the ECR */ SelectPage(1); PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */ value = GetByte(XIRCREG1_ECR); #if 0 if (local->mohawk) value |= DisableLinkPulse; PutByte(XIRCREG1_ECR, value); #endif pr_debug("%s: ECR is: %#02x\n", dev->name, value); SelectPage(0x42); PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ if (local->silicon != 1) { /* set the local memory dividing line. * The comments in the sample code say that this is only * settable with the scipper version 2 which is revision 0. * Always for CE3 cards */ SelectPage(2); PutWord(XIRCREG2_RBS, 0x2000); } if (full) set_addresses(dev); /* Hardware workaround: * The receive byte pointer after reset is off by 1 so we need * to move the offset pointer back to 0. 
*/ SelectPage(0); PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */ /* setup MAC IMRs and clear status registers */ SelectPage(0x40); /* Bit 7 ... bit 0 */ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */ if (full && local->mohawk && init_mii(dev)) { if (dev->if_port == 4 || local->dingo || local->new_mii) { netdev_info(dev, "MII selected\n"); SelectPage(2); PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08); msleep(20); } else { netdev_info(dev, "MII detected; using 10mbs\n"); SelectPage(0x42); if (dev->if_port == 2) /* enable 10Base2 */ PutByte(XIRCREG42_SWC1, 0xC0); else /* enable 10BaseT */ PutByte(XIRCREG42_SWC1, 0x80); msleep(40); /* wait 40 msec to let it complete */ } if (full_duplex) PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex)); } else { /* No MII */ SelectPage(0); value = GetByte(XIRCREG_ESR); /* read the ESR */ dev->if_port = (value & MediaSelect) ? 
1 : 2; } /* configure the LEDs */ SelectPage(2); if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */ PutByte(XIRCREG2_LED, 0x3b); else /* Coax: Not-Collision and Activity */ PutByte(XIRCREG2_LED, 0x3a); if (local->dingo) PutByte(0x0b, 0x04); /* 100 Mbit LED */ /* enable receiver and put the mac online */ if (full) { set_multicast_list(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } /* setup Ethernet IMR and enable interrupts */ SelectPage(1); PutByte(XIRCREG1_IMR0, 0xff); udelay(1); SelectPage(0); PutByte(XIRCREG_CR, EnableIntr); if (local->modem && !local->dingo) { /* do some magic */ if (!(GetByte(0x10) & 0x01)) PutByte(0x10, 0x11); /* unmask master-int bit */ } if (full) netdev_info(dev, "media %s, silicon revision %d\n", if_names[dev->if_port], local->silicon); /* We should switch back to page 0 to avoid a bug in revision 0 * where regs with offset below 8 can't be read after an access * to the MAC registers */ SelectPage(0); } /**************** * Initialize the Media-Independent-Interface * Returns: True if we have a good MII */ static int init_mii(struct net_device *dev) { local_info_t *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned control, status, linkpartner; int i; if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */ dev->if_port = if_port; local->probe_port = 0; return 1; } status = mii_rd(ioaddr, 0, 1); if ((status & 0xff00) != 0x7800) return 0; /* No MII */ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff); if (local->probe_port) control = 0x1000; /* auto neg */ else if (dev->if_port == 4) control = 0x2000; /* no auto neg, 100mbs mode */ else control = 0x0000; /* no auto neg, 10mbs mode */ mii_wr(ioaddr, 0, 0, control, 16); udelay(100); control = mii_rd(ioaddr, 0, 0); if (control & 0x0400) { netdev_notice(dev, "can't take PHY out of isolation mode\n"); local->probe_port = 0; return 0; } if (local->probe_port) { /* according to the DP83840A specs the auto negotiation 
process * may take up to 3.5 sec, so we use this also for our ML6692 * Fixme: Better to use a timer here! */ for (i=0; i < 35; i++) { msleep(100); /* wait 100 msec */ status = mii_rd(ioaddr, 0, 1); if ((status & 0x0020) && (status & 0x0004)) break; } if (!(status & 0x0020)) { netdev_info(dev, "autonegotiation failed; using 10mbs\n"); if (!local->new_mii) { control = 0x0000; mii_wr(ioaddr, 0, 0, control, 16); udelay(100); SelectPage(0); dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2; } } else { linkpartner = mii_rd(ioaddr, 0, 5); netdev_info(dev, "MII link partner: %04x\n", linkpartner); if (linkpartner & 0x0080) { dev->if_port = 4; } else dev->if_port = 1; } } return 1; } static void do_powerdown(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; pr_debug("do_powerdown(%p)\n", dev); SelectPage(4); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ SelectPage(0); } static int do_stop(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_stop(%p)\n", dev); if (!link) return -ENODEV; netif_stop_queue(dev); SelectPage(0); PutByte(XIRCREG_CR, 0); /* disable interrupts */ SelectPage(0x01); PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */ SelectPage(4); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ SelectPage(0); link->open--; return 0; } static const struct pcmcia_device_id xirc2ps_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM33", 0x2e3ee845, 0x80609023), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Xircom", "CreditCard 
Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x010a), PCMCIA_DEVICE_PROD_ID13("Toshiba Information Systems", "TPCENET", 0x1b3b94fe, 0xf381c1a2), PCMCIA_DEVICE_PROD_ID13("Xircom", "CE3-10/100", 0x2e3ee845, 0x0ec0ac37), PCMCIA_DEVICE_PROD_ID13("Xircom", "PS-CE2-10", 0x2e3ee845, 0x947d9073), PCMCIA_DEVICE_PROD_ID13("Xircom", "R2E-100BTX", 0x2e3ee845, 0x2464a6e3), PCMCIA_DEVICE_PROD_ID13("Xircom", "RE-10", 0x2e3ee845, 0x3e08d609), PCMCIA_DEVICE_PROD_ID13("Xircom", "XE2000", 0x2e3ee845, 0xf7188e46), PCMCIA_DEVICE_PROD_ID12("Compaq", "Ethernet LAN Card", 0x54f7c49c, 0x9fd2f0a2), PCMCIA_DEVICE_PROD_ID12("Compaq", "Netelligent 10/100 PC Card", 0x54f7c49c, 0xefe96769), PCMCIA_DEVICE_PROD_ID12("Intel", "EtherExpress(TM) PRO/100 PC Card Mobile Adapter16", 0x816cc815, 0x174397db), PCMCIA_DEVICE_PROD_ID12("Toshiba", "10/100 Ethernet PC Card", 0x44a09d9c, 0xb44deecf), /* also matches CFE-10 cards! */ /* PCMCIA_DEVICE_MANF_CARD(0x0105, 0x010a), */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, xirc2ps_ids); static struct pcmcia_driver xirc2ps_cs_driver = { .owner = THIS_MODULE, .name = "xirc2ps_cs", .probe = xirc2ps_probe, .remove = xirc2ps_detach, .id_table = xirc2ps_ids, .suspend = xirc2ps_suspend, .resume = xirc2ps_resume, }; static int __init init_xirc2ps_cs(void) { return pcmcia_register_driver(&xirc2ps_cs_driver); } static void __exit exit_xirc2ps_cs(void) { pcmcia_unregister_driver(&xirc2ps_cs_driver); } module_init(init_xirc2ps_cs); module_exit(exit_xirc2ps_cs); #ifndef MODULE static int __init setup_xirc2ps_cs(char *str) { /* if_port, full_duplex, do_sound, lockup_hack */ int ints[10] = { -1 }; str = get_options(str, 9, ints); #define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; } MAYBE_SET(if_port, 3); MAYBE_SET(full_duplex, 4); MAYBE_SET(do_sound, 5); MAYBE_SET(lockup_hack, 6); #undef MAYBE_SET return 1; } __setup("xirc2ps_cs=", setup_xirc2ps_cs); #endif
gpl-2.0
AOSPA/android_kernel_lge_mako
sound/soc/atmel/sam9g20_wm8731.c
5073
6981
/* * sam9g20_wm8731 -- SoC audio for AT91SAM9G20-based * ATMEL AT91SAM9G20ek board. * * Copyright (C) 2005 SAN People * Copyright (C) 2008 Atmel * * Authors: Sedji Gaouaou <sedji.gaouaou@atmel.com> * * Based on ati_b1_wm8731.c by: * Frank Mandarino <fmandarino@endrelia.com> * Copyright 2006 Endrelia Technologies Inc. * Based on corgi.c by: * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/atmel-ssc.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/gpio.h> #include "../codecs/wm8731.h" #include "atmel-pcm.h" #include "atmel_ssc_dai.h" #define MCLK_RATE 12000000 /* * As shipped the board does not have inputs. However, it is relatively * straightforward to modify the board to hook them up so support is left * in the driver. 
*/ #undef ENABLE_MIC_INPUT static struct clk *mclk; static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret; /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; return 0; } static struct snd_soc_ops at91sam9g20ek_ops = { .hw_params = at91sam9g20ek_hw_params, }; static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level) { static int mclk_on; int ret = 0; switch (level) { case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: if (!mclk_on) ret = clk_enable(mclk); if (ret == 0) mclk_on = 1; break; case SND_SOC_BIAS_OFF: case SND_SOC_BIAS_STANDBY: if (mclk_on) clk_disable(mclk); mclk_on = 0; break; } return ret; } static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = { SND_SOC_DAPM_MIC("Int Mic", NULL), SND_SOC_DAPM_SPK("Ext Spk", NULL), }; static const struct snd_soc_dapm_route intercon[] = { /* speaker connected to LHPOUT */ {"Ext Spk", NULL, "LHPOUT"}, /* mic is connected to Mic Jack, with WM8731 Mic Bias */ {"MICIN", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Int Mic"}, }; /* * Logic for a wm8731 as connected on a at91sam9g20ek board. 
*/ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; printk(KERN_DEBUG "at91sam9g20ek_wm8731 " ": at91sam9g20ek_wm8731_init() called\n"); ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK, MCLK_RATE, SND_SOC_CLOCK_IN); if (ret < 0) { printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret); return ret; } /* Add specific widgets */ snd_soc_dapm_new_controls(dapm, at91sam9g20ek_dapm_widgets, ARRAY_SIZE(at91sam9g20ek_dapm_widgets)); /* Set up specific audio path interconnects */ snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); /* not connected */ snd_soc_dapm_nc_pin(dapm, "RLINEIN"); snd_soc_dapm_nc_pin(dapm, "LLINEIN"); #ifdef ENABLE_MIC_INPUT snd_soc_dapm_enable_pin(dapm, "Int Mic"); #else snd_soc_dapm_nc_pin(dapm, "Int Mic"); #endif /* always connected */ snd_soc_dapm_enable_pin(dapm, "Ext Spk"); return 0; } static struct snd_soc_dai_link at91sam9g20ek_dai = { .name = "WM8731", .stream_name = "WM8731 PCM", .cpu_dai_name = "atmel-ssc-dai.0", .codec_dai_name = "wm8731-hifi", .init = at91sam9g20ek_wm8731_init, .platform_name = "atmel-pcm-audio", .codec_name = "wm8731.0-001b", .ops = &at91sam9g20ek_ops, }; static struct snd_soc_card snd_soc_at91sam9g20ek = { .name = "AT91SAMG20-EK", .owner = THIS_MODULE, .dai_link = &at91sam9g20ek_dai, .num_links = 1, .set_bias_level = at91sam9g20ek_set_bias_level, }; static struct platform_device *at91sam9g20ek_snd_device; static int __init at91sam9g20ek_init(void) { struct clk *pllb; int ret; if (!(machine_is_at91sam9g20ek() || machine_is_at91sam9g20ek_2mmc())) return -ENODEV; ret = atmel_ssc_set_audio(0); if (ret != 0) { pr_err("Failed to set SSC 0 for audio: %d\n", ret); return ret; } /* * Codec MCLK is supplied by PCK0 - set it up. 
*/ mclk = clk_get(NULL, "pck0"); if (IS_ERR(mclk)) { printk(KERN_ERR "ASoC: Failed to get MCLK\n"); ret = PTR_ERR(mclk); goto err; } pllb = clk_get(NULL, "pllb"); if (IS_ERR(pllb)) { printk(KERN_ERR "ASoC: Failed to get PLLB\n"); ret = PTR_ERR(pllb); goto err_mclk; } ret = clk_set_parent(mclk, pllb); clk_put(pllb); if (ret != 0) { printk(KERN_ERR "ASoC: Failed to set MCLK parent\n"); goto err_mclk; } clk_set_rate(mclk, MCLK_RATE); at91sam9g20ek_snd_device = platform_device_alloc("soc-audio", -1); if (!at91sam9g20ek_snd_device) { printk(KERN_ERR "ASoC: Platform device allocation failed\n"); ret = -ENOMEM; goto err_mclk; } platform_set_drvdata(at91sam9g20ek_snd_device, &snd_soc_at91sam9g20ek); ret = platform_device_add(at91sam9g20ek_snd_device); if (ret) { printk(KERN_ERR "ASoC: Platform device allocation failed\n"); goto err_device_add; } return ret; err_device_add: platform_device_put(at91sam9g20ek_snd_device); err_mclk: clk_put(mclk); mclk = NULL; err: return ret; } static void __exit at91sam9g20ek_exit(void) { platform_device_unregister(at91sam9g20ek_snd_device); at91sam9g20ek_snd_device = NULL; clk_put(mclk); mclk = NULL; } module_init(at91sam9g20ek_init); module_exit(at91sam9g20ek_exit); /* Module information */ MODULE_AUTHOR("Sedji Gaouaou <sedji.gaouaou@atmel.com>"); MODULE_DESCRIPTION("ALSA SoC AT91SAM9G20EK_WM8731"); MODULE_LICENSE("GPL");
gpl-2.0
dlespiau/linux
net/ieee802154/dgram.c
466
9797
/* * IEEE 802.15.4 dgram socket interface * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> */ #include <linux/net.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/list.h> #include <linux/slab.h> #include <net/sock.h> #include <net/af_ieee802154.h> #include <net/ieee802154.h> #include <net/ieee802154_netdev.h> #include <asm/ioctls.h> #include "af802154.h" static HLIST_HEAD(dgram_head); static DEFINE_RWLOCK(dgram_lock); struct dgram_sock { struct sock sk; struct ieee802154_addr src_addr; struct ieee802154_addr dst_addr; unsigned int bound:1; unsigned int want_ack:1; }; static inline struct dgram_sock *dgram_sk(const struct sock *sk) { return container_of(sk, struct dgram_sock, sk); } static void dgram_hash(struct sock *sk) { write_lock_bh(&dgram_lock); sk_add_node(sk, &dgram_head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&dgram_lock); } static void dgram_unhash(struct sock *sk) { write_lock_bh(&dgram_lock); if (sk_del_node_init(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&dgram_lock); } static int dgram_init(struct sock *sk) { struct dgram_sock *ro = dgram_sk(sk); ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; ro->dst_addr.pan_id = 0xffff; ro->want_ack = 1; 
memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); return 0; } static void dgram_close(struct sock *sk, long timeout) { sk_common_release(sk); } static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) { struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; struct dgram_sock *ro = dgram_sk(sk); int err = -EINVAL; struct net_device *dev; lock_sock(sk); ro->bound = 0; if (len < sizeof(*addr)) goto out; if (addr->family != AF_IEEE802154) goto out; dev = ieee802154_get_dev(sock_net(sk), &addr->addr); if (!dev) { err = -ENODEV; goto out; } if (dev->type != ARPHRD_IEEE802154) { err = -ENODEV; goto out_put; } memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr)); ro->bound = 1; err = 0; out_put: dev_put(dev); out: release_sock(sk); return err; } static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; unsigned long amount; amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount * of this packet since that is all * that will be read. 
*/ /* FIXME: parse the header for more correct value */ amount = skb->len - (3+8+8); } spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } } return -ENOIOCTLCMD; } /* FIXME: autobind */ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, int len) { struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; struct dgram_sock *ro = dgram_sk(sk); int err = 0; if (len < sizeof(*addr)) return -EINVAL; if (addr->family != AF_IEEE802154) return -EINVAL; lock_sock(sk); if (!ro->bound) { err = -ENETUNREACH; goto out; } memcpy(&ro->dst_addr, &addr->addr, sizeof(struct ieee802154_addr)); out: release_sock(sk); return err; } static int dgram_disconnect(struct sock *sk, int flags) { struct dgram_sock *ro = dgram_sk(sk); lock_sock(sk); ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); release_sock(sk); return 0; } static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; unsigned int mtu; struct sk_buff *skb; struct dgram_sock *ro = dgram_sk(sk); int hlen, tlen; int err; if (msg->msg_flags & MSG_OOB) { pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags); return -EOPNOTSUPP; } if (!ro->bound) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); else dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr); if (!dev) { pr_debug("no dev\n"); err = -ENXIO; goto out; } mtu = dev->mtu; pr_debug("name = %s, mtu = %u\n", dev->name, mtu); if (size > mtu) { pr_debug("size = %Zu, mtu = %u\n", size, mtu); err = -EINVAL; goto out_dev; } hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; skb = sock_alloc_send_skb(sk, hlen + tlen + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; skb_reserve(skb, hlen); skb_reset_network_header(skb); mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; if (ro->want_ack) mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; mac_cb(skb)->seq = 
ieee802154_mlme_ops(dev)->get_dsn(dev); err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, ro->bound ? &ro->src_addr : NULL, size); if (err < 0) goto out_skb; skb_reset_mac_header(skb); err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err < 0) goto out_skb; skb->dev = dev; skb->sk = sk; skb->protocol = htons(ETH_P_IEEE802154); dev_put(dev); err = dev_queue_xmit(skb); if (err > 0) err = net_xmit_errno(err); return err ?: size; out_skb: kfree_skb(skb); out_dev: dev_put(dev); out: return err; } static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; struct sockaddr_ieee802154 *saddr; saddr = (struct sockaddr_ieee802154 *)msg->msg_name; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* FIXME: skip headers if necessary ?! 
*/ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); if (saddr) { saddr->family = AF_IEEE802154; saddr->addr = mac_cb(skb)->sa; } if (addr_len) *addr_len = sizeof(*saddr); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; } static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) { if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id, u16 short_addr, struct dgram_sock *ro) { if (!ro->bound) return 1; if (ro->src_addr.addr_type == IEEE802154_ADDR_LONG && !memcmp(ro->src_addr.hwaddr, hw_addr, IEEE802154_ADDR_LEN)) return 1; if (ro->src_addr.addr_type == IEEE802154_ADDR_SHORT && pan_id == ro->src_addr.pan_id && short_addr == ro->src_addr.short_addr) return 1; return 0; } int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) { struct sock *sk, *prev = NULL; int ret = NET_RX_SUCCESS; u16 pan_id, short_addr; /* Data frame processing */ BUG_ON(dev->type != ARPHRD_IEEE802154); pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); read_lock(&dgram_lock); sk_for_each(sk, &dgram_head) { if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, dgram_sk(sk))) { if (prev) { struct sk_buff *clone; clone = skb_clone(skb, GFP_ATOMIC); if (clone) dgram_rcv_skb(prev, clone); } prev = sk; } } if (prev) dgram_rcv_skb(prev, skb); else { kfree_skb(skb); ret = NET_RX_DROP; } read_unlock(&dgram_lock); return ret; } static int dgram_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct dgram_sock *ro = dgram_sk(sk); int val, len; if (level != SOL_IEEE802154) return -EOPNOTSUPP; if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); switch (optname) { case WPAN_WANTACK: val = 
ro->want_ack; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int dgram_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct dgram_sock *ro = dgram_sk(sk); int val; int err = 0; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; lock_sock(sk); switch (optname) { case WPAN_WANTACK: ro->want_ack = !!val; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } struct proto ieee802154_dgram_prot = { .name = "IEEE-802.15.4-MAC", .owner = THIS_MODULE, .obj_size = sizeof(struct dgram_sock), .init = dgram_init, .close = dgram_close, .bind = dgram_bind, .sendmsg = dgram_sendmsg, .recvmsg = dgram_recvmsg, .hash = dgram_hash, .unhash = dgram_unhash, .connect = dgram_connect, .disconnect = dgram_disconnect, .ioctl = dgram_ioctl, .getsockopt = dgram_getsockopt, .setsockopt = dgram_setsockopt, };
gpl-2.0
ChronoMonochrome/android_kernel_lenovo_msm8916
drivers/media/tuners/xc5000.c
978
35375
/* * Driver for Xceive XC5000 "QAM/8VSB single chip tuner" * * Copyright (c) 2007 Xceive Corporation * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org> * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/videodev2.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include "dvb_frontend.h" #include "xc5000.h" #include "tuner-i2c.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); static int no_poweroff; module_param(no_poweroff, int, 0644); MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n" "\t\t1 keep device energized and with tuner ready all the times.\n" "\t\tFaster, but consumes more power and keeps the device hotter"); static DEFINE_MUTEX(xc5000_list_mutex); static LIST_HEAD(hybrid_tuner_instance_list); #define dprintk(level, fmt, arg...) 
if (debug >= level) \ printk(KERN_INFO "%s: " fmt, "xc5000", ## arg) struct xc5000_priv { struct tuner_i2c_props i2c_props; struct list_head hybrid_tuner_instance_list; u32 if_khz; u16 xtal_khz; u32 freq_hz, freq_offset; u32 bandwidth; u8 video_standard; u8 rf_mode; u8 radio_input; int chip_id; u16 pll_register_no; u8 init_status_supported; u8 fw_checksum_supported; }; /* Misc Defines */ #define MAX_TV_STANDARD 24 #define XC_MAX_I2C_WRITE_LENGTH 64 /* Signal Types */ #define XC_RF_MODE_AIR 0 #define XC_RF_MODE_CABLE 1 /* Result codes */ #define XC_RESULT_SUCCESS 0 #define XC_RESULT_RESET_FAILURE 1 #define XC_RESULT_I2C_WRITE_FAILURE 2 #define XC_RESULT_I2C_READ_FAILURE 3 #define XC_RESULT_OUT_OF_RANGE 5 /* Product id */ #define XC_PRODUCT_ID_FW_NOT_LOADED 0x2000 #define XC_PRODUCT_ID_FW_LOADED 0x1388 /* Registers */ #define XREG_INIT 0x00 #define XREG_VIDEO_MODE 0x01 #define XREG_AUDIO_MODE 0x02 #define XREG_RF_FREQ 0x03 #define XREG_D_CODE 0x04 #define XREG_IF_OUT 0x05 #define XREG_SEEK_MODE 0x07 #define XREG_POWER_DOWN 0x0A /* Obsolete */ /* Set the output amplitude - SIF for analog, DTVP/DTVN for digital */ #define XREG_OUTPUT_AMP 0x0B #define XREG_SIGNALSOURCE 0x0D /* 0=Air, 1=Cable */ #define XREG_SMOOTHEDCVBS 0x0E #define XREG_XTALFREQ 0x0F #define XREG_FINERFREQ 0x10 #define XREG_DDIMODE 0x11 #define XREG_ADC_ENV 0x00 #define XREG_QUALITY 0x01 #define XREG_FRAME_LINES 0x02 #define XREG_HSYNC_FREQ 0x03 #define XREG_LOCK 0x04 #define XREG_FREQ_ERROR 0x05 #define XREG_SNR 0x06 #define XREG_VERSION 0x07 #define XREG_PRODUCT_ID 0x08 #define XREG_BUSY 0x09 #define XREG_BUILD 0x0D #define XREG_TOTALGAIN 0x0F #define XREG_FW_CHECKSUM 0x12 #define XREG_INIT_STATUS 0x13 /* Basic firmware description. This will remain with the driver for documentation purposes. This represents an I2C firmware file encoded as a string of unsigned char. 
Format is as follows: char[0 ]=len0_MSB -> len = len_MSB * 256 + len_LSB char[1 ]=len0_LSB -> length of first write transaction char[2 ]=data0 -> first byte to be sent char[3 ]=data1 char[4 ]=data2 char[ ]=... char[M ]=dataN -> last byte to be sent char[M+1]=len1_MSB -> len = len_MSB * 256 + len_LSB char[M+2]=len1_LSB -> length of second write transaction char[M+3]=data0 char[M+4]=data1 ... etc. The [len] value should be interpreted as follows: len= len_MSB _ len_LSB len=1111_1111_1111_1111 : End of I2C_SEQUENCE len=0000_0000_0000_0000 : Reset command: Do hardware reset len=0NNN_NNNN_NNNN_NNNN : Normal transaction: number of bytes = {1:32767) len=1WWW_WWWW_WWWW_WWWW : Wait command: wait for {1:32767} ms For the RESET and WAIT commands, the two following bytes will contain immediately the length of the following transaction. */ struct XC_TV_STANDARD { char *Name; u16 AudioMode; u16 VideoMode; }; /* Tuner standards */ #define MN_NTSC_PAL_BTSC 0 #define MN_NTSC_PAL_A2 1 #define MN_NTSC_PAL_EIAJ 2 #define MN_NTSC_PAL_Mono 3 #define BG_PAL_A2 4 #define BG_PAL_NICAM 5 #define BG_PAL_MONO 6 #define I_PAL_NICAM 7 #define I_PAL_NICAM_MONO 8 #define DK_PAL_A2 9 #define DK_PAL_NICAM 10 #define DK_PAL_MONO 11 #define DK_SECAM_A2DK1 12 #define DK_SECAM_A2LDK3 13 #define DK_SECAM_A2MONO 14 #define L_SECAM_NICAM 15 #define LC_SECAM_NICAM 16 #define DTV6 17 #define DTV8 18 #define DTV7_8 19 #define DTV7 20 #define FM_Radio_INPUT2 21 #define FM_Radio_INPUT1 22 #define FM_Radio_INPUT1_MONO 23 static struct XC_TV_STANDARD XC5000_Standard[MAX_TV_STANDARD] = { {"M/N-NTSC/PAL-BTSC", 0x0400, 0x8020}, {"M/N-NTSC/PAL-A2", 0x0600, 0x8020}, {"M/N-NTSC/PAL-EIAJ", 0x0440, 0x8020}, {"M/N-NTSC/PAL-Mono", 0x0478, 0x8020}, {"B/G-PAL-A2", 0x0A00, 0x8049}, {"B/G-PAL-NICAM", 0x0C04, 0x8049}, {"B/G-PAL-MONO", 0x0878, 0x8059}, {"I-PAL-NICAM", 0x1080, 0x8009}, {"I-PAL-NICAM-MONO", 0x0E78, 0x8009}, {"D/K-PAL-A2", 0x1600, 0x8009}, {"D/K-PAL-NICAM", 0x0E80, 0x8009}, {"D/K-PAL-MONO", 0x1478, 0x8009}, 
{"D/K-SECAM-A2 DK1", 0x1200, 0x8009}, {"D/K-SECAM-A2 L/DK3", 0x0E00, 0x8009}, {"D/K-SECAM-A2 MONO", 0x1478, 0x8009}, {"L-SECAM-NICAM", 0x8E82, 0x0009}, {"L'-SECAM-NICAM", 0x8E82, 0x4009}, {"DTV6", 0x00C0, 0x8002}, {"DTV8", 0x00C0, 0x800B}, {"DTV7/8", 0x00C0, 0x801B}, {"DTV7", 0x00C0, 0x8007}, {"FM Radio-INPUT2", 0x9802, 0x9002}, {"FM Radio-INPUT1", 0x0208, 0x9002}, {"FM Radio-INPUT1_MONO", 0x0278, 0x9002} }; struct xc5000_fw_cfg { char *name; u16 size; u16 pll_reg; u8 init_status_supported; u8 fw_checksum_supported; }; #define XC5000A_FIRMWARE "dvb-fe-xc5000-1.6.114.fw" static const struct xc5000_fw_cfg xc5000a_1_6_114 = { .name = XC5000A_FIRMWARE, .size = 12401, .pll_reg = 0x806c, }; #define XC5000C_FIRMWARE "dvb-fe-xc5000c-4.1.30.7.fw" static const struct xc5000_fw_cfg xc5000c_41_024_5 = { .name = XC5000C_FIRMWARE, .size = 16497, .pll_reg = 0x13, .init_status_supported = 1, .fw_checksum_supported = 1, }; static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id) { switch (chip_id) { default: case XC5000A: return &xc5000a_1_6_114; case XC5000C: return &xc5000c_41_024_5; } } static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force); static int xc5000_is_firmware_loaded(struct dvb_frontend *fe); static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val); static int xc5000_TunerReset(struct dvb_frontend *fe); static int xc_send_i2c_data(struct xc5000_priv *priv, u8 *buf, int len) { struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) { printk(KERN_ERR "xc5000: I2C write failed (len=%i)\n", len); return XC_RESULT_I2C_WRITE_FAILURE; } return XC_RESULT_SUCCESS; } #if 0 /* This routine is never used because the only time we read data from the i2c bus is when we read registers, and we want that to be an atomic i2c transaction in case we are on a multi-master bus */ static int xc_read_i2c_data(struct xc5000_priv *priv, u8 *buf, int len) 
{ struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = I2C_M_RD, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) { printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", len); return -EREMOTEIO; } return 0; } #endif static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val) { u8 buf[2] = { reg >> 8, reg & 0xff }; u8 bval[2] = { 0, 0 }; struct i2c_msg msg[2] = { { .addr = priv->i2c_props.addr, .flags = 0, .buf = &buf[0], .len = 2 }, { .addr = priv->i2c_props.addr, .flags = I2C_M_RD, .buf = &bval[0], .len = 2 }, }; if (i2c_transfer(priv->i2c_props.adap, msg, 2) != 2) { printk(KERN_WARNING "xc5000: I2C read failed\n"); return -EREMOTEIO; } *val = (bval[0] << 8) | bval[1]; return XC_RESULT_SUCCESS; } static void xc_wait(int wait_ms) { msleep(wait_ms); } static int xc5000_TunerReset(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; int ret; dprintk(1, "%s()\n", __func__); if (fe->callback) { ret = fe->callback(((fe->dvb) && (fe->dvb->priv)) ? 
fe->dvb->priv : priv->i2c_props.adap->algo_data, DVB_FRONTEND_COMPONENT_TUNER, XC5000_TUNER_RESET, 0); if (ret) { printk(KERN_ERR "xc5000: reset failed\n"); return XC_RESULT_RESET_FAILURE; } } else { printk(KERN_ERR "xc5000: no tuner reset callback function, fatal\n"); return XC_RESULT_RESET_FAILURE; } return XC_RESULT_SUCCESS; } static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData) { u8 buf[4]; int WatchDogTimer = 100; int result; buf[0] = (regAddr >> 8) & 0xFF; buf[1] = regAddr & 0xFF; buf[2] = (i2cData >> 8) & 0xFF; buf[3] = i2cData & 0xFF; result = xc_send_i2c_data(priv, buf, 4); if (result == XC_RESULT_SUCCESS) { /* wait for busy flag to clear */ while ((WatchDogTimer > 0) && (result == XC_RESULT_SUCCESS)) { result = xc5000_readreg(priv, XREG_BUSY, (u16 *)buf); if (result == XC_RESULT_SUCCESS) { if ((buf[0] == 0) && (buf[1] == 0)) { /* busy flag cleared */ break; } else { xc_wait(5); /* wait 5 ms */ WatchDogTimer--; } } } } if (WatchDogTimer <= 0) result = XC_RESULT_I2C_WRITE_FAILURE; return result; } static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence) { struct xc5000_priv *priv = fe->tuner_priv; int i, nbytes_to_send, result; unsigned int len, pos, index; u8 buf[XC_MAX_I2C_WRITE_LENGTH]; index = 0; while ((i2c_sequence[index] != 0xFF) || (i2c_sequence[index + 1] != 0xFF)) { len = i2c_sequence[index] * 256 + i2c_sequence[index+1]; if (len == 0x0000) { /* RESET command */ result = xc5000_TunerReset(fe); index += 2; if (result != XC_RESULT_SUCCESS) return result; } else if (len & 0x8000) { /* WAIT command */ xc_wait(len & 0x7FFF); index += 2; } else { /* Send i2c data whilst ensuring individual transactions * do not exceed XC_MAX_I2C_WRITE_LENGTH bytes. 
*/ index += 2; buf[0] = i2c_sequence[index]; buf[1] = i2c_sequence[index + 1]; pos = 2; while (pos < len) { if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2) nbytes_to_send = XC_MAX_I2C_WRITE_LENGTH; else nbytes_to_send = (len - pos + 2); for (i = 2; i < nbytes_to_send; i++) { buf[i] = i2c_sequence[index + pos + i - 2]; } result = xc_send_i2c_data(priv, buf, nbytes_to_send); if (result != XC_RESULT_SUCCESS) return result; pos += nbytes_to_send - 2; } index += len; } } return XC_RESULT_SUCCESS; } static int xc_initialize(struct xc5000_priv *priv) { dprintk(1, "%s()\n", __func__); return xc_write_reg(priv, XREG_INIT, 0); } static int xc_SetTVStandard(struct xc5000_priv *priv, u16 VideoMode, u16 AudioMode, u8 RadioMode) { int ret; dprintk(1, "%s(0x%04x,0x%04x)\n", __func__, VideoMode, AudioMode); if (RadioMode) { dprintk(1, "%s() Standard = %s\n", __func__, XC5000_Standard[RadioMode].Name); } else { dprintk(1, "%s() Standard = %s\n", __func__, XC5000_Standard[priv->video_standard].Name); } ret = xc_write_reg(priv, XREG_VIDEO_MODE, VideoMode); if (ret == XC_RESULT_SUCCESS) ret = xc_write_reg(priv, XREG_AUDIO_MODE, AudioMode); return ret; } static int xc_SetSignalSource(struct xc5000_priv *priv, u16 rf_mode) { dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode, rf_mode == XC_RF_MODE_AIR ? 
"ANTENNA" : "CABLE"); if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) { rf_mode = XC_RF_MODE_CABLE; printk(KERN_ERR "%s(), Invalid mode, defaulting to CABLE", __func__); } return xc_write_reg(priv, XREG_SIGNALSOURCE, rf_mode); } static const struct dvb_tuner_ops xc5000_tuner_ops; static int xc_set_RF_frequency(struct xc5000_priv *priv, u32 freq_hz) { u16 freq_code; dprintk(1, "%s(%u)\n", __func__, freq_hz); if ((freq_hz > xc5000_tuner_ops.info.frequency_max) || (freq_hz < xc5000_tuner_ops.info.frequency_min)) return XC_RESULT_OUT_OF_RANGE; freq_code = (u16)(freq_hz / 15625); /* Starting in firmware version 1.1.44, Xceive recommends using the FINERFREQ for all normal tuning (the doc indicates reg 0x03 should only be used for fast scanning for channel lock) */ return xc_write_reg(priv, XREG_FINERFREQ, freq_code); } static int xc_set_IF_frequency(struct xc5000_priv *priv, u32 freq_khz) { u32 freq_code = (freq_khz * 1024)/1000; dprintk(1, "%s(freq_khz = %d) freq_code = 0x%x\n", __func__, freq_khz, freq_code); return xc_write_reg(priv, XREG_IF_OUT, freq_code); } static int xc_get_ADC_Envelope(struct xc5000_priv *priv, u16 *adc_envelope) { return xc5000_readreg(priv, XREG_ADC_ENV, adc_envelope); } static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz) { int result; u16 regData; u32 tmp; result = xc5000_readreg(priv, XREG_FREQ_ERROR, &regData); if (result != XC_RESULT_SUCCESS) return result; tmp = (u32)regData; (*freq_error_hz) = (tmp * 15625) / 1000; return result; } static int xc_get_lock_status(struct xc5000_priv *priv, u16 *lock_status) { return xc5000_readreg(priv, XREG_LOCK, lock_status); } static int xc_get_version(struct xc5000_priv *priv, u8 *hw_majorversion, u8 *hw_minorversion, u8 *fw_majorversion, u8 *fw_minorversion) { u16 data; int result; result = xc5000_readreg(priv, XREG_VERSION, &data); if (result != XC_RESULT_SUCCESS) return result; (*hw_majorversion) = (data >> 12) & 0x0F; (*hw_minorversion) = (data >> 8) & 
0x0F; (*fw_majorversion) = (data >> 4) & 0x0F; (*fw_minorversion) = data & 0x0F; return 0; } static int xc_get_buildversion(struct xc5000_priv *priv, u16 *buildrev) { return xc5000_readreg(priv, XREG_BUILD, buildrev); } static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz) { u16 regData; int result; result = xc5000_readreg(priv, XREG_HSYNC_FREQ, &regData); if (result != XC_RESULT_SUCCESS) return result; (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100; return result; } static int xc_get_frame_lines(struct xc5000_priv *priv, u16 *frame_lines) { return xc5000_readreg(priv, XREG_FRAME_LINES, frame_lines); } static int xc_get_quality(struct xc5000_priv *priv, u16 *quality) { return xc5000_readreg(priv, XREG_QUALITY, quality); } static int xc_get_analogsnr(struct xc5000_priv *priv, u16 *snr) { return xc5000_readreg(priv, XREG_SNR, snr); } static int xc_get_totalgain(struct xc5000_priv *priv, u16 *totalgain) { return xc5000_readreg(priv, XREG_TOTALGAIN, totalgain); } static u16 WaitForLock(struct xc5000_priv *priv) { u16 lockState = 0; int watchDogCount = 40; while ((lockState == 0) && (watchDogCount > 0)) { xc_get_lock_status(priv, &lockState); if (lockState != 1) { xc_wait(5); watchDogCount--; } } return lockState; } #define XC_TUNE_ANALOG 0 #define XC_TUNE_DIGITAL 1 static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode) { int found = 0; dprintk(1, "%s(%u)\n", __func__, freq_hz); if (xc_set_RF_frequency(priv, freq_hz) != XC_RESULT_SUCCESS) return 0; if (mode == XC_TUNE_ANALOG) { if (WaitForLock(priv) == 1) found = 1; } return found; } static int xc_set_xtal(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; int ret = XC_RESULT_SUCCESS; switch (priv->chip_id) { default: case XC5000A: /* 32.000 MHz xtal is default */ break; case XC5000C: switch (priv->xtal_khz) { default: case 32000: /* 32.000 MHz xtal is default */ break; case 31875: /* 31.875 MHz xtal configuration */ ret = xc_write_reg(priv, 0x000f, 
0x8081); break; } break; } return ret; } static int xc5000_fwupload(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; const struct firmware *fw; int ret; const struct xc5000_fw_cfg *desired_fw = xc5000_assign_firmware(priv->chip_id); priv->pll_register_no = desired_fw->pll_reg; priv->init_status_supported = desired_fw->init_status_supported; priv->fw_checksum_supported = desired_fw->fw_checksum_supported; /* request the firmware, this will block and timeout */ printk(KERN_INFO "xc5000: waiting for firmware upload (%s)...\n", desired_fw->name); ret = request_firmware(&fw, desired_fw->name, priv->i2c_props.adap->dev.parent); if (ret) { printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n"); ret = XC_RESULT_RESET_FAILURE; goto out; } else { printk(KERN_DEBUG "xc5000: firmware read %Zu bytes.\n", fw->size); ret = XC_RESULT_SUCCESS; } if (fw->size != desired_fw->size) { printk(KERN_ERR "xc5000: firmware incorrect size\n"); ret = XC_RESULT_RESET_FAILURE; } else { printk(KERN_INFO "xc5000: firmware uploading...\n"); ret = xc_load_i2c_sequence(fe, fw->data); if (XC_RESULT_SUCCESS == ret) ret = xc_set_xtal(fe); if (XC_RESULT_SUCCESS == ret) printk(KERN_INFO "xc5000: firmware upload complete...\n"); else printk(KERN_ERR "xc5000: firmware upload failed...\n"); } out: release_firmware(fw); return ret; } static void xc_debug_dump(struct xc5000_priv *priv) { u16 adc_envelope; u32 freq_error_hz = 0; u16 lock_status; u32 hsync_freq_hz = 0; u16 frame_lines; u16 quality; u16 snr; u16 totalgain; u8 hw_majorversion = 0, hw_minorversion = 0; u8 fw_majorversion = 0, fw_minorversion = 0; u16 fw_buildversion = 0; u16 regval; /* Wait for stats to stabilize. * Frame Lines needs two frame times after initial lock * before it is valid. 
*/ xc_wait(100); xc_get_ADC_Envelope(priv, &adc_envelope); dprintk(1, "*** ADC envelope (0-1023) = %d\n", adc_envelope); xc_get_frequency_error(priv, &freq_error_hz); dprintk(1, "*** Frequency error = %d Hz\n", freq_error_hz); xc_get_lock_status(priv, &lock_status); dprintk(1, "*** Lock status (0-Wait, 1-Locked, 2-No-signal) = %d\n", lock_status); xc_get_version(priv, &hw_majorversion, &hw_minorversion, &fw_majorversion, &fw_minorversion); xc_get_buildversion(priv, &fw_buildversion); dprintk(1, "*** HW: V%d.%d, FW: V %d.%d.%d\n", hw_majorversion, hw_minorversion, fw_majorversion, fw_minorversion, fw_buildversion); xc_get_hsync_freq(priv, &hsync_freq_hz); dprintk(1, "*** Horizontal sync frequency = %d Hz\n", hsync_freq_hz); xc_get_frame_lines(priv, &frame_lines); dprintk(1, "*** Frame lines = %d\n", frame_lines); xc_get_quality(priv, &quality); dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality & 0x07); xc_get_analogsnr(priv, &snr); dprintk(1, "*** Unweighted analog SNR = %d dB\n", snr & 0x3f); xc_get_totalgain(priv, &totalgain); dprintk(1, "*** Total gain = %d.%d dB\n", totalgain / 256, (totalgain % 256) * 100 / 256); if (priv->pll_register_no) { xc5000_readreg(priv, priv->pll_register_no, &regval); dprintk(1, "*** PLL lock status = 0x%04x\n", regval); } } static int xc5000_set_params(struct dvb_frontend *fe) { int ret, b; struct xc5000_priv *priv = fe->tuner_priv; u32 bw = fe->dtv_property_cache.bandwidth_hz; u32 freq = fe->dtv_property_cache.frequency; u32 delsys = fe->dtv_property_cache.delivery_system; if (xc_load_fw_and_init_tuner(fe, 0) != XC_RESULT_SUCCESS) { dprintk(1, "Unable to load firmware and init tuner\n"); return -EINVAL; } dprintk(1, "%s() frequency=%d (Hz)\n", __func__, freq); switch (delsys) { case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = 
XC_RF_MODE_CABLE; priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_ISDBT: /* All ISDB-T are currently for 6 MHz bw */ if (!bw) bw = 6000000; /* fall to OFDM handling */ case SYS_DMBTH: case SYS_DVBT: case SYS_DVBT2: dprintk(1, "%s() OFDM\n", __func__); switch (bw) { case 6000000: priv->video_standard = DTV6; priv->freq_offset = 1750000; break; case 7000000: priv->video_standard = DTV7; priv->freq_offset = 2250000; break; case 8000000: priv->video_standard = DTV8; priv->freq_offset = 2750000; break; default: printk(KERN_ERR "xc5000 bandwidth not set!\n"); return -EINVAL; } priv->rf_mode = XC_RF_MODE_AIR; break; case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; if (bw <= 6000000) { priv->video_standard = DTV6; priv->freq_offset = 1750000; b = 6; } else if (bw <= 7000000) { priv->video_standard = DTV7; priv->freq_offset = 2250000; b = 7; } else { priv->video_standard = DTV7_8; priv->freq_offset = 2750000; b = 8; } dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__, b, bw); break; default: printk(KERN_ERR "xc5000: delivery system is not supported!\n"); return -EINVAL; } priv->freq_hz = freq - priv->freq_offset; dprintk(1, "%s() frequency=%d (compensated to %d)\n", __func__, freq, priv->freq_hz); ret = xc_SetSignalSource(priv, priv->rf_mode); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetSignalSource(%d) failed\n", priv->rf_mode); return -EREMOTEIO; } ret = xc_SetTVStandard(priv, XC5000_Standard[priv->video_standard].VideoMode, XC5000_Standard[priv->video_standard].AudioMode, 0); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetTVStandard failed\n"); return -EREMOTEIO; } ret = xc_set_IF_frequency(priv, priv->if_khz); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_Set_IF_frequency(%d) failed\n", priv->if_khz); return -EIO; } xc_write_reg(priv, XREG_OUTPUT_AMP, 0x8a); xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL); if 
(debug) xc_debug_dump(priv); priv->bandwidth = bw; return 0; } static int xc5000_is_firmware_loaded(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; int ret; u16 id; ret = xc5000_readreg(priv, XREG_PRODUCT_ID, &id); if (ret == XC_RESULT_SUCCESS) { if (id == XC_PRODUCT_ID_FW_NOT_LOADED) ret = XC_RESULT_RESET_FAILURE; else ret = XC_RESULT_SUCCESS; } dprintk(1, "%s() returns %s id = 0x%x\n", __func__, ret == XC_RESULT_SUCCESS ? "True" : "False", id); return ret; } static int xc5000_set_tv_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct xc5000_priv *priv = fe->tuner_priv; u16 pll_lock_status; int ret; dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n", __func__, params->frequency); /* Fix me: it could be air. */ priv->rf_mode = params->mode; if (params->mode > XC_RF_MODE_CABLE) priv->rf_mode = XC_RF_MODE_CABLE; /* params->frequency is in units of 62.5khz */ priv->freq_hz = params->frequency * 62500; /* FIX ME: Some video standards may have several possible audio standards. We simply default to one of them here. 
*/ if (params->std & V4L2_STD_MN) { /* default to BTSC audio standard */ priv->video_standard = MN_NTSC_PAL_BTSC; goto tune_channel; } if (params->std & V4L2_STD_PAL_BG) { /* default to NICAM audio standard */ priv->video_standard = BG_PAL_NICAM; goto tune_channel; } if (params->std & V4L2_STD_PAL_I) { /* default to NICAM audio standard */ priv->video_standard = I_PAL_NICAM; goto tune_channel; } if (params->std & V4L2_STD_PAL_DK) { /* default to NICAM audio standard */ priv->video_standard = DK_PAL_NICAM; goto tune_channel; } if (params->std & V4L2_STD_SECAM_DK) { /* default to A2 DK1 audio standard */ priv->video_standard = DK_SECAM_A2DK1; goto tune_channel; } if (params->std & V4L2_STD_SECAM_L) { priv->video_standard = L_SECAM_NICAM; goto tune_channel; } if (params->std & V4L2_STD_SECAM_LC) { priv->video_standard = LC_SECAM_NICAM; goto tune_channel; } tune_channel: ret = xc_SetSignalSource(priv, priv->rf_mode); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetSignalSource(%d) failed\n", priv->rf_mode); return -EREMOTEIO; } ret = xc_SetTVStandard(priv, XC5000_Standard[priv->video_standard].VideoMode, XC5000_Standard[priv->video_standard].AudioMode, 0); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetTVStandard failed\n"); return -EREMOTEIO; } xc_write_reg(priv, XREG_OUTPUT_AMP, 0x09); xc_tune_channel(priv, priv->freq_hz, XC_TUNE_ANALOG); if (debug) xc_debug_dump(priv); if (priv->pll_register_no != 0) { msleep(20); xc5000_readreg(priv, priv->pll_register_no, &pll_lock_status); if (pll_lock_status > 63) { /* PLL is unlocked, force reload of the firmware */ dprintk(1, "xc5000: PLL not locked (0x%x). 
Reloading...\n", pll_lock_status); if (xc_load_fw_and_init_tuner(fe, 1) != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: Unable to reload fw\n"); return -EREMOTEIO; } goto tune_channel; } } return 0; } static int xc5000_set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct xc5000_priv *priv = fe->tuner_priv; int ret = -EINVAL; u8 radio_input; dprintk(1, "%s() frequency=%d (in units of khz)\n", __func__, params->frequency); if (priv->radio_input == XC5000_RADIO_NOT_CONFIGURED) { dprintk(1, "%s() radio input not configured\n", __func__); return -EINVAL; } if (priv->radio_input == XC5000_RADIO_FM1) radio_input = FM_Radio_INPUT1; else if (priv->radio_input == XC5000_RADIO_FM2) radio_input = FM_Radio_INPUT2; else if (priv->radio_input == XC5000_RADIO_FM1_MONO) radio_input = FM_Radio_INPUT1_MONO; else { dprintk(1, "%s() unknown radio input %d\n", __func__, priv->radio_input); return -EINVAL; } priv->freq_hz = params->frequency * 125 / 2; priv->rf_mode = XC_RF_MODE_AIR; ret = xc_SetTVStandard(priv, XC5000_Standard[radio_input].VideoMode, XC5000_Standard[radio_input].AudioMode, radio_input); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetTVStandard failed\n"); return -EREMOTEIO; } ret = xc_SetSignalSource(priv, priv->rf_mode); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: xc_SetSignalSource(%d) failed\n", priv->rf_mode); return -EREMOTEIO; } if ((priv->radio_input == XC5000_RADIO_FM1) || (priv->radio_input == XC5000_RADIO_FM2)) xc_write_reg(priv, XREG_OUTPUT_AMP, 0x09); else if (priv->radio_input == XC5000_RADIO_FM1_MONO) xc_write_reg(priv, XREG_OUTPUT_AMP, 0x06); xc_tune_channel(priv, priv->freq_hz, XC_TUNE_ANALOG); return 0; } static int xc5000_set_analog_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct xc5000_priv *priv = fe->tuner_priv; int ret = -EINVAL; if (priv->i2c_props.adap == NULL) return -EINVAL; if (xc_load_fw_and_init_tuner(fe, 0) != XC_RESULT_SUCCESS) { dprintk(1, "Unable to 
load firmware and init tuner\n"); return -EINVAL; } switch (params->mode) { case V4L2_TUNER_RADIO: ret = xc5000_set_radio_freq(fe, params); break; case V4L2_TUNER_ANALOG_TV: case V4L2_TUNER_DIGITAL_TV: ret = xc5000_set_tv_freq(fe, params); break; } return ret; } static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); *freq = priv->freq_hz + priv->freq_offset; return 0; } static int xc5000_get_if_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); *freq = priv->if_khz * 1000; return 0; } static int xc5000_get_bandwidth(struct dvb_frontend *fe, u32 *bw) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); *bw = priv->bandwidth; return 0; } static int xc5000_get_status(struct dvb_frontend *fe, u32 *status) { struct xc5000_priv *priv = fe->tuner_priv; u16 lock_status = 0; xc_get_lock_status(priv, &lock_status); dprintk(1, "%s() lock_status = 0x%08x\n", __func__, lock_status); *status = lock_status; return 0; } static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force) { struct xc5000_priv *priv = fe->tuner_priv; int ret = XC_RESULT_SUCCESS; u16 pll_lock_status; u16 fw_ck; if (force || xc5000_is_firmware_loaded(fe) != XC_RESULT_SUCCESS) { fw_retry: ret = xc5000_fwupload(fe); if (ret != XC_RESULT_SUCCESS) return ret; msleep(20); if (priv->fw_checksum_supported) { if (xc5000_readreg(priv, XREG_FW_CHECKSUM, &fw_ck) != XC_RESULT_SUCCESS) { dprintk(1, "%s() FW checksum reading failed.\n", __func__); goto fw_retry; } if (fw_ck == 0) { dprintk(1, "%s() FW checksum failed = 0x%04x\n", __func__, fw_ck); goto fw_retry; } } /* Start the tuner self-calibration process */ ret |= xc_initialize(priv); if (ret != XC_RESULT_SUCCESS) goto fw_retry; /* Wait for calibration to complete. * We could continue but XC5000 will clock stretch subsequent * I2C transactions until calibration is complete. 
This way we * don't have to rely on clock stretching working. */ xc_wait(100); if (priv->init_status_supported) { if (xc5000_readreg(priv, XREG_INIT_STATUS, &fw_ck) != XC_RESULT_SUCCESS) { dprintk(1, "%s() FW failed reading init status.\n", __func__); goto fw_retry; } if (fw_ck == 0) { dprintk(1, "%s() FW init status failed = 0x%04x\n", __func__, fw_ck); goto fw_retry; } } if (priv->pll_register_no) { xc5000_readreg(priv, priv->pll_register_no, &pll_lock_status); if (pll_lock_status > 63) { /* PLL is unlocked, force reload of the firmware */ printk(KERN_ERR "xc5000: PLL not running after fwload.\n"); goto fw_retry; } } /* Default to "CABLE" mode */ ret |= xc_write_reg(priv, XREG_SIGNALSOURCE, XC_RF_MODE_CABLE); } return ret; } static int xc5000_sleep(struct dvb_frontend *fe) { int ret; dprintk(1, "%s()\n", __func__); /* Avoid firmware reload on slow devices */ if (no_poweroff) return 0; /* According to Xceive technical support, the "powerdown" register was removed in newer versions of the firmware. 
The "supported" way to sleep the tuner is to pull the reset pin low for 10ms */ ret = xc5000_TunerReset(fe); if (ret != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: %s() unable to shutdown tuner\n", __func__); return -EREMOTEIO; } else return XC_RESULT_SUCCESS; } static int xc5000_init(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); if (xc_load_fw_and_init_tuner(fe, 0) != XC_RESULT_SUCCESS) { printk(KERN_ERR "xc5000: Unable to initialise tuner\n"); return -EREMOTEIO; } if (debug) xc_debug_dump(priv); return 0; } static int xc5000_release(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); mutex_lock(&xc5000_list_mutex); if (priv) hybrid_tuner_release_state(priv); mutex_unlock(&xc5000_list_mutex); fe->tuner_priv = NULL; return 0; } static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg) { struct xc5000_priv *priv = fe->tuner_priv; struct xc5000_config *p = priv_cfg; dprintk(1, "%s()\n", __func__); if (p->if_khz) priv->if_khz = p->if_khz; if (p->radio_input) priv->radio_input = p->radio_input; return 0; } static const struct dvb_tuner_ops xc5000_tuner_ops = { .info = { .name = "Xceive XC5000", .frequency_min = 1000000, .frequency_max = 1023000000, .frequency_step = 50000, }, .release = xc5000_release, .init = xc5000_init, .sleep = xc5000_sleep, .set_config = xc5000_set_config, .set_params = xc5000_set_params, .set_analog_params = xc5000_set_analog_params, .get_frequency = xc5000_get_frequency, .get_if_frequency = xc5000_get_if_frequency, .get_bandwidth = xc5000_get_bandwidth, .get_status = xc5000_get_status }; struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct xc5000_config *cfg) { struct xc5000_priv *priv = NULL; int instance; u16 id = 0; dprintk(1, "%s(%d-%04x)\n", __func__, i2c ? i2c_adapter_id(i2c) : -1, cfg ? 
cfg->i2c_address : -1); mutex_lock(&xc5000_list_mutex); instance = hybrid_tuner_request_state(struct xc5000_priv, priv, hybrid_tuner_instance_list, i2c, cfg->i2c_address, "xc5000"); switch (instance) { case 0: goto fail; break; case 1: /* new tuner instance */ priv->bandwidth = 6000000; fe->tuner_priv = priv; break; default: /* existing tuner instance */ fe->tuner_priv = priv; break; } if (priv->if_khz == 0) { /* If the IF hasn't been set yet, use the value provided by the caller (occurs in hybrid devices where the analog call to xc5000_attach occurs before the digital side) */ priv->if_khz = cfg->if_khz; } if (priv->xtal_khz == 0) priv->xtal_khz = cfg->xtal_khz; if (priv->radio_input == 0) priv->radio_input = cfg->radio_input; /* don't override chip id if it's already been set unless explicitly specified */ if ((priv->chip_id == 0) || (cfg->chip_id)) /* use default chip id if none specified, set to 0 so it can be overridden if this is a hybrid driver */ priv->chip_id = (cfg->chip_id) ? cfg->chip_id : 0; /* Check if firmware has been loaded. It is possible that another instance of the driver has loaded the firmware. 
*/ if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != XC_RESULT_SUCCESS) goto fail; switch (id) { case XC_PRODUCT_ID_FW_LOADED: printk(KERN_INFO "xc5000: Successfully identified at address 0x%02x\n", cfg->i2c_address); printk(KERN_INFO "xc5000: Firmware has been loaded previously\n"); break; case XC_PRODUCT_ID_FW_NOT_LOADED: printk(KERN_INFO "xc5000: Successfully identified at address 0x%02x\n", cfg->i2c_address); printk(KERN_INFO "xc5000: Firmware has not been loaded previously\n"); break; default: printk(KERN_ERR "xc5000: Device not found at addr 0x%02x (0x%x)\n", cfg->i2c_address, id); goto fail; } mutex_unlock(&xc5000_list_mutex); memcpy(&fe->ops.tuner_ops, &xc5000_tuner_ops, sizeof(struct dvb_tuner_ops)); return fe; fail: mutex_unlock(&xc5000_list_mutex); xc5000_release(fe); return NULL; } EXPORT_SYMBOL(xc5000_attach); MODULE_AUTHOR("Steven Toth"); MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(XC5000A_FIRMWARE); MODULE_FIRMWARE(XC5000C_FIRMWARE);
gpl-2.0
TeamAlto45/android_kernel_tcl_alto45
kernel/trace/trace_selftest.c
2002
26405
/* Include in trace.c */ #include <linux/stringify.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> static inline int trace_valid_entry(struct trace_entry *entry) { switch (entry->type) { case TRACE_FN: case TRACE_CTX: case TRACE_WAKE: case TRACE_STACK: case TRACE_PRINT: case TRACE_BRANCH: case TRACE_GRAPH_ENT: case TRACE_GRAPH_RET: return 1; } return 0; } static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu) { struct ring_buffer_event *event; struct trace_entry *entry; unsigned int loops = 0; while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) { entry = ring_buffer_event_data(event); /* * The ring buffer is a size of trace_buf_size, if * we loop more than the size, there's something wrong * with the ring buffer. */ if (loops++ > trace_buf_size) { printk(KERN_CONT ".. bad ring buffer "); goto failed; } if (!trace_valid_entry(entry)) { printk(KERN_CONT ".. invalid entry %d ", entry->type); goto failed; } } return 0; failed: /* disable tracing */ tracing_disabled = 1; printk(KERN_CONT ".. corrupted trace buffer .. "); return -1; } /* * Test the trace buffer to see if all the elements * are still sane. */ static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count) { unsigned long flags, cnt = 0; int cpu, ret = 0; /* Don't allow flipping of max traces now */ local_irq_save(flags); arch_spin_lock(&ftrace_max_lock); cnt = ring_buffer_entries(buf->buffer); /* * The trace_test_buffer_cpu runs a while loop to consume all data. * If the calling tracer is broken, and is constantly filling * the buffer, this will run forever, and hard lock the box. * We disable the ring buffer while we do this test to prevent * a hard lock up. 
*/ tracing_off(); for_each_possible_cpu(cpu) { ret = trace_test_buffer_cpu(buf, cpu); if (ret) break; } tracing_on(); arch_spin_unlock(&ftrace_max_lock); local_irq_restore(flags); if (count) *count = cnt; return ret; } static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) { printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n", trace->name, init_ret); } #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE static int trace_selftest_test_probe1_cnt; static void trace_selftest_test_probe1_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { trace_selftest_test_probe1_cnt++; } static int trace_selftest_test_probe2_cnt; static void trace_selftest_test_probe2_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { trace_selftest_test_probe2_cnt++; } static int trace_selftest_test_probe3_cnt; static void trace_selftest_test_probe3_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { trace_selftest_test_probe3_cnt++; } static int trace_selftest_test_global_cnt; static void trace_selftest_test_global_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { trace_selftest_test_global_cnt++; } static int trace_selftest_test_dyn_cnt; static void trace_selftest_test_dyn_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { trace_selftest_test_dyn_cnt++; } static struct ftrace_ops test_probe1 = { .func = trace_selftest_test_probe1_func, .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_probe2 = { .func = trace_selftest_test_probe2_func, .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_probe3 = { .func = trace_selftest_test_probe3_func, .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_global = { .func = trace_selftest_test_global_func, .flags = FTRACE_OPS_FL_GLOBAL | 
FTRACE_OPS_FL_RECURSION_SAFE, }; static void print_counts(void) { printk("(%d %d %d %d %d) ", trace_selftest_test_probe1_cnt, trace_selftest_test_probe2_cnt, trace_selftest_test_probe3_cnt, trace_selftest_test_global_cnt, trace_selftest_test_dyn_cnt); } static void reset_counts(void) { trace_selftest_test_probe1_cnt = 0; trace_selftest_test_probe2_cnt = 0; trace_selftest_test_probe3_cnt = 0; trace_selftest_test_global_cnt = 0; trace_selftest_test_dyn_cnt = 0; } static int trace_selftest_ops(int cnt) { int save_ftrace_enabled = ftrace_enabled; struct ftrace_ops *dyn_ops; char *func1_name; char *func2_name; int len1; int len2; int ret = -1; printk(KERN_CONT "PASSED\n"); pr_info("Testing dynamic ftrace ops #%d: ", cnt); ftrace_enabled = 1; reset_counts(); /* Handle PPC64 '.' name */ func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); len1 = strlen(func1_name); len2 = strlen(func2_name); /* * Probe 1 will trace function 1. * Probe 2 will trace function 2. * Probe 3 will trace functions 1 and 2. 
*/ ftrace_set_filter(&test_probe1, func1_name, len1, 1); ftrace_set_filter(&test_probe2, func2_name, len2, 1); ftrace_set_filter(&test_probe3, func1_name, len1, 1); ftrace_set_filter(&test_probe3, func2_name, len2, 0); register_ftrace_function(&test_probe1); register_ftrace_function(&test_probe2); register_ftrace_function(&test_probe3); register_ftrace_function(&test_global); DYN_FTRACE_TEST_NAME(); print_counts(); if (trace_selftest_test_probe1_cnt != 1) goto out; if (trace_selftest_test_probe2_cnt != 0) goto out; if (trace_selftest_test_probe3_cnt != 1) goto out; if (trace_selftest_test_global_cnt == 0) goto out; DYN_FTRACE_TEST_NAME2(); print_counts(); if (trace_selftest_test_probe1_cnt != 1) goto out; if (trace_selftest_test_probe2_cnt != 1) goto out; if (trace_selftest_test_probe3_cnt != 2) goto out; /* Add a dynamic probe */ dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); if (!dyn_ops) { printk("MEMORY ERROR "); goto out; } dyn_ops->func = trace_selftest_test_dyn_func; register_ftrace_function(dyn_ops); trace_selftest_test_global_cnt = 0; DYN_FTRACE_TEST_NAME(); print_counts(); if (trace_selftest_test_probe1_cnt != 2) goto out_free; if (trace_selftest_test_probe2_cnt != 1) goto out_free; if (trace_selftest_test_probe3_cnt != 3) goto out_free; if (trace_selftest_test_global_cnt == 0) goto out; if (trace_selftest_test_dyn_cnt == 0) goto out_free; DYN_FTRACE_TEST_NAME2(); print_counts(); if (trace_selftest_test_probe1_cnt != 2) goto out_free; if (trace_selftest_test_probe2_cnt != 2) goto out_free; if (trace_selftest_test_probe3_cnt != 4) goto out_free; ret = 0; out_free: unregister_ftrace_function(dyn_ops); kfree(dyn_ops); out: /* Purposely unregister in the same order */ unregister_ftrace_function(&test_probe1); unregister_ftrace_function(&test_probe2); unregister_ftrace_function(&test_probe3); unregister_ftrace_function(&test_global); /* Make sure everything is off */ reset_counts(); DYN_FTRACE_TEST_NAME(); DYN_FTRACE_TEST_NAME(); if 
(trace_selftest_test_probe1_cnt || trace_selftest_test_probe2_cnt || trace_selftest_test_probe3_cnt || trace_selftest_test_global_cnt || trace_selftest_test_dyn_cnt) ret = -1; ftrace_enabled = save_ftrace_enabled; return ret; } /* Test dynamic code modification and ftrace filters */ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, struct trace_array *tr, int (*func)(void)) { int save_ftrace_enabled = ftrace_enabled; unsigned long count; char *func_name; int ret; /* The ftrace test PASSED */ printk(KERN_CONT "PASSED\n"); pr_info("Testing dynamic ftrace: "); /* enable tracing, and record the filter function */ ftrace_enabled = 1; /* passed in by parameter to fool gcc from optimizing */ func(); /* * Some archs *cough*PowerPC*cough* add characters to the * start of the function names. We simply put a '*' to * accommodate them. */ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); /* filter only on our function */ ftrace_set_global_filter(func_name, strlen(func_name), 1); /* enable tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); goto out; } /* Sleep for a 1/10 of a second */ msleep(100); /* we should have nothing in the buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); if (ret) goto out; if (count) { ret = -1; printk(KERN_CONT ".. filter did not filter .. "); goto out; } /* call our function again */ func(); /* sleep again */ msleep(100); /* stop the tracing. */ tracing_stop(); ftrace_enabled = 0; /* check the trace buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); tracing_start(); /* we should only have one item */ if (!ret && count != 1) { trace->reset(tr); printk(KERN_CONT ".. 
filter failed count=%ld ..", count); ret = -1; goto out; } /* Test the ops with global tracing running */ ret = trace_selftest_ops(1); trace->reset(tr); out: ftrace_enabled = save_ftrace_enabled; /* Enable tracing on all functions again */ ftrace_set_global_filter(NULL, 0, 1); /* Test the ops with global tracing off */ if (!ret) ret = trace_selftest_ops(2); return ret; } static int trace_selftest_recursion_cnt; static void trace_selftest_test_recursion_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { /* * This function is registered without the recursion safe flag. * The ftrace infrastructure should provide the recursion * protection. If not, this will crash the kernel! */ if (trace_selftest_recursion_cnt++ > 10) return; DYN_FTRACE_TEST_NAME(); } static void trace_selftest_test_recursion_safe_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { /* * We said we would provide our own recursion. By calling * this function again, we should recurse back into this function * and count again. But this only happens if the arch supports * all of ftrace features and nothing else is using the function * tracing utility. */ if (trace_selftest_recursion_cnt++) return; DYN_FTRACE_TEST_NAME(); } static struct ftrace_ops test_rec_probe = { .func = trace_selftest_test_recursion_func, }; static struct ftrace_ops test_recsafe_probe = { .func = trace_selftest_test_recursion_safe_func, .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static int trace_selftest_function_recursion(void) { int save_ftrace_enabled = ftrace_enabled; char *func_name; int len; int ret; /* The previous test PASSED */ pr_cont("PASSED\n"); pr_info("Testing ftrace recursion: "); /* enable tracing, and record the filter function */ ftrace_enabled = 1; /* Handle PPC64 '.' 
name */ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); len = strlen(func_name); ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1); if (ret) { pr_cont("*Could not set filter* "); goto out; } ret = register_ftrace_function(&test_rec_probe); if (ret) { pr_cont("*could not register callback* "); goto out; } DYN_FTRACE_TEST_NAME(); unregister_ftrace_function(&test_rec_probe); ret = -1; if (trace_selftest_recursion_cnt != 1) { pr_cont("*callback not called once (%d)* ", trace_selftest_recursion_cnt); goto out; } trace_selftest_recursion_cnt = 1; pr_cont("PASSED\n"); pr_info("Testing ftrace recursion safe: "); ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1); if (ret) { pr_cont("*Could not set filter* "); goto out; } ret = register_ftrace_function(&test_recsafe_probe); if (ret) { pr_cont("*could not register callback* "); goto out; } DYN_FTRACE_TEST_NAME(); unregister_ftrace_function(&test_recsafe_probe); ret = -1; if (trace_selftest_recursion_cnt != 2) { pr_cont("*callback not called expected 2 times (%d)* ", trace_selftest_recursion_cnt); goto out; } ret = 0; out: ftrace_enabled = save_ftrace_enabled; return ret; } #else # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) # define trace_selftest_function_recursion() ({ 0; }) #endif /* CONFIG_DYNAMIC_FTRACE */ static enum { TRACE_SELFTEST_REGS_START, TRACE_SELFTEST_REGS_FOUND, TRACE_SELFTEST_REGS_NOT_FOUND, } trace_selftest_regs_stat; static void trace_selftest_test_regs_func(unsigned long ip, unsigned long pip, struct ftrace_ops *op, struct pt_regs *pt_regs) { if (pt_regs) trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND; else trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND; } static struct ftrace_ops test_regs_probe = { .func = trace_selftest_test_regs_func, .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS, }; static int trace_selftest_function_regs(void) { int save_ftrace_enabled = ftrace_enabled; char *func_name; int len; int ret; int 
supported = 0; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS supported = 1; #endif /* The previous test PASSED */ pr_cont("PASSED\n"); pr_info("Testing ftrace regs%s: ", !supported ? "(no arch support)" : ""); /* enable tracing, and record the filter function */ ftrace_enabled = 1; /* Handle PPC64 '.' name */ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); len = strlen(func_name); ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1); /* * If DYNAMIC_FTRACE is not set, then we just trace all functions. * This test really doesn't care. */ if (ret && ret != -ENODEV) { pr_cont("*Could not set filter* "); goto out; } ret = register_ftrace_function(&test_regs_probe); /* * Now if the arch does not support passing regs, then this should * have failed. */ if (!supported) { if (!ret) { pr_cont("*registered save-regs without arch support* "); goto out; } test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED; ret = register_ftrace_function(&test_regs_probe); } if (ret) { pr_cont("*could not register callback* "); goto out; } DYN_FTRACE_TEST_NAME(); unregister_ftrace_function(&test_regs_probe); ret = -1; switch (trace_selftest_regs_stat) { case TRACE_SELFTEST_REGS_START: pr_cont("*callback never called* "); goto out; case TRACE_SELFTEST_REGS_FOUND: if (supported) break; pr_cont("*callback received regs without arch support* "); goto out; case TRACE_SELFTEST_REGS_NOT_FOUND: if (!supported) break; pr_cont("*callback received NULL regs* "); goto out; } ret = 0; out: ftrace_enabled = save_ftrace_enabled; return ret; } /* * Simple verification test of ftrace function tracer. * Enable ftrace, sleep 1/10 second, and then read the trace * buffer to see if all is in order. 
*/ int trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) { int save_ftrace_enabled = ftrace_enabled; unsigned long count; int ret; /* make sure msleep has been recorded */ msleep(1); /* start the tracing */ ftrace_enabled = 1; ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); goto out; } /* Sleep for a 1/10 of a second */ msleep(100); /* stop the tracing. */ tracing_stop(); ftrace_enabled = 0; /* check the trace buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; goto out; } ret = trace_selftest_startup_dynamic_tracing(trace, tr, DYN_FTRACE_TEST_NAME); if (ret) goto out; ret = trace_selftest_function_recursion(); if (ret) goto out; ret = trace_selftest_function_regs(); out: ftrace_enabled = save_ftrace_enabled; /* kill ftrace totally if we failed */ if (ret) ftrace_kill(); return ret; } #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Maximum number of functions to trace before diagnosing a hang */ #define GRAPH_MAX_FUNC_TEST 100000000 static unsigned int graph_hang_thresh; /* Wrap the real function entry probe to avoid possible hanging */ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) { /* This is harmlessly racy, we want to approximately detect a hang */ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { ftrace_graph_stop(); printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); if (ftrace_dump_on_oops) { ftrace_dump(DUMP_ALL); /* ftrace_dump() disables tracing */ tracing_on(); } return 0; } return trace_graph_entry(trace); } /* * Pretty much the same than for the function tracer from which the selftest * has been borrowed. 
*/ int trace_selftest_startup_function_graph(struct tracer *trace, struct trace_array *tr) { int ret; unsigned long count; /* * Simulate the init() callback but we attach a watchdog callback * to detect and recover from possible hangs */ tracing_reset_online_cpus(&tr->trace_buffer); set_graph_array(tr); ret = register_ftrace_graph(&trace_graph_return, &trace_graph_entry_watchdog); if (ret) { warn_failed_init_tracer(trace, ret); goto out; } tracing_start_cmdline_record(); /* Sleep for a 1/10 of a second */ msleep(100); /* Have we just recovered from a hang? */ if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) { tracing_selftest_disabled = true; ret = -1; goto out; } tracing_stop(); /* check the trace buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; goto out; } /* Don't test dynamic tracing, the function tracer already did */ out: /* Stop it if we failed */ if (ret) ftrace_graph_stop(); return ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_IRQSOFF_TRACER int trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) { unsigned long save_max = tracing_max_latency; unsigned long count; int ret; /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); return ret; } /* reset the max latency */ tracing_max_latency = 0; /* disable interrupts for a bit */ local_irq_disable(); udelay(100); local_irq_enable(); /* * Stop the tracer to avoid a warning subsequent * to buffer flipping failure because tracing_stop() * disables the tr and max buffers, making flipping impossible * in case of parallels max irqs off latencies. */ trace->stop(tr); /* stop the tracing. 
*/ tracing_stop(); /* check both trace buffers */ ret = trace_test_buffer(&tr->trace_buffer, NULL); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; } tracing_max_latency = save_max; return ret; } #endif /* CONFIG_IRQSOFF_TRACER */ #ifdef CONFIG_PREEMPT_TRACER int trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) { unsigned long save_max = tracing_max_latency; unsigned long count; int ret; /* * Now that the big kernel lock is no longer preemptable, * and this is called with the BKL held, it will always * fail. If preemption is already disabled, simply * pass the test. When the BKL is removed, or becomes * preemptible again, we will once again test this, * so keep it in. */ if (preempt_count()) { printk(KERN_CONT "can not test ... force "); return 0; } /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); return ret; } /* reset the max latency */ tracing_max_latency = 0; /* disable preemption for a bit */ preempt_disable(); udelay(100); preempt_enable(); /* * Stop the tracer to avoid a warning subsequent * to buffer flipping failure because tracing_stop() * disables the tr and max buffers, making flipping impossible * in case of parallels max preempt off latencies. */ trace->stop(tr); /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ ret = trace_test_buffer(&tr->trace_buffer, NULL); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. 
no entries found .."); ret = -1; } tracing_max_latency = save_max; return ret; } #endif /* CONFIG_PREEMPT_TRACER */ #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) int trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr) { unsigned long save_max = tracing_max_latency; unsigned long count; int ret; /* * Now that the big kernel lock is no longer preemptable, * and this is called with the BKL held, it will always * fail. If preemption is already disabled, simply * pass the test. When the BKL is removed, or becomes * preemptible again, we will once again test this, * so keep it in. */ if (preempt_count()) { printk(KERN_CONT "can not test ... force "); return 0; } /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); goto out_no_start; } /* reset the max latency */ tracing_max_latency = 0; /* disable preemption and interrupts for a bit */ preempt_disable(); local_irq_disable(); udelay(100); preempt_enable(); /* reverse the order of preempt vs irqs */ local_irq_enable(); /* * Stop the tracer to avoid a warning subsequent * to buffer flipping failure because tracing_stop() * disables the tr and max buffers, making flipping impossible * in case of parallels max irqs/preempt off latencies. */ trace->stop(tr); /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ ret = trace_test_buffer(&tr->trace_buffer, NULL); if (ret) goto out; ret = trace_test_buffer(&tr->max_buffer, &count); if (ret) goto out; if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; goto out; } /* do the test by disabling interrupts first this time */ tracing_max_latency = 0; tracing_start(); trace->start(tr); preempt_disable(); local_irq_disable(); udelay(100); preempt_enable(); /* reverse the order of preempt vs irqs */ local_irq_enable(); trace->stop(tr); /* stop the tracing. 
*/ tracing_stop(); /* check both trace buffers */ ret = trace_test_buffer(&tr->trace_buffer, NULL); if (ret) goto out; ret = trace_test_buffer(&tr->max_buffer, &count); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; goto out; } out: tracing_start(); out_no_start: trace->reset(tr); tracing_max_latency = save_max; return ret; } #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ #ifdef CONFIG_NOP_TRACER int trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) { /* What could possibly go wrong? */ return 0; } #endif #ifdef CONFIG_SCHED_TRACER static int trace_wakeup_test_thread(void *data) { /* Make this a RT thread, doesn't need to be too high */ static const struct sched_param param = { .sched_priority = 5 }; struct completion *x = data; sched_setscheduler(current, SCHED_FIFO, &param); /* Make it know we have a new prio */ complete(x); /* now go to sleep and let the test wake us up */ set_current_state(TASK_INTERRUPTIBLE); schedule(); complete(x); /* we are awake, now wait to disappear */ while (!kthread_should_stop()) { /* * This is an RT task, do short sleeps to let * others run. */ msleep(100); } return 0; } int trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) { unsigned long save_max = tracing_max_latency; struct task_struct *p; struct completion isrt; unsigned long count; int ret; init_completion(&isrt); /* create a high prio thread */ p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test"); if (IS_ERR(p)) { printk(KERN_CONT "Failed to create ftrace wakeup test thread "); return -1; } /* make sure the thread is running at an RT prio */ wait_for_completion(&isrt); /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); return ret; } /* reset the max latency */ tracing_max_latency = 0; while (p->on_rq) { /* * Sleep to make sure the RT thread is asleep too. 
* On virtual machines we can't rely on timings, * but we want to make sure this test still works. */ msleep(100); } init_completion(&isrt); wake_up_process(p); /* Wait for the task to wake up */ wait_for_completion(&isrt); /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ ret = trace_test_buffer(&tr->trace_buffer, NULL); printk("ret = %d\n", ret); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); trace->reset(tr); tracing_start(); tracing_max_latency = save_max; /* kill the thread */ kthread_stop(p); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; } return ret; } #endif /* CONFIG_SCHED_TRACER */ #ifdef CONFIG_CONTEXT_SWITCH_TRACER int trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr) { unsigned long count; int ret; /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); return ret; } /* Sleep for a 1/10 of a second */ msleep(100); /* stop the tracing. */ tracing_stop(); /* check the trace buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; } return ret; } #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ #ifdef CONFIG_BRANCH_TRACER int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) { unsigned long count; int ret; /* start the tracing */ ret = tracer_init(trace, tr); if (ret) { warn_failed_init_tracer(trace, ret); return ret; } /* Sleep for a 1/10 of a second */ msleep(100); /* stop the tracing. */ tracing_stop(); /* check the trace buffer */ ret = trace_test_buffer(&tr->trace_buffer, &count); trace->reset(tr); tracing_start(); if (!ret && !count) { printk(KERN_CONT ".. no entries found .."); ret = -1; } return ret; } #endif /* CONFIG_BRANCH_TRACER */
gpl-2.0
ItsAnilSingh/android_kernel_samsung_logan2g
drivers/net/pcmcia/ibmtr_cs.c
2770
10086
/*====================================================================== A PCMCIA token-ring driver for IBM-based cards This driver supports the IBM PCMCIA Token-Ring Card. Written by Steve Kipisz, kipisz@vnet.ibm.com or bungy@ibm.net Written 1995,1996. This code is based on pcnet_cs.c from David Hinds. V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com Linux V2.2.x presented significant changes to the underlying ibmtr.c code. Mainly the code became a lot more organized and modular. This caused the old PCMCIA Token Ring driver to give up and go home early. Instead of just patching the old code to make it work, the PCMCIA code has been streamlined, updated and possibly improved. This code now only contains code required for the Card Services. All we do here is set the card up enough so that the real ibmtr.c driver can find it and work with it properly. i.e. We set up the io port, irq, mmio memory and shared ram memory. This enables ibmtr_probe in ibmtr.c to find the card and configure it as though it was a normal ISA and/or PnP card. CHANGES v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com) Obscure bug fix, required changed to ibmtr.c not ibmtr_cs.c v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com) Updated to version 2.2.7 to match the first version of the kernel that the modification to ibmtr.c were incorporated into. v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com) Address translation feature of PCMCIA controller is usable so memory windows can be placed in High memory (meaning above 0xFFFFF.) 
======================================================================*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <linux/ibmtr.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/system.h> #define PCMCIA #include "../tokenring/ibmtr.c" /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* MMIO base address */ static u_long mmiobase = 0xce000; /* SRAM base address */ static u_long srambase = 0xd0000; /* SRAM size 8,16,32,64 */ static u_long sramsize = 64; /* Ringspeed 4,16 */ static int ringspeed = 16; module_param(mmiobase, ulong, 0); module_param(srambase, ulong, 0); module_param(sramsize, ulong, 0); module_param(ringspeed, int, 0); MODULE_LICENSE("GPL"); /*====================================================================*/ static int ibmtr_config(struct pcmcia_device *link); static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase); static void ibmtr_release(struct pcmcia_device *link); static void ibmtr_detach(struct pcmcia_device *p_dev); /*====================================================================*/ typedef struct ibmtr_dev_t { struct pcmcia_device *p_dev; struct net_device *dev; struct tok_info *ti; } ibmtr_dev_t; static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) { ibmtr_dev_t *info = dev_id; struct net_device *dev = info->dev; return tok_interrupt(irq, dev); }; static int __devinit ibmtr_attach(struct pcmcia_device *link) { ibmtr_dev_t *info; struct net_device *dev; dev_dbg(&link->dev, "ibmtr_attach()\n"); /* Create new token-ring device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; dev = alloc_trdev(sizeof(struct tok_info)); if 
(!dev) { kfree(info); return -ENOMEM; } info->p_dev = link; link->priv = info; info->ti = netdev_priv(dev); link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[0]->end = 4; link->config_flags |= CONF_ENABLE_IRQ; link->config_regs = PRESENT_OPTION; info->dev = dev; return ibmtr_config(link); } /* ibmtr_attach */ static void ibmtr_detach(struct pcmcia_device *link) { struct ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; struct tok_info *ti = netdev_priv(dev); dev_dbg(&link->dev, "ibmtr_detach\n"); /* * When the card removal interrupt hits tok_interrupt(), * bail out early, so we don't crash the machine */ ti->sram_phys |= 1; unregister_netdev(dev); del_timer_sync(&(ti->tr_timer)); ibmtr_release(link); free_netdev(dev); kfree(info); } /* ibmtr_detach */ static int __devinit ibmtr_config(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; struct tok_info *ti = netdev_priv(dev); int i, ret; dev_dbg(&link->dev, "ibmtr_config\n"); link->io_lines = 16; link->config_index = 0x61; /* Determine if this is PRIMARY or ALTERNATE. */ /* Try PRIMARY card at 0xA20-0xA23 */ link->resource[0]->start = 0xA20; i = pcmcia_request_io(link); if (i != 0) { /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ link->resource[0]->start = 0xA24; ret = pcmcia_request_io(link); if (ret) goto failed; } dev->base_addr = link->resource[0]->start; ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt); if (ret) goto failed; dev->irq = link->irq; ti->irq = link->irq; ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 
2 : dev->irq); /* Allocate the MMIO memory window */ link->resource[2]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; link->resource[2]->flags |= WIN_USE_WAIT; link->resource[2]->start = 0; link->resource[2]->end = 0x2000; ret = pcmcia_request_window(link, link->resource[2], 250); if (ret) goto failed; ret = pcmcia_map_mem_page(link, link->resource[2], mmiobase); if (ret) goto failed; ti->mmio = ioremap(link->resource[2]->start, resource_size(link->resource[2])); /* Allocate the SRAM memory window */ link->resource[3]->flags = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; link->resource[3]->flags |= WIN_USE_WAIT; link->resource[3]->start = 0; link->resource[3]->end = sramsize * 1024; ret = pcmcia_request_window(link, link->resource[3], 250); if (ret) goto failed; ret = pcmcia_map_mem_page(link, link->resource[3], srambase); if (ret) goto failed; ti->sram_base = srambase >> 12; ti->sram_virt = ioremap(link->resource[3]->start, resource_size(link->resource[3])); ti->sram_phys = link->resource[3]->start; ret = pcmcia_enable_device(link); if (ret) goto failed; /* Set up the Token-Ring Controller Configuration Register and turn on the card. Check the "Local Area Network Credit Card Adapters Technical Reference" SC30-3585 for this info. 
*/ ibmtr_hw_setup(dev, mmiobase); SET_NETDEV_DEV(dev, &link->dev); i = ibmtr_probe_card(dev); if (i != 0) { pr_notice("register_netdev() failed\n"); goto failed; } netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", dev->base_addr, dev->irq, (u_long)ti->mmio, (u_long)(ti->sram_base << 12), dev->dev_addr); return 0; failed: ibmtr_release(link); return -ENODEV; } /* ibmtr_config */ static void ibmtr_release(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; dev_dbg(&link->dev, "ibmtr_release\n"); if (link->resource[2]->end) { struct tok_info *ti = netdev_priv(dev); iounmap(ti->mmio); } pcmcia_disable_device(link); } static int ibmtr_suspend(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; if (link->open) netif_device_detach(dev); return 0; } static int __devinit ibmtr_resume(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; if (link->open) { ibmtr_probe(dev); /* really? */ netif_device_attach(dev); } return 0; } /*====================================================================*/ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase) { int i; /* Bizarre IBM behavior, there are 16 bits of information we need to set, but the card only allows us to send 4 bits at a time. For each byte sent to base_addr, bits 7-4 tell the card which part of the 16 bits we are setting, bits 3-0 contain the actual information */ /* First nibble provides 4 bits of mmio */ i = (mmiobase >> 16) & 0x0F; outb(i, dev->base_addr); /* Second nibble provides 3 bits of mmio */ i = 0x10 | ((mmiobase >> 12) & 0x0E); outb(i, dev->base_addr); /* Third nibble, hard-coded values */ i = 0x26; outb(i, dev->base_addr); /* Fourth nibble sets shared ram page size */ /* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */ i = (sramsize >> 4) & 0x07; i = ((i == 4) ? 
3 : i) << 2; i |= 0x30; if (ringspeed == 16) i |= 2; if (dev->base_addr == 0xA24) i |= 1; outb(i, dev->base_addr); /* 0x40 will release the card for use */ outb(0x40, dev->base_addr); } static const struct pcmcia_device_id ibmtr_ids[] = { PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e), PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids); static struct pcmcia_driver ibmtr_cs_driver = { .owner = THIS_MODULE, .name = "ibmtr_cs", .probe = ibmtr_attach, .remove = ibmtr_detach, .id_table = ibmtr_ids, .suspend = ibmtr_suspend, .resume = ibmtr_resume, }; static int __init init_ibmtr_cs(void) { return pcmcia_register_driver(&ibmtr_cs_driver); } static void __exit exit_ibmtr_cs(void) { pcmcia_unregister_driver(&ibmtr_cs_driver); } module_init(init_ibmtr_cs); module_exit(exit_ibmtr_cs);
gpl-2.0
onealtom/MYD-C335X-Linux-Kernel
fs/squashfs/symlink.c
3026
3890
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * symlink.c */ /* * This file implements code to handle symbolic links. * * The data contents of symbolic links are stored inside the symbolic * link inode within the inode table. This allows the normally small symbolic * link to be compressed as part of the inode table, achieving much greater * compression than if the symbolic link was compressed individually. 
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"

/*
 * Fill one page-cache page with the symlink target, which is stored
 * inline in the inode table metadata (see file header comment).
 * Always returns 0; failure is reported via SetPageError() on the page.
 */
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	/* Byte offset of this page within the symlink body. */
	int index = page->index << PAGE_CACHE_SHIFT;
	u64 block = squashfs_i(inode)->start;
	int offset = squashfs_i(inode)->offset;
	/* Bytes of the symlink that land in this page. */
	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
	int bytes, copied;
	void *pageaddr;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
			"%llx, offset %x\n", page->index, block, offset);

	/*
	 * Skip index bytes into symlink metadata.
	 */
	if (index) {
		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
								index);
		if (bytes < 0) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			goto error_out;
		}
	}

	/*
	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
	 * is not used here because it can sleep and we want to use
	 * kmap_atomic to map the page.  Instead call the underlying
	 * squashfs_cache_get routine.  As length bytes may overlap metadata
	 * blocks, we may need to call squashfs_cache_get multiple times.
	 * Note "offset = 0" in the increment: only the first block is read
	 * from a non-zero offset.
	 */
	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
		if (entry->error) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			squashfs_cache_put(entry);
			goto error_out;
		}

		/* kmap_atomic disables preemption: no sleeping between
		 * here and kunmap_atomic below. */
		pageaddr = kmap_atomic(page, KM_USER0);
		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
								length - bytes);
		if (copied == length - bytes)
			/* Final block: zero the tail of the page. */
			memset(pageaddr + length, 0,
					PAGE_CACHE_SIZE - length);
		else
			/* More to copy: advance to the next metadata block. */
			block = entry->next_index;
		kunmap_atomic(pageaddr, KM_USER0);
		squashfs_cache_put(entry);
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error_out:
	SetPageError(page);
	unlock_page(page);
	return 0;
}

const struct address_space_operations squashfs_symlink_aops = {
	.readpage = squashfs_symlink_readpage
};

const struct inode_operations squashfs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = page_follow_link_light,
	.put_link = page_put_link,
	.getxattr = generic_getxattr,
	.listxattr = squashfs_listxattr
};
gpl-2.0
anoane/Ultrakernel
net/x25/x25_proc.c
4050
6463
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.4 with seq_file support
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	2002/10/06	Arnaldo Carvalho de Melo	seq_file support
 */

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>

#ifdef CONFIG_PROC_FS

/* /proc/net/x25/route iterator: hold x25_route_list_lock for the whole
 * traversal (released in ..._stop). */
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
	__acquires(x25_route_list_lock)
{
	read_lock_bh(&x25_route_list_lock);
	return seq_list_start_head(&x25_route_list, *pos);
}

static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &x25_route_list, pos);
}

static void x25_seq_route_stop(struct seq_file *seq, void *v)
	__releases(x25_route_list_lock)
{
	read_unlock_bh(&x25_route_list_lock);
}

/* Print one route; the list head itself is used as the header token. */
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
	struct x25_route *rt = list_entry(v, struct x25_route, node);

	if (v == &x25_route_list) {
		seq_puts(seq, "Address Digits Device\n");
		goto out;
	}

	rt = v;
	seq_printf(seq, "%-15s %-6d %-5s\n",
		   rt->address.x25_addr, rt->sigdigits,
		   rt->dev ? rt->dev->name : "???");
out:
	return 0;
}

/* /proc/net/x25/socket iterator over the global socket hlist, guarded by
 * x25_list_lock for the traversal. */
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
	__acquires(x25_list_lock)
{
	read_lock_bh(&x25_list_lock);
	return seq_hlist_start_head(&x25_list, *pos);
}

static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &x25_list, pos);
}

static void x25_seq_socket_stop(struct seq_file *seq, void *v)
	__releases(x25_list_lock)
{
	read_unlock_bh(&x25_list_lock);
}

/* Print one socket line (addresses, lci, state, sequence vars, timers,
 * queue depths, inode). */
static int x25_seq_socket_show(struct seq_file *seq, void *v)
{
	struct sock *s;
	struct x25_sock *x25;
	struct net_device *dev;
	const char *devname;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "dest_addr src_addr dev lci st vs vr "
				"va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n");
		goto out;
	}

	s = sk_entry(v);
	x25 = x25_sk(s);

	/* "dev" is only assigned to test for a NULL neighbour device;
	 * devname is then read through the neighbour again. */
	if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
		devname = "???";
	else
		devname = x25->neighbour->dev->name;

	seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu "
			"%3lu %3lu %3lu %5d %5d %ld\n",
		   !x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr,
		   !x25->source_addr.x25_addr[0] ? "*" :
						x25->source_addr.x25_addr,
		   devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
		   x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
		   x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
		   sk_wmem_alloc_get(s),
		   sk_rmem_alloc_get(s),
		   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
	return 0;
}

/* /proc/net/x25/forward iterator, guarded by x25_forward_list_lock. */
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
	__acquires(x25_forward_list_lock)
{
	read_lock_bh(&x25_forward_list_lock);
	return seq_list_start_head(&x25_forward_list, *pos);
}

static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &x25_forward_list, pos);
}

static void x25_seq_forward_stop(struct seq_file *seq, void *v)
	__releases(x25_forward_list_lock)
{
	read_unlock_bh(&x25_forward_list_lock);
}

/* Print one forwarding entry (lci plus the two bridged devices). */
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
	struct x25_forward *f = list_entry(v, struct x25_forward, node);

	if (v == &x25_forward_list) {
		seq_printf(seq, "lci dev1 dev2\n");
		goto out;
	}

	f = v;

	seq_printf(seq, "%d %-10s %-10s\n",
			f->lci, f->dev1->name, f->dev2->name);
out:
	return 0;
}

static const struct seq_operations x25_seq_route_ops = {
	.start  = x25_seq_route_start,
	.next   = x25_seq_route_next,
	.stop   = x25_seq_route_stop,
	.show   = x25_seq_route_show,
};

static const struct seq_operations x25_seq_socket_ops = {
	.start  = x25_seq_socket_start,
	.next   = x25_seq_socket_next,
	.stop   = x25_seq_socket_stop,
	.show   = x25_seq_socket_show,
};

static const struct seq_operations x25_seq_forward_ops = {
	.start  = x25_seq_forward_start,
	.next   = x25_seq_forward_next,
	.stop   = x25_seq_forward_stop,
	.show   = x25_seq_forward_show,
};

static int x25_seq_socket_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &x25_seq_socket_ops);
}

static int x25_seq_route_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &x25_seq_route_ops);
}

static int x25_seq_forward_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &x25_seq_forward_ops);
}

static const struct file_operations x25_seq_socket_fops = {
	.owner		= THIS_MODULE,
	.open		= x25_seq_socket_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations x25_seq_route_fops = {
	.owner		= THIS_MODULE,
	.open		= x25_seq_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations x25_seq_forward_fops = {
	.owner		= THIS_MODULE,
	.open		= x25_seq_forward_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct proc_dir_entry *x25_proc_dir;

/*
 * Create /proc/net/x25 and its route/socket/forward entries.
 * Returns 0 on success, -ENOMEM on any failure.  The out_* labels
 * unwind in reverse creation order (each label removes the entry
 * created just before the one that failed).
 */
int __init x25_proc_init(void)
{
	struct proc_dir_entry *p;
	int rc = -ENOMEM;

	x25_proc_dir = proc_mkdir("x25", init_net.proc_net);
	if (!x25_proc_dir)
		goto out;

	p = proc_create("route", S_IRUGO, x25_proc_dir, &x25_seq_route_fops);
	if (!p)
		goto out_route;

	p = proc_create("socket", S_IRUGO, x25_proc_dir, &x25_seq_socket_fops);
	if (!p)
		goto out_socket;

	p = proc_create("forward", S_IRUGO, x25_proc_dir,
			&x25_seq_forward_fops);
	if (!p)
		goto out_forward;
	rc = 0;

out:
	return rc;
out_forward:
	remove_proc_entry("socket", x25_proc_dir);
out_socket:
	remove_proc_entry("route", x25_proc_dir);
out_route:
	remove_proc_entry("x25", init_net.proc_net);
	goto out;
}

/* Tear down everything x25_proc_init() created. */
void __exit x25_proc_exit(void)
{
	remove_proc_entry("forward", x25_proc_dir);
	remove_proc_entry("route", x25_proc_dir);
	remove_proc_entry("socket", x25_proc_dir);
	remove_proc_entry("x25", init_net.proc_net);
}

#else /* CONFIG_PROC_FS */

int __init x25_proc_init(void)
{
	return 0;
}

void __exit x25_proc_exit(void)
{
}
#endif /* CONFIG_PROC_FS */
gpl-2.0
steppnasty/platform_kernel_msm7x30
arch/arm/mach-msm/board-sapphire-keypad.c
4562
4013
/* arch/arm/mach-msm/board-sapphire-keypad.c
 * Copyright (C) 2007-2009 HTC Corporation.
 * Author: Thomas Tsai <thomas_tsai@htc.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/gpio_event.h>
#include <asm/mach-types.h>

#include "gpio_chip.h"
#include "board-sapphire.h"

/* Exposed as module parameter board_sapphire.keycaps (read-only, 0). */
static char *keycaps = "--qwerty";

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "board_sapphire."
module_param_named(keycaps, keycaps, charp, 0);

/* Keypad matrix: columns are driven outputs, rows are scanned inputs. */
static unsigned int sapphire_col_gpios[] = { 35, 34 };

/* KP_MKIN2 (GPIO40) is not used? */
static unsigned int sapphire_row_gpios[] = { 42, 41 };

#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(sapphire_row_gpios) + (row))

/*scan matrix key*/
/* HOME(up) MENU (up) Back Search -- default layout, hwid != 0 and
 * system_rev == 0x80 (see sapphire_init_keypad). */
static const unsigned short sapphire_keymap2[ARRAY_SIZE(sapphire_col_gpios) *
					ARRAY_SIZE(sapphire_row_gpios)] = {
	[KEYMAP_INDEX(0, 0)] = KEY_COMPOSE,
	[KEYMAP_INDEX(0, 1)] = KEY_BACK,
	[KEYMAP_INDEX(1, 0)] = KEY_MENU,
	[KEYMAP_INDEX(1, 1)] = KEY_SEND,
};

/* HOME(up) + MENU (down)*/
static const unsigned short sapphire_keymap1[ARRAY_SIZE(sapphire_col_gpios) *
					ARRAY_SIZE(sapphire_row_gpios)] = {
	[KEYMAP_INDEX(0, 0)] = KEY_BACK,
	[KEYMAP_INDEX(0, 1)] = KEY_MENU,
	[KEYMAP_INDEX(1, 0)] = KEY_HOME,
	[KEYMAP_INDEX(1, 1)] = KEY_SEND,
};

/* MENU(up) + HOME (down)*/
static const unsigned short sapphire_keymap0[ARRAY_SIZE(sapphire_col_gpios) *
					ARRAY_SIZE(sapphire_row_gpios)] = {
	[KEYMAP_INDEX(0, 0)] = KEY_BACK,
	[KEYMAP_INDEX(0, 1)] = KEY_HOME,
	[KEYMAP_INDEX(1, 0)] = KEY_MENU,
	[KEYMAP_INDEX(1, 1)] = KEY_SEND,
};

/* Matrix scan parameters: 40us settle, 20ms poll, 50ms debounce. */
static struct gpio_event_matrix_info sapphire_keypad_matrix_info = {
	.info.func = gpio_event_matrix_func,
	.keymap = sapphire_keymap2,
	.output_gpios = sapphire_col_gpios,
	.input_gpios = sapphire_row_gpios,
	.noutputs = ARRAY_SIZE(sapphire_col_gpios),
	.ninputs = ARRAY_SIZE(sapphire_row_gpios),
	.settle_time.tv.nsec = 40 * NSEC_PER_USEC,
	.poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
	.debounce_delay.tv.nsec = 50 * NSEC_PER_MSEC,
	.flags = GPIOKPF_LEVEL_TRIGGERED_IRQ |
		 GPIOKPF_REMOVE_PHANTOM_KEYS |
		 GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/
};

/* Direct (non-matrix) keys: power and the volume rocker. */
static struct gpio_event_direct_entry sapphire_keypad_nav_map[] = {
	{ SAPPHIRE_POWER_KEY,		KEY_END   },
	{ SAPPHIRE_VOLUME_UP,		KEY_VOLUMEUP   },
	{ SAPPHIRE_VOLUME_DOWN,	KEY_VOLUMEDOWN   },
};

static struct gpio_event_input_info sapphire_keypad_nav_info = {
	.info.func = gpio_event_input_func,
	.flags = 0,
	.type = EV_KEY,
	.keymap = sapphire_keypad_nav_map,
	.debounce_time.tv.nsec = 20 * NSEC_PER_MSEC,
	.keymap_size = ARRAY_SIZE(sapphire_keypad_nav_map)
};

static struct gpio_event_info *sapphire_keypad_info[] = {
	&sapphire_keypad_matrix_info.info,
	&sapphire_keypad_nav_info.info,
};

static struct gpio_event_platform_data sapphire_keypad_data = {
	.name = "sapphire-keypad",
	.info = sapphire_keypad_info,
	.info_count = ARRAY_SIZE(sapphire_keypad_info)
};

static struct platform_device sapphire_keypad_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.id = 0,
	.dev		= {
		.platform_data	= &sapphire_keypad_data,
	},
};

/*
 * Select the keymap for this board revision, then register the keypad.
 * hwid 0 uses keymap0; other revisions use keymap1 unless system_rev is
 * 0x80, which keeps the default keymap2.
 * NOTE(review): keymap meaning per hwid inferred from layout comments
 * above -- confirm against board documentation.
 */
static int __init sapphire_init_keypad(void)
{
	if (!machine_is_sapphire())
		return 0;

	switch (sapphire_get_hwid()) {
	case 0:
		sapphire_keypad_matrix_info.keymap = sapphire_keymap0;
		break;
	default:
		if (system_rev != 0x80)
			sapphire_keypad_matrix_info.keymap = sapphire_keymap1;
		break;
	}

	return platform_device_register(&sapphire_keypad_device);
}

device_initcall(sapphire_init_keypad);
gpl-2.0
linhphi9x94/zte-kernel-msm7x27
drivers/pci/hotplug/cpqphp_pci.c
4818
39788
/* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include "../pci.h" #include "cpqphp.h" #include "cpqphp_nvram.h" u8 cpqhp_nic_irq; u8 cpqhp_disk_irq; static u16 unused_IRQ; /* * detect_HRT_floating_pointer * * find the Hot Plug Resource Table in the specified region of memory. 
* */ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iomem *end) { void __iomem *fp; void __iomem *endp; u8 temp1, temp2, temp3, temp4; int status = 0; endp = (end - sizeof(struct hrt) + 1); for (fp = begin; fp <= endp; fp += 16) { temp1 = readb(fp + SIG0); temp2 = readb(fp + SIG1); temp3 = readb(fp + SIG2); temp4 = readb(fp + SIG3); if (temp1 == '$' && temp2 == 'H' && temp3 == 'R' && temp4 == 'T') { status = 1; break; } } if (!status) fp = NULL; dbg("Discovered Hotplug Resource Table at %p\n", fp); return fp; } int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) { unsigned char bus; struct pci_bus *child; int num; if (func->pci_dev == NULL) func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function)); /* No pci device, we need to create it then */ if (func->pci_dev == NULL) { dbg("INFO: pci_dev still null\n"); num = pci_scan_slot(ctrl->pci_dev->bus, PCI_DEVFN(func->device, func->function)); if (num) pci_bus_add_devices(ctrl->pci_dev->bus); func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function)); if (func->pci_dev == NULL) { dbg("ERROR: pci_dev still null\n"); return 0; } } if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { pci_read_config_byte(func->pci_dev, PCI_SECONDARY_BUS, &bus); child = (struct pci_bus*) pci_add_new_bus(func->pci_dev->bus, (func->pci_dev), bus); pci_do_scan_bus(child); } pci_dev_put(func->pci_dev); return 0; } int cpqhp_unconfigure_device(struct pci_func* func) { int j; dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); for (j=0; j<8 ; j++) { struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j)); if (temp) { pci_dev_put(temp); pci_remove_bus_device(temp); } } return 0; } static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) { u32 vendID = 0; if (pci_bus_read_config_dword (bus, devfn, PCI_VENDOR_ID, &vendID) == -1) 
return -1; if (vendID == 0xffffffff) return -1; return pci_bus_read_config_dword (bus, devfn, offset, value); } /* * cpqhp_set_irq * * @bus_num: bus number of PCI device * @dev_num: device number of PCI device * @slot: pointer to u8 where slot number will be returned */ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num) { int rc = 0; if (cpqhp_legacy_mode) { struct pci_dev *fakedev; struct pci_bus *fakebus; u16 temp_word; fakedev = kmalloc(sizeof(*fakedev), GFP_KERNEL); fakebus = kmalloc(sizeof(*fakebus), GFP_KERNEL); if (!fakedev || !fakebus) { kfree(fakedev); kfree(fakebus); return -ENOMEM; } fakedev->devfn = dev_num << 3; fakedev->bus = fakebus; fakebus->number = bus_num; dbg("%s: dev %d, bus %d, pin %d, num %d\n", __func__, dev_num, bus_num, int_pin, irq_num); rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num); kfree(fakedev); kfree(fakebus); dbg("%s: rc %d\n", __func__, rc); if (!rc) return !rc; /* set the Edge Level Control Register (ELCR) */ temp_word = inb(0x4d0); temp_word |= inb(0x4d1) << 8; temp_word |= 0x01 << irq_num; /* This should only be for x86 as it sets the Edge Level * Control Register */ outb((u8) (temp_word & 0xFF), 0x4d0); outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1); rc = 0; } return rc; } static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num) { u16 tdevice; u32 work; u8 tbus; ctrl->pci_bus->number = bus_num; for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) continue; dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. Not a bridge ? 
*/ if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { *dev_num = tdevice; dbg("found it !\n"); return 0; } } for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) continue; dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. bridge ? */ if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); /* XXX: no recursion, wtf? */ dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); return 0; } } return -1; } static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge) { int loop, len; u32 work; u8 tbus, tdevice, tslot; len = cpqhp_routing_table_length(); for (loop = 0; loop < len; ++loop) { tbus = cpqhp_routing_table->slots[loop].bus; tdevice = cpqhp_routing_table->slots[loop].devfn; tslot = cpqhp_routing_table->slots[loop].slot; if (tslot == slot) { *bus_num = tbus; *dev_num = tdevice; ctrl->pci_bus->number = tbus; pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); if (!nobridge || (work == 0xffffffff)) return 0; dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); dbg("work >> 8 (%x) = BRIDGE (%x)\n", work >> 8, PCI_TO_PCI_BRIDGE_CLASS); if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { pci_bus_read_config_byte (ctrl->pci_bus, *dev_num, PCI_SECONDARY_BUS, &tbus); dbg("Scan bus for Non Bridge: bus %d\n", tbus); if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) { *bus_num = tbus; return 0; } } else return 0; } } return -1; } int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot) { /* plain (bridges allowed) */ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); } /* More PCI configuration routines; this time centered around hotplug * controller */ /* * cpqhp_save_config * * Reads 
configuration for all slots in a PCI bus and saves info. * * Note: For non-hot plug busses, the slot # saved is the device # * * returns 0 if success */ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug) { long rc; u8 class_code; u8 header_type; u32 ID; u8 secondary_bus; struct pci_func *new_slot; int sub_bus; int FirstSupported; int LastSupported; int max_functions; int function; u8 DevError; int device = 0; int cloop = 0; int stop_it; int index; /* Decide which slots are supported */ if (is_hot_plug) { /* * is_hot_plug is the slot mask */ FirstSupported = is_hot_plug >> 4; LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1; } else { FirstSupported = 0; LastSupported = 0x1F; } /* Save PCI configuration space for all devices in supported slots */ ctrl->pci_bus->number = busnumber; for (device = FirstSupported; device <= LastSupported; device++) { ID = 0xFFFFFFFF; rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) { if (is_hot_plug) { /* Setup slot structure with entry for empty * slot */ new_slot = cpqhp_slot_create(busnumber); if (new_slot == NULL) return 1; new_slot->bus = (u8) busnumber; new_slot->device = (u8) device; new_slot->function = 0; new_slot->is_a_board = 0; new_slot->presence_save = 0; new_slot->switch_save = 0; } continue; } rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code); if (rc) return rc; rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type); if (rc) return rc; /* If multi-function device, set max_functions to 8 */ if (header_type & 0x80) max_functions = 8; else max_functions = 1; function = 0; do { DevError = 0; if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* Recurse the subordinate bus * get the subordinate bus number */ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus); if (rc) { return rc; } else { 
sub_bus = (int) secondary_bus; /* Save secondary bus cfg spc * with this recursive call. */ rc = cpqhp_save_config(ctrl, sub_bus, 0); if (rc) return rc; ctrl->pci_bus->number = busnumber; } } index = 0; new_slot = cpqhp_slot_find(busnumber, device, index++); while (new_slot && (new_slot->function != (u8) function)) new_slot = cpqhp_slot_find(busnumber, device, index++); if (!new_slot) { /* Setup slot structure. */ new_slot = cpqhp_slot_create(busnumber); if (new_slot == NULL) return 1; } new_slot->bus = (u8) busnumber; new_slot->device = (u8) device; new_slot->function = (u8) function; new_slot->is_a_board = 1; new_slot->switch_save = 0x10; /* In case of unsupported board */ new_slot->status = DevError; new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function); for (cloop = 0; cloop < 0x20; cloop++) { rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop])); if (rc) return rc; } pci_dev_put(new_slot->pci_dev); function++; stop_it = 0; /* this loop skips to the next present function * reading in Class Code and Header type. */ while ((function < max_functions) && (!stop_it)) { rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) { function++; continue; } rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code); if (rc) return rc; rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type); if (rc) return rc; stop_it++; } } while (function < max_functions); } /* End of FOR loop */ return 0; } /* * cpqhp_save_slot_config * * Saves configuration info for all PCI devices in a given slot * including subordinate busses. 
* * returns 0 if success */ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot) { long rc; u8 class_code; u8 header_type; u32 ID; u8 secondary_bus; int sub_bus; int max_functions; int function = 0; int cloop = 0; int stop_it; ID = 0xFFFFFFFF; ctrl->pci_bus->number = new_slot->bus; pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) return 2; pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code); pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type); if (header_type & 0x80) /* Multi-function device */ max_functions = 8; else max_functions = 1; while (function < max_functions) { if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* Recurse the subordinate bus */ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus); sub_bus = (int) secondary_bus; /* Save the config headers for the secondary * bus. */ rc = cpqhp_save_config(ctrl, sub_bus, 0); if (rc) return(rc); ctrl->pci_bus->number = new_slot->bus; } new_slot->status = 0; for (cloop = 0; cloop < 0x20; cloop++) pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop])); function++; stop_it = 0; /* this loop skips to the next present function * reading in the Class Code and the Header type. 
*/ while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) function++; else { pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code); pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type); stop_it++; } } } return 0; } /* * cpqhp_save_base_addr_length * * Saves the length of all base address registers for the * specified slot. this is for hot plug REPLACE * * returns 0 if success */ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func) { u8 cloop; u8 header_type; u8 secondary_bus; u8 type; int sub_bus; u32 temp_register; u32 base; u32 rc; struct pci_func *next; int index = 0; struct pci_bus *pci_bus = ctrl->pci_bus; unsigned int devfn; func = cpqhp_slot_find(func->bus, func->device, index++); while (func != NULL) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check for Bridge */ pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); sub_bus = (int) secondary_bus; next = cpqhp_slot_list[sub_bus]; while (next != NULL) { rc = cpqhp_save_base_addr_length(ctrl, next); if (rc) return rc; next = next->next; } pci_bus->number = func->bus; /* FIXME: this loop is duplicated in the non-bridge * case. 
The two could be rolled together Figure out * IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x14; cloop += 4) { temp_register = 0xFFFFFFFF; pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); /* If this register is implemented */ if (base) { if (base & 0x01L) { /* IO base * set base = amount of IO space * requested */ base = base & 0xFFFFFFFE; base = (~base) + 1; type = 1; } else { /* memory base */ base = base & 0xFFFFFFF0; base = (~base) + 1; type = 0; } } else { base = 0x0L; type = 0; } /* Save information in slot structure */ func->base_length[(cloop - 0x10) >> 2] = base; func->base_type[(cloop - 0x10) >> 2] = type; } /* End of base register loop */ } else if ((header_type & 0x7F) == 0x00) { /* Figure out IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); /* If this register is implemented */ if (base) { if (base & 0x01L) { /* IO base * base = amount of IO space * requested */ base = base & 0xFFFFFFFE; base = (~base) + 1; type = 1; } else { /* memory base * base = amount of memory * space requested */ base = base & 0xFFFFFFF0; base = (~base) + 1; type = 0; } } else { base = 0x0L; type = 0; } /* Save information in slot structure */ func->base_length[(cloop - 0x10) >> 2] = base; func->base_type[(cloop - 0x10) >> 2] = type; } /* End of base register loop */ } else { /* Some other unknown header type */ } /* find the next device in this slot */ func = cpqhp_slot_find(func->bus, func->device, index++); } return(0); } /* * cpqhp_save_used_resources * * Stores used resource information for existing boards. this is * for boards that were in the system when this driver was loaded. 
 * this function is for hot plug ADD
 *
 * returns 0 if success
 */
int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
{
	u8 cloop;
	u8 header_type;
	u8 secondary_bus;
	u8 temp_byte;
	u8 b_base;
	u8 b_length;
	u16 command;
	u16 save_command;
	u16 w_base;
	u16 w_length;
	u32 temp_register;
	u32 save_base;
	u32 base;
	int index = 0;
	struct pci_resource *mem_node;
	struct pci_resource *p_mem_node;
	struct pci_resource *io_node;
	struct pci_resource *bus_node;
	struct pci_bus *pci_bus = ctrl->pci_bus;
	unsigned int devfn;

	/* Walk every function of this slot; each node allocated below is
	 * linked into the matching func list immediately, so an -ENOMEM
	 * return leaves a consistent (if partial) save state behind. */
	func = cpqhp_slot_find(func->bus, func->device, index++);

	while ((func != NULL) && func->is_a_board) {
		pci_bus->number = func->bus;
		devfn = PCI_DEVFN(func->device, func->function);

		/* Save the command register */
		pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);

		/* disable card */
		command = 0x00;
		pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);

		/* Check for Bridge */
		pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);

		if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
			/* Clear Bridge Control Register */
			command = 0x00;
			pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
			pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
			pci_bus_read_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, &temp_byte);

			/* Record the bus range claimed by this bridge */
			bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
			if (!bus_node)
				return -ENOMEM;

			bus_node->base = secondary_bus;
			bus_node->length = temp_byte - secondary_bus + 1;

			bus_node->next = func->bus_head;
			func->bus_head = bus_node;

			/* Save IO base and Limit registers */
			pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
			pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);

			/* Only record the window if it is open (base <= limit)
			 * and I/O decoding was enabled in the saved command. */
			if ((b_base <= b_length) && (save_command & 0x01)) {
				io_node = kmalloc(sizeof(*io_node), GFP_KERNEL);
				if (!io_node)
					return -ENOMEM;

				io_node->base = (b_base & 0xF0) << 8;
				io_node->length = (b_length - b_base + 0x10) << 8;

				io_node->next = func->io_head;
				func->io_head = io_node;
			}

			/* Save memory base and Limit registers */
			pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
			pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);

			if ((w_base <= w_length) && (save_command & 0x02)) {
				mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
				if (!mem_node)
					return -ENOMEM;

				mem_node->base = w_base << 16;
				mem_node->length = (w_length - w_base + 0x10) << 16;

				mem_node->next = func->mem_head;
				func->mem_head = mem_node;
			}

			/* Save prefetchable memory base and Limit registers */
			pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
			pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);

			if ((w_base <= w_length) && (save_command & 0x02)) {
				p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
				if (!p_mem_node)
					return -ENOMEM;

				p_mem_node->base = w_base << 16;
				p_mem_node->length = (w_length - w_base + 0x10) << 16;

				p_mem_node->next = func->p_mem_head;
				func->p_mem_head = p_mem_node;
			}
			/* Figure out IO and memory base lengths by the
			 * classic write-all-ones BAR sizing probe. */
			for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
				pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);

				temp_register = 0xFFFFFFFF;
				pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register);
				pci_bus_read_config_dword(pci_bus, devfn, cloop, &base);

				temp_register = base;

				/* If this register is implemented */
				if (base) {
					if (((base & 0x03L) == 0x01)
					    && (save_command & 0x01)) {
						/* IO base
						 * set temp_register = amount
						 * of IO space requested
						 */
						temp_register = base & 0xFFFFFFFE;
						temp_register = (~temp_register) + 1;

						io_node = kmalloc(sizeof(*io_node),
								GFP_KERNEL);
						if (!io_node)
							return -ENOMEM;

						io_node->base = save_base & (~0x03L);
						io_node->length = temp_register;

						io_node->next = func->io_head;
						func->io_head = io_node;
					} else
						if (((base & 0x0BL) == 0x08)
						    && (save_command & 0x02)) {
						/* prefetchable memory base */
						temp_register = base & 0xFFFFFFF0;
						temp_register = (~temp_register) + 1;

						p_mem_node = kmalloc(sizeof(*p_mem_node),
								GFP_KERNEL);
						if (!p_mem_node)
							return -ENOMEM;

						p_mem_node->base = save_base & (~0x0FL);
						p_mem_node->length = temp_register;

						p_mem_node->next = func->p_mem_head;
						func->p_mem_head = p_mem_node;
					} else
						if (((base & 0x0BL) == 0x00)
						    && (save_command & 0x02)) {
						/* non-prefetchable memory base
						 * (BAR bit 3 clear) */
						temp_register = base & 0xFFFFFFF0;
						temp_register = (~temp_register) + 1;

						mem_node = kmalloc(sizeof(*mem_node),
								GFP_KERNEL);
						if (!mem_node)
							return -ENOMEM;

						mem_node->base = save_base & (~0x0FL);
						mem_node->length = temp_register;

						mem_node->next = func->mem_head;
						func->mem_head = mem_node;
					} else
						return(1);
				}
			}	/* End of base register loop */
			/* Standard header */
		} else if ((header_type & 0x7F) == 0x00) {
			/* Figure out IO and memory base lengths */
			for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
				pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);

				temp_register = 0xFFFFFFFF;
				pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register);
				pci_bus_read_config_dword(pci_bus, devfn, cloop, &base);

				temp_register = base;

				/* If this register is implemented */
				if (base) {
					if (((base & 0x03L) == 0x01)
					    && (save_command & 0x01)) {
						/* IO base
						 * set temp_register = amount
						 * of IO space requested
						 */
						temp_register = base & 0xFFFFFFFE;
						temp_register = (~temp_register) + 1;

						io_node = kmalloc(sizeof(*io_node),
								GFP_KERNEL);
						if (!io_node)
							return -ENOMEM;

						/* NOTE(review): mask differs from the
						 * bridge loop above (~0x01 vs ~0x03) —
						 * looks intentional but verify. */
						io_node->base = save_base & (~0x01L);
						io_node->length = temp_register;

						io_node->next = func->io_head;
						func->io_head = io_node;
					} else if (((base & 0x0BL) == 0x08)
						   && (save_command & 0x02)) {
						/* prefetchable memory base */
						temp_register = base & 0xFFFFFFF0;
						temp_register = (~temp_register) + 1;

						p_mem_node = kmalloc(sizeof(*p_mem_node),
								GFP_KERNEL);
						if (!p_mem_node)
							return -ENOMEM;

						p_mem_node->base = save_base & (~0x0FL);
						p_mem_node->length = temp_register;

						p_mem_node->next = func->p_mem_head;
						func->p_mem_head = p_mem_node;
					} else if (((base & 0x0BL) == 0x00)
						   && (save_command & 0x02)) {
						/* non-prefetchable memory base
						 * (BAR bit 3 clear) */
						temp_register = base & 0xFFFFFFF0;
						temp_register = (~temp_register) + 1;

						mem_node = kmalloc(sizeof(*mem_node),
								GFP_KERNEL);
						if (!mem_node)
							return -ENOMEM;

						mem_node->base = save_base & (~0x0FL);
						mem_node->length = temp_register;

						mem_node->next = func->mem_head;
						func->mem_head = mem_node;
					} else
						return(1);
				}
			}	/* End of base register loop */
		}

		/* find the next device in this slot */
		func = cpqhp_slot_find(func->bus, func->device, index++);
	}

	return 0;
}


/*
 * cpqhp_configure_board
 *
 * Copies saved configuration information to one slot.
 * this is called recursively for bridge devices.
 * this is for hot plug REPLACE!
 *
 * returns 0 if success
 */
int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
{
	int cloop;
	u8 header_type;
	u8 secondary_bus;
	int sub_bus;
	struct pci_func *next;
	u32 temp;
	u32 rc;
	int index = 0;
	struct pci_bus *pci_bus = ctrl->pci_bus;
	unsigned int devfn;

	func = cpqhp_slot_find(func->bus, func->device, index++);

	while (func != NULL) {
		pci_bus->number = func->bus;
		devfn = PCI_DEVFN(func->device, func->function);

		/* Start at the top of config space so that the control
		 * registers are programmed last
		 */
		for (cloop = 0x3C; cloop > 0; cloop -= 4)
			pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);

		pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);

		/* If this is a bridge device, restore subordinate devices */
		if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
			pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);

			sub_bus = (int) secondary_bus;

			next = cpqhp_slot_list[sub_bus];

			while (next != NULL) {
				rc = cpqhp_configure_board(ctrl, next);
				if (rc)
					return rc;

				next = next->next;
			}
		} else {
			/* Check all the base Address Registers to make sure
			 * they are the same.  If not, the board is different.
			 */
			for (cloop = 16; cloop < 40; cloop += 4) {
				pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);

				if (temp != func->config_space[cloop >> 2]) {
					dbg("Config space compare failure!!! offset = %x\n", cloop);
					dbg("bus = %x, device = %x, function = %x\n", func->bus, func->device, func->function);
					dbg("temp = %x, config space = %x\n\n", temp, func->config_space[cloop >> 2]);
					return 1;
				}
			}
		}
		func->configured = 1;

		func = cpqhp_slot_find(func->bus, func->device, index++);
	}

	return 0;
}


/*
 * cpqhp_valid_replace
 *
 * this function checks to see if a board is the same as the
 * one it is replacing.  this check will detect if the device's
 * vendor or device id's are the same
 *
 * returns 0 if the board is the same nonzero otherwise
 */
int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
{
	u8 cloop;
	u8 header_type;
	u8 secondary_bus;
	u8 type;
	u32 temp_register = 0;
	u32 base;
	u32 rc;
	struct pci_func *next;
	int index = 0;
	struct pci_bus *pci_bus = ctrl->pci_bus;
	unsigned int devfn;

	if (!func->is_a_board)
		return(ADD_NOT_SUPPORTED);

	func = cpqhp_slot_find(func->bus, func->device, index++);

	while (func != NULL) {
		pci_bus->number = func->bus;
		devfn = PCI_DEVFN(func->device, func->function);

		pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);

		/* No adapter present */
		if (temp_register == 0xFFFFFFFF)
			return(NO_ADAPTER_PRESENT);

		if (temp_register != func->config_space[0])
			return(ADAPTER_NOT_SAME);

		/* Check for same revision number and class code */
		pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);

		/* Adapter not the same */
		if (temp_register != func->config_space[0x08 >> 2])
			return(ADAPTER_NOT_SAME);

		/* Check for Bridge */
		pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);

		if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
			/* In order to continue checking, we must program the
			 * bus registers in the bridge to respond to accesses
			 * for its subordinate bus(es)
			 */
			temp_register = func->config_space[0x18 >> 2];
			pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);

			secondary_bus = (temp_register >> 8) & 0xFF;

			next = cpqhp_slot_list[secondary_bus];

			while (next != NULL) {
				rc = cpqhp_valid_replace(ctrl, next);
				if (rc)
					return rc;

				next = next->next;
			}

		}
		/* Check to see if it is a standard config header */
		else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
			/* Check subsystem vendor and ID */
			pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);

			if (temp_register != func->config_space[0x2C >> 2]) {
				/* If it's a SMART-2 and the register isn't
				 * filled in, ignore the difference because
				 * they just have an old rev of the firmware
				 */
				if (!((func->config_space[0] == 0xAE100E11)
				      && (temp_register == 0x00L)))
					return(ADAPTER_NOT_SAME);
			}
			/* Figure out IO and memory base lengths */
			for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
				temp_register = 0xFFFFFFFF;
				pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
				pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);

				/* If this register is implemented */
				if (base) {
					if (base & 0x01L) {
						/* IO base
						 * set base = amount of IO
						 * space requested
						 */
						base = base & 0xFFFFFFFE;
						base = (~base) + 1;

						type = 1;
					} else {
						/* memory base */
						base = base & 0xFFFFFFF0;
						base = (~base) + 1;

						type = 0;
					}
				} else {
					base = 0x0L;
					type = 0;
				}

				/* Check information in slot structure */
				if (func->base_length[(cloop - 0x10) >> 2] != base)
					return(ADAPTER_NOT_SAME);

				if (func->base_type[(cloop - 0x10) >> 2] != type)
					return(ADAPTER_NOT_SAME);

			}	/* End of base register loop */

		}		/* End of (type 0 config space) else */
		else {
			/* this is not a type 0 or 1 config space header so
			 * we don't know how to do it
			 */
			return(DEVICE_TYPE_NOT_SUPPORTED);
		}

		/* Get the next function */
		func = cpqhp_slot_find(func->bus, func->device, index++);
	}

	return 0;
}


/*
 * cpqhp_find_available_resources
 *
 * Finds available memory, IO, and IRQ resources for programming
 * devices which may be added to the system
 * this function is for hot plug ADD!
 *
 * returns 0 if success
 */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
	u8 temp;
	u8 populated_slot;
	u8 bridged_slot;
	void __iomem *one_slot;
	void __iomem *rom_resource_table;
	struct pci_func *func = NULL;
	int i = 10, index;
	u32 temp_dword, rc;
	struct pci_resource *mem_node;
	struct pci_resource *p_mem_node;
	struct pci_resource *io_node;
	struct pci_resource *bus_node;

	rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
	dbg("rom_resource_table = %p\n", rom_resource_table);

	if (rom_resource_table == NULL)
		return -ENODEV;

	/* Sum all resources and setup resource maps */
	unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
	dbg("unused_IRQ = %x\n", unused_IRQ);

	/* Lowest set bit in the unused-IRQ mask becomes the disk IRQ */
	temp = 0;
	while (unused_IRQ) {
		if (unused_IRQ & 1) {
			cpqhp_disk_irq = temp;
			break;
		}
		unused_IRQ = unused_IRQ >> 1;
		temp++;
	}

	dbg("cpqhp_disk_irq= %d\n", cpqhp_disk_irq);
	unused_IRQ = unused_IRQ >> 1;
	temp++;

	/* Next set bit becomes the NIC IRQ */
	while (unused_IRQ) {
		if (unused_IRQ & 1) {
			cpqhp_nic_irq = temp;
			break;
		}
		unused_IRQ = unused_IRQ >> 1;
		temp++;
	}

	dbg("cpqhp_nic_irq= %d\n", cpqhp_nic_irq);
	unused_IRQ = readl(rom_resource_table + PCIIRQ);

	temp = 0;

	/* Fall back to the controller's config-space IRQ */
	if (!cpqhp_nic_irq)
		cpqhp_nic_irq = ctrl->cfgspc_irq;

	if (!cpqhp_disk_irq)
		cpqhp_disk_irq = ctrl->cfgspc_irq;

	dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);

	rc = compaq_nvram_load(rom_start, ctrl);
	if (rc)
		return rc;

	one_slot = rom_resource_table + sizeof (struct hrt);

	i = readb(rom_resource_table + NUMBER_OF_ENTRIES);
	dbg("number_of_entries = %d\n", i);

	if (!readb(one_slot + SECONDARY_BUS))
		return 1;

	dbg("dev|IO base|length|Mem base|length|Pre base|length|PB SB MB\n");

	while (i && readb(one_slot + SECONDARY_BUS)) {
		u8 dev_func = readb(one_slot + DEV_FUNC);
		u8 primary_bus = readb(one_slot + PRIMARY_BUS);
		u8 secondary_bus = readb(one_slot + SECONDARY_BUS);
		u8 max_bus = readb(one_slot + MAX_BUS);
		u16 io_base = readw(one_slot + IO_BASE);
		u16 io_length = readw(one_slot + IO_LENGTH);
		u16 mem_base = readw(one_slot + MEM_BASE);
		u16 mem_length = readw(one_slot + MEM_LENGTH);
		u16 pre_mem_base = readw(one_slot + PRE_MEM_BASE);
		u16 pre_mem_length = readw(one_slot + PRE_MEM_LENGTH);

		dbg("%2.2x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x |%2.2x %2.2x %2.2x\n",
		    dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
		    primary_bus, secondary_bus, max_bus);

		/* If this entry isn't for our controller's bus, ignore it */
		if (primary_bus != ctrl->bus) {
			i--;
			one_slot += sizeof (struct slot_rt);
			continue;
		}
		/* find out if this entry is for an occupied slot */
		ctrl->pci_bus->number = primary_bus;
		pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
		dbg("temp_D_word = %x\n", temp_dword);

		if (temp_dword != 0xFFFFFFFF) {
			index = 0;
			func = cpqhp_slot_find(primary_bus, dev_func >> 3, 0);

			while (func && (func->function != (dev_func & 0x07))) {
				dbg("func = %p (bus, dev, fun) = (%d, %d, %d)\n", func, primary_bus, dev_func >> 3, index);
				func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
			}

			/* If we can't find a match, skip this table entry */
			if (!func) {
				i--;
				one_slot += sizeof (struct slot_rt);
				continue;
			}
			/* this may not work and shouldn't be used */
			if (secondary_bus != primary_bus)
				bridged_slot = 1;
			else
				bridged_slot = 0;

			populated_slot = 1;
		} else {
			populated_slot = 0;
			bridged_slot = 0;
		}

		/* If we've got a valid IO base, use it */
		temp_dword = io_base + io_length;

		if ((io_base) && (temp_dword < 0x10000)) {
			io_node = kmalloc(sizeof(*io_node), GFP_KERNEL);
			if (!io_node)
				return -ENOMEM;

			io_node->base = io_base;
			io_node->length = io_length;

			dbg("found io_node(base, length) = %x, %x\n",
					io_node->base, io_node->length);
			dbg("populated slot =%d \n", populated_slot);
			/* Empty slot: resource is free for hot-add; occupied
			 * slot: attribute it to the resident function. */
			if (!populated_slot) {
				io_node->next = ctrl->io_head;
				ctrl->io_head = io_node;
			} else {
				io_node->next = func->io_head;
				func->io_head = io_node;
			}
		}

		/* If we've got a valid memory base, use it */
		temp_dword = mem_base + mem_length;
		if ((mem_base) && (temp_dword < 0x10000)) {
			mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
			if (!mem_node)
				return -ENOMEM;

			mem_node->base = mem_base << 16;

			mem_node->length = mem_length << 16;

			dbg("found mem_node(base, length) = %x, %x\n",
					mem_node->base, mem_node->length);
			dbg("populated slot =%d \n", populated_slot);
			if (!populated_slot) {
				mem_node->next = ctrl->mem_head;
				ctrl->mem_head = mem_node;
			} else {
				mem_node->next = func->mem_head;
				func->mem_head = mem_node;
			}
		}

		/* If we've got a valid prefetchable memory base, and
		 * the base + length isn't greater than 0xFFFF
		 */
		temp_dword = pre_mem_base + pre_mem_length;
		if ((pre_mem_base) && (temp_dword < 0x10000)) {
			p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
			if (!p_mem_node)
				return -ENOMEM;

			p_mem_node->base = pre_mem_base << 16;

			p_mem_node->length = pre_mem_length << 16;
			dbg("found p_mem_node(base, length) = %x, %x\n",
					p_mem_node->base, p_mem_node->length);
			dbg("populated slot =%d \n", populated_slot);

			if (!populated_slot) {
				p_mem_node->next = ctrl->p_mem_head;
				ctrl->p_mem_head = p_mem_node;
			} else {
				p_mem_node->next = func->p_mem_head;
				func->p_mem_head = p_mem_node;
			}
		}

		/* If we've got a valid bus number, use it
		 * The second condition is to ignore bus numbers on
		 * populated slots that don't have PCI-PCI bridges
		 */
		if (secondary_bus && (secondary_bus != primary_bus)) {
			bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
			if (!bus_node)
				return -ENOMEM;

			bus_node->base = secondary_bus;
			bus_node->length = max_bus - secondary_bus + 1;
			dbg("found bus_node(base, length) = %x, %x\n",
					bus_node->base, bus_node->length);
			dbg("populated slot =%d \n", populated_slot);
			if (!populated_slot) {
				bus_node->next = ctrl->bus_head;
				ctrl->bus_head = bus_node;
			} else {
				bus_node->next = func->bus_head;
				func->bus_head = bus_node;
			}
		}

		i--;
		one_slot += sizeof (struct slot_rt);
	}

	/* If all of the following fail, we don't have any resources for
	 * hot plug add
	 */
	rc = 1;
	rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
	rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
	rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head));
	rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

	return rc;
}


/*
 * cpqhp_return_board_resources
 *
 * this routine returns all resources allocated to a board to
 * the available pool.
 *
 * returns 0 if success
 */
int cpqhp_return_board_resources(struct pci_func * func, struct resource_lists * resources)
{
	int rc = 0;
	struct pci_resource *node;
	struct pci_resource *t_node;
	dbg("%s\n", __func__);

	if (!func)
		return 1;

	/* Detach each per-function list and hand every node back to the
	 * shared pool; return_resource() takes ownership of the node. */
	node = func->io_head;
	func->io_head = NULL;
	while (node) {
		t_node = node->next;
		return_resource(&(resources->io_head), node);
		node = t_node;
	}

	node = func->mem_head;
	func->mem_head = NULL;
	while (node) {
		t_node = node->next;
		return_resource(&(resources->mem_head), node);
		node = t_node;
	}

	node = func->p_mem_head;
	func->p_mem_head = NULL;
	while (node) {
		t_node = node->next;
		return_resource(&(resources->p_mem_head), node);
		node = t_node;
	}

	node = func->bus_head;
	func->bus_head = NULL;
	while (node) {
		t_node = node->next;
		return_resource(&(resources->bus_head), node);
		node = t_node;
	}

	rc |= cpqhp_resource_sort_and_combine(&(resources->mem_head));
	rc |= cpqhp_resource_sort_and_combine(&(resources->p_mem_head));
	rc |= cpqhp_resource_sort_and_combine(&(resources->io_head));
	rc |= cpqhp_resource_sort_and_combine(&(resources->bus_head));

	return rc;
}


/*
 * cpqhp_destroy_resource_list
 *
 * Puts node back in the resource list pointed to by head
 */
void cpqhp_destroy_resource_list (struct resource_lists * resources)
{
	struct pci_resource *res, *tres;

	/* Free all four controller-level resource lists */
	res = resources->io_head;
	resources->io_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = resources->mem_head;
	resources->mem_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = resources->p_mem_head;
	resources->p_mem_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = resources->bus_head;
	resources->bus_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}
}


/*
 * cpqhp_destroy_board_resources
 *
 * Puts node back in the resource list pointed to by head
 */
void cpqhp_destroy_board_resources (struct pci_func * func)
{
	struct pci_resource *res, *tres;

	/* Free all four per-function resource lists */
	res = func->io_head;
	func->io_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = func->mem_head;
	func->mem_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = func->p_mem_head;
	func->p_mem_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}

	res = func->bus_head;
	func->bus_head = NULL;

	while (res) {
		tres = res;
		res = res->next;
		kfree(tres);
	}
}
gpl-2.0
kernel-hut/android_kernel_xiaomi_dior
arch/arm/plat-mxc/cpufreq.c
5074
4820
/* * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. */ /* * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ /* * A driver for the Freescale Semiconductor i.MXC CPUfreq module. * The CPUFREQ driver is for controlling CPU frequency. It allows you to change * the CPU clock speed on the fly. */ #include <linux/module.h> #include <linux/cpufreq.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/clock.h> #define CLK32_FREQ 32768 #define NANOSECOND (1000 * 1000 * 1000) struct cpu_op *(*get_cpu_op)(int *op); static int cpu_freq_khz_min; static int cpu_freq_khz_max; static struct clk *cpu_clk; static struct cpufreq_frequency_table *imx_freq_table; static int cpu_op_nr; static struct cpu_op *cpu_op_tbl; static int set_cpu_freq(int freq) { int ret = 0; int org_cpu_rate; org_cpu_rate = clk_get_rate(cpu_clk); if (org_cpu_rate == freq) return ret; ret = clk_set_rate(cpu_clk, freq); if (ret != 0) { printk(KERN_DEBUG "cannot set CPU clock rate\n"); return ret; } return ret; } static int mxc_verify_speed(struct cpufreq_policy *policy) { if (policy->cpu != 0) return -EINVAL; return cpufreq_frequency_table_verify(policy, imx_freq_table); } static unsigned int mxc_get_speed(unsigned int cpu) { if (cpu) return 0; return clk_get_rate(cpu_clk) / 1000; } static int mxc_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpufreq_freqs freqs; int freq_Hz; int ret = 0; unsigned int index; cpufreq_frequency_table_target(policy, imx_freq_table, target_freq, relation, &index); freq_Hz = imx_freq_table[index].frequency * 1000; freqs.old = clk_get_rate(cpu_clk) / 1000; freqs.new = freq_Hz / 1000; freqs.cpu = 0; freqs.flags = 0; 
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); ret = set_cpu_freq(freq_Hz); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return ret; } static int mxc_cpufreq_init(struct cpufreq_policy *policy) { int ret; int i; printk(KERN_INFO "i.MXC CPU frequency driver\n"); if (policy->cpu != 0) return -EINVAL; if (!get_cpu_op) return -EINVAL; cpu_clk = clk_get(NULL, "cpu_clk"); if (IS_ERR(cpu_clk)) { printk(KERN_ERR "%s: failed to get cpu clock\n", __func__); return PTR_ERR(cpu_clk); } cpu_op_tbl = get_cpu_op(&cpu_op_nr); cpu_freq_khz_min = cpu_op_tbl[0].cpu_rate / 1000; cpu_freq_khz_max = cpu_op_tbl[0].cpu_rate / 1000; imx_freq_table = kmalloc( sizeof(struct cpufreq_frequency_table) * (cpu_op_nr + 1), GFP_KERNEL); if (!imx_freq_table) { ret = -ENOMEM; goto err1; } for (i = 0; i < cpu_op_nr; i++) { imx_freq_table[i].index = i; imx_freq_table[i].frequency = cpu_op_tbl[i].cpu_rate / 1000; if ((cpu_op_tbl[i].cpu_rate / 1000) < cpu_freq_khz_min) cpu_freq_khz_min = cpu_op_tbl[i].cpu_rate / 1000; if ((cpu_op_tbl[i].cpu_rate / 1000) > cpu_freq_khz_max) cpu_freq_khz_max = cpu_op_tbl[i].cpu_rate / 1000; } imx_freq_table[i].index = i; imx_freq_table[i].frequency = CPUFREQ_TABLE_END; policy->cur = clk_get_rate(cpu_clk) / 1000; policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min; policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max; /* Manual states, that PLL stabilizes in two CLK32 periods */ policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ; ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table); if (ret < 0) { printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n", __func__, ret); goto err; } cpufreq_frequency_table_get_attr(imx_freq_table, policy->cpu); return 0; err: kfree(imx_freq_table); err1: clk_put(cpu_clk); return ret; } static int mxc_cpufreq_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); set_cpu_freq(cpu_freq_khz_max * 1000); clk_put(cpu_clk); kfree(imx_freq_table); 
return 0; } static struct cpufreq_driver mxc_driver = { .flags = CPUFREQ_STICKY, .verify = mxc_verify_speed, .target = mxc_set_target, .get = mxc_get_speed, .init = mxc_cpufreq_init, .exit = mxc_cpufreq_exit, .name = "imx", }; static int __devinit mxc_cpufreq_driver_init(void) { return cpufreq_register_driver(&mxc_driver); } static void mxc_cpufreq_driver_exit(void) { cpufreq_unregister_driver(&mxc_driver); } module_init(mxc_cpufreq_driver_init); module_exit(mxc_cpufreq_driver_exit); MODULE_AUTHOR("Freescale Semiconductor Inc. Yong Shen <yong.shen@linaro.org>"); MODULE_DESCRIPTION("CPUfreq driver for i.MX"); MODULE_LICENSE("GPL");
gpl-2.0
sakuramilk/sc02c_kernel_ics
drivers/watchdog/w83877f_wdt.c
5074
10364
/*
 *	W83877F Computer Watchdog Timer driver
 *
 *      Based on acquirewdt.c by Alan Cox,
 *           and sbc60xxwdt.c by Jakob Oestergaard <jakob@unthought.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	The authors do NOT admit liability nor provide warranty for
 *	any of this software. This material is provided "AS-IS" in
 *	the hope that it may be useful for others.
 *
 *	(c) Copyright 2001    Scott Jennings <linuxdrivers@oro.net>
 *
 *           4/19 - 2001      [Initial revision]
 *           9/27 - 2001      Added spinlocking
 *           4/12 - 2002      [rob@osinvestor.com] Eliminate extra comments
 *                            Eliminate fop_read
 *                            Eliminate extra spin_unlock
 *                            Added KERN_* tags to printks
 *                            add CONFIG_WATCHDOG_NOWAYOUT support
 *                            fix possible wdt_is_open race
 *                            changed watchdog_info to correctly reflect what
 *			      the driver offers
 *			      added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS,
 *			      WDIOC_SETTIMEOUT,
 *                            WDIOC_GETTIMEOUT, and WDIOC_SETOPTIONS ioctls
 *           09/8 - 2003      [wim@iguana.be] cleanup of trailing spaces
 *                            added extra printk's for startup problems
 *                            use module_param
 *                            made timeout (the emulated heartbeat) a
 *                            module_param
 *                            made the keepalive ping an internal subroutine
 *
 *  This WDT driver is different from most other Linux WDT
 *  drivers in that the driver will ping the watchdog by itself,
 *  because this particular WDT has a very short timeout (1.6
 *  seconds) and it would be insane to count on any userspace
 *  daemon always getting scheduled within that time frame.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/system.h>

#define OUR_NAME "w83877f_wdt"
#define PFX OUR_NAME ": "

/* Superio config port and magic enable/disable bytes */
#define ENABLE_W83877F_PORT 0x3F0
#define ENABLE_W83877F 0x87
#define DISABLE_W83877F 0xAA
/* Reading this port pets the hardware watchdog */
#define WDT_PING 0x443
#define WDT_REGISTER 0x14
#define WDT_ENABLE 0x9C
#define WDT_DISABLE 0x8C

/*
 * The W83877F seems to be fixed at 1.6s timeout (at least on the
 * EMACS PC-104 board I'm using). If we reset the watchdog every
 * ~250ms we should be safe.
 */

#define WDT_INTERVAL (HZ/4+1)

/*
 * We must not require too good response from the userspace daemon.
 * Here we require the userspace daemon to send us a heartbeat
 * char to /dev/watchdog every 30 seconds.
 */

#define WATCHDOG_TIMEOUT 30            /* 30 sec default timeout */

/* in seconds, will be multiplied by HZ to get seconds to wait for a ping */
static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
	"Watchdog timeout in seconds. (1<=timeout<=3600, default="
				__MODULE_STRING(WATCHDOG_TIMEOUT) ")");

static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
	"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static void wdt_timer_ping(unsigned long);
static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
/* jiffies deadline: last userspace ping + timeout */
static unsigned long next_heartbeat;
/* bit 0 set while /dev/watchdog is open (single-open guard) */
static unsigned long wdt_is_open;
/* set to 42 when the magic 'V' close character was received */
static char wdt_expect_close;
static DEFINE_SPINLOCK(wdt_spinlock);

/*
 *	Whack the dog
 */

/* Kernel-side pinger: re-arms the hardware every WDT_INTERVAL as long
 * as userspace has checked in within the emulated 'timeout' window. */
static void wdt_timer_ping(unsigned long data)
{
	/* If we got a heartbeat pulse within the WDT_US_INTERVAL
	 * we agree to ping the WDT
	 */
	if (time_before(jiffies, next_heartbeat)) {
		/* Ping the WDT */
		spin_lock(&wdt_spinlock);
		/* Ping the WDT by reading from WDT_PING */
		inb_p(WDT_PING);

		/* Re-set the timer interval */
		mod_timer(&timer, jiffies + WDT_INTERVAL);

		spin_unlock(&wdt_spinlock);

	} else
		printk(KERN_WARNING PFX
			"Heartbeat lost! Will not ping the watchdog\n");
}

/*
 *	Utility routines
 */

/* Unlock the superio chip, write 'writeval' to the watchdog control
 * register, and lock the chip again — all under the spinlock. */
static void wdt_change(int writeval)
{
	unsigned long flags;
	spin_lock_irqsave(&wdt_spinlock, flags);

	/* buy some time */
	inb_p(WDT_PING);

	/* make W83877F available */
	outb_p(ENABLE_W83877F,  ENABLE_W83877F_PORT);
	outb_p(ENABLE_W83877F,  ENABLE_W83877F_PORT);

	/* enable watchdog */
	outb_p(WDT_REGISTER,    ENABLE_W83877F_PORT);
	outb_p(writeval,        ENABLE_W83877F_PORT+1);

	/* lock the W8387FF away */
	outb_p(DISABLE_W83877F, ENABLE_W83877F_PORT);

	spin_unlock_irqrestore(&wdt_spinlock, flags);
}

/* Arm the hardware and start the kernel-side ping timer. */
static void wdt_startup(void)
{
	next_heartbeat = jiffies + (timeout * HZ);

	/* Start the timer */
	mod_timer(&timer, jiffies + WDT_INTERVAL);

	wdt_change(WDT_ENABLE);

	printk(KERN_INFO PFX "Watchdog timer is now enabled.\n");
}

/* Stop the ping timer and disable the hardware watchdog. */
static void wdt_turnoff(void)
{
	/* Stop the timer */
	del_timer(&timer);

	wdt_change(WDT_DISABLE);

	printk(KERN_INFO PFX "Watchdog timer is now disabled...\n");
}

/* Userspace heartbeat: push the deadline 'timeout' seconds out. */
static void wdt_keepalive(void)
{
	/* user land ping */
	next_heartbeat = jiffies + (timeout * HZ);
}

/*
 *	/dev/watchdog handling
 */

/* Any write counts as a heartbeat; a 'V' anywhere in the buffer arms
 * the magic-close protocol (unless nowayout). */
static ssize_t fop_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	/* See if we got the magic character 'V' and reload the timer */
	if (count) {
		if (!nowayout) {
			size_t ofs;

			/* note: just in case someone wrote the magic
			   character five months ago... */
			wdt_expect_close = 0;

			/* scan to see whether or not we got
			   the magic character */
			for (ofs = 0; ofs != count; ofs++) {
				char c;
				if (get_user(c, buf + ofs))
					return -EFAULT;
				if (c == 'V')
					wdt_expect_close = 42;
			}
		}

		/* someone wrote to us, we should restart timer */
		wdt_keepalive();
	}
	return count;
}

static int fop_open(struct inode *inode, struct file *file)
{
	/* Just in case we're already talking to someone... */
	if (test_and_set_bit(0, &wdt_is_open))
		return -EBUSY;

	/* Good, fire up the show */
	wdt_startup();
	return nonseekable_open(inode, file);
}

/* On close: only disable the hardware after a magic close; otherwise
 * stop our pinger and let the hardware time out (and reboot). */
static int fop_close(struct inode *inode, struct file *file)
{
	if (wdt_expect_close == 42)
		wdt_turnoff();
	else {
		del_timer(&timer);
		printk(KERN_CRIT PFX
		  "device file closed unexpectedly. Will not stop the WDT!\n");
	}
	clear_bit(0, &wdt_is_open);
	wdt_expect_close = 0;
	return 0;
}

static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
							| WDIOF_MAGICCLOSE,
		.firmware_version = 1,
		.identity = "W83877F",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
	{
		int new_options, retval = -EINVAL;

		if (get_user(new_options, p))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			wdt_turnoff();
			retval = 0;
		}

		if (new_options & WDIOS_ENABLECARD) {
			wdt_startup();
			retval = 0;
		}

		return retval;
	}
	case WDIOC_KEEPALIVE:
		wdt_keepalive();
		return 0;
	case WDIOC_SETTIMEOUT:
	{
		int new_timeout;

		if (get_user(new_timeout, p))
			return -EFAULT;

		/* arbitrary upper limit */
		if (new_timeout < 1 || new_timeout > 3600)
			return -EINVAL;

		timeout = new_timeout;
		wdt_keepalive();
		/* Fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= fop_write,
	.open		= fop_open,
	.release	= fop_close,
	.unlocked_ioctl	= fop_ioctl,
};

static struct miscdevice wdt_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &wdt_fops,
};

/*
 *	Notifier for system down
 */

static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt_turnoff();
	return NOTIFY_DONE;
}

/*
 *	The WDT needs to learn about soft shutdowns in order to
 *	turn the timebomb registers off.
 */

static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};

static void __exit w83877f_wdt_unload(void)
{
	wdt_turnoff();

	/* Deregister */
	misc_deregister(&wdt_miscdev);

	unregister_reboot_notifier(&wdt_notifier);
	release_region(WDT_PING, 1);
	release_region(ENABLE_W83877F_PORT, 2);
}

static int __init w83877f_wdt_init(void)
{
	int rc = -EBUSY;

	/* Reject out-of-range module parameter, fall back to default */
	if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */
		timeout = WATCHDOG_TIMEOUT;
		printk(KERN_INFO PFX
			"timeout value must be 1 <= x <= 3600, using %d\n",
							timeout);
	}

	if (!request_region(ENABLE_W83877F_PORT, 2, "W83877F WDT")) {
		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
			ENABLE_W83877F_PORT);
		rc = -EIO;
		goto err_out;
	}

	if (!request_region(WDT_PING, 1, "W8387FF WDT")) {
		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
			WDT_PING);
		rc = -EIO;
		goto err_out_region1;
	}

	rc = register_reboot_notifier(&wdt_notifier);
	if (rc) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", rc);
		goto err_out_region2;
	}

	rc = misc_register(&wdt_miscdev);
	if (rc) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
						wdt_miscdev.minor, rc);
		goto err_out_reboot;
	}

	printk(KERN_INFO PFX
	  "WDT driver for W83877F initialised. timeout=%d sec (nowayout=%d)\n",
		timeout, nowayout);

	return 0;

err_out_reboot:
	unregister_reboot_notifier(&wdt_notifier);
err_out_region2:
	release_region(WDT_PING, 1);
err_out_region1:
	release_region(ENABLE_W83877F_PORT, 2);
err_out:
	return rc;
}

module_init(w83877f_wdt_init);
module_exit(w83877f_wdt_unload);

MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
raden/cempaka-kernel
drivers/watchdog/pc87413_wdt.c
7378
14397
/* * NS pc87413-wdt Watchdog Timer driver for Linux 2.6.x.x * * This code is based on wdt.c with original copyright. * * (C) Copyright 2006 Sven Anders, <anders@anduras.de> * and Marcus Junker, <junker@anduras.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Sven Anders, Marcus Junker nor ANDURAS AG * admit liability nor provide warranty for any of this software. * This material is provided "AS-IS" and at no charge. * * Release 1.1 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/io.h> #include <linux/uaccess.h> /* #define DEBUG 1 */ #define DEFAULT_TIMEOUT 1 /* 1 minute */ #define MAX_TIMEOUT 255 #define VERSION "1.1" #define MODNAME "pc87413 WDT" #define DPFX MODNAME " - DEBUG: " #define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */ #define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1) #define SWC_LDN 0x04 #define SIOCFG2 0x22 /* Serial IO register */ #define WDCTL 0x10 /* Watchdog-Timer-Control-Register */ #define WDTO 0x11 /* Watchdog timeout register */ #define WDCFG 0x12 /* Watchdog config register */ #define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ static int io = IO_DEFAULT; static int swc_base_addr = -1; static int timeout = DEFAULT_TIMEOUT; /* timeout value */ static unsigned long timer_enabled; /* is the timer enabled? */ static char expect_close; /* is the close expected? 
*/ static DEFINE_SPINLOCK(io_lock); /* to guard us from io races */ static bool nowayout = WATCHDOG_NOWAYOUT; /* -- Low level function ----------------------------------------*/ /* Select pins for Watchdog output */ static inline void pc87413_select_wdt_out(void) { unsigned int cr_data = 0; /* Step 1: Select multiple pin,pin55,as WDT output */ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x80; /* Set Bit7 to 1*/ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); #ifdef DEBUG pr_info(DPFX "Select multiple pin,pin55,as WDT output: Bit7 to 1: %d\n", cr_data); #endif } /* Enable SWC functions */ static inline void pc87413_enable_swc(void) { unsigned int cr_data = 0; /* Step 2: Enable SWC functions */ outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */ outb_p(SWC_LDN, WDT_DATA_IO_PORT); outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */ cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x01; /* Set Bit0 to 1 */ outb_p(0x30, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */ #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SWC functions\n"); #endif } /* Read SWC I/O base address */ static void pc87413_get_swc_base_addr(void) { unsigned char addr_l, addr_h = 0; /* Step 3: Read SWC I/O Base Address */ outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */ addr_h = inb(WDT_DATA_IO_PORT); outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */ addr_l = inb(WDT_DATA_IO_PORT); swc_base_addr = (addr_h << 8) + addr_l; #ifdef DEBUG pr_info(DPFX "Read SWC I/O Base Address: low %d, high %d, res %d\n", addr_l, addr_h, swc_base_addr); #endif } /* Select Bank 3 of SWC */ static inline void pc87413_swc_bank3(void) { /* Step 4: Select Bank3 of SWC */ outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); #ifdef DEBUG pr_info(DPFX "Select Bank3 of SWC\n"); #endif } /* Set watchdog timeout to x minutes */ static inline void pc87413_programm_wdto(char pc87413_time) { /* Step 5: Programm WDTO, 
Twd. */ outb_p(pc87413_time, swc_base_addr + WDTO); #ifdef DEBUG pr_info(DPFX "Set WDTO to %d minutes\n", pc87413_time); #endif } /* Enable WDEN */ static inline void pc87413_enable_wden(void) { /* Step 6: Enable WDEN */ outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Enable WDEN\n"); #endif } /* Enable SW_WD_TREN */ static inline void pc87413_enable_sw_wd_tren(void) { /* Enable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "Enable SW_WD_TREN\n"); #endif } /* Disable SW_WD_TREN */ static inline void pc87413_disable_sw_wd_tren(void) { /* Disable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "pc87413 - Disable SW_WD_TREN\n"); #endif } /* Enable SW_WD_TRG */ static inline void pc87413_enable_sw_wd_trg(void) { /* Enable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SW_WD_TRG\n"); #endif } /* Disable SW_WD_TRG */ static inline void pc87413_disable_sw_wd_trg(void) { /* Disable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Disable SW_WD_TRG\n"); #endif } /* -- Higher level functions ------------------------------------*/ /* Enable the watchdog */ static void pc87413_enable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* Disable the watchdog */ static void pc87413_disable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(0); spin_unlock(&io_lock); } /* Refresh the watchdog */ static void pc87413_refresh(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(timeout); 
pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* -- File operations -------------------------------------------*/ /** * pc87413_open: * @inode: inode of device * @file: file handle to device * */ static int pc87413_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Reload and activate timer */ pc87413_refresh(); pr_info("Watchdog enabled. Timeout set to %d minute(s).\n", timeout); return nonseekable_open(inode, file); } /** * pc87413_release: * @inode: inode to board * @file: file handle to board * * The watchdog has a configurable API. There is a religious dispute * between people who want their watchdog to be able to shut down and * those who want to be sure if the watchdog manager dies the machine * reboots. In the former case we disable the counters, in the latter * case you have to open it again very soon. */ static int pc87413_release(struct inode *inode, struct file *file) { /* Shut off the timer. */ if (expect_close == 42) { pc87413_disable(); pr_info("Watchdog disabled, sleeping again...\n"); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); pc87413_refresh(); } clear_bit(0, &timer_enabled); expect_close = 0; return 0; } /** * pc87413_status: * * return, if the watchdog is enabled (timeout is set...) */ static int pc87413_status(void) { return 0; /* currently not supported */ } /** * pc87413_write: * @file: file handle to the watchdog * @data: data buffer to write * @len: length in bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. 
*/ static ssize_t pc87413_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* reset expect flag */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should reload the timer */ pc87413_refresh(); } return len; } /** * pc87413_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. We only actually usefully support * querying capabilities and current status. */ static long pc87413_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "PC87413(HF/F) watchdog", }; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: return put_user(pc87413_status(), uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, uarg.i)) return -EFAULT; if (options & WDIOS_DISABLECARD) { pc87413_disable(); retval = 0; } if (options & WDIOS_ENABLECARD) { pc87413_enable(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: pc87413_refresh(); #ifdef DEBUG pr_info(DPFX "keepalive\n"); #endif return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; /* the API states this is given in secs */ new_timeout /= 60; if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; pc87413_refresh(); /* fall through and return the new timeout... */ case WDIOC_GETTIMEOUT: new_timeout = timeout * 60; return put_user(new_timeout, uarg.i); default: return -ENOTTY; } } /* -- Notifier funtions -----------------------------------------*/ /** * notify_sys: * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. We want to turn the card * off at reboot otherwise the machine will reboot again during memory * test or worse yet during the following fsck. This would suck, in fact * trust me - if it happens it does suck. 
*/ static int pc87413_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Turn the card off */ pc87413_disable(); return NOTIFY_DONE; } /* -- Module's structures ---------------------------------------*/ static const struct file_operations pc87413_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pc87413_write, .unlocked_ioctl = pc87413_ioctl, .open = pc87413_open, .release = pc87413_release, }; static struct notifier_block pc87413_notifier = { .notifier_call = pc87413_notify_sys, }; static struct miscdevice pc87413_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pc87413_fops, }; /* -- Module init functions -------------------------------------*/ /** * pc87413_init: module's "constructor" * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. * The open() function will actually kick the board off. */ static int __init pc87413_init(void) { int ret; pr_info("Version " VERSION " at io 0x%X\n", WDT_INDEX_IO_PORT); if (!request_muxed_region(io, 2, MODNAME)) return -EBUSY; ret = register_reboot_notifier(&pc87413_notifier); if (ret != 0) { pr_err("cannot register reboot notifier (err=%d)\n", ret); } ret = misc_register(&pc87413_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto reboot_unreg; } pr_info("initialized. timeout=%d min\n", timeout); pc87413_select_wdt_out(); pc87413_enable_swc(); pc87413_get_swc_base_addr(); if (!request_region(swc_base_addr, 0x20, MODNAME)) { pr_err("cannot request SWC region at 0x%x\n", swc_base_addr); ret = -EBUSY; goto misc_unreg; } pc87413_enable(); release_region(io, 2); return 0; misc_unreg: misc_deregister(&pc87413_miscdev); reboot_unreg: unregister_reboot_notifier(&pc87413_notifier); release_region(io, 2); return ret; } /** * pc87413_exit: module's "destructor" * * Unload the watchdog. 
   You cannot do this with any file handles open.
 * If your watchdog is set to continue ticking on close and you unload
 * it, well it keeps ticking. We won't get the interrupt but the board
 * will not touch PC memory so all is fine. You just have to load a new
 * module in 60 seconds or reboot.
 */
static void __exit pc87413_exit(void)
{
	/* Stop the timer before we leave, unless the user requested
	 * nowayout semantics (watchdog keeps running after unload). */
	if (!nowayout) {
		pc87413_disable();
		pr_info("Watchdog disabled\n");
	}

	misc_deregister(&pc87413_miscdev);
	unregister_reboot_notifier(&pc87413_notifier);
	/* The SWC region was claimed in pc87413_init() after its base
	 * address was probed into swc_base_addr. */
	release_region(swc_base_addr, 0x20);

	pr_info("watchdog component driver removed\n");
}

module_init(pc87413_init);
module_exit(pc87413_exit);

MODULE_AUTHOR("Sven Anders <anders@anduras.de>, "
		"Marcus Junker <junker@anduras.de>,");
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);

/* Module parameters: superio index port, timeout (in minutes), nowayout. */
module_param(io, int, 0);
MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
					__MODULE_STRING(IO_DEFAULT) ").");

module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
		"Watchdog timeout in minutes (default="
				__MODULE_STRING(DEFAULT_TIMEOUT) ").");

module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
gpl-2.0
cbolumar/android_kernel_samsung_a3ulte
drivers/video/valkyriefb.c
7634
16790
/* * valkyriefb.c -- frame buffer device for the PowerMac 'valkyrie' display * * Created 8 August 1998 by * Martin Costabel <costabel@wanadoo.fr> and Kevin Schoedel * * Vmode-switching changes and vmode 15/17 modifications created 29 August * 1998 by Barry K. Nathan <barryn@pobox.com>. * * Ported to m68k Macintosh by David Huggins-Daines <dhd@debian.org> * * Derived directly from: * * controlfb.c -- frame buffer device for the PowerMac 'control' display * Copyright (C) 1998 Dan Jacobowitz <dan@debian.org> * * pmc-valkyrie.c -- Console support for PowerMac "valkyrie" display adaptor. * Copyright (C) 1997 Paul Mackerras. * * and indirectly: * * Frame buffer structure from: * drivers/video/chipsfb.c -- frame buffer device for * Chips & Technologies 65550 chip. * * Copyright (C) 1998 Paul Mackerras * * This file is derived from the Powermac "chips" driver: * Copyright (C) 1997 Fabio Riccardi. * And from the frame buffer device for Open Firmware-initialized devices: * Copyright (C) 1997 Geert Uytterhoeven. * * Hardware information from: * control.c: Console support for PowerMac "control" display adaptor. * Copyright (C) 1996 Paul Mackerras * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/selection.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/adb.h> #include <linux/cuda.h> #include <asm/io.h> #ifdef CONFIG_MAC #include <asm/bootinfo.h> #include <asm/macintosh.h> #else #include <asm/prom.h> #endif #include <asm/pgtable.h> #include "macmodes.h" #include "valkyriefb.h" #ifdef CONFIG_MAC /* We don't yet have functions to read the PRAM... 
perhaps we can adapt them from the PPC code? */ static int default_vmode = VMODE_CHOOSE; static int default_cmode = CMODE_8; #else static int default_vmode = VMODE_NVRAM; static int default_cmode = CMODE_NVRAM; #endif struct fb_par_valkyrie { int vmode, cmode; int xres, yres; int vxres, vyres; struct valkyrie_regvals *init; }; struct fb_info_valkyrie { struct fb_info info; struct fb_par_valkyrie par; struct cmap_regs __iomem *cmap_regs; unsigned long cmap_regs_phys; struct valkyrie_regs __iomem *valkyrie_regs; unsigned long valkyrie_regs_phys; __u8 __iomem *frame_buffer; unsigned long frame_buffer_phys; int sense; unsigned long total_vram; u32 pseudo_palette[16]; }; /* * Exported functions */ int valkyriefb_init(void); int valkyriefb_setup(char*); static int valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int valkyriefb_set_par(struct fb_info *info); static int valkyriefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int valkyriefb_blank(int blank_mode, struct fb_info *info); static int read_valkyrie_sense(struct fb_info_valkyrie *p); static void set_valkyrie_clock(unsigned char *params); static int valkyrie_var_to_par(struct fb_var_screeninfo *var, struct fb_par_valkyrie *par, const struct fb_info *fb_info); static int valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p); static void valkyrie_par_to_fix(struct fb_par_valkyrie *par, struct fb_fix_screeninfo *fix); static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p); static struct fb_ops valkyriefb_ops = { .owner = THIS_MODULE, .fb_check_var = valkyriefb_check_var, .fb_set_par = valkyriefb_set_par, .fb_setcolreg = valkyriefb_setcolreg, .fb_blank = valkyriefb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* Sets the video mode according to info->var */ static int valkyriefb_set_par(struct fb_info *info) { struct 
fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; volatile struct valkyrie_regs __iomem *valkyrie_regs = p->valkyrie_regs; struct fb_par_valkyrie *par = info->par; struct valkyrie_regvals *init; int err; if ((err = valkyrie_var_to_par(&info->var, par, info))) return err; valkyrie_par_to_fix(par, &info->fix); /* Reset the valkyrie */ out_8(&valkyrie_regs->status.r, 0); udelay(100); /* Initialize display timing registers */ init = par->init; out_8(&valkyrie_regs->mode.r, init->mode | 0x80); out_8(&valkyrie_regs->depth.r, par->cmode + 3); set_valkyrie_clock(init->clock_params); udelay(100); /* Turn on display */ out_8(&valkyrie_regs->mode.r, init->mode); return 0; } static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var) { return mac_vmode_to_var(par->vmode, par->cmode, var); } static int valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err; struct fb_par_valkyrie par; if ((err = valkyrie_var_to_par(var, &par, info))) return err; valkyrie_par_to_var(&par, var); return 0; } /* * Blank the screen if blank_mode != 0, else unblank. If blank_mode == NULL * then the caller blanks by setting the CLUT (Color Look Up Table) to all * black. Return 0 if blanking succeeded, != 0 if un-/blanking failed due * to e.g. a video mode which doesn't support it. 
Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown */ static int valkyriefb_blank(int blank_mode, struct fb_info *info) { struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; struct fb_par_valkyrie *par = info->par; struct valkyrie_regvals *init = par->init; if (init == NULL) return 1; switch (blank_mode) { case FB_BLANK_UNBLANK: /* unblank */ out_8(&p->valkyrie_regs->mode.r, init->mode); break; case FB_BLANK_NORMAL: return 1; /* get caller to set CLUT to all black */ case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: /* * [kps] Value extracted from MacOS. I don't know * whether this bit disables hsync or vsync, or * whether the hardware can do the other as well. */ out_8(&p->valkyrie_regs->mode.r, init->mode | 0x40); break; case FB_BLANK_POWERDOWN: out_8(&p->valkyrie_regs->mode.r, 0x66); break; } return 0; } static int valkyriefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; volatile struct cmap_regs __iomem *cmap_regs = p->cmap_regs; struct fb_par_valkyrie *par = info->par; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; /* tell clut which address to fill */ out_8(&p->cmap_regs->addr, regno); udelay(1); /* send one color channel at a time */ out_8(&cmap_regs->lut, red); out_8(&cmap_regs->lut, green); out_8(&cmap_regs->lut, blue); if (regno < 16 && par->cmode == CMODE_16) ((u32 *)info->pseudo_palette)[regno] = (regno << 10) | (regno << 5) | regno; return 0; } static inline int valkyrie_vram_reqd(int video_mode, int color_mode) { int pitch; struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1]; if ((pitch = init->pitch[color_mode]) == 0) pitch = 2 * init->pitch[0]; return init->vres * pitch; } static void set_valkyrie_clock(unsigned char *params) { struct adb_request req; int 
i; #ifdef CONFIG_ADB_CUDA for (i = 0; i < 3; ++i) { cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x50, i + 1, params[i]); while (!req.complete) cuda_poll(); } #endif } static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p) { p->sense = read_valkyrie_sense(p); printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense); /* Try to pick a video mode out of NVRAM if we have one. */ #if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) if (default_vmode == VMODE_NVRAM) { default_vmode = nvram_read_byte(NV_VMODE); if (default_vmode <= 0 || default_vmode > VMODE_MAX || !valkyrie_reg_init[default_vmode - 1]) default_vmode = VMODE_CHOOSE; } #endif if (default_vmode == VMODE_CHOOSE) default_vmode = mac_map_monitor_sense(p->sense); if (!valkyrie_reg_init[default_vmode - 1]) default_vmode = VMODE_640_480_67; #if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) if (default_cmode == CMODE_NVRAM) default_cmode = nvram_read_byte(NV_CMODE); #endif /* * Reduce the pixel size if we don't have enough VRAM or bandwidth. */ if (default_cmode < CMODE_8 || default_cmode > CMODE_16 || valkyrie_reg_init[default_vmode-1]->pitch[default_cmode] == 0 || valkyrie_vram_reqd(default_vmode, default_cmode) > p->total_vram) default_cmode = CMODE_8; printk(KERN_INFO "using video mode %d and color mode %d.\n", default_vmode, default_cmode); } int __init valkyriefb_init(void) { struct fb_info_valkyrie *p; unsigned long frame_buffer_phys, cmap_regs_phys, flags; int err; char *option = NULL; if (fb_get_options("valkyriefb", &option)) return -ENODEV; valkyriefb_setup(option); #ifdef CONFIG_MAC if (!MACH_IS_MAC) return -ENODEV; if (!(mac_bi_data.id == MAC_MODEL_Q630 /* I'm not sure about this one */ || mac_bi_data.id == MAC_MODEL_P588)) return -ENODEV; /* Hardcoded addresses... welcome to 68k Macintosh country :-) */ frame_buffer_phys = 0xf9000000; cmap_regs_phys = 0x50f24000; flags = IOMAP_NOCACHE_SER; /* IOMAP_WRITETHROUGH?? 
*/ #else /* ppc (!CONFIG_MAC) */ { struct device_node *dp; struct resource r; dp = of_find_node_by_name(NULL, "valkyrie"); if (dp == 0) return 0; if (of_address_to_resource(dp, 0, &r)) { printk(KERN_ERR "can't find address for valkyrie\n"); return 0; } frame_buffer_phys = r.start; cmap_regs_phys = r.start + 0x304000; flags = _PAGE_WRITETHRU; } #endif /* ppc (!CONFIG_MAC) */ p = kzalloc(sizeof(*p), GFP_ATOMIC); if (p == 0) return -ENOMEM; /* Map in frame buffer and registers */ if (!request_mem_region(frame_buffer_phys, 0x100000, "valkyriefb")) { kfree(p); return 0; } p->total_vram = 0x100000; p->frame_buffer_phys = frame_buffer_phys; p->frame_buffer = __ioremap(frame_buffer_phys, p->total_vram, flags); p->cmap_regs_phys = cmap_regs_phys; p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000); p->valkyrie_regs_phys = cmap_regs_phys+0x6000; p->valkyrie_regs = ioremap(p->valkyrie_regs_phys, 0x1000); err = -ENOMEM; if (p->frame_buffer == NULL || p->cmap_regs == NULL || p->valkyrie_regs == NULL) { printk(KERN_ERR "valkyriefb: couldn't map resources\n"); goto out_free; } valkyrie_choose_mode(p); mac_vmode_to_var(default_vmode, default_cmode, &p->info.var); err = valkyrie_init_info(&p->info, p); if (err < 0) goto out_free; valkyrie_init_fix(&p->info.fix, p); if (valkyriefb_set_par(&p->info)) /* "can't happen" */ printk(KERN_ERR "valkyriefb: can't set default video mode\n"); if ((err = register_framebuffer(&p->info)) != 0) goto out_cmap_free; printk(KERN_INFO "fb%d: valkyrie frame buffer device\n", p->info.node); return 0; out_cmap_free: fb_dealloc_cmap(&p->info.cmap); out_free: if (p->frame_buffer) iounmap(p->frame_buffer); if (p->cmap_regs) iounmap(p->cmap_regs); if (p->valkyrie_regs) iounmap(p->valkyrie_regs); kfree(p); return err; } /* * Get the monitor sense value. 
*/ static int read_valkyrie_sense(struct fb_info_valkyrie *p) { int sense, in; out_8(&p->valkyrie_regs->msense.r, 0); /* release all lines */ __delay(20000); sense = ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x70) << 4; /* drive each sense line low in turn and collect the other 2 */ out_8(&p->valkyrie_regs->msense.r, 4); /* drive A low */ __delay(20000); sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x30); out_8(&p->valkyrie_regs->msense.r, 2); /* drive B low */ __delay(20000); sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x40) >> 3; sense |= (in & 0x10) >> 2; out_8(&p->valkyrie_regs->msense.r, 1); /* drive C low */ __delay(20000); sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x60) >> 5; out_8(&p->valkyrie_regs->msense.r, 7); return sense; } /* * This routine takes a user-supplied var, * and picks the best vmode/cmode from it. */ /* [bkn] I did a major overhaul of this function. * * Much of the old code was "swiped by jonh from atyfb.c". Because * macmodes has mac_var_to_vmode, I felt that it would be better to * rework this function to use that, instead of reinventing the wheel to * add support for vmode 17. This was reinforced by the fact that * the previously swiped atyfb.c code is no longer there. * * So, I swiped and adapted platinum_var_to_par (from platinumfb.c), replacing * most, but not all, of the old code in the process. One side benefit of * swiping the platinumfb code is that we now have more comprehensible error * messages when a vmode/cmode switch fails. (Most of the error messages are * platinumfb.c, but I added two of my own, and I also changed some commas * into colons to make the messages more consistent with other Linux error * messages.) In addition, I think the new code *might* fix some vmode- * switching oddities, but I'm not sure. * * There may be some more opportunities for cleanup in here, but this is a * good start... 
*/ static int valkyrie_var_to_par(struct fb_var_screeninfo *var, struct fb_par_valkyrie *par, const struct fb_info *fb_info) { int vmode, cmode; struct valkyrie_regvals *init; struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) fb_info; if (mac_var_to_vmode(var, &vmode, &cmode) != 0) { printk(KERN_ERR "valkyriefb: can't do %dx%dx%d.\n", var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } /* Check if we know about the wanted video mode */ if (vmode < 1 || vmode > VMODE_MAX || !valkyrie_reg_init[vmode-1]) { printk(KERN_ERR "valkyriefb: vmode %d not valid.\n", vmode); return -EINVAL; } if (cmode != CMODE_8 && cmode != CMODE_16) { printk(KERN_ERR "valkyriefb: cmode %d not valid.\n", cmode); return -EINVAL; } if (var->xres_virtual > var->xres || var->yres_virtual > var->yres || var->xoffset != 0 || var->yoffset != 0) { return -EINVAL; } init = valkyrie_reg_init[vmode-1]; if (init->pitch[cmode] == 0) { printk(KERN_ERR "valkyriefb: vmode %d does not support " "cmode %d.\n", vmode, cmode); return -EINVAL; } if (valkyrie_vram_reqd(vmode, cmode) > p->total_vram) { printk(KERN_ERR "valkyriefb: not enough ram for vmode %d, " "cmode %d.\n", vmode, cmode); return -EINVAL; } par->vmode = vmode; par->cmode = cmode; par->init = init; par->xres = var->xres; par->yres = var->yres; par->vxres = par->xres; par->vyres = par->yres; return 0; } static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p) { memset(fix, 0, sizeof(*fix)); strcpy(fix->id, "valkyrie"); fix->mmio_start = p->valkyrie_regs_phys; fix->mmio_len = sizeof(struct valkyrie_regs); fix->type = FB_TYPE_PACKED_PIXELS; fix->smem_start = p->frame_buffer_phys + 0x1000; fix->smem_len = p->total_vram; fix->type_aux = 0; fix->ywrapstep = 0; fix->ypanstep = 0; fix->xpanstep = 0; } /* Fix must already be inited above */ static void valkyrie_par_to_fix(struct fb_par_valkyrie *par, struct fb_fix_screeninfo *fix) { fix->smem_len = valkyrie_vram_reqd(par->vmode, par->cmode); fix->visual = 
(par->cmode == CMODE_8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; fix->line_length = par->vxres << par->cmode; /* ywrapstep, xpanstep, ypanstep */ } static int __init valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p) { info->fbops = &valkyriefb_ops; info->screen_base = p->frame_buffer + 0x1000; info->flags = FBINFO_DEFAULT; info->pseudo_palette = p->pseudo_palette; info->par = &p->par; return fb_alloc_cmap(&info->cmap, 256, 0); } /* * Parse user specified options (`video=valkyriefb:') */ int __init valkyriefb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "vmode:", 6)) { int vmode = simple_strtoul(this_opt+6, NULL, 0); if (vmode > 0 && vmode <= VMODE_MAX) default_vmode = vmode; } else if (!strncmp(this_opt, "cmode:", 6)) { int depth = simple_strtoul(this_opt+6, NULL, 0); switch (depth) { case 8: default_cmode = CMODE_8; break; case 15: case 16: default_cmode = CMODE_16; break; } } } return 0; } module_init(valkyriefb_init); MODULE_LICENSE("GPL");
gpl-2.0
art1p/android_kernel_lge_omap4-common
net/tipc/handler.c
8146
4104
/* * net/tipc/handler.c: TIPC signal handling * * Copyright (c) 2000-2006, Ericsson AB * Copyright (c) 2005, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" struct queue_item { struct list_head next_signal; void (*handler) (unsigned long); unsigned long data; }; static struct kmem_cache *tipc_queue_item_cache; static struct list_head signal_queue_head; static DEFINE_SPINLOCK(qitem_lock); static int handler_enabled; static void process_signal_queue(unsigned long dummy); static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0); unsigned int tipc_k_signal(Handler routine, unsigned long argument) { struct queue_item *item; if (!handler_enabled) { err("Signal request ignored by handler\n"); return -ENOPROTOOPT; } spin_lock_bh(&qitem_lock); item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); if (!item) { err("Signal queue out of memory\n"); spin_unlock_bh(&qitem_lock); return -ENOMEM; } item->handler = routine; item->data = argument; list_add_tail(&item->next_signal, &signal_queue_head); spin_unlock_bh(&qitem_lock); tasklet_schedule(&tipc_tasklet); return 0; } static void process_signal_queue(unsigned long dummy) { struct queue_item *__volatile__ item; struct list_head *l, *n; spin_lock_bh(&qitem_lock); list_for_each_safe(l, n, &signal_queue_head) { item = list_entry(l, struct queue_item, next_signal); list_del(&item->next_signal); spin_unlock_bh(&qitem_lock); item->handler(item->data); spin_lock_bh(&qitem_lock); kmem_cache_free(tipc_queue_item_cache, item); } spin_unlock_bh(&qitem_lock); } int tipc_handler_start(void) { tipc_queue_item_cache = kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 0, SLAB_HWCACHE_ALIGN, NULL); if (!tipc_queue_item_cache) return -ENOMEM; INIT_LIST_HEAD(&signal_queue_head); tasklet_enable(&tipc_tasklet); handler_enabled = 1; return 0; } void tipc_handler_stop(void) { struct list_head *l, *n; struct queue_item *item; if (!handler_enabled) return; handler_enabled = 0; tasklet_disable(&tipc_tasklet); tasklet_kill(&tipc_tasklet); spin_lock_bh(&qitem_lock); list_for_each_safe(l, n, &signal_queue_head) { item = list_entry(l, struct queue_item, 
next_signal); list_del(&item->next_signal); kmem_cache_free(tipc_queue_item_cache, item); } spin_unlock_bh(&qitem_lock); kmem_cache_destroy(tipc_queue_item_cache); }
gpl-2.0
ngxson/android_kernel_sony_msm8x27
drivers/ide/ide-atapi.c
8914
18109
/* * ATAPI support. */ #include <linux/kernel.h> #include <linux/cdrom.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/ide.h> #include <linux/scatterlist.h> #include <linux/gfp.h> #include <scsi/scsi.h> #define DRV_NAME "ide-atapi" #define PFX DRV_NAME ": " #ifdef DEBUG #define debug_log(fmt, args...) \ printk(KERN_INFO "ide: " fmt, ## args) #else #define debug_log(fmt, args...) do {} while (0) #endif #define ATAPI_MIN_CDB_BYTES 12 static inline int dev_is_idecd(ide_drive_t *drive) { return drive->media == ide_cdrom || drive->media == ide_optical; } /* * Check whether we can support a device, * based on the ATAPI IDENTIFY command results. */ int ide_check_atapi_device(ide_drive_t *drive, const char *s) { u16 *id = drive->id; u8 gcw[2], protocol, device_type, removable, drq_type, packet_size; *((u16 *)&gcw) = id[ATA_ID_CONFIG]; protocol = (gcw[1] & 0xC0) >> 6; device_type = gcw[1] & 0x1F; removable = (gcw[0] & 0x80) >> 7; drq_type = (gcw[0] & 0x60) >> 5; packet_size = gcw[0] & 0x03; #ifdef CONFIG_PPC /* kludge for Apple PowerBook internal zip */ if (drive->media == ide_floppy && device_type == 5 && !strstr((char *)&id[ATA_ID_PROD], "CD-ROM") && strstr((char *)&id[ATA_ID_PROD], "ZIP")) device_type = 0; #endif if (protocol != 2) printk(KERN_ERR "%s: %s: protocol (0x%02x) is not ATAPI\n", s, drive->name, protocol); else if ((drive->media == ide_floppy && device_type != 0) || (drive->media == ide_tape && device_type != 1)) printk(KERN_ERR "%s: %s: invalid device type (0x%02x)\n", s, drive->name, device_type); else if (removable == 0) printk(KERN_ERR "%s: %s: the removable flag is not set\n", s, drive->name); else if (drive->media == ide_floppy && drq_type == 3) printk(KERN_ERR "%s: %s: sorry, DRQ type (0x%02x) not " "supported\n", s, drive->name, drq_type); else if (packet_size != 0) printk(KERN_ERR "%s: %s: packet size (0x%02x) is not 12 " "bytes\n", s, drive->name, packet_size); else return 1; return 0; } 
EXPORT_SYMBOL_GPL(ide_check_atapi_device); void ide_init_pc(struct ide_atapi_pc *pc) { memset(pc, 0, sizeof(*pc)); } EXPORT_SYMBOL_GPL(ide_init_pc); /* * Add a special packet command request to the tail of the request queue, * and wait for it to be serviced. */ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk, struct ide_atapi_pc *pc, void *buf, unsigned int bufflen) { struct request *rq; int error; rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_SPECIAL; rq->special = (char *)pc; if (buf && bufflen) { error = blk_rq_map_kern(drive->queue, rq, buf, bufflen, GFP_NOIO); if (error) goto put_req; } memcpy(rq->cmd, pc->c, 12); if (drive->media == ide_tape) rq->cmd[13] = REQ_IDETAPE_PC1; error = blk_execute_rq(drive->queue, disk, rq, 0); put_req: blk_put_request(rq); return error; } EXPORT_SYMBOL_GPL(ide_queue_pc_tail); int ide_do_test_unit_ready(ide_drive_t *drive, struct gendisk *disk) { struct ide_atapi_pc pc; ide_init_pc(&pc); pc.c[0] = TEST_UNIT_READY; return ide_queue_pc_tail(drive, disk, &pc, NULL, 0); } EXPORT_SYMBOL_GPL(ide_do_test_unit_ready); int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start) { struct ide_atapi_pc pc; ide_init_pc(&pc); pc.c[0] = START_STOP; pc.c[4] = start; if (drive->media == ide_tape) pc.flags |= PC_FLAG_WAIT_FOR_DSC; return ide_queue_pc_tail(drive, disk, &pc, NULL, 0); } EXPORT_SYMBOL_GPL(ide_do_start_stop); int ide_set_media_lock(ide_drive_t *drive, struct gendisk *disk, int on) { struct ide_atapi_pc pc; if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) return 0; ide_init_pc(&pc); pc.c[0] = ALLOW_MEDIUM_REMOVAL; pc.c[4] = on; return ide_queue_pc_tail(drive, disk, &pc, NULL, 0); } EXPORT_SYMBOL_GPL(ide_set_media_lock); void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc) { ide_init_pc(pc); pc->c[0] = REQUEST_SENSE; if (drive->media == ide_floppy) { pc->c[4] = 255; pc->req_xfer = 18; } else { pc->c[4] = 20; pc->req_xfer = 20; } } 
EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd); void ide_prep_sense(ide_drive_t *drive, struct request *rq) { struct request_sense *sense = &drive->sense_data; struct request *sense_rq = &drive->sense_rq; unsigned int cmd_len, sense_len; int err; switch (drive->media) { case ide_floppy: cmd_len = 255; sense_len = 18; break; case ide_tape: cmd_len = 20; sense_len = 20; break; default: cmd_len = 18; sense_len = 18; } BUG_ON(sense_len > sizeof(*sense)); if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed) return; memset(sense, 0, sizeof(*sense)); blk_rq_init(rq->q, sense_rq); err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len, GFP_NOIO); if (unlikely(err)) { if (printk_ratelimit()) printk(KERN_WARNING PFX "%s: failed to map sense " "buffer\n", drive->name); return; } sense_rq->rq_disk = rq->rq_disk; sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; sense_rq->cmd[4] = cmd_len; sense_rq->cmd_type = REQ_TYPE_SENSE; sense_rq->cmd_flags |= REQ_PREEMPT; if (drive->media == ide_tape) sense_rq->cmd[13] = REQ_IDETAPE_PC1; drive->sense_rq_armed = true; } EXPORT_SYMBOL_GPL(ide_prep_sense); int ide_queue_sense_rq(ide_drive_t *drive, void *special) { /* deferred failure from ide_prep_sense() */ if (!drive->sense_rq_armed) { printk(KERN_WARNING PFX "%s: error queuing a sense request\n", drive->name); return -ENOMEM; } drive->sense_rq.special = special; drive->sense_rq_armed = false; drive->hwif->rq = NULL; elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); return 0; } EXPORT_SYMBOL_GPL(ide_queue_sense_rq); /* * Called when an error was detected during the last packet command. * We queue a request sense packet command at the head of the request * queue. 
*/ void ide_retry_pc(ide_drive_t *drive) { struct request *failed_rq = drive->hwif->rq; struct request *sense_rq = &drive->sense_rq; struct ide_atapi_pc *pc = &drive->request_sense_pc; (void)ide_read_error(drive); /* init pc from sense_rq */ ide_init_pc(pc); memcpy(pc->c, sense_rq->cmd, 12); if (drive->media == ide_tape) drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; /* * Push back the failed request and put request sense on top * of it. The failed command will be retried after sense data * is acquired. */ drive->hwif->rq = NULL; ide_requeue_and_plug(drive, failed_rq); if (ide_queue_sense_rq(drive, pc)) { blk_start_request(failed_rq); ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); } } EXPORT_SYMBOL_GPL(ide_retry_pc); int ide_cd_expiry(ide_drive_t *drive) { struct request *rq = drive->hwif->rq; unsigned long wait = 0; debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); /* * Some commands are *slow* and normally take a long time to complete. * Usually we can use the ATAPI "disconnect" to bypass this, but not all * commands/drives support that. Let ide_timer_expiry keep polling us * for these. 
*/ switch (rq->cmd[0]) { case GPCMD_BLANK: case GPCMD_FORMAT_UNIT: case GPCMD_RESERVE_RZONE_TRACK: case GPCMD_CLOSE_TRACK: case GPCMD_FLUSH_CACHE: wait = ATAPI_WAIT_PC; break; default: if (!(rq->cmd_flags & REQ_QUIET)) printk(KERN_INFO PFX "cmd 0x%x timed out\n", rq->cmd[0]); wait = 0; break; } return wait; } EXPORT_SYMBOL_GPL(ide_cd_expiry); int ide_cd_get_xferlen(struct request *rq) { switch (rq->cmd_type) { case REQ_TYPE_FS: return 32768; case REQ_TYPE_SENSE: case REQ_TYPE_BLOCK_PC: case REQ_TYPE_ATA_PC: return blk_rq_bytes(rq); default: return 0; } } EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) { struct ide_taskfile tf; drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT | IDE_VALID_LBAM | IDE_VALID_LBAH); *bcount = (tf.lbah << 8) | tf.lbam; *ireason = tf.nsect & 3; } EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); /* * Check the contents of the interrupt reason register and attempt to recover if * there are problems. * * Returns: * - 0 if everything's ok * - 1 if the request has to be terminated. */ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len, int ireason, int rw) { ide_hwif_t *hwif = drive->hwif; debug_log("ireason: 0x%x, rw: 0x%x\n", ireason, rw); if (ireason == (!rw << 1)) return 0; else if (ireason == (rw << 1)) { printk(KERN_ERR PFX "%s: %s: wrong transfer direction!\n", drive->name, __func__); if (dev_is_idecd(drive)) ide_pad_transfer(drive, rw, len); } else if (!rw && ireason == ATAPI_COD) { if (dev_is_idecd(drive)) { /* * Some drives (ASUS) seem to tell us that status info * is available. Just get it and ignore. */ (void)hwif->tp_ops->read_status(hwif); return 0; } } else { if (ireason & ATAPI_COD) printk(KERN_ERR PFX "%s: CoD != 0 in %s\n", drive->name, __func__); /* drive wants a command packet, or invalid ireason... 
*/ printk(KERN_ERR PFX "%s: %s: bad interrupt reason 0x%02x\n", drive->name, __func__, ireason); } if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC) rq->cmd_flags |= REQ_FAILED; return 1; } EXPORT_SYMBOL_GPL(ide_check_ireason); /* * This is the usual interrupt handler which will be called during a packet * command. We will transfer some of the data (as requested by the drive) * and will re-point interrupt handler to us. */ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) { struct ide_atapi_pc *pc = drive->pc; ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct request *rq = hwif->rq; const struct ide_tp_ops *tp_ops = hwif->tp_ops; unsigned int timeout, done; u16 bcount; u8 stat, ireason, dsc = 0; u8 write = !!(pc->flags & PC_FLAG_WRITING); debug_log("Enter %s - interrupt handler\n", __func__); timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD : WAIT_TAPE_CMD; /* Clear the interrupt */ stat = tp_ops->read_status(hwif); if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { int rc; drive->waiting_for_dma = 0; rc = hwif->dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); if (rc || (drive->media == ide_tape && (stat & ATA_ERR))) { if (drive->media == ide_floppy) printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name, rq_data_dir(pc->rq) ? 
"write" : "read"); pc->flags |= PC_FLAG_DMA_ERROR; } else rq->resid_len = 0; debug_log("%s: DMA finished\n", drive->name); } /* No more interrupts */ if ((stat & ATA_DRQ) == 0) { int uptodate, error; debug_log("Packet command completed, %d bytes transferred\n", blk_rq_bytes(rq)); pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS; local_irq_enable_in_hardirq(); if (drive->media == ide_tape && (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) stat &= ~ATA_ERR; if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) { /* Error detected */ debug_log("%s: I/O error\n", drive->name); if (drive->media != ide_tape) pc->rq->errors++; if (rq->cmd[0] == REQUEST_SENSE) { printk(KERN_ERR PFX "%s: I/O error in request " "sense command\n", drive->name); return ide_do_reset(drive); } debug_log("[cmd %x]: check condition\n", rq->cmd[0]); /* Retry operation */ ide_retry_pc(drive); /* queued, but not started */ return ide_stopped; } pc->error = 0; if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0) dsc = 1; /* * ->pc_callback() might change rq->data_len for * residual count, cache total length. */ done = blk_rq_bytes(rq); /* Command finished - Call the callback function */ uptodate = drive->pc_callback(drive, dsc); if (uptodate == 0) drive->failed_pc = NULL; if (rq->cmd_type == REQ_TYPE_SPECIAL) { rq->errors = 0; error = 0; } else { if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { if (rq->errors == 0) rq->errors = -EIO; } error = uptodate ? 0 : -EIO; } ide_complete_rq(drive, error, blk_rq_bytes(rq)); return ide_stopped; } if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS; printk(KERN_ERR PFX "%s: The device wants to issue more " "interrupts in DMA mode\n", drive->name); ide_dma_off(drive); return ide_do_reset(drive); } /* Get the number of bytes to transfer on this interrupt. 
*/ ide_read_bcount_and_ireason(drive, &bcount, &ireason); if (ide_check_ireason(drive, rq, bcount, ireason, write)) return ide_do_reset(drive); done = min_t(unsigned int, bcount, cmd->nleft); ide_pio_bytes(drive, cmd, write, done); /* Update transferred byte count */ rq->resid_len -= done; bcount -= done; if (bcount) ide_pad_transfer(drive, write, bcount); debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", rq->cmd[0], done, bcount, rq->resid_len); /* And set the interrupt handler again */ ide_set_handler(drive, ide_pc_intr, timeout); return ide_started; } static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf, u16 bcount, u8 dma) { cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM | IDE_VALID_FEATURE | valid_tf; cmd->tf.command = ATA_CMD_PACKET; cmd->tf.feature = dma; /* Use PIO/DMA */ cmd->tf.lbam = bcount & 0xff; cmd->tf.lbah = (bcount >> 8) & 0xff; } static u8 ide_read_ireason(ide_drive_t *drive) { struct ide_taskfile tf; drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT); return tf.nsect & 3; } static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) { int retries = 100; while (retries-- && ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO))) { printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing " "a packet command, retrying\n", drive->name); udelay(100); ireason = ide_read_ireason(drive); if (retries == 0) { printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing" " a packet command, ignoring\n", drive->name); ireason |= ATAPI_COD; ireason &= ~ATAPI_IO; } } return ireason; } static int ide_delayed_transfer_pc(ide_drive_t *drive) { /* Send the actual packet */ drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12); /* Timeout for the packet command */ return WAIT_FLOPPY_CMD; } static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) { struct ide_atapi_pc *uninitialized_var(pc); ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; 
ide_expiry_t *expiry; unsigned int timeout; int cmd_len; ide_startstop_t startstop; u8 ireason; if (ide_wait_stat(&startstop, drive, ATA_DRQ, ATA_BUSY, WAIT_READY)) { printk(KERN_ERR PFX "%s: Strange, packet command initiated yet " "DRQ isn't asserted\n", drive->name); return startstop; } if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { if (drive->dma) drive->waiting_for_dma = 1; } if (dev_is_idecd(drive)) { /* ATAPI commands get padded out to 12 bytes minimum */ cmd_len = COMMAND_SIZE(rq->cmd[0]); if (cmd_len < ATAPI_MIN_CDB_BYTES) cmd_len = ATAPI_MIN_CDB_BYTES; timeout = rq->timeout; expiry = ide_cd_expiry; } else { pc = drive->pc; cmd_len = ATAPI_MIN_CDB_BYTES; /* * If necessary schedule the packet transfer to occur 'timeout' * milliseconds later in ide_delayed_transfer_pc() after the * device says it's ready for a packet. */ if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { timeout = drive->pc_delay; expiry = &ide_delayed_transfer_pc; } else { timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD : WAIT_TAPE_CMD; expiry = NULL; } ireason = ide_read_ireason(drive); if (drive->media == ide_tape) ireason = ide_wait_ireason(drive, ireason); if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) { printk(KERN_ERR PFX "%s: (IO,CoD) != (0,1) while " "issuing a packet command\n", drive->name); return ide_do_reset(drive); } } hwif->expiry = expiry; /* Set the interrupt routine */ ide_set_handler(drive, (dev_is_idecd(drive) ? 
drive->irq_handler : ide_pc_intr), timeout); /* Send the actual packet */ if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); /* Begin DMA, if necessary */ if (dev_is_idecd(drive)) { if (drive->dma) hwif->dma_ops->dma_start(drive); } else { if (pc->flags & PC_FLAG_DMA_OK) { pc->flags |= PC_FLAG_DMA_IN_PROGRESS; hwif->dma_ops->dma_start(drive); } } return ide_started; } ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) { struct ide_atapi_pc *pc; ide_hwif_t *hwif = drive->hwif; ide_expiry_t *expiry = NULL; struct request *rq = hwif->rq; unsigned int timeout, bytes; u16 bcount; u8 valid_tf; u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT); if (dev_is_idecd(drive)) { valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL; bcount = ide_cd_get_xferlen(rq); expiry = ide_cd_expiry; timeout = ATAPI_WAIT_PC; if (drive->dma) drive->dma = !ide_dma_prepare(drive, cmd); } else { pc = drive->pc; valid_tf = IDE_VALID_DEVICE; bytes = blk_rq_bytes(rq); bcount = ((drive->media == ide_tape) ? bytes : min_t(unsigned int, bytes, 63 * 1024)); /* We haven't transferred any data yet */ rq->resid_len = bcount; if (pc->flags & PC_FLAG_DMA_ERROR) { pc->flags &= ~PC_FLAG_DMA_ERROR; ide_dma_off(drive); } if (pc->flags & PC_FLAG_DMA_OK) drive->dma = !ide_dma_prepare(drive, cmd); if (!drive->dma) pc->flags &= ~PC_FLAG_DMA_OK; timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD : WAIT_TAPE_CMD; } ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma); (void)do_rw_taskfile(drive, cmd); if (drq_int) { if (drive->dma) drive->waiting_for_dma = 0; hwif->expiry = expiry; } ide_execute_command(drive, cmd, ide_transfer_pc, timeout); return drq_int ? ide_started : ide_transfer_pc(drive); } EXPORT_SYMBOL_GPL(ide_issue_pc);
gpl-2.0
rmcc/commtiva-kernel-z71
drivers/char/agp/ali-agp.c
9170
10370
/* * ALi AGPGART routines. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <asm/page.h> /* PAGE_SIZE */ #include "agp.h" #define ALI_AGPCTRL 0xb8 #define ALI_ATTBASE 0xbc #define ALI_TLBCTRL 0xc0 #define ALI_TAGCTRL 0xc4 #define ALI_CACHE_FLUSH_CTRL 0xD0 #define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000 #define ALI_CACHE_FLUSH_EN 0x100 static int ali_fetch_size(void) { int i; u32 temp; struct aper_size_info_32 *values; pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); temp &= ~(0xfffffff0); values = A_SIZE_32(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void ali_tlbflush(struct agp_memory *mem) { u32 temp; pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); temp &= 0xfffffff0; temp |= (1<<0 | 1<<1); pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp); } static void ali_cleanup(void) { struct aper_size_info_32 *previous_size; u32 temp; previous_size = A_SIZE_32(agp_bridge->previous_size); pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); // clear tag pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, ((temp & 0xffffff00) | 0x00000001|0x00000002)); pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, ((temp & 0x00000ff0) | previous_size->size_value)); } static int ali_configure(void) { u32 temp; struct aper_size_info_32 *current_size; current_size = A_SIZE_32(agp_bridge->current_size); /* aperture size and gatt addr */ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000)) | (current_size->size_value & 0xf)); pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, 
temp); /* tlb control */ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); /* address to map to */ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); #if 0 if (agp_bridge->type == ALI_M1541) { u32 nlvm_addr = 0; switch (current_size->size_value) { case 0: break; case 1: nlvm_addr = 0x100000;break; case 2: nlvm_addr = 0x200000;break; case 3: nlvm_addr = 0x400000;break; case 4: nlvm_addr = 0x800000;break; case 6: nlvm_addr = 0x1000000;break; case 7: nlvm_addr = 0x2000000;break; case 8: nlvm_addr = 0x4000000;break; case 9: nlvm_addr = 0x8000000;break; case 10: nlvm_addr = 0x10000000;break; default: break; } nlvm_addr--; nlvm_addr&=0xfff00000; nlvm_addr+= agp_bridge->gart_bus_addr; nlvm_addr|=(agp_bridge->gart_bus_addr>>12); dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n", nlvm_addr); } #endif pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); temp &= 0xffffff7f; //enable TLB pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp); return 0; } static void m1541_cache_flush(void) { int i, page_count; u32 temp; global_cache_flush(); page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | (agp_bridge->gatt_bus_addr + i)) | ALI_CACHE_FLUSH_EN)); } } static struct page *m1541_alloc_page(struct agp_bridge_data *bridge) { struct page *page = agp_generic_alloc_page(agp_bridge); u32 temp; if (!page) return NULL; pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | page_to_phys(page)) | ALI_CACHE_FLUSH_EN )); return page; } static void 
ali_destroy_page(struct page *page, int flags) { if (page) { if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); /* is this really needed? --hch */ agp_generic_destroy_page(page, flags); } else agp_generic_destroy_page(page, flags); } } static void m1541_destroy_page(struct page *page, int flags) { u32 temp; if (page == NULL) return; if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | page_to_phys(page)) | ALI_CACHE_FLUSH_EN)); } agp_generic_destroy_page(page, flags); } /* Setup function */ static const struct aper_size_info_32 ali_generic_sizes[7] = { {256, 65536, 6, 10}, {128, 32768, 5, 9}, {64, 16384, 4, 8}, {32, 8192, 3, 7}, {16, 4096, 2, 6}, {8, 2048, 1, 4}, {4, 1024, 0, 3} }; static const struct agp_bridge_driver ali_generic_bridge = { .owner = THIS_MODULE, .aperture_sizes = ali_generic_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = ali_configure, .fetch_size = ali_fetch_size, .cleanup = ali_cleanup, .tlb_flush = ali_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_destroy_page = ali_destroy_page, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver ali_m1541_bridge = { .owner = THIS_MODULE, .aperture_sizes = ali_generic_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .configure = ali_configure, .fetch_size = ali_fetch_size, .cleanup = ali_cleanup, .tlb_flush = 
ali_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = m1541_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = m1541_alloc_page, .agp_destroy_page = m1541_destroy_page, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids ali_agp_device_ids[] __devinitdata = { { .device_id = PCI_DEVICE_ID_AL_M1541, .chipset_name = "M1541", }, { .device_id = PCI_DEVICE_ID_AL_M1621, .chipset_name = "M1621", }, { .device_id = PCI_DEVICE_ID_AL_M1631, .chipset_name = "M1631", }, { .device_id = PCI_DEVICE_ID_AL_M1632, .chipset_name = "M1632", }, { .device_id = PCI_DEVICE_ID_AL_M1641, .chipset_name = "M1641", }, { .device_id = PCI_DEVICE_ID_AL_M1644, .chipset_name = "M1644", }, { .device_id = PCI_DEVICE_ID_AL_M1647, .chipset_name = "M1647", }, { .device_id = PCI_DEVICE_ID_AL_M1651, .chipset_name = "M1651", }, { .device_id = PCI_DEVICE_ID_AL_M1671, .chipset_name = "M1671", }, { .device_id = PCI_DEVICE_ID_AL_M1681, .chipset_name = "M1681", }, { .device_id = PCI_DEVICE_ID_AL_M1683, .chipset_name = "M1683", }, { }, /* dummy final entry, always present */ }; static int __devinit agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = ali_agp_device_ids; struct agp_bridge_data *bridge; u8 hidden_1621_id, cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* probe for known chipsets */ for (j = 0; devs[j].chipset_name; j++) { if (pdev->device == devs[j].device_id) goto found; } dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n", pdev->vendor, pdev->device); return -ENODEV; found: bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->dev 
= pdev; bridge->capndx = cap_ptr; switch (pdev->device) { case PCI_DEVICE_ID_AL_M1541: bridge->driver = &ali_m1541_bridge; break; case PCI_DEVICE_ID_AL_M1621: pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); switch (hidden_1621_id) { case 0x31: devs[j].chipset_name = "M1631"; break; case 0x32: devs[j].chipset_name = "M1632"; break; case 0x41: devs[j].chipset_name = "M1641"; break; case 0x43: devs[j].chipset_name = "M1621"; break; case 0x47: devs[j].chipset_name = "M1647"; break; case 0x51: devs[j].chipset_name = "M1651"; break; default: break; } /*FALLTHROUGH*/ default: bridge->driver = &ali_generic_bridge; } dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void __devexit agp_ali_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static struct pci_device_id agp_ali_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); static struct pci_driver agp_ali_pci_driver = { .name = "agpgart-ali", .id_table = agp_ali_pci_table, .probe = agp_ali_probe, .remove = agp_ali_remove, }; static int __init agp_ali_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_ali_pci_driver); } static void __exit agp_ali_cleanup(void) { pci_unregister_driver(&agp_ali_pci_driver); } module_init(agp_ali_init); module_exit(agp_ali_cleanup); MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
arjen75/LG-Optimus-Chic-Kernel-2.6.32.59-
arch/alpha/kernel/console.c
11474
2057
/* * linux/arch/alpha/kernel/console.c * * Architecture-specific specific support for VGA device on * non-0 I/O hose */ #include <linux/pci.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/vt.h> #include <asm/vga.h> #include <asm/machvec.h> #include "pci_impl.h" #ifdef CONFIG_VGA_HOSE struct pci_controller *pci_vga_hose; static struct resource alpha_vga = { .name = "alpha-vga+", .start = 0x3C0, .end = 0x3DF }; static struct pci_controller * __init default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2) { if (h2->index < h1->index) return h2; return h1; } void __init locate_and_init_vga(void *(*sel_func)(void *, void *)) { struct pci_controller *hose = NULL; struct pci_dev *dev = NULL; /* Default the select function */ if (!sel_func) sel_func = (void *)default_vga_hose_select; /* Find the console VGA device */ for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) { if (!hose) hose = dev->sysdata; else hose = sel_func(hose, dev->sysdata); } /* Did we already initialize the correct one? Is there one? */ if (!hose || (conswitchp == &vga_con && pci_vga_hose == hose)) return; /* Create a new VGA ioport resource WRT the hose it is on. */ alpha_vga.start += hose->io_space->start; alpha_vga.end += hose->io_space->start; request_resource(hose->io_space, &alpha_vga); /* Set the VGA hose and init the new console. */ pci_vga_hose = hose; take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1); } void __init find_console_vga_hose(void) { u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); if (pu64[7] == 3) { /* TERM_TYPE == graphics */ struct pci_controller *hose; int h = (pu64[30] >> 24) & 0xff; /* console hose # */ /* * Our hose numbering DOES match the console's, so find * the right one... */ for (hose = hose_head; hose; hose = hose->next) { if (hose->index == h) break; } if (hose) { printk("Console graphics on hose %d\n", h); pci_vga_hose = hose; } } } #endif
gpl-2.0
RegaliaEzz/Hexa-N9208
fs/squashfs/xattr.c
11474
8603
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2010 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * xattr.c */ #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/vfs.h> #include <linux/xattr.h> #include <linux/slab.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" static const struct xattr_handler *squashfs_xattr_handler(int); ssize_t squashfs_listxattr(struct dentry *d, char *buffer, size_t buffer_size) { struct inode *inode = d->d_inode; struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) + msblk->xattr_table; int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr); int count = squashfs_i(inode)->xattr_count; size_t rest = buffer_size; int err; /* check that the file system has xattrs */ if (msblk->xattr_id_table == NULL) return -EOPNOTSUPP; /* loop reading each xattr name */ while (count--) { struct squashfs_xattr_entry entry; struct squashfs_xattr_val val; const struct xattr_handler *handler; int name_size, prefix_size = 0; err = squashfs_read_metadata(sb, &entry, &start, &offset, sizeof(entry)); if (err < 0) goto failed; name_size = 
le16_to_cpu(entry.size); handler = squashfs_xattr_handler(le16_to_cpu(entry.type)); if (handler) prefix_size = handler->list(d, buffer, rest, NULL, name_size, handler->flags); if (prefix_size) { if (buffer) { if (prefix_size + name_size + 1 > rest) { err = -ERANGE; goto failed; } buffer += prefix_size; } err = squashfs_read_metadata(sb, buffer, &start, &offset, name_size); if (err < 0) goto failed; if (buffer) { buffer[name_size] = '\0'; buffer += name_size + 1; } rest -= prefix_size + name_size + 1; } else { /* no handler or insuffficient privileges, so skip */ err = squashfs_read_metadata(sb, NULL, &start, &offset, name_size); if (err < 0) goto failed; } /* skip remaining xattr entry */ err = squashfs_read_metadata(sb, &val, &start, &offset, sizeof(val)); if (err < 0) goto failed; err = squashfs_read_metadata(sb, NULL, &start, &offset, le32_to_cpu(val.vsize)); if (err < 0) goto failed; } err = buffer_size - rest; failed: return err; } static int squashfs_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) + msblk->xattr_table; int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr); int count = squashfs_i(inode)->xattr_count; int name_len = strlen(name); int err, vsize; char *target = kmalloc(name_len, GFP_KERNEL); if (target == NULL) return -ENOMEM; /* loop reading each xattr name */ for (; count; count--) { struct squashfs_xattr_entry entry; struct squashfs_xattr_val val; int type, prefix, name_size; err = squashfs_read_metadata(sb, &entry, &start, &offset, sizeof(entry)); if (err < 0) goto failed; name_size = le16_to_cpu(entry.size); type = le16_to_cpu(entry.type); prefix = type & SQUASHFS_XATTR_PREFIX_MASK; if (prefix == name_index && name_size == name_len) err = squashfs_read_metadata(sb, target, &start, &offset, name_size); else err = squashfs_read_metadata(sb, 
NULL, &start, &offset, name_size); if (err < 0) goto failed; if (prefix == name_index && name_size == name_len && strncmp(target, name, name_size) == 0) { /* found xattr */ if (type & SQUASHFS_XATTR_VALUE_OOL) { __le64 xattr_val; u64 xattr; /* val is a reference to the real location */ err = squashfs_read_metadata(sb, &val, &start, &offset, sizeof(val)); if (err < 0) goto failed; err = squashfs_read_metadata(sb, &xattr_val, &start, &offset, sizeof(xattr_val)); if (err < 0) goto failed; xattr = le64_to_cpu(xattr_val); start = SQUASHFS_XATTR_BLK(xattr) + msblk->xattr_table; offset = SQUASHFS_XATTR_OFFSET(xattr); } /* read xattr value */ err = squashfs_read_metadata(sb, &val, &start, &offset, sizeof(val)); if (err < 0) goto failed; vsize = le32_to_cpu(val.vsize); if (buffer) { if (vsize > buffer_size) { err = -ERANGE; goto failed; } err = squashfs_read_metadata(sb, buffer, &start, &offset, vsize); if (err < 0) goto failed; } break; } /* no match, skip remaining xattr entry */ err = squashfs_read_metadata(sb, &val, &start, &offset, sizeof(val)); if (err < 0) goto failed; err = squashfs_read_metadata(sb, NULL, &start, &offset, le32_to_cpu(val.vsize)); if (err < 0) goto failed; } err = count ? 
vsize : -ENODATA; failed: kfree(target); return err; } /* * User namespace support */ static size_t squashfs_user_list(struct dentry *d, char *list, size_t list_size, const char *name, size_t name_len, int type) { if (list && XATTR_USER_PREFIX_LEN <= list_size) memcpy(list, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); return XATTR_USER_PREFIX_LEN; } static int squashfs_user_get(struct dentry *d, const char *name, void *buffer, size_t size, int type) { if (name[0] == '\0') return -EINVAL; return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_USER, name, buffer, size); } static const struct xattr_handler squashfs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .list = squashfs_user_list, .get = squashfs_user_get }; /* * Trusted namespace support */ static size_t squashfs_trusted_list(struct dentry *d, char *list, size_t list_size, const char *name, size_t name_len, int type) { if (!capable(CAP_SYS_ADMIN)) return 0; if (list && XATTR_TRUSTED_PREFIX_LEN <= list_size) memcpy(list, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); return XATTR_TRUSTED_PREFIX_LEN; } static int squashfs_trusted_get(struct dentry *d, const char *name, void *buffer, size_t size, int type) { if (name[0] == '\0') return -EINVAL; return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_TRUSTED, name, buffer, size); } static const struct xattr_handler squashfs_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = squashfs_trusted_list, .get = squashfs_trusted_get }; /* * Security namespace support */ static size_t squashfs_security_list(struct dentry *d, char *list, size_t list_size, const char *name, size_t name_len, int type) { if (list && XATTR_SECURITY_PREFIX_LEN <= list_size) memcpy(list, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); return XATTR_SECURITY_PREFIX_LEN; } static int squashfs_security_get(struct dentry *d, const char *name, void *buffer, size_t size, int type) { if (name[0] == '\0') return -EINVAL; return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_SECURITY, 
name, buffer, size);
}

/* Handler for the "security.*" xattr namespace. */
static const struct xattr_handler squashfs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= squashfs_security_list,
	.get	= squashfs_security_get
};

/*
 * Map an on-disk xattr type code to its namespace handler.
 *
 * Returns NULL for unrecognised types (including types with unknown flag
 * bits set outside PREFIX_MASK | VALUE_OOL); callers treat a NULL handler
 * as "skip this entry", not as an error, so unknown namespaces written by
 * newer mksquashfs versions are silently ignored.
 */
static const struct xattr_handler *squashfs_xattr_handler(int type)
{
	if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
		/* ignore unrecognised type */
		return NULL;

	switch (type & SQUASHFS_XATTR_PREFIX_MASK) {
	case SQUASHFS_XATTR_USER:
		return &squashfs_xattr_user_handler;
	case SQUASHFS_XATTR_TRUSTED:
		return &squashfs_xattr_trusted_handler;
	case SQUASHFS_XATTR_SECURITY:
		return &squashfs_xattr_security_handler;
	default:
		/* ignore unrecognised type */
		return NULL;
	}
}

/* NULL-terminated handler table registered with the VFS via sb->s_xattr. */
const struct xattr_handler *squashfs_xattr_handlers[] = {
	&squashfs_xattr_user_handler,
	&squashfs_xattr_trusted_handler,
	&squashfs_xattr_security_handler,
	NULL
};
gpl-2.0
cooldudezach/android_kernel_zte_warplte
drivers/edac/amd8131_edac.c
12498
10975
/* * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module * * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: Cao Qingtao <qingtao.cao@windriver.com> * Benjamin Walsh <benjamin.walsh@windriver.com> * Hu Yongqi <yongqi.hu@windriver.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/edac.h> #include <linux/pci_ids.h> #include "edac_core.h" #include "edac_module.h" #include "amd8131_edac.h" #define AMD8131_EDAC_REVISION " Ver: 1.0.0" #define AMD8131_EDAC_MOD_STR "amd8131_edac" /* Wrapper functions for accessing PCI configuration space */ static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32) { int ret; ret = pci_read_config_dword(dev, reg, val32); if (ret != 0) printk(KERN_ERR AMD8131_EDAC_MOD_STR " PCI Access Read Error at 0x%x\n", reg); } static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32) { int ret; ret = pci_write_config_dword(dev, reg, val32); if (ret != 0) printk(KERN_ERR AMD8131_EDAC_MOD_STR " PCI Access Write Error at 0x%x\n", reg); } static char * const bridge_str[] = { [NORTH_A] = "NORTH A", [NORTH_B] = "NORTH B", [SOUTH_A] = "SOUTH A", [SOUTH_B] = "SOUTH B", [NO_BRIDGE] = "NO BRIDGE", }; /* Support up to two AMD8131 chipsets on a platform */ static struct amd8131_dev_info 
amd8131_devices[] = { { .inst = NORTH_A, .devfn = DEVFN_PCIX_BRIDGE_NORTH_A, .ctl_name = "AMD8131_PCIX_NORTH_A", }, { .inst = NORTH_B, .devfn = DEVFN_PCIX_BRIDGE_NORTH_B, .ctl_name = "AMD8131_PCIX_NORTH_B", }, { .inst = SOUTH_A, .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A, .ctl_name = "AMD8131_PCIX_SOUTH_A", }, { .inst = SOUTH_B, .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B, .ctl_name = "AMD8131_PCIX_SOUTH_B", }, {.inst = NO_BRIDGE,}, }; static void amd8131_pcix_init(struct amd8131_dev_info *dev_info) { u32 val32; struct pci_dev *dev = dev_info->dev; /* First clear error detection flags */ edac_pci_read_dword(dev, REG_MEM_LIM, &val32); if (val32 & MEM_LIMIT_MASK) edac_pci_write_dword(dev, REG_MEM_LIM, val32); /* Clear Discard Timer Timedout flag */ edac_pci_read_dword(dev, REG_INT_CTLR, &val32); if (val32 & INT_CTLR_DTS) edac_pci_write_dword(dev, REG_INT_CTLR, val32); /* Clear CRC Error flag on link side A */ edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); if (val32 & LNK_CTRL_CRCERR_A) edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); /* Clear CRC Error flag on link side B */ edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); if (val32 & LNK_CTRL_CRCERR_B) edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); /* * Then enable all error detections. * * Setup Discard Timer Sync Flood Enable, * System Error Enable and Parity Error Enable. 
*/ edac_pci_read_dword(dev, REG_INT_CTLR, &val32); val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE; edac_pci_write_dword(dev, REG_INT_CTLR, val32); /* Enable overall SERR Error detection */ edac_pci_read_dword(dev, REG_STS_CMD, &val32); val32 |= STS_CMD_SERREN; edac_pci_write_dword(dev, REG_STS_CMD, val32); /* Setup CRC Flood Enable for link side A */ edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); val32 |= LNK_CTRL_CRCFEN; edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); /* Setup CRC Flood Enable for link side B */ edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); val32 |= LNK_CTRL_CRCFEN; edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); } static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info) { u32 val32; struct pci_dev *dev = dev_info->dev; /* Disable SERR, PERR and DTSE Error detection */ edac_pci_read_dword(dev, REG_INT_CTLR, &val32); val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE); edac_pci_write_dword(dev, REG_INT_CTLR, val32); /* Disable overall System Error detection */ edac_pci_read_dword(dev, REG_STS_CMD, &val32); val32 &= ~STS_CMD_SERREN; edac_pci_write_dword(dev, REG_STS_CMD, val32); /* Disable CRC Sync Flood on link side A */ edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); val32 &= ~LNK_CTRL_CRCFEN; edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); /* Disable CRC Sync Flood on link side B */ edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); val32 &= ~LNK_CTRL_CRCFEN; edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); } static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev) { struct amd8131_dev_info *dev_info = edac_dev->pvt_info; struct pci_dev *dev = dev_info->dev; u32 val32; /* Check PCI-X Bridge Memory Base-Limit Register for errors */ edac_pci_read_dword(dev, REG_MEM_LIM, &val32); if (val32 & MEM_LIMIT_MASK) { printk(KERN_INFO "Error(s) in mem limit register " "on %s bridge\n", dev_info->ctl_name); printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n" "RTA: %d, STA: %d, MDPE: %d\n", val32 & MEM_LIMIT_DPE, val32 
& MEM_LIMIT_RSE, val32 & MEM_LIMIT_RMA, val32 & MEM_LIMIT_RTA, val32 & MEM_LIMIT_STA, val32 & MEM_LIMIT_MDPE); val32 |= MEM_LIMIT_MASK; edac_pci_write_dword(dev, REG_MEM_LIM, val32); edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); } /* Check if Discard Timer timed out */ edac_pci_read_dword(dev, REG_INT_CTLR, &val32); if (val32 & INT_CTLR_DTS) { printk(KERN_INFO "Error(s) in interrupt and control register " "on %s bridge\n", dev_info->ctl_name); printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS); val32 |= INT_CTLR_DTS; edac_pci_write_dword(dev, REG_INT_CTLR, val32); edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); } /* Check if CRC error happens on link side A */ edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); if (val32 & LNK_CTRL_CRCERR_A) { printk(KERN_INFO "Error(s) in link conf and control register " "on %s bridge\n", dev_info->ctl_name); printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A); val32 |= LNK_CTRL_CRCERR_A; edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); } /* Check if CRC error happens on link side B */ edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); if (val32 & LNK_CTRL_CRCERR_B) { printk(KERN_INFO "Error(s) in link conf and control register " "on %s bridge\n", dev_info->ctl_name); printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B); val32 |= LNK_CTRL_CRCERR_B; edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); } } static struct amd8131_info amd8131_chipset = { .err_dev = PCI_DEVICE_ID_AMD_8131_APIC, .devices = amd8131_devices, .init = amd8131_pcix_init, .exit = amd8131_pcix_exit, .check = amd8131_pcix_check, }; /* * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID, * so amd8131_probe() would be called by kernel 4 times, with different * address of pci_dev for each of them each time. 
*/ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct amd8131_dev_info *dev_info; for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; dev_info++) if (dev_info->devfn == dev->devfn) break; if (dev_info->inst == NO_BRIDGE) /* should never happen */ return -ENODEV; /* * We can't call pci_get_device() as we are used to do because * there are 4 of them but pci_dev_get() instead. */ dev_info->dev = pci_dev_get(dev); if (pci_enable_device(dev_info->dev)) { pci_dev_put(dev_info->dev); printk(KERN_ERR "failed to enable:" "vendor %x, device %x, devfn %x, name %s\n", PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, dev_info->devfn, dev_info->ctl_name); return -ENODEV; } /* * we do not allocate extra private structure for * edac_pci_ctl_info, but make use of existing * one instead. */ dev_info->edac_idx = edac_pci_alloc_index(); dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name); if (!dev_info->edac_dev) return -ENOMEM; dev_info->edac_dev->pvt_info = dev_info; dev_info->edac_dev->dev = &dev_info->dev->dev; dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR; dev_info->edac_dev->ctl_name = dev_info->ctl_name; dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = amd8131_chipset.check; if (amd8131_chipset.init) amd8131_chipset.init(dev_info); if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) { printk(KERN_ERR "failed edac_pci_add_device() for %s\n", dev_info->ctl_name); edac_pci_free_ctl_info(dev_info->edac_dev); return -ENODEV; } printk(KERN_INFO "added one device on AMD8131 " "vendor %x, device %x, devfn %x, name %s\n", PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, dev_info->devfn, dev_info->ctl_name); return 0; } static void amd8131_remove(struct pci_dev *dev) { struct amd8131_dev_info *dev_info; for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; dev_info++) if (dev_info->devfn == dev->devfn) 
break;

	if (dev_info->inst == NO_BRIDGE) /* should never happen */
		return;

	/* Tear down the EDAC control structure registered in probe. */
	if (dev_info->edac_dev) {
		edac_pci_del_device(dev_info->edac_dev->dev);
		edac_pci_free_ctl_info(dev_info->edac_dev);
	}

	/* Disable the bridge's error detection before dropping it. */
	if (amd8131_chipset.exit)
		amd8131_chipset.exit(dev_info);

	/* Balance the pci_dev_get() done in amd8131_probe(). */
	pci_dev_put(dev_info->dev);
}

/* All four bridges share this single device ID; probe disambiguates
 * per-bridge by devfn. */
static const struct pci_device_id amd8131_edac_pci_tbl[] = {
	{
	PCI_VEND_DEV(AMD, 8131_BRIDGE),
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.class = 0,
	.class_mask = 0,
	.driver_data = 0,
	},
	{
	0,
	} /* table is NULL-terminated */
};
MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);

static struct pci_driver amd8131_edac_driver = {
	.name = AMD8131_EDAC_MOD_STR,
	.probe = amd8131_probe,
	.remove = amd8131_remove,
	.id_table = amd8131_edac_pci_tbl,
};

/* Module entry point: announce ourselves and register the PCI driver. */
static int __init amd8131_edac_init(void)
{
	printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");

	/* Only POLL mode supported so far */
	edac_op_state = EDAC_OPSTATE_POLL;

	return pci_register_driver(&amd8131_edac_driver);
}

static void __exit amd8131_edac_exit(void)
{
	pci_unregister_driver(&amd8131_edac_driver);
}

module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
gpl-2.0
manishj-patel/netbook_kernel_3.4.5_plus
arch/x86/kernel/paravirt_patch_32.c
13266
1714
#include <asm/paravirt.h>

/*
 * Native instruction templates for 32-bit x86.  DEF_NATIVE emits the given
 * asm into a named section and defines start_/end_ symbols around it, so
 * the patcher below can copy the raw bytes over a paravirt call site.
 */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");

/*
 * Identity functions need no patching on 32-bit: the value is already in
 * the right register(s), so emit nothing (returning 0 patched bytes keeps
 * the indirect call in place, which is itself the identity here).
 */
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

/*
 * Patch a single paravirt call site: for ops with a native template above,
 * copy the template bytes into ibuf (padding/short-jumping as needed inside
 * paravirt_patch_insns); anything else falls through to the generic
 * default patcher.  Returns the number of bytes emitted.
 */
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

	/* Resolve the start_/end_ symbol pair created by DEF_NATIVE. */
#define PATCH_SITE(ops, x) \
		case PARAVIRT_PATCH(ops.x): \
			start = start_##ops##_##x; \
			end = end_##ops##_##x; \
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_cpu_ops, read_tsc);

	patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;
	}
#undef PATCH_SITE
	return ret;
}
gpl-2.0
johnnyslt/android_kernel_htc_msm8660
sound/oss/pas2_midi.c
14802
5011
/* * sound/oss/pas2_midi.c * * The low level driver for the PAS Midi Interface. */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * Bartlomiej Zolnierkiewicz : Added __init to pas_init_mixer() */ #include <linux/init.h> #include <linux/spinlock.h> #include "sound_config.h" #include "pas2.h" extern spinlock_t pas_lock; static int midi_busy, input_opened; static int my_dev; int pas2_mididev=-1; static unsigned char tmp_queue[256]; static volatile int qlen; static volatile unsigned char qhead, qtail; static void (*midi_input_intr) (int dev, unsigned char data); static int pas_midi_open(int dev, int mode, void (*input) (int dev, unsigned char data), void (*output) (int dev) ) { int err; unsigned long flags; unsigned char ctrl; if (midi_busy) return -EBUSY; /* * Reset input and output FIFO pointers */ pas_write(0x20 | 0x40, 0x178b); spin_lock_irqsave(&pas_lock, flags); if ((err = pas_set_intr(0x10)) < 0) { spin_unlock_irqrestore(&pas_lock, flags); return err; } /* * Enable input available and output FIFO empty interrupts */ ctrl = 0; input_opened = 0; midi_input_intr = input; if (mode == OPEN_READ || mode == OPEN_READWRITE) { ctrl |= 0x04; /* Enable input */ input_opened = 1; } if (mode == OPEN_WRITE || mode == OPEN_READWRITE) { ctrl |= 0x08 | 0x10; /* Enable output */ } pas_write(ctrl, 0x178b); /* * Acknowledge any pending interrupts */ pas_write(0xff, 0x1B88); spin_unlock_irqrestore(&pas_lock, flags); midi_busy = 1; qlen = qhead = qtail = 0; return 0; } static void pas_midi_close(int dev) { /* * Reset FIFO pointers, disable intrs */ pas_write(0x20 | 0x40, 0x178b); pas_remove_intr(0x10); midi_busy = 0; } static int dump_to_midi(unsigned char midi_byte) { int fifo_space, x; fifo_space = ((x = pas_read(0x1B89)) >> 4) & 0x0f; /* * The MIDI FIFO space register and it's documentation is 
nonunderstandable. * There seem to be no way to differentiate between buffer full and buffer * empty situations. For this reason we don't never write the buffer * completely full. In this way we can assume that 0 (or is it 15) * means that the buffer is empty. */ if (fifo_space < 2 && fifo_space != 0) /* Full (almost) */ return 0; /* Ask upper layers to retry after some time */ pas_write(midi_byte, 0x178A); return 1; } static int pas_midi_out(int dev, unsigned char midi_byte) { unsigned long flags; /* * Drain the local queue first */ spin_lock_irqsave(&pas_lock, flags); while (qlen && dump_to_midi(tmp_queue[qhead])) { qlen--; qhead++; } spin_unlock_irqrestore(&pas_lock, flags); /* * Output the byte if the local queue is empty. */ if (!qlen) if (dump_to_midi(midi_byte)) return 1; /* * Put to the local queue */ if (qlen >= 256) return 0; /* Local queue full */ spin_lock_irqsave(&pas_lock, flags); tmp_queue[qtail] = midi_byte; qlen++; qtail++; spin_unlock_irqrestore(&pas_lock, flags); return 1; } static int pas_midi_start_read(int dev) { return 0; } static int pas_midi_end_read(int dev) { return 0; } static void pas_midi_kick(int dev) { } static int pas_buffer_status(int dev) { return qlen; } #define MIDI_SYNTH_NAME "Pro Audio Spectrum Midi" #define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT #include "midi_synth.h" static struct midi_operations pas_midi_operations = { .owner = THIS_MODULE, .info = {"Pro Audio Spectrum", 0, 0, SNDCARD_PAS}, .converter = &std_midi_synth, .in_info = {0}, .open = pas_midi_open, .close = pas_midi_close, .outputc = pas_midi_out, .start_read = pas_midi_start_read, .end_read = pas_midi_end_read, .kick = pas_midi_kick, .buffer_status = pas_buffer_status, }; void __init pas_midi_init(void) { int dev = sound_alloc_mididev(); if (dev == -1) { printk(KERN_WARNING "pas_midi_init: Too many midi devices detected\n"); return; } std_midi_synth.midi_dev = my_dev = dev; midi_devs[dev] = &pas_midi_operations; pas2_mididev = dev; sequencer_init(); } void 
pas_midi_interrupt(void)
{
	unsigned char stat;
	int i, incount;

	/* Read the MIDI status register (0x1B88 appears to be the PAS16
	 * MIDI status/IRQ port — NOTE(review): register semantics inferred
	 * from usage, confirm against PAS16 hardware docs). */
	stat = pas_read(0x1B88);

	if (stat & 0x04)	/* Input data available */
	{
		incount = pas_read(0x1B89) & 0x0f;	/* Input FIFO size */
		if (!incount)
			incount = 16;	/* 0 presumably means a full 16-byte FIFO */

		/* Drain the input FIFO, forwarding bytes to the upper layer
		 * if a reader is attached, discarding them otherwise. */
		for (i = 0; i < incount; i++)
			if (input_opened)
			{
				midi_input_intr(my_dev, pas_read(0x178A));
			} else
				pas_read(0x178A);	/* Flush */
	}
	if (stat & (0x08 | 0x10))
	{
		/* Output FIFO has room: push queued bytes from the local
		 * software queue out to the hardware. */
		spin_lock(&pas_lock);/* called in irq context */
		while (qlen && dump_to_midi(tmp_queue[qhead]))
		{
			qlen--;
			qhead++;	/* unsigned char index wraps at 256 == sizeof(tmp_queue) */
		}
		spin_unlock(&pas_lock);
	}
	if (stat & 0x40)
	{
		printk(KERN_WARNING "MIDI output overrun %x,%x\n",
			pas_read(0x1B89), stat);
	}
	pas_write(stat, 0x1B88);	/* Acknowledge interrupts */
}
gpl-2.0
j-r0dd/motus_kernel
drivers/s390/char/vmwatchdog.c
211
6146
/* * Watchdog implementation based on z/VM Watchdog Timer API * * The user space watchdog daemon can use this driver as * /dev/vmwatchdog to have z/VM execute the specified CP * command when the timeout expires. The default command is * "IPL", which which cause an immediate reboot. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/watchdog.h> #include <linux/smp_lock.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/uaccess.h> #define MAX_CMDLEN 240 #define MIN_INTERVAL 15 static char vmwdt_cmd[MAX_CMDLEN] = "IPL"; static int vmwdt_conceal; static int vmwdt_nowayout = WATCHDOG_NOWAYOUT; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); MODULE_DESCRIPTION("z/VM Watchdog Timer"); module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644); MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers"); module_param_named(conceal, vmwdt_conceal, bool, 0644); MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog " " is active"); module_param_named(nowayout, vmwdt_nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started" " (default=CONFIG_WATCHDOG_NOWAYOUT)"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static unsigned int vmwdt_interval = 60; static unsigned long vmwdt_is_open; static int vmwdt_expect_close; enum vmwdt_func { /* function codes */ wdt_init = 0, wdt_change = 1, wdt_cancel = 2, /* flags */ wdt_conceal = 0x80000000, }; static int __diag288(enum vmwdt_func func, unsigned int timeout, char *cmd, size_t len) { register unsigned long __func asm("2") = func; register unsigned long __timeout asm("3") = timeout; register unsigned long __cmdp asm("4") = virt_to_phys(cmd); register unsigned long __cmdl asm("5") = len; int err; err = -EINVAL; asm volatile( " diag %1,%3,0x288\n" "0: la %0,0\n" "1:\n" EX_TABLE(0b,1b) : "+d" (err) : "d"(__func), 
"d"(__timeout), "d"(__cmdp), "d"(__cmdl) : "1", "cc"); return err; } static int vmwdt_keepalive(void) { /* we allocate new memory every time to avoid having * to track the state. static allocation is not an * option since that might not be contiguous in real * storage in case of a modular build */ static char *ebc_cmd; size_t len; int ret; unsigned int func; ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL); if (!ebc_cmd) return -ENOMEM; len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN); ASCEBC(ebc_cmd, MAX_CMDLEN); EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; ret = __diag288(func, vmwdt_interval, ebc_cmd, len); WARN_ON(ret != 0); kfree(ebc_cmd); return ret; } static int vmwdt_disable(void) { int ret = __diag288(wdt_cancel, 0, "", 0); WARN_ON(ret != 0); return ret; } static int __init vmwdt_probe(void) { /* there is no real way to see if the watchdog is supported, * so we try initializing it with a NOP command ("BEGIN") * that won't cause any harm even if the following disable * fails for some reason */ static char __initdata ebc_begin[] = { 194, 197, 199, 201, 213 }; if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) return -EINVAL; return vmwdt_disable(); } static int vmwdt_open(struct inode *i, struct file *f) { int ret; lock_kernel(); if (test_and_set_bit(0, &vmwdt_is_open)) { unlock_kernel(); return -EBUSY; } ret = vmwdt_keepalive(); if (ret) clear_bit(0, &vmwdt_is_open); unlock_kernel(); return ret ? 
ret : nonseekable_open(i, f); } static int vmwdt_close(struct inode *i, struct file *f) { if (vmwdt_expect_close == 42) vmwdt_disable(); vmwdt_expect_close = 0; clear_bit(0, &vmwdt_is_open); return 0; } static struct watchdog_info vmwdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "z/VM Watchdog Timer", }; static int vmwdt_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg) { switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user((void __user *)arg, &vmwdt_info, sizeof(vmwdt_info))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, (int __user *)arg); case WDIOC_GETTEMP: return -EINVAL; case WDIOC_SETOPTIONS: { int options, ret; if (get_user(options, (int __user *)arg)) return -EFAULT; ret = -EINVAL; if (options & WDIOS_DISABLECARD) { ret = vmwdt_disable(); if (ret) return ret; } if (options & WDIOS_ENABLECARD) { ret = vmwdt_keepalive(); } return ret; } case WDIOC_GETTIMEOUT: return put_user(vmwdt_interval, (int __user *)arg); case WDIOC_SETTIMEOUT: { int interval; if (get_user(interval, (int __user *)arg)) return -EFAULT; if (interval < MIN_INTERVAL) return -EINVAL; vmwdt_interval = interval; } return vmwdt_keepalive(); case WDIOC_KEEPALIVE: return vmwdt_keepalive(); } return -EINVAL; } static ssize_t vmwdt_write(struct file *f, const char __user *buf, size_t count, loff_t *ppos) { if(count) { if (!vmwdt_nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... 
*/ vmwdt_expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf+i)) return -EFAULT; if (c == 'V') vmwdt_expect_close = 42; } } /* someone wrote to us, we should restart timer */ vmwdt_keepalive(); } return count; } static const struct file_operations vmwdt_fops = { .open = &vmwdt_open, .release = &vmwdt_close, .ioctl = &vmwdt_ioctl, .write = &vmwdt_write, .owner = THIS_MODULE, }; static struct miscdevice vmwdt_dev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &vmwdt_fops, }; static int __init vmwdt_init(void) { int ret; ret = vmwdt_probe(); if (ret) return ret; return misc_register(&vmwdt_dev); } module_init(vmwdt_init); static void __exit vmwdt_exit(void) { WARN_ON(misc_deregister(&vmwdt_dev) != 0); } module_exit(vmwdt_exit);
gpl-2.0
vathpela/linux-esrt
arch/tile/kernel/unaligned.c
211
42747
/* * Copyright 2013 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * A code-rewriter that handles unaligned exception. */ #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/mman.h> #include <linux/types.h> #include <linux/err.h> #include <linux/module.h> #include <linux/compat.h> #include <linux/prctl.h> #include <asm/cacheflush.h> #include <asm/traps.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <arch/abi.h> #include <arch/spr_def.h> #include <arch/opcode.h> /* * This file handles unaligned exception for tile-Gx. The tilepro's unaligned * exception is supported out of single_step.c */ int unaligned_printk; static int __init setup_unaligned_printk(char *str) { long val; if (kstrtol(str, 0, &val) != 0) return 0; unaligned_printk = val; pr_info("Printk for each unaligned data accesses is %s\n", unaligned_printk ? "enabled" : "disabled"); return 1; } __setup("unaligned_printk=", setup_unaligned_printk); unsigned int unaligned_fixup_count; #ifdef __tilegx__ /* * Unalign data jit fixup code fragement. Reserved space is 128 bytes. * The 1st 64-bit word saves fault PC address, 2nd word is the fault * instruction bundle followed by 14 JIT bundles. */ struct unaligned_jit_fragment { unsigned long pc; tilegx_bundle_bits bundle; tilegx_bundle_bits insn[14]; }; /* * Check if a nop or fnop at bundle's pipeline X0. 
*/ static bool is_bundle_x0_nop(tilegx_bundle_bits bundle) { return (((get_UnaryOpcodeExtension_X0(bundle) == NOP_UNARY_OPCODE_X0) && (get_RRROpcodeExtension_X0(bundle) == UNARY_RRR_0_OPCODE_X0) && (get_Opcode_X0(bundle) == RRR_0_OPCODE_X0)) || ((get_UnaryOpcodeExtension_X0(bundle) == FNOP_UNARY_OPCODE_X0) && (get_RRROpcodeExtension_X0(bundle) == UNARY_RRR_0_OPCODE_X0) && (get_Opcode_X0(bundle) == RRR_0_OPCODE_X0))); } /* * Check if nop or fnop at bundle's pipeline X1. */ static bool is_bundle_x1_nop(tilegx_bundle_bits bundle) { return (((get_UnaryOpcodeExtension_X1(bundle) == NOP_UNARY_OPCODE_X1) && (get_RRROpcodeExtension_X1(bundle) == UNARY_RRR_0_OPCODE_X1) && (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1)) || ((get_UnaryOpcodeExtension_X1(bundle) == FNOP_UNARY_OPCODE_X1) && (get_RRROpcodeExtension_X1(bundle) == UNARY_RRR_0_OPCODE_X1) && (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1))); } /* * Check if nop or fnop at bundle's Y0 pipeline. */ static bool is_bundle_y0_nop(tilegx_bundle_bits bundle) { return (((get_UnaryOpcodeExtension_Y0(bundle) == NOP_UNARY_OPCODE_Y0) && (get_RRROpcodeExtension_Y0(bundle) == UNARY_RRR_1_OPCODE_Y0) && (get_Opcode_Y0(bundle) == RRR_1_OPCODE_Y0)) || ((get_UnaryOpcodeExtension_Y0(bundle) == FNOP_UNARY_OPCODE_Y0) && (get_RRROpcodeExtension_Y0(bundle) == UNARY_RRR_1_OPCODE_Y0) && (get_Opcode_Y0(bundle) == RRR_1_OPCODE_Y0))); } /* * Check if nop or fnop at bundle's pipeline Y1. */ static bool is_bundle_y1_nop(tilegx_bundle_bits bundle) { return (((get_UnaryOpcodeExtension_Y1(bundle) == NOP_UNARY_OPCODE_Y1) && (get_RRROpcodeExtension_Y1(bundle) == UNARY_RRR_1_OPCODE_Y1) && (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1)) || ((get_UnaryOpcodeExtension_Y1(bundle) == FNOP_UNARY_OPCODE_Y1) && (get_RRROpcodeExtension_Y1(bundle) == UNARY_RRR_1_OPCODE_Y1) && (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1))); } /* * Test if a bundle's y0 and y1 pipelines are both nop or fnop. 
*/ static bool is_y0_y1_nop(tilegx_bundle_bits bundle) { return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle); } /* * Test if a bundle's x0 and x1 pipelines are both nop or fnop. */ static bool is_x0_x1_nop(tilegx_bundle_bits bundle) { return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle); } /* * Find the destination, source registers of fault unalign access instruction * at X1 or Y2. Also, allocate up to 3 scratch registers clob1, clob2 and * clob3, which are guaranteed different from any register used in the fault * bundle. r_alias is used to return if the other instructions other than the * unalign load/store shares same register with ra, rb and rd. */ static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra, uint64_t *rb, uint64_t *clob1, uint64_t *clob2, uint64_t *clob3, bool *r_alias) { int i; uint64_t reg; uint64_t reg_map = 0, alias_reg_map = 0, map; bool alias = false; /* * Parse fault bundle, find potential used registers and mark * corresponding bits in reg_map and alias_map. These 2 bit maps * are used to find the scratch registers and determine if there * is register alais. */ if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* Y Mode Bundle. */ reg = get_SrcA_Y2(bundle); reg_map |= 1ULL << reg; *ra = reg; reg = get_SrcBDest_Y2(bundle); reg_map |= 1ULL << reg; if (rd) { /* Load. */ *rd = reg; alias_reg_map = (1ULL << *rd) | (1ULL << *ra); } else { /* Store. 
*/ *rb = reg; alias_reg_map = (1ULL << *ra) | (1ULL << *rb); } if (!is_bundle_y1_nop(bundle)) { reg = get_SrcA_Y1(bundle); reg_map |= (1ULL << reg); map = (1ULL << reg); reg = get_SrcB_Y1(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); reg = get_Dest_Y1(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); if (map & alias_reg_map) alias = true; } if (!is_bundle_y0_nop(bundle)) { reg = get_SrcA_Y0(bundle); reg_map |= (1ULL << reg); map = (1ULL << reg); reg = get_SrcB_Y0(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); reg = get_Dest_Y0(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); if (map & alias_reg_map) alias = true; } } else { /* X Mode Bundle. */ reg = get_SrcA_X1(bundle); reg_map |= (1ULL << reg); *ra = reg; if (rd) { /* Load. */ reg = get_Dest_X1(bundle); reg_map |= (1ULL << reg); *rd = reg; alias_reg_map = (1ULL << *rd) | (1ULL << *ra); } else { /* Store. */ reg = get_SrcB_X1(bundle); reg_map |= (1ULL << reg); *rb = reg; alias_reg_map = (1ULL << *ra) | (1ULL << *rb); } if (!is_bundle_x0_nop(bundle)) { reg = get_SrcA_X0(bundle); reg_map |= (1ULL << reg); map = (1ULL << reg); reg = get_SrcB_X0(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); reg = get_Dest_X0(bundle); reg_map |= (1ULL << reg); map |= (1ULL << reg); if (map & alias_reg_map) alias = true; } } /* * "alias" indicates if the unalign access registers have collision * with others in the same bundle. We jsut simply test all register * operands case (RRR), ignored the case with immidate. If a bundle * has no register alias, we may do fixup in a simple or fast manner. * So if an immidata field happens to hit with a register, we may end * up fall back to the generic handling. */ *r_alias = alias; /* Flip bits on reg_map. */ reg_map ^= -1ULL; /* Scan reg_map lower 54(TREG_SP) bits to find 3 set bits. 
*/ for (i = 0; i < TREG_SP; i++) { if (reg_map & (0x1ULL << i)) { if (*clob1 == -1) { *clob1 = i; } else if (*clob2 == -1) { *clob2 = i; } else if (*clob3 == -1) { *clob3 = i; return; } } } } /* * Sanity check for register ra, rb, rd, clob1/2/3. Return true if any of them * is unexpected. */ static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb, uint64_t clob1, uint64_t clob2, uint64_t clob3) { bool unexpected = false; if ((ra >= 56) && (ra != TREG_ZERO)) unexpected = true; if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56)) unexpected = true; if (rd != -1) { if ((rd >= 56) && (rd != TREG_ZERO)) unexpected = true; } else { if ((rb >= 56) && (rb != TREG_ZERO)) unexpected = true; } return unexpected; } #define GX_INSN_X0_MASK ((1ULL << 31) - 1) #define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31) #define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL)) #define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31) #define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20)) #ifdef __LITTLE_ENDIAN #define GX_INSN_BSWAP(_bundle_) (_bundle_) #else #define GX_INSN_BSWAP(_bundle_) swab64(_bundle_) #endif /* __LITTLE_ENDIAN */ /* * __JIT_CODE(.) creates template bundles in .rodata.unalign_data section. * The corresponding static function jix_x#_###(.) generates partial or * whole bundle based on the template and given arguments. 
*/ #define __JIT_CODE(_X_) \ asm (".pushsection .rodata.unalign_data, \"a\"\n" \ _X_"\n" \ ".popsection\n") __JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}"); static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg) { extern tilegx_bundle_bits __unalign_jit_x1_mtspr; return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) | create_MT_Imm14_X1(spr) | create_SrcA_X1(reg); } __JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}"); static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr) { extern tilegx_bundle_bits __unalign_jit_x1_mfspr; return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) | create_MF_Imm14_X1(spr) | create_Dest_X1(reg); } __JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}"); static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8) { extern tilegx_bundle_bits __unalign_jit_x0_addi; return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_Imm8_X0(imm8); } __JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}"); static tilegx_bundle_bits jit_x1_ldna(int rd, int ra) { extern tilegx_bundle_bits __unalign_jit_x1_ldna; return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) | create_Dest_X1(rd) | create_SrcA_X1(ra); } __JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0 ,r0}"); static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb) { extern tilegx_bundle_bits __unalign_jit_x0_dblalign; return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_SrcB_X0(rb); } __JIT_CODE("__unalign_jit_x1_iret: {iret}"); static tilegx_bundle_bits jit_x1_iret(void) { extern tilegx_bundle_bits __unalign_jit_x1_iret; return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK; } __JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}"); static tilegx_bundle_bits jit_x0_fnop(void) { extern tilegx_bundle_bits __unalign_jit_x01_fnop; return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK; } static 
tilegx_bundle_bits jit_x1_fnop(void) { extern tilegx_bundle_bits __unalign_jit_x01_fnop; return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK; } __JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}"); static tilegx_bundle_bits jit_y2_dummy(void) { extern tilegx_bundle_bits __unalign_jit_y2_dummy; return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK; } static tilegx_bundle_bits jit_y1_fnop(void) { extern tilegx_bundle_bits __unalign_jit_y2_dummy; return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK; } __JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}"); static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8) { extern tilegx_bundle_bits __unalign_jit_x1_st1_add; return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) & (~create_SrcA_X1(-1)) & GX_INSN_X1_MASK) | create_SrcA_X1(ra) | create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8); } __JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}"); static tilegx_bundle_bits jit_x1_st(int ra, int rb) { extern tilegx_bundle_bits __unalign_jit_x1_st; return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) | create_SrcA_X1(ra) | create_SrcB_X1(rb); } __JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}"); static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8) { extern tilegx_bundle_bits __unalign_jit_x1_st_add; return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) & (~create_SrcA_X1(-1)) & GX_INSN_X1_MASK) | create_SrcA_X1(ra) | create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8); } __JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}"); static tilegx_bundle_bits jit_x1_ld(int rd, int ra) { extern tilegx_bundle_bits __unalign_jit_x1_ld; return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) | create_Dest_X1(rd) | create_SrcA_X1(ra); } __JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}"); static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8) { extern tilegx_bundle_bits __unalign_jit_x1_ld_add; return 
(GX_INSN_BSWAP(__unalign_jit_x1_ld_add) & (~create_Dest_X1(-1)) & GX_INSN_X1_MASK) | create_Dest_X1(rd) | create_SrcA_X1(ra) | create_Imm8_X1(imm8); } __JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}"); static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe) { extern tilegx_bundle_bits __unalign_jit_x0_bfexts; return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_BFStart_X0(bfs) | create_BFEnd_X0(bfe); } __JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}"); static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe) { extern tilegx_bundle_bits __unalign_jit_x0_bfextu; return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_BFStart_X0(bfs) | create_BFEnd_X0(bfe); } __JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}"); static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8) { extern tilegx_bundle_bits __unalign_jit_x1_addi; return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) | create_Dest_X1(rd) | create_SrcA_X1(ra) | create_Imm8_X1(imm8); } __JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}"); static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6) { extern tilegx_bundle_bits __unalign_jit_x0_shrui; return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_ShAmt_X0(imm6); } __JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}"); static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6) { extern tilegx_bundle_bits __unalign_jit_x0_rotli; return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) & GX_INSN_X0_MASK) | create_Dest_X0(rd) | create_SrcA_X0(ra) | create_ShAmt_X0(imm6); } __JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}"); static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff) { extern tilegx_bundle_bits __unalign_jit_x1_bnezt; return 
(GX_INSN_BSWAP(__unalign_jit_x1_bnezt) & GX_INSN_X1_MASK) | create_SrcA_X1(ra) | create_BrOff_X1(broff); } #undef __JIT_CODE /* * This function generates unalign fixup JIT. * * We first find unalign load/store instruction's destination, source * registers: ra, rb and rd. and 3 scratch registers by calling * find_regs(...). 3 scratch clobbers should not alias with any register * used in the fault bundle. Then analyze the fault bundle to determine * if it's a load or store, operand width, branch or address increment etc. * At last generated JIT is copied into JIT code area in user space. */ static void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, int align_ctl) { struct thread_info *info = current_thread_info(); struct unaligned_jit_fragment frag; struct unaligned_jit_fragment *jit_code_area; tilegx_bundle_bits bundle_2 = 0; /* If bundle_2_enable = false, bundle_2 is fnop/nop operation. */ bool bundle_2_enable = true; uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1; /* * Indicate if the unalign access * instruction's registers hit with * others in the same bundle. */ bool alias = false; bool load_n_store = true; bool load_store_signed = false; unsigned int load_store_size = 8; bool y1_br = false; /* True, for a branch in same bundle at Y1.*/ int y1_br_reg = 0; /* True for link operation. i.e. jalr or lnk at Y1 */ bool y1_lr = false; int y1_lr_reg = 0; bool x1_add = false;/* True, for load/store ADD instruction at X1*/ int x1_add_imm8 = 0; bool unexpected = false; int n = 0, k; jit_code_area = (struct unaligned_jit_fragment *)(info->unalign_jit_base); memset((void *)&frag, 0, sizeof(frag)); /* 0: X mode, Otherwise: Y mode. */ if (bundle & TILEGX_BUNDLE_MODE_MASK) { unsigned int mod, opcode; if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 && get_RRROpcodeExtension_Y1(bundle) == UNARY_RRR_1_OPCODE_Y1) { opcode = get_UnaryOpcodeExtension_Y1(bundle); /* * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1 * pipeline. 
*/ switch (opcode) { case JALR_UNARY_OPCODE_Y1: case JALRP_UNARY_OPCODE_Y1: y1_lr = true; y1_lr_reg = 55; /* Link register. */ /* FALLTHROUGH */ case JR_UNARY_OPCODE_Y1: case JRP_UNARY_OPCODE_Y1: y1_br = true; y1_br_reg = get_SrcA_Y1(bundle); break; case LNK_UNARY_OPCODE_Y1: /* "lnk" at Y1 pipeline. */ y1_lr = true; y1_lr_reg = get_Dest_Y1(bundle); break; } } opcode = get_Opcode_Y2(bundle); mod = get_Mode(bundle); /* * bundle_2 is bundle after making Y2 as a dummy operation * - ld zero, sp */ bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy(); /* Make Y1 as fnop if Y1 is a branch or lnk operation. */ if (y1_br || y1_lr) { bundle_2 &= ~(GX_INSN_Y1_MASK); bundle_2 |= jit_y1_fnop(); } if (is_y0_y1_nop(bundle_2)) bundle_2_enable = false; if (mod == MODE_OPCODE_YC2) { /* Store. */ load_n_store = false; load_store_size = 1 << opcode; load_store_signed = false; find_regs(bundle, 0, &ra, &rb, &clob1, &clob2, &clob3, &alias); if (load_store_size > 8) unexpected = true; } else { /* Load. */ load_n_store = true; if (mod == MODE_OPCODE_YB2) { switch (opcode) { case LD_OPCODE_Y2: load_store_signed = false; load_store_size = 8; break; case LD4S_OPCODE_Y2: load_store_signed = true; load_store_size = 4; break; case LD4U_OPCODE_Y2: load_store_signed = false; load_store_size = 4; break; default: unexpected = true; } } else if (mod == MODE_OPCODE_YA2) { if (opcode == LD2S_OPCODE_Y2) { load_store_signed = true; load_store_size = 2; } else if (opcode == LD2U_OPCODE_Y2) { load_store_signed = false; load_store_size = 2; } else unexpected = true; } else unexpected = true; find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2, &clob3, &alias); } } else { unsigned int opcode; /* bundle_2 is bundle after making X1 as "fnop". 
*/ bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop(); if (is_x0_x1_nop(bundle_2)) bundle_2_enable = false; if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) { opcode = get_UnaryOpcodeExtension_X1(bundle); if (get_RRROpcodeExtension_X1(bundle) == UNARY_RRR_0_OPCODE_X1) { load_n_store = true; find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2, &clob3, &alias); switch (opcode) { case LD_UNARY_OPCODE_X1: load_store_signed = false; load_store_size = 8; break; case LD4S_UNARY_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD4U_UNARY_OPCODE_X1: load_store_size = 4; break; case LD2S_UNARY_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD2U_UNARY_OPCODE_X1: load_store_size = 2; break; default: unexpected = true; } } else { load_n_store = false; load_store_signed = false; find_regs(bundle, 0, &ra, &rb, &clob1, &clob2, &clob3, &alias); opcode = get_RRROpcodeExtension_X1(bundle); switch (opcode) { case ST_RRR_0_OPCODE_X1: load_store_size = 8; break; case ST4_RRR_0_OPCODE_X1: load_store_size = 4; break; case ST2_RRR_0_OPCODE_X1: load_store_size = 2; break; default: unexpected = true; } } } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) { load_n_store = true; opcode = get_Imm8OpcodeExtension_X1(bundle); switch (opcode) { case LD_ADD_IMM8_OPCODE_X1: load_store_size = 8; break; case LD4S_ADD_IMM8_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD4U_ADD_IMM8_OPCODE_X1: load_store_size = 4; break; case LD2S_ADD_IMM8_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD2U_ADD_IMM8_OPCODE_X1: load_store_size = 2; break; case ST_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 8; break; case ST4_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 4; break; case ST2_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 2; break; default: unexpected = true; } if (!unexpected) { x1_add = true; if (load_n_store) x1_add_imm8 = get_Imm8_X1(bundle); else x1_add_imm8 = get_Dest_Imm8_X1(bundle); } find_regs(bundle, 
load_n_store ? (&rd) : NULL, &ra, &rb, &clob1, &clob2, &clob3, &alias); } else unexpected = true; } /* * Some sanity check for register numbers extracted from fault bundle. */ if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true) unexpected = true; /* Give warning if register ra has an aligned address. */ if (!unexpected) WARN_ON(!((load_store_size - 1) & (regs->regs[ra]))); /* * Fault came from kernel space, here we only need take care of * unaligned "get_user/put_user" macros defined in "uaccess.h". * Basically, we will handle bundle like this: * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0} * (Refer to file "arch/tile/include/asm/uaccess.h" for details). * For either load or store, byte-wise operation is performed by calling * get_user() or put_user(). If the macro returns non-zero value, * set the value to rx, otherwise set zero to rx. Finally make pc point * to next bundle and return. */ if (EX1_PL(regs->ex1) != USER_PL) { unsigned long rx = 0; unsigned long x = 0, ret = 0; if (y1_br || y1_lr || x1_add || (load_store_signed != (load_n_store && load_store_size == 4))) { /* No branch, link, wrong sign-ext or load/store add. */ unexpected = true; } else if (!unexpected) { if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* * Fault bundle is Y mode. * Check if the Y1 and Y0 is the form of * { movei rx, 0; nop/fnop }, if yes, * find the rx. */ if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1) && (get_SrcA_Y1(bundle) == TREG_ZERO) && (get_Imm8_Y1(bundle) == 0) && is_bundle_y0_nop(bundle)) { rx = get_Dest_Y1(bundle); } else if ((get_Opcode_Y0(bundle) == ADDI_OPCODE_Y0) && (get_SrcA_Y0(bundle) == TREG_ZERO) && (get_Imm8_Y0(bundle) == 0) && is_bundle_y1_nop(bundle)) { rx = get_Dest_Y0(bundle); } else { unexpected = true; } } else { /* * Fault bundle is X mode. * Check if the X0 is 'movei rx, 0', * if yes, find the rx. 
*/ if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0) && (get_Imm8OpcodeExtension_X0(bundle) == ADDI_IMM8_OPCODE_X0) && (get_SrcA_X0(bundle) == TREG_ZERO) && (get_Imm8_X0(bundle) == 0)) { rx = get_Dest_X0(bundle); } else { unexpected = true; } } /* rx should be less than 56. */ if (!unexpected && (rx >= 56)) unexpected = true; } if (!search_exception_tables(regs->pc)) { /* No fixup in the exception tables for the pc. */ unexpected = true; } if (unexpected) { /* Unexpected unalign kernel fault. */ struct task_struct *tsk = validate_current(); bust_spinlocks(1); show_regs(regs); if (unlikely(tsk->pid < 2)) { panic("Kernel unalign fault running %s!", tsk->pid ? "init" : "the idle task"); } #ifdef SUPPORT_DIE die("Oops", regs); #endif bust_spinlocks(1); do_group_exit(SIGKILL); } else { unsigned long i, b = 0; unsigned char *ptr = (unsigned char *)regs->regs[ra]; if (load_n_store) { /* handle get_user(x, ptr) */ for (i = 0; i < load_store_size; i++) { ret = get_user(b, ptr++); if (!ret) { /* Success! update x. */ #ifdef __LITTLE_ENDIAN x |= (b << (8 * i)); #else x <<= 8; x |= b; #endif /* __LITTLE_ENDIAN */ } else { x = 0; break; } } /* Sign-extend 4-byte loads. */ if (load_store_size == 4) x = (long)(int)x; /* Set register rd. */ regs->regs[rd] = x; /* Set register rx. */ regs->regs[rx] = ret; /* Bump pc. */ regs->pc += 8; } else { /* Handle put_user(x, ptr) */ x = regs->regs[rb]; #ifdef __LITTLE_ENDIAN b = x; #else /* * Swap x in order to store x from low * to high memory same as the * little-endian case. */ switch (load_store_size) { case 8: b = swab64(x); break; case 4: b = swab32(x); break; case 2: b = swab16(x); break; } #endif /* __LITTLE_ENDIAN */ for (i = 0; i < load_store_size; i++) { ret = put_user(b, ptr++); if (ret) break; /* Success! shift 1 byte. */ b >>= 8; } /* Set register rx. */ regs->regs[rx] = ret; /* Bump pc. 
*/ regs->pc += 8; } } unaligned_fixup_count++; if (unaligned_printk) { pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n", current->comm, current->pid, regs->regs[ra]); } /* Done! Return to the exception handler. */ return; } if ((align_ctl == 0) || unexpected) { siginfo_t info = { .si_signo = SIGBUS, .si_code = BUS_ADRALN, .si_addr = (unsigned char __user *)0 }; if (unaligned_printk) pr_info("Unalign bundle: unexp @%llx, %llx\n", (unsigned long long)regs->pc, (unsigned long long)bundle); if (ra < 56) { unsigned long uaa = (unsigned long)regs->regs[ra]; /* Set bus Address. */ info.si_addr = (unsigned char __user *)uaa; } unaligned_fixup_count++; trace_unhandled_signal("unaligned fixup trap", regs, (unsigned long)info.si_addr, SIGBUS); force_sig_info(info.si_signo, &info, current); return; } #ifdef __LITTLE_ENDIAN #define UA_FIXUP_ADDR_DELTA 1 #define UA_FIXUP_BFEXT_START(_B_) 0 #define UA_FIXUP_BFEXT_END(_B_) (8 * (_B_) - 1) #else /* __BIG_ENDIAN */ #define UA_FIXUP_ADDR_DELTA -1 #define UA_FIXUP_BFEXT_START(_B_) (64 - 8 * (_B_)) #define UA_FIXUP_BFEXT_END(_B_) 63 #endif /* __LITTLE_ENDIAN */ if ((ra != rb) && (rd != TREG_SP) && !alias && !y1_br && !y1_lr && !x1_add) { /* * Simple case: ra != rb and no register alias found, * and no branch or link. This will be the majority. * We can do a little better for simplae case than the * generic scheme below. */ if (!load_n_store) { /* * Simple store: ra != rb, no need for scratch register. * Just store and rotate to right bytewise. */ #ifdef __BIG_ENDIAN frag.insn[n++] = jit_x0_addi(ra, ra, load_store_size - 1) | jit_x1_fnop(); #endif /* __BIG_ENDIAN */ for (k = 0; k < load_store_size; k++) { /* Store a byte. 
*/ frag.insn[n++] = jit_x0_rotli(rb, rb, 56) | jit_x1_st1_add(ra, rb, UA_FIXUP_ADDR_DELTA); } #ifdef __BIG_ENDIAN frag.insn[n] = jit_x1_addi(ra, ra, 1); #else frag.insn[n] = jit_x1_addi(ra, ra, -1 * load_store_size); #endif /* __LITTLE_ENDIAN */ if (load_store_size == 8) { frag.insn[n] |= jit_x0_fnop(); } else if (load_store_size == 4) { frag.insn[n] |= jit_x0_rotli(rb, rb, 32); } else { /* = 2 */ frag.insn[n] |= jit_x0_rotli(rb, rb, 16); } n++; if (bundle_2_enable) frag.insn[n++] = bundle_2; frag.insn[n++] = jit_x0_fnop() | jit_x1_iret(); } else { if (rd == ra) { /* Use two clobber registers: clob1/2. */ frag.insn[n++] = jit_x0_addi(TREG_SP, TREG_SP, -16) | jit_x1_fnop(); frag.insn[n++] = jit_x0_addi(clob1, ra, 7) | jit_x1_st_add(TREG_SP, clob1, -8); frag.insn[n++] = jit_x0_addi(clob2, ra, 0) | jit_x1_st(TREG_SP, clob2); frag.insn[n++] = jit_x0_fnop() | jit_x1_ldna(rd, ra); frag.insn[n++] = jit_x0_fnop() | jit_x1_ldna(clob1, clob1); /* * Note: we must make sure that rd must not * be sp. Recover clob1/2 from stack. */ frag.insn[n++] = jit_x0_dblalign(rd, clob1, clob2) | jit_x1_ld_add(clob2, TREG_SP, 8); frag.insn[n++] = jit_x0_fnop() | jit_x1_ld_add(clob1, TREG_SP, 16); } else { /* Use one clobber register: clob1 only. */ frag.insn[n++] = jit_x0_addi(TREG_SP, TREG_SP, -16) | jit_x1_fnop(); frag.insn[n++] = jit_x0_addi(clob1, ra, 7) | jit_x1_st(TREG_SP, clob1); frag.insn[n++] = jit_x0_fnop() | jit_x1_ldna(rd, ra); frag.insn[n++] = jit_x0_fnop() | jit_x1_ldna(clob1, clob1); /* * Note: we must make sure that rd must not * be sp. Recover clob1 from stack. */ frag.insn[n++] = jit_x0_dblalign(rd, clob1, ra) | jit_x1_ld_add(clob1, TREG_SP, 16); } if (bundle_2_enable) frag.insn[n++] = bundle_2; /* * For non 8-byte load, extract corresponding bytes and * signed extension. 
*/ if (load_store_size == 4) { if (load_store_signed) frag.insn[n++] = jit_x0_bfexts( rd, rd, UA_FIXUP_BFEXT_START(4), UA_FIXUP_BFEXT_END(4)) | jit_x1_fnop(); else frag.insn[n++] = jit_x0_bfextu( rd, rd, UA_FIXUP_BFEXT_START(4), UA_FIXUP_BFEXT_END(4)) | jit_x1_fnop(); } else if (load_store_size == 2) { if (load_store_signed) frag.insn[n++] = jit_x0_bfexts( rd, rd, UA_FIXUP_BFEXT_START(2), UA_FIXUP_BFEXT_END(2)) | jit_x1_fnop(); else frag.insn[n++] = jit_x0_bfextu( rd, rd, UA_FIXUP_BFEXT_START(2), UA_FIXUP_BFEXT_END(2)) | jit_x1_fnop(); } frag.insn[n++] = jit_x0_fnop() | jit_x1_iret(); } } else if (!load_n_store) { /* * Generic memory store cases: use 3 clobber registers. * * Alloc space for saveing clob2,1,3 on user's stack. * register clob3 points to where clob2 saved, followed by * clob1 and 3 from high to low memory. */ frag.insn[n++] = jit_x0_addi(TREG_SP, TREG_SP, -32) | jit_x1_fnop(); frag.insn[n++] = jit_x0_addi(clob3, TREG_SP, 16) | jit_x1_st_add(TREG_SP, clob3, 8); #ifdef __LITTLE_ENDIAN frag.insn[n++] = jit_x0_addi(clob1, ra, 0) | jit_x1_st_add(TREG_SP, clob1, 8); #else frag.insn[n++] = jit_x0_addi(clob1, ra, load_store_size - 1) | jit_x1_st_add(TREG_SP, clob1, 8); #endif if (load_store_size == 8) { /* * We save one byte a time, not for fast, but compact * code. After each store, data source register shift * right one byte. unchanged after 8 stores. 
*/ frag.insn[n++] = jit_x0_addi(clob2, TREG_ZERO, 7) | jit_x1_st_add(TREG_SP, clob2, 16); frag.insn[n++] = jit_x0_rotli(rb, rb, 56) | jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA); frag.insn[n++] = jit_x0_addi(clob2, clob2, -1) | jit_x1_bnezt(clob2, -1); frag.insn[n++] = jit_x0_fnop() | jit_x1_addi(clob2, y1_br_reg, 0); } else if (load_store_size == 4) { frag.insn[n++] = jit_x0_addi(clob2, TREG_ZERO, 3) | jit_x1_st_add(TREG_SP, clob2, 16); frag.insn[n++] = jit_x0_rotli(rb, rb, 56) | jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA); frag.insn[n++] = jit_x0_addi(clob2, clob2, -1) | jit_x1_bnezt(clob2, -1); /* * same as 8-byte case, but need shift another 4 * byte to recover rb for 4-byte store. */ frag.insn[n++] = jit_x0_rotli(rb, rb, 32) | jit_x1_addi(clob2, y1_br_reg, 0); } else { /* =2 */ frag.insn[n++] = jit_x0_addi(clob2, rb, 0) | jit_x1_st_add(TREG_SP, clob2, 16); for (k = 0; k < 2; k++) { frag.insn[n++] = jit_x0_shrui(rb, rb, 8) | jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA); } frag.insn[n++] = jit_x0_addi(rb, clob2, 0) | jit_x1_addi(clob2, y1_br_reg, 0); } if (bundle_2_enable) frag.insn[n++] = bundle_2; if (y1_lr) { frag.insn[n++] = jit_x0_fnop() | jit_x1_mfspr(y1_lr_reg, SPR_EX_CONTEXT_0_0); } if (y1_br) { frag.insn[n++] = jit_x0_fnop() | jit_x1_mtspr(SPR_EX_CONTEXT_0_0, clob2); } if (x1_add) { frag.insn[n++] = jit_x0_addi(ra, ra, x1_add_imm8) | jit_x1_ld_add(clob2, clob3, -8); } else { frag.insn[n++] = jit_x0_fnop() | jit_x1_ld_add(clob2, clob3, -8); } frag.insn[n++] = jit_x0_fnop() | jit_x1_ld_add(clob1, clob3, -8); frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3); frag.insn[n++] = jit_x0_fnop() | jit_x1_iret(); } else { /* * Generic memory load cases. * * Alloc space for saveing clob1,2,3 on user's stack. * register clob3 points to where clob1 saved, followed * by clob2 and 3 from high to low memory. 
*/ frag.insn[n++] = jit_x0_addi(TREG_SP, TREG_SP, -32) | jit_x1_fnop(); frag.insn[n++] = jit_x0_addi(clob3, TREG_SP, 16) | jit_x1_st_add(TREG_SP, clob3, 8); frag.insn[n++] = jit_x0_addi(clob2, ra, 0) | jit_x1_st_add(TREG_SP, clob2, 8); if (y1_br) { frag.insn[n++] = jit_x0_addi(clob1, y1_br_reg, 0) | jit_x1_st_add(TREG_SP, clob1, 16); } else { frag.insn[n++] = jit_x0_fnop() | jit_x1_st_add(TREG_SP, clob1, 16); } if (bundle_2_enable) frag.insn[n++] = bundle_2; if (y1_lr) { frag.insn[n++] = jit_x0_fnop() | jit_x1_mfspr(y1_lr_reg, SPR_EX_CONTEXT_0_0); } if (y1_br) { frag.insn[n++] = jit_x0_fnop() | jit_x1_mtspr(SPR_EX_CONTEXT_0_0, clob1); } frag.insn[n++] = jit_x0_addi(clob1, clob2, 7) | jit_x1_ldna(rd, clob2); frag.insn[n++] = jit_x0_fnop() | jit_x1_ldna(clob1, clob1); frag.insn[n++] = jit_x0_dblalign(rd, clob1, clob2) | jit_x1_ld_add(clob1, clob3, -8); if (x1_add) { frag.insn[n++] = jit_x0_addi(ra, ra, x1_add_imm8) | jit_x1_ld_add(clob2, clob3, -8); } else { frag.insn[n++] = jit_x0_fnop() | jit_x1_ld_add(clob2, clob3, -8); } frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3); if (load_store_size == 4) { if (load_store_signed) frag.insn[n++] = jit_x0_bfexts( rd, rd, UA_FIXUP_BFEXT_START(4), UA_FIXUP_BFEXT_END(4)) | jit_x1_fnop(); else frag.insn[n++] = jit_x0_bfextu( rd, rd, UA_FIXUP_BFEXT_START(4), UA_FIXUP_BFEXT_END(4)) | jit_x1_fnop(); } else if (load_store_size == 2) { if (load_store_signed) frag.insn[n++] = jit_x0_bfexts( rd, rd, UA_FIXUP_BFEXT_START(2), UA_FIXUP_BFEXT_END(2)) | jit_x1_fnop(); else frag.insn[n++] = jit_x0_bfextu( rd, rd, UA_FIXUP_BFEXT_START(2), UA_FIXUP_BFEXT_END(2)) | jit_x1_fnop(); } frag.insn[n++] = jit_x0_fnop() | jit_x1_iret(); } /* Max JIT bundle count is 14. 
*/ WARN_ON(n > 14); if (!unexpected) { int status = 0; int idx = (regs->pc >> 3) & ((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1); frag.pc = regs->pc; frag.bundle = bundle; if (unaligned_printk) { pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n", current->comm, current->pid, (unsigned long)frag.pc, (unsigned long)frag.bundle, (int)alias, (int)rd, (int)ra, (int)rb, (int)bundle_2_enable, (int)y1_lr, (int)y1_br, (int)x1_add); for (k = 0; k < n; k += 2) pr_info("[%d] %016llx %016llx\n", k, (unsigned long long)frag.insn[k], (unsigned long long)frag.insn[k+1]); } /* Swap bundle byte order for big endian sys. */ #ifdef __BIG_ENDIAN frag.bundle = GX_INSN_BSWAP(frag.bundle); for (k = 0; k < n; k++) frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]); #endif /* __BIG_ENDIAN */ status = copy_to_user((void __user *)&jit_code_area[idx], &frag, sizeof(frag)); if (status) { /* Fail to copy JIT into user land. send SIGSEGV. */ siginfo_t info = { .si_signo = SIGSEGV, .si_code = SEGV_MAPERR, .si_addr = (void __user *)&jit_code_area[idx] }; pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n", current->pid, current->comm, (unsigned long long)&jit_code_area[idx]); trace_unhandled_signal("segfault in unalign fixup", regs, (unsigned long)info.si_addr, SIGSEGV); force_sig_info(info.si_signo, &info, current); return; } /* Do a cheaper increment, not accurate. */ unaligned_fixup_count++; __flush_icache_range((unsigned long)&jit_code_area[idx], (unsigned long)&jit_code_area[idx] + sizeof(frag)); /* Setup SPR_EX_CONTEXT_0_0/1 for returning to user program.*/ __insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8); __insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0)); /* Modify pc at the start of new JIT. */ regs->pc = (unsigned long)&jit_code_area[idx].insn[0]; /* Set ICS in SPR_EX_CONTEXT_K_1. */ regs->ex1 = PL_ICS_EX1(USER_PL, 1); } } /* * C function to generate unalign data JIT. Called from unalign data * interrupt handler. 
* * First check if unalign fix is disabled or exception did not not come from * user space or sp register points to unalign address, if true, generate a * SIGBUS. Then map a page into user space as JIT area if it is not mapped * yet. Genenerate JIT code by calling jit_bundle_gen(). After that return * back to exception handler. * * The exception handler will "iret" to new generated JIT code after * restoring caller saved registers. In theory, the JIT code will perform * another "iret" to resume user's program. */ void do_unaligned(struct pt_regs *regs, int vecnum) { tilegx_bundle_bits __user *pc; tilegx_bundle_bits bundle; struct thread_info *info = current_thread_info(); int align_ctl; /* Checks the per-process unaligned JIT flags */ align_ctl = unaligned_fixup; switch (task_thread_info(current)->align_ctl) { case PR_UNALIGN_NOPRINT: align_ctl = 1; break; case PR_UNALIGN_SIGBUS: align_ctl = 0; break; } /* Enable iterrupt in order to access user land. */ local_irq_enable(); /* * The fault came from kernel space. Two choices: * (a) unaligned_fixup < 1, we will first call get/put_user fixup * to return -EFAULT. If no fixup, simply panic the kernel. * (b) unaligned_fixup >=1, we will try to fix the unaligned access * if it was triggered by get_user/put_user() macros. Panic the * kernel if it is not fixable. */ if (EX1_PL(regs->ex1) != USER_PL) { if (align_ctl < 1) { unaligned_fixup_count++; /* If exception came from kernel, try fix it up. */ if (fixup_exception(regs)) { if (unaligned_printk) pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); return; } /* Not fixable. Go panic. */ panic("Unalign exception in Kernel. pc=%lx", regs->pc); return; } else { /* * Try to fix the exception. If we can't, panic the * kernel. */ bundle = GX_INSN_BSWAP( *((tilegx_bundle_bits *)(regs->pc))); jit_bundle_gen(regs, bundle, align_ctl); return; } } /* * Fault came from user with ICS or stack is not aligned. 
* If so, we will trigger SIGBUS. */ if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) { siginfo_t info = { .si_signo = SIGBUS, .si_code = BUS_ADRALN, .si_addr = (unsigned char __user *)0 }; if (unaligned_printk) pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); unaligned_fixup_count++; trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS); force_sig_info(info.si_signo, &info, current); return; } /* Read the bundle casued the exception! */ pc = (tilegx_bundle_bits __user *)(regs->pc); if (get_user(bundle, pc) != 0) { /* Probably never be here since pc is valid user address.*/ siginfo_t info = { .si_signo = SIGSEGV, .si_code = SEGV_MAPERR, .si_addr = (void __user *)pc }; pr_err("Couldn't read instruction at %p trying to step\n", pc); trace_unhandled_signal("segfault in unalign fixup", regs, (unsigned long)info.si_addr, SIGSEGV); force_sig_info(info.si_signo, &info, current); return; } if (!info->unalign_jit_base) { void __user *user_page; /* * Allocate a page in userland. * For 64-bit processes we try to place the mapping far * from anything else that might be going on (specifically * 64 GB below the top of the user address space). If it * happens not to be possible to put it there, it's OK; * the kernel will choose another location and we'll * remember it for later. 
*/ if (is_compat_task()) user_page = NULL; else user_page = (void __user *)(TASK_SIZE - (1UL << 36)) + (current->pid << PAGE_SHIFT); user_page = (void __user *) vm_mmap(NULL, (unsigned long)user_page, PAGE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, #ifdef CONFIG_HOMECACHE MAP_CACHE_HOME_TASK | #endif MAP_PRIVATE | MAP_ANONYMOUS, 0); if (IS_ERR((void __force *)user_page)) { pr_err("Out of kernel pages trying do_mmap\n"); return; } /* Save the address in the thread_info struct */ info->unalign_jit_base = user_page; if (unaligned_printk) pr_info("Unalign bundle: %d:%d, allocate page @%llx\n", raw_smp_processor_id(), current->pid, (unsigned long long)user_page); } /* Generate unalign JIT */ jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl); } #endif /* __tilegx__ */
gpl-2.0
MoKee/android_kernel_huawei_msm8928
arch/arm/mach-msm/clock-a7.c
723
9761
/* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/of.h> #include <mach/clock-generic.h> #include "clock-local2.h" #define UPDATE_CHECK_MAX_LOOPS 200 struct cortex_reg_data { u32 cmd_offset; u32 update_mask; u32 poll_mask; }; #define DIV_REG(x) ((x)->base + (x)->div_offset) #define SRC_REG(x) ((x)->base + (x)->src_offset) #define CMD_REG(x) ((x)->base + \ ((struct cortex_reg_data *)(x)->priv)->cmd_offset) static int update_config(struct mux_div_clk *md) { u32 regval, count; struct cortex_reg_data *r = md->priv; /* Update the configuration */ regval = readl_relaxed(CMD_REG(md)); regval |= r->update_mask; writel_relaxed(regval, CMD_REG(md)); /* Wait for update to take effect */ for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) { if (!(readl_relaxed(CMD_REG(md)) & r->poll_mask)) return 0; udelay(1); } CLK_WARN(&md->c, true, "didn't update its configuration."); return -EINVAL; } static void cortex_get_config(struct mux_div_clk *md, u32 *src_sel, u32 *div) { u32 regval; regval = readl_relaxed(DIV_REG(md)); regval &= (md->div_mask << md->div_shift); *div = regval >> md->div_shift; *div = max((u32)1, (*div + 1) / 2); regval = readl_relaxed(SRC_REG(md)); regval &= (md->src_mask << md->src_shift); *src_sel = 
regval >> md->src_shift; } static int cortex_set_config(struct mux_div_clk *md, u32 src_sel, u32 div) { u32 regval; div = div ? ((2 * div) - 1) : 0; regval = readl_relaxed(DIV_REG(md)); regval &= ~(md->div_mask << md->div_shift); regval |= div << md->div_shift; writel_relaxed(regval, DIV_REG(md)); regval = readl_relaxed(SRC_REG(md)); regval &= ~(md->src_mask << md->src_shift); regval |= src_sel << md->src_shift; writel_relaxed(regval, SRC_REG(md)); return update_config(md); } static int cortex_enable(struct mux_div_clk *md) { return cortex_set_config(md, md->src_sel, md->data.div); } static void cortex_disable(struct mux_div_clk *md) { u32 src_sel = parent_to_src_sel(md->parents, md->num_parents, md->safe_parent); cortex_set_config(md, src_sel, md->safe_div); } static bool cortex_is_enabled(struct mux_div_clk *md) { return true; } struct mux_div_ops cortex_mux_div_ops = { .set_src_div = cortex_set_config, .get_src_div = cortex_get_config, .is_enabled = cortex_is_enabled, .enable = cortex_enable, .disable = cortex_disable, }; static struct cortex_reg_data a7ssmux_priv = { .cmd_offset = 0x0, .update_mask = BIT(0), .poll_mask = BIT(0), }; DEFINE_VDD_REGS_INIT(vdd_cpu, 1); static struct mux_div_clk a7ssmux = { .ops = &cortex_mux_div_ops, .safe_freq = 300000000, .data = { .max_div = 8, .min_div = 1, }, .c = { .dbg_name = "a7ssmux", .ops = &clk_ops_mux_div_clk, .vdd_class = &vdd_cpu, CLK_INIT(a7ssmux.c), }, .parents = (struct clk_src[8]) {}, .priv = &a7ssmux_priv, .div_offset = 0x4, .div_mask = BM(4, 0), .div_shift = 0, .src_offset = 0x4, .src_mask = BM(10, 8) >> 8, .src_shift = 8, }; static struct clk_lookup clock_tbl_a7[] = { CLK_LOOKUP("cpu0_clk", a7ssmux.c, "0.qcom,msm-cpufreq"), CLK_LOOKUP("cpu0_clk", a7ssmux.c, "fe805664.qcom,pm-8x60"), }; static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c, char *prop_name) { struct device_node *of = pdev->dev.of_node; int prop_len, i; struct clk_vdd_class *vdd = c->vdd_class; u32 *array; if 
(!of_find_property(of, prop_name, &prop_len)) { dev_err(&pdev->dev, "missing %s\n", prop_name); return -EINVAL; } prop_len /= sizeof(u32); if (prop_len % 2) { dev_err(&pdev->dev, "bad length %d\n", prop_len); return -EINVAL; } prop_len /= 2; vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int), GFP_KERNEL); if (!vdd->level_votes) return -ENOMEM; vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int), GFP_KERNEL); if (!vdd->vdd_uv) return -ENOMEM; c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long), GFP_KERNEL); if (!c->fmax) return -ENOMEM; array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32) * 2, GFP_KERNEL); if (!array) return -ENOMEM; of_property_read_u32_array(of, prop_name, array, prop_len * 2); for (i = 0; i < prop_len; i++) { c->fmax[i] = array[2 * i]; vdd->vdd_uv[i] = array[2 * i + 1]; } devm_kfree(&pdev->dev, array); vdd->num_levels = prop_len; vdd->cur_level = prop_len; c->num_fmax = prop_len; return 0; } static void get_speed_bin(struct platform_device *pdev, int *bin, int *version) { struct resource *res; void __iomem *base; u32 pte_efuse, redundant_sel, valid; *bin = 0; *version = 0; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse"); if (!res) { dev_info(&pdev->dev, "No speed/PVS binning available. Defaulting to 0!\n"); return; } base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!base) { dev_warn(&pdev->dev, "Unable to read efuse data. Defaulting to 0!\n"); return; } pte_efuse = readl_relaxed(base); devm_iounmap(&pdev->dev, base); redundant_sel = (pte_efuse >> 24) & 0x7; *bin = pte_efuse & 0x7; valid = (pte_efuse >> 3) & 0x1; *version = (pte_efuse >> 4) & 0x3; if (redundant_sel == 1) *bin = (pte_efuse >> 27) & 0x7; if (!valid) { dev_info(&pdev->dev, "Speed bin not set. 
Defaulting to 0!\n"); *bin = 0; } else { dev_info(&pdev->dev, "Speed bin: %d\n", *bin); } dev_info(&pdev->dev, "PVS version: %d\n", *version); return; } static int of_get_clk_src(struct platform_device *pdev, struct clk_src *parents) { struct device_node *of = pdev->dev.of_node; int num_parents, i, j, index; struct clk *c; char clk_name[] = "clk-x"; num_parents = of_property_count_strings(of, "clock-names"); if (num_parents <= 0 || num_parents > 8) { dev_err(&pdev->dev, "missing clock-names\n"); return -EINVAL; } j = 0; for (i = 0; i < 8; i++) { snprintf(clk_name, ARRAY_SIZE(clk_name), "clk-%d", i); index = of_property_match_string(of, "clock-names", clk_name); if (IS_ERR_VALUE(index)) continue; parents[j].sel = i; parents[j].src = c = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(c)) { if (c != ERR_PTR(-EPROBE_DEFER)) dev_err(&pdev->dev, "clk_get: %s\n fail", clk_name); return PTR_ERR(c); } j++; } return num_parents; } static int clock_a7_probe(struct platform_device *pdev) { struct resource *res; int speed_bin = 0, version = 0, rc; unsigned long rate, aux_rate; struct clk *aux_clk, *main_pll; char prop_name[] = "qcom,speedX-bin-vX"; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg-base"); if (!res) { dev_err(&pdev->dev, "missing rcg-base\n"); return -EINVAL; } a7ssmux.base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!a7ssmux.base) { dev_err(&pdev->dev, "ioremap failed for rcg-base\n"); return -ENOMEM; } vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd"); if (IS_ERR(vdd_cpu.regulator[0])) { if (PTR_ERR(vdd_cpu.regulator[0]) != -EPROBE_DEFER) dev_err(&pdev->dev, "unable to get regulator\n"); return PTR_ERR(vdd_cpu.regulator[0]); } a7ssmux.num_parents = of_get_clk_src(pdev, a7ssmux.parents); if (IS_ERR_VALUE(a7ssmux.num_parents)) return a7ssmux.num_parents; get_speed_bin(pdev, &speed_bin, &version); snprintf(prop_name, ARRAY_SIZE(prop_name), "qcom,speed%d-bin-v%d", speed_bin, version); rc = of_get_fmax_vdd_class(pdev, 
&a7ssmux.c, prop_name); if (rc) { /* Fall back to most conservative PVS table */ dev_err(&pdev->dev, "Unable to load voltage plan %s!\n", prop_name); rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c, "qcom,speed0-bin-v0"); if (rc) { dev_err(&pdev->dev, "Unable to load safe voltage plan\n"); return rc; } dev_info(&pdev->dev, "Safe voltage plan loaded.\n"); } rc = msm_clock_register(clock_tbl_a7, ARRAY_SIZE(clock_tbl_a7)); if (rc) { dev_err(&pdev->dev, "msm_clock_register failed\n"); return rc; } /* Force a PLL reconfiguration */ aux_clk = a7ssmux.parents[0].src; main_pll = a7ssmux.parents[1].src; aux_rate = clk_get_rate(aux_clk); rate = clk_get_rate(&a7ssmux.c); clk_set_rate(&a7ssmux.c, aux_rate); clk_set_rate(main_pll, clk_round_rate(main_pll, 1)); clk_set_rate(&a7ssmux.c, rate); /* * We don't want the CPU clocks to be turned off at late init * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the * refcount of these clocks. Any cpufreq/hotplug manager can assume * that the clocks have already been prepared and enabled by the time * they take over. */ WARN(clk_prepare_enable(&a7ssmux.c), "Unable to turn on CPU clock"); return 0; } static struct of_device_id clock_a7_match_table[] = { {.compatible = "qcom,clock-a7-8226"}, {} }; static struct platform_driver clock_a7_driver = { .driver = { .name = "clock-a7", .of_match_table = clock_a7_match_table, .owner = THIS_MODULE, }, }; static int __init clock_a7_init(void) { return platform_driver_probe(&clock_a7_driver, clock_a7_probe); } device_initcall(clock_a7_init);
gpl-2.0
AppliedMicro/ENGLinuxLatest
drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
979
1602
/*
 * drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
 *
 * Samsung MFC (Multi Function Codec - FIMV) driver
 * This file contains hw related functions.
 *
 * Kamil Debski, Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "s5p_mfc_debug.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_opr_v5.h"
#include "s5p_mfc_opr_v6.h"

/* Version-specific hardware ops table selected at init time. */
static struct s5p_mfc_hw_ops *s5p_mfc_ops;

/*
 * Pick the v5 or v6+ hardware ops table for @dev and record the matching
 * warning-code threshold.
 */
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
{
	if (!IS_MFCV6_PLUS(dev)) {
		s5p_mfc_ops = s5p_mfc_init_hw_ops_v5();
		dev->warn_start = S5P_FIMV_ERR_WARNINGS_START;
	} else {
		s5p_mfc_ops = s5p_mfc_init_hw_ops_v6();
		dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
	}
	dev->mfc_ops = s5p_mfc_ops;
}

/* Install the register map; only v6+ hardware provides one. */
void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
{
	if (!IS_MFCV6_PLUS(dev))
		return;
	dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
}

/*
 * Allocate a DMA-coherent private buffer of b->size bytes, filling in
 * b->virt and b->dma.  Returns 0 on success, -ENOMEM on failure.
 */
int s5p_mfc_alloc_priv_buf(struct device *dev, struct s5p_mfc_priv_buf *b)
{
	mfc_debug(3, "Allocating priv: %zu\n", b->size);

	b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
	if (b->virt == NULL) {
		mfc_err("Allocating private buffer failed\n");
		return -ENOMEM;
	}

	mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
	return 0;
}

/*
 * Free a buffer obtained from s5p_mfc_alloc_priv_buf() and clear the
 * descriptor so a double release is harmless.
 */
void s5p_mfc_release_priv_buf(struct device *dev, struct s5p_mfc_priv_buf *b)
{
	if (b->virt == NULL)
		return;

	dma_free_coherent(dev, b->size, b->virt, b->dma);
	b->virt = NULL;
	b->dma = 0;
	b->size = 0;
}
gpl-2.0
linux-wpan/linux-wpan
arch/sparc/kernel/module.c
1747
5236
/* Kernel module help for sparc64.
 *
 * Copyright (C) 2001 Rusty Russell.
 * Copyright (C) 2002 David S. Miller.
 */

#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/spitfire.h>
#include <asm/cacheflush.h>

#include "entry.h"

#ifdef CONFIG_SPARC64

#include <linux/jump_label.h>

/*
 * Map module text/data inside the dedicated sparc64 modules VA window so
 * that call/branch relocations can always reach the kernel.
 */
static void *module_map(unsigned long size)
{
	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
#else
/* sparc32 has no special module window; plain vmalloc is sufficient. */
static void *module_map(unsigned long size)
{
	return vmalloc(size);
}
#endif /* CONFIG_SPARC64 */

/* Allocate zeroed storage for a module image (NULL on failure). */
void *module_alloc(unsigned long size)
{
	void *ret;

	ret = module_map(size);
	if (ret)
		memset(ret, 0, size);

	return ret;
}

/* Make generic code ignore STT_REGISTER dummy undefined symbols.  */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	unsigned int symidx;
	Elf_Sym *sym;
	char *strtab;
	int i;

	/* Locate the symbol table section; fail if the image has none. */
	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
		if (symidx == hdr->e_shnum-1) {
			printk("%s: no symtab found.\n", mod->name);
			return -ENOEXEC;
		}
	}
	sym = (Elf_Sym *)sechdrs[symidx].sh_addr;
	/* strtab is looked up for completeness; not consulted below. */
	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;

	/*
	 * Rewrite undefined STT_REGISTER pseudo-symbols as absolute so the
	 * generic loader does not treat them as unresolved references.
	 */
	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
		if (sym[i].st_shndx == SHN_UNDEF) {
			if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER)
				sym[i].st_shndx = SHN_ABS;
		}
	}
	return 0;
}

/*
 * Apply RELA relocations for one section of a module being loaded.
 * Returns 0 on success or -ENOEXEC on an unsupported relocation type.
 */
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u8 *location;
	u32 *loc32;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		Elf_Addr v;

		/* This is where to make the change */
		location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		loc32 = (u32 *) location;

#ifdef CONFIG_SPARC64
		/* Module VAs must fit in 32 bits on sparc64. */
		BUG_ON(((u64)location >> (u64)32) != (u64)0);
#endif /* CONFIG_SPARC64 */

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_R_SYM(rel[i].r_info);
		v = sym->st_value + rel[i].r_addend;

		switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
		case R_SPARC_DISP32:
			v -= (Elf_Addr) location;
			*loc32 = v;
			break;
#ifdef CONFIG_SPARC64
		case R_SPARC_64:
			/* Store the 64-bit value byte-wise (big-endian). */
			location[0] = v >> 56;
			location[1] = v >> 48;
			location[2] = v >> 40;
			location[3] = v >> 32;
			location[4] = v >> 24;
			location[5] = v >> 16;
			location[6] = v >>  8;
			location[7] = v >>  0;
			break;

		case R_SPARC_WDISP19:
			v -= (Elf_Addr) location;
			*loc32 = (*loc32 & ~0x7ffff) |
				((v >> 2) & 0x7ffff);
			break;

		case R_SPARC_OLO10:
			/* High byte of the reloc type carries the addend. */
			*loc32 = (*loc32 & ~0x1fff) |
				(((v & 0x3ff) +
				  (ELF_R_TYPE(rel[i].r_info) >> 8))
				 & 0x1fff);
			break;
#endif /* CONFIG_SPARC64 */
		case R_SPARC_32:
		case R_SPARC_UA32:
			/* Byte-wise store so unaligned targets are safe. */
			location[0] = v >> 24;
			location[1] = v >> 16;
			location[2] = v >>  8;
			location[3] = v >>  0;
			break;

		case R_SPARC_WDISP30:
			v -= (Elf_Addr) location;
			*loc32 = (*loc32 & ~0x3fffffff) |
				((v >> 2) & 0x3fffffff);
			break;

		case R_SPARC_WDISP22:
			v -= (Elf_Addr) location;
			*loc32 = (*loc32 & ~0x3fffff) |
				((v >> 2) & 0x3fffff);
			break;

		case R_SPARC_LO10:
			*loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
			break;

		case R_SPARC_HI22:
			*loc32 = (*loc32 & ~0x3fffff) |
				((v >> 10) & 0x3fffff);
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %x\n",
			       me->name,
			       (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
			return -ENOEXEC;
		}
	}
	return 0;
}

#ifdef CONFIG_SPARC64
/*
 * On hypervisor (sun4v) platforms, rewrite instruction sequences recorded
 * in the module's .sun4v_*insn_patch sections to their sun4v variants.
 */
static void do_patch_sections(const Elf_Ehdr *hdr,
			      const Elf_Shdr *sechdrs)
{
	const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
			sun4v_1insn = s;
		if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
			sun4v_2insn = s;
	}

	if (sun4v_1insn && tlb_type == hypervisor) {
		void *p = (void *) sun4v_1insn->sh_addr;
		sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
	}
	if (sun4v_2insn && tlb_type == hypervisor) {
		void *p = (void *) sun4v_2insn->sh_addr;
		sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
	}
}

/*
 * Final per-module fixups: apply jump-label nops and sun4v patches, then
 * invalidate the I-cache on spitfire CPUs, which are not I/D coherent.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* make jump label nops */
	jump_label_apply_nops(me);

	do_patch_sections(hdr, sechdrs);

	/* Cheetah's I-cache is fully coherent.  */
	if (tlb_type == spitfire) {
		unsigned long va;

		flushw_all();
		for (va =  0; va < (PAGE_SIZE << 1); va += 32)
			spitfire_put_icache_tag(va, 0x0);
		__asm__ __volatile__("flush %g6");
	}

	return 0;
}
#endif /* CONFIG_SPARC64 */
gpl-2.0
ArthySundaram/chromeos-3.8
fs/ubifs/compress.c
2515
6760
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 *          Zoltan Sogor
 */

/*
 * This file provides a single place to access to compression and
 * decompression.
 */

#include <linux/crypto.h>
#include "ubifs.h"

/* Fake description object for the "none" compressor */
static struct ubifs_compressor none_compr = {
	.compr_type = UBIFS_COMPR_NONE,
	.name = "none",
	/* Non-NULL but empty: "none" needs no crypto API backend. */
	.capi_name = "",
};

#ifdef CONFIG_UBIFS_FS_LZO
/* Serializes access to the single shared LZO transform. */
static DEFINE_MUTEX(lzo_mutex);

static struct ubifs_compressor lzo_compr = {
	.compr_type = UBIFS_COMPR_LZO,
	.comp_mutex = &lzo_mutex,
	.name = "lzo",
	.capi_name = "lzo",
};
#else
/* Stub with NULL capi_name so selecting LZO fails gracefully. */
static struct ubifs_compressor lzo_compr = {
	.compr_type = UBIFS_COMPR_LZO,
	.name = "lzo",
};
#endif

#ifdef CONFIG_UBIFS_FS_ZLIB
/* Separate mutexes: compression and decompression may run concurrently. */
static DEFINE_MUTEX(deflate_mutex);
static DEFINE_MUTEX(inflate_mutex);

static struct ubifs_compressor zlib_compr = {
	.compr_type = UBIFS_COMPR_ZLIB,
	.comp_mutex = &deflate_mutex,
	.decomp_mutex = &inflate_mutex,
	.name = "zlib",
	.capi_name = "deflate",
};
#else
/* Stub with NULL capi_name so selecting zlib fails gracefully. */
static struct ubifs_compressor zlib_compr = {
	.compr_type = UBIFS_COMPR_ZLIB,
	.name = "zlib",
};
#endif

/* All UBIFS compressors */
struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];

/**
 * ubifs_compress - compress data.
 * @in_buf: data to compress
 * @in_len: length of the data to compress
 * @out_buf: output buffer where compressed data should be stored
 * @out_len: output buffer length is returned here
 * @compr_type: type of compression to use on enter, actually used compression
 *              type on exit
 *
 * This function compresses input buffer @in_buf of length @in_len and stores
 * the result in the output buffer @out_buf and the resulting length in
 * @out_len. If the input buffer does not compress, it is just copied to the
 * @out_buf. The same happens if @compr_type is %UBIFS_COMPR_NONE or if
 * compression error occurred.
 *
 * Note, if the input buffer was not compressed, it is copied to the output
 * buffer and %UBIFS_COMPR_NONE is returned in @compr_type.
 */
void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
		    int *compr_type)
{
	int err;
	struct ubifs_compressor *compr = ubifs_compressors[*compr_type];

	if (*compr_type == UBIFS_COMPR_NONE)
		goto no_compr;

	/* If the input data is small, do not even try to compress it */
	if (in_len < UBIFS_MIN_COMPR_LEN)
		goto no_compr;

	if (compr->comp_mutex)
		mutex_lock(compr->comp_mutex);
	err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf,
				   (unsigned int *)out_len);
	if (compr->comp_mutex)
		mutex_unlock(compr->comp_mutex);
	if (unlikely(err)) {
		/* Compression failure is non-fatal: store uncompressed. */
		ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
			   in_len, compr->name, err);
		goto no_compr;
	}

	/*
	 * If the data compressed only slightly, it is better to leave it
	 * uncompressed to improve read speed.
	 */
	if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF)
		goto no_compr;

	return;

no_compr:
	memcpy(out_buf, in_buf, in_len);
	*out_len = in_len;
	*compr_type = UBIFS_COMPR_NONE;
}

/**
 * ubifs_decompress - decompress data.
 * @in_buf: data to decompress
 * @in_len: length of the data to decompress
 * @out_buf: output buffer where decompressed data should
 * @out_len: output length is returned here
 * @compr_type: type of compression
 *
 * This function decompresses data from buffer @in_buf into buffer @out_buf.
 * The length of the uncompressed data is returned in @out_len. This functions
 * returns %0 on success or a negative error code on failure.
 */
int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
		     int *out_len, int compr_type)
{
	int err;
	struct ubifs_compressor *compr;

	/* @compr_type comes from on-flash data, so range-check it. */
	if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
		ubifs_err("invalid compression type %d", compr_type);
		return -EINVAL;
	}

	compr = ubifs_compressors[compr_type];

	/* NULL capi_name means this compressor was compiled out. */
	if (unlikely(!compr->capi_name)) {
		ubifs_err("%s compression is not compiled in", compr->name);
		return -EINVAL;
	}

	if (compr_type == UBIFS_COMPR_NONE) {
		memcpy(out_buf, in_buf, in_len);
		*out_len = in_len;
		return 0;
	}

	if (compr->decomp_mutex)
		mutex_lock(compr->decomp_mutex);
	err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf,
				     (unsigned int *)out_len);
	if (compr->decomp_mutex)
		mutex_unlock(compr->decomp_mutex);
	if (err)
		ubifs_err("cannot decompress %d bytes, compressor %s, error %d",
			  in_len, compr->name, err);

	return err;
}

/**
 * compr_init - initialize a compressor.
 * @compr: compressor description object
 *
 * This function initializes the requested compressor and returns zero in case
 * of success or a negative error code in case of failure.
 */
static int __init compr_init(struct ubifs_compressor *compr)
{
	if (compr->capi_name) {
		compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0);
		if (IS_ERR(compr->cc)) {
			ubifs_err("cannot initialize compressor %s, error %ld",
				  compr->name, PTR_ERR(compr->cc));
			return PTR_ERR(compr->cc);
		}
	}

	/* Register in the lookup table even for compiled-out stubs. */
	ubifs_compressors[compr->compr_type] = compr;
	return 0;
}

/**
 * compr_exit - de-initialize a compressor.
 * @compr: compressor description object
 */
static void compr_exit(struct ubifs_compressor *compr)
{
	if (compr->capi_name)
		crypto_free_comp(compr->cc);
	return;
}

/**
 * ubifs_compressors_init - initialize UBIFS compressors.
 *
 * This function initializes the compressor which were compiled in. Returns
 * zero in case of success and a negative error code in case of failure.
 */
int __init ubifs_compressors_init(void)
{
	int err;

	err = compr_init(&lzo_compr);
	if (err)
		return err;

	err = compr_init(&zlib_compr);
	if (err)
		goto out_lzo;

	ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
	return 0;

out_lzo:
	compr_exit(&lzo_compr);
	return err;
}

/**
 * ubifs_compressors_exit - de-initialize UBIFS compressors.
 */
void ubifs_compressors_exit(void)
{
	compr_exit(&lzo_compr);
	compr_exit(&zlib_compr);
}
gpl-2.0
zyrgit/linux-yocto-3.10-work
fs/ubifs/compress.c
2515
6760
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * Copyright (C) 2006, 2007 University of Szeged, Hungary * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) * Zoltan Sogor */ /* * This file provides a single place to access to compression and * decompression. */ #include <linux/crypto.h> #include "ubifs.h" /* Fake description object for the "none" compressor */ static struct ubifs_compressor none_compr = { .compr_type = UBIFS_COMPR_NONE, .name = "none", .capi_name = "", }; #ifdef CONFIG_UBIFS_FS_LZO static DEFINE_MUTEX(lzo_mutex); static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .comp_mutex = &lzo_mutex, .name = "lzo", .capi_name = "lzo", }; #else static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .name = "lzo", }; #endif #ifdef CONFIG_UBIFS_FS_ZLIB static DEFINE_MUTEX(deflate_mutex); static DEFINE_MUTEX(inflate_mutex); static struct ubifs_compressor zlib_compr = { .compr_type = UBIFS_COMPR_ZLIB, .comp_mutex = &deflate_mutex, .decomp_mutex = &inflate_mutex, .name = "zlib", .capi_name = "deflate", }; #else static struct ubifs_compressor zlib_compr = { .compr_type = UBIFS_COMPR_ZLIB, .name = "zlib", }; #endif /* All UBIFS compressors */ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; /** * ubifs_compress - compress data. 
* @in_buf: data to compress * @in_len: length of the data to compress * @out_buf: output buffer where compressed data should be stored * @out_len: output buffer length is returned here * @compr_type: type of compression to use on enter, actually used compression * type on exit * * This function compresses input buffer @in_buf of length @in_len and stores * the result in the output buffer @out_buf and the resulting length in * @out_len. If the input buffer does not compress, it is just copied to the * @out_buf. The same happens if @compr_type is %UBIFS_COMPR_NONE or if * compression error occurred. * * Note, if the input buffer was not compressed, it is copied to the output * buffer and %UBIFS_COMPR_NONE is returned in @compr_type. */ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, int *compr_type) { int err; struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; if (*compr_type == UBIFS_COMPR_NONE) goto no_compr; /* If the input data is small, do not even try to compress it */ if (in_len < UBIFS_MIN_COMPR_LEN) goto no_compr; if (compr->comp_mutex) mutex_lock(compr->comp_mutex); err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, (unsigned int *)out_len); if (compr->comp_mutex) mutex_unlock(compr->comp_mutex); if (unlikely(err)) { ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", in_len, compr->name, err); goto no_compr; } /* * If the data compressed only slightly, it is better to leave it * uncompressed to improve read speed. */ if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) goto no_compr; return; no_compr: memcpy(out_buf, in_buf, in_len); *out_len = in_len; *compr_type = UBIFS_COMPR_NONE; } /** * ubifs_decompress - decompress data. 
* @in_buf: data to decompress * @in_len: length of the data to decompress * @out_buf: output buffer where decompressed data should * @out_len: output length is returned here * @compr_type: type of compression * * This function decompresses data from buffer @in_buf into buffer @out_buf. * The length of the uncompressed data is returned in @out_len. This functions * returns %0 on success or a negative error code on failure. */ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, int *out_len, int compr_type) { int err; struct ubifs_compressor *compr; if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { ubifs_err("invalid compression type %d", compr_type); return -EINVAL; } compr = ubifs_compressors[compr_type]; if (unlikely(!compr->capi_name)) { ubifs_err("%s compression is not compiled in", compr->name); return -EINVAL; } if (compr_type == UBIFS_COMPR_NONE) { memcpy(out_buf, in_buf, in_len); *out_len = in_len; return 0; } if (compr->decomp_mutex) mutex_lock(compr->decomp_mutex); err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, (unsigned int *)out_len); if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) ubifs_err("cannot decompress %d bytes, compressor %s, error %d", in_len, compr->name, err); return err; } /** * compr_init - initialize a compressor. * @compr: compressor description object * * This function initializes the requested compressor and returns zero in case * of success or a negative error code in case of failure. */ static int __init compr_init(struct ubifs_compressor *compr) { if (compr->capi_name) { compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0); if (IS_ERR(compr->cc)) { ubifs_err("cannot initialize compressor %s, error %ld", compr->name, PTR_ERR(compr->cc)); return PTR_ERR(compr->cc); } } ubifs_compressors[compr->compr_type] = compr; return 0; } /** * compr_exit - de-initialize a compressor. 
* @compr: compressor description object */ static void compr_exit(struct ubifs_compressor *compr) { if (compr->capi_name) crypto_free_comp(compr->cc); return; } /** * ubifs_compressors_init - initialize UBIFS compressors. * * This function initializes the compressor which were compiled in. Returns * zero in case of success and a negative error code in case of failure. */ int __init ubifs_compressors_init(void) { int err; err = compr_init(&lzo_compr); if (err) return err; err = compr_init(&zlib_compr); if (err) goto out_lzo; ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr; return 0; out_lzo: compr_exit(&lzo_compr); return err; } /** * ubifs_compressors_exit - de-initialize UBIFS compressors. */ void ubifs_compressors_exit(void) { compr_exit(&lzo_compr); compr_exit(&zlib_compr); }
gpl-2.0
luk1337/android_kernel_samsung_i9082
drivers/media/dvb/dvb-usb/lmedm04.c
2771
29825
/* DVB USB compliant linux driver for * * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395 * LME2510C + LG TDQY-P001F * LME2510C + BS2F7HZ0194 * LME2510 + LG TDQY-P001F * LME2510 + BS2F7HZ0194 * * MVB7395 (LME2510C+SHARP:BS2F7HZ7395) * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V) * * MV001F (LME2510+LGTDQY-P001F) * LG TDQY - P001F =(TDA8263 + TDA10086H) * * MVB0001F (LME2510C+LGTDQT-P001F) * * MV0194 (LME2510+SHARP:BS2F7HZ0194) * SHARP:BS2F7HZ0194 = (STV0299+IX2410) * * MVB0194 (LME2510C+SHARP0194) * * For firmware see Documentation/dvb/lmedm04.txt * * I2C addresses: * 0xd0 - STV0288 - Demodulator * 0xc0 - Sharp IX2505V - Tuner * -- * 0x1c - TDA10086 - Demodulator * 0xc0 - TDA8263 - Tuner * -- * 0xd0 - STV0299 - Demodulator * 0xc0 - IX2410 - Tuner * * * VID = 3344 PID LME2510=1122 LME2510C=1120 * * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com) * LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * see Documentation/dvb/README.dvb-usb for more information * * Known Issues : * LME2510: Non Intel USB chipsets fail to maintain High Speed on * Boot or Hot Plug. * * QQbox suffers from noise on LNB voltage. * * LME2510: SHARP:BS2F7HZ0194(MV0194) cannot cold reset and share system * with other tuners. After a cold reset streaming will not start. 
* */

#define DVB_USB_LOG_PREFIX "LME2510(C)"
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>

#include "dvb-usb.h"
#include "lmedm04.h"
#include "tda826x.h"
#include "tda10086.h"
#include "stv0288.h"
#include "ix2505v.h"
#include "stv0299.h"
#include "dvb-pll.h"
#include "z0194a.h"

/* debug */
static int dvb_usb_lme2510_debug;
#define l_dprintk(var, level, args...) do { \
	if ((var >= level)) \
		printk(KERN_DEBUG DVB_USB_LOG_PREFIX ": " args); \
} while (0)

#define deb_info(level, args...) l_dprintk(dvb_usb_lme2510_debug, level, args)

/* Dump the first 8 bytes of an interrupt/control packet at debug level */
#define debug_data_snipet(level, name, p) \
	deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
		*p, *(p+1), *(p+2), *(p+3), *(p+4), \
		*(p+5), *(p+6), *(p+7));

module_param_named(debug, dvb_usb_lme2510_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))." DVB_USB_DEBUG_STATUS);

static int dvb_usb_lme2510_firmware;
module_param_named(firmware, dvb_usb_lme2510_firmware, int, 0644);
MODULE_PARM_DESC(firmware, "set default firmware 0=Sharp7395 1=LG");

static int pid_filter;
module_param_named(pid, pid_filter, int, 0644);
MODULE_PARM_DESC(pid, "set default 0=on 1=off");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Tuner/frontend variants this driver can drive */
#define TUNER_DEFAULT	0x0
#define TUNER_LG	0x1
#define TUNER_S7395	0x2
#define TUNER_S0194	0x3

/* Per-device private state, stored in dvb_usb_device->priv */
struct lme2510_state {
	u8 id;
	u8 tuner_config;	/* one of the TUNER_* ids above */
	u8 signal_lock;		/* cached values fed back to i2c reads */
	u8 signal_level;
	u8 signal_sn;
	u8 time_key;
	u8 i2c_talk_onoff;	/* 1 = talk to hw, 0 = answer from cache */
	u8 i2c_gate;
	u8 i2c_tuner_gate_w;
	u8 i2c_tuner_gate_r;
	u8 i2c_tuner_addr;
	u8 stream_on;
	u8 pid_size;
	void *buffer;		/* coherent buffer for the interrupt URB */
	struct urb *lme_urb;	/* interrupt URB */
	void *usb_buffer;	/* 512-byte bounce buffer for bulk talk */
};

/* Thin wrapper around usb_bulk_msg() for sends; 100 ms timeout */
static int lme2510_bulk_write(struct usb_device *dev,
				u8 *snd, int len, u8 pipe)
{
	int ret, actual_l;

	ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
				snd, len , &actual_l, 100);
	return ret;
}

/* Thin wrapper around usb_bulk_msg() for receives; 200 ms timeout */
static int lme2510_bulk_read(struct usb_device *dev,
				u8 *rev, int len, u8 pipe)
{
	int ret, actual_l;

	ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
				rev, len , &actual_l, 200);
	return ret;
}

/*
 * Send a command over bulk endpoint 0x01 and read the reply back through
 * the same endpoint, via a lazily allocated 512-byte bounce buffer.
 * Serialized by d->usb_mutex.  Both directions are capped at 512 bytes.
 */
static int lme2510_usb_talk(struct dvb_usb_device *d,
		u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
	struct lme2510_state *st = d->priv;
	u8 *buff;
	int ret = 0;

	if (st->usb_buffer == NULL) {
		st->usb_buffer = kmalloc(512, GFP_KERNEL);
		if (st->usb_buffer == NULL) {
			info("MEM Error no memory");
			return -ENOMEM;
		}
	}
	buff = st->usb_buffer;

	ret = mutex_lock_interruptible(&d->usb_mutex);

	if (ret < 0)
		return -EAGAIN;

	/* the read/write capped at 512 */
	memcpy(buff, wbuf, (wlen > 512) ? 512 : wlen);

	ret |= usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, 0x01));

	ret |= lme2510_bulk_write(d->udev, buff, wlen , 0x01);

	msleep(10);

	ret |= usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, 0x01));

	ret |= lme2510_bulk_read(d->udev, buff, (rlen > 512) ?
			512 : rlen , 0x01);

	if (rlen > 0)
		memcpy(rbuf, buff, rlen);

	mutex_unlock(&d->usb_mutex);

	return (ret < 0) ? -ENODEV : 0;
}

/* Issue the "stream on" command to (re)start the TS stream */
static int lme2510_stream_restart(struct dvb_usb_device *d)
{
	static u8 stream_on[] = LME_ST_ON_W;
	int ret;
	u8 rbuff[10];

	/* Restart Stream Command */
	ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on),
			rbuff, sizeof(rbuff));
	return ret;
}

/*
 * Program one hardware PID-filter slot (two bytes per slot: low then
 * high byte of the PID).  pid_buff[7] carries 0x80 + the current filter
 * table size.  Restarts the stream around the update when needed.
 */
static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
{
	struct lme2510_state *st = d->priv;
	static u8 pid_buff[] = LME_ZERO_PID;
	static u8 rbuf[1];
	u8 pid_no = index * 2;
	u8 pid_len = pid_no + 2;
	int ret = 0;

	deb_info(1, "PID Setting Pid %04x", pid_out);

	if (st->pid_size == 0)
		ret |= lme2510_stream_restart(d);

	pid_buff[2] = pid_no;
	pid_buff[3] = (u8)pid_out & 0xff;
	pid_buff[4] = pid_no + 1;
	pid_buff[5] = (u8)(pid_out >> 8);

	if (pid_len > st->pid_size)
		st->pid_size = pid_len;
	pid_buff[7] = 0x80 + st->pid_size;

	ret |= lme2510_usb_talk(d, pid_buff ,
		sizeof(pid_buff) , rbuf, sizeof(rbuf));

	if (st->stream_on)
		ret |= lme2510_stream_restart(d);

	return ret;
}

/*
 * Interrupt-URB completion handler.  The device delivers 8-byte records:
 * 0xaa = remote-control key, 0xbb = signal status (layout depends on the
 * tuner/firmware), 0xcc = control data.  Resubmits the URB when done.
 */
static void lme2510_int_response(struct urb *lme_urb)
{
	struct dvb_usb_adapter *adap = lme_urb->context;
	struct lme2510_state *st = adap->dev->priv;
	static u8 *ibuf, *rbuf;
	int i = 0, offset;
	u32 key;

	switch (lme_urb->status) {
	case 0:
	case -ETIMEDOUT:
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:
		info("Error %x", lme_urb->status);
		break;
	}

	rbuf = (u8 *) lme_urb->transfer_buffer;

	/* at most 4 records are processed per completion */
	offset = ((lme_urb->actual_length/8) > 4)
			? 4 : (lme_urb->actual_length/8) ;

	for (i = 0; i < offset; ++i) {
		ibuf = (u8 *)&rbuf[i*8];
		deb_info(5, "INT O/S C =%02x C/O=%02x Type =%02x%02x",
			offset, i, ibuf[0], ibuf[1]);

		switch (ibuf[0]) {
		case 0xaa:
			debug_data_snipet(1, "INT Remote data snipet", ibuf);
			/* bytes 4/5 must be one's complements of each other */
			if ((ibuf[4] + ibuf[5]) == 0xff) {
				key = ibuf[5];
				key += (ibuf[3] > 0) ?
					(ibuf[3] ^ 0xff) << 8 : 0;
				key += (ibuf[2] ^ 0xff) << 16;
				deb_info(1, "INT Key =%08x", key);
				if (adap->dev->rc_dev != NULL)
					rc_keydown(adap->dev->rc_dev,
						key, 0);
			}
			break;
		case 0xbb:
			switch (st->tuner_config) {
			case TUNER_LG:
				if (ibuf[2] > 0)
					st->signal_lock = ibuf[2];
				st->signal_level = ibuf[4];
				st->signal_sn = ibuf[3];
				st->time_key = ibuf[7];
				break;
			case TUNER_S7395:
			case TUNER_S0194:
				/* Tweak for earlier firmware */
				if (ibuf[1] == 0x03) {
					if (ibuf[2] > 1)
						st->signal_lock = ibuf[2];
					st->signal_level = ibuf[3];
					st->signal_sn = ibuf[4];
				} else {
					st->signal_level = ibuf[4];
					st->signal_sn = ibuf[5];
					st->signal_lock =
						(st->signal_lock & 0xf7) +
						((ibuf[2] & 0x01) << 0x03);
				}
				break;
			default:
				break;
			}
			debug_data_snipet(5, "INT Remote data snipet in", ibuf);
			break;
		case 0xcc:
			debug_data_snipet(1, "INT Control data snipet", ibuf);
			break;
		default:
			debug_data_snipet(1, "INT Unknown data snipet", ibuf);
			break;
		}
	}

	usb_submit_urb(lme_urb, GFP_ATOMIC);
}

/*
 * Allocate and submit the interrupt URB (endpoint 0xa, interval 11) that
 * feeds lme2510_int_response().  Uses a 5000-byte coherent buffer.
 */
static int lme2510_int_read(struct dvb_usb_adapter *adap)
{
	struct lme2510_state *lme_int = adap->dev->priv;

	lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);

	if (lme_int->lme_urb == NULL)
		return -ENOMEM;

	lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 5000, GFP_ATOMIC,
					&lme_int->lme_urb->transfer_dma);

	if (lme_int->buffer == NULL)
		return -ENOMEM;

	usb_fill_int_urb(lme_int->lme_urb,
				adap->dev->udev,
				usb_rcvintpipe(adap->dev->udev, 0xa),
				lme_int->buffer,
				4096,
				lme2510_int_response,
				adap,
				11);

	lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
	info("INT Interrupt Service Started");

	return 0;
}

/* Clear the hardware PID table when filtering is switched off */
static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	struct lme2510_state *st = adap->dev->priv;
	static u8 clear_pid_reg[] = LME_CLEAR_PID;
	static u8 rbuf[1];
	int ret;

	deb_info(1, "PID Clearing Filter");

	ret = mutex_lock_interruptible(&adap->dev->i2c_mutex);
	if (ret < 0)
		return -EAGAIN;

	if (!onoff)
		ret |= lme2510_usb_talk(adap->dev, clear_pid_reg,
			sizeof(clear_pid_reg), rbuf, sizeof(rbuf));

	st->pid_size = 0;

	mutex_unlock(&adap->dev->i2c_mutex);

	return 0;
}

/*
 * dvb-usb pid_filter callback.  Only programs hardware when the module
 * parameter 'pid' (pid_filter) left filtering enabled (0 = on).
 */
static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
	int onoff)
{
	int ret = 0;

	deb_info(3, "%s PID=%04x Index=%04x onoff=%02x", __func__,
		pid, index, onoff);

	if (onoff)
		if (!pid_filter) {
			ret = mutex_lock_interruptible(&adap->dev->i2c_mutex);
			if (ret < 0)
				return -EAGAIN;
			ret |= lme2510_enable_pid(adap->dev, index, pid);
			mutex_unlock(&adap->dev->i2c_mutex);
		}

	return ret;
}

/*
 * Query the device's firmware status via a control transfer.
 * Returns data[2] (the status byte) or -ENODEV on transfer failure.
 */
static int lme2510_return_status(struct usb_device *dev)
{
	int ret = 0;
	u8 *data;

	data = kzalloc(10, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret |= usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
	info("Firmware Status: %x (%x)", ret , data[2]);

	ret = (ret < 0) ? -ENODEV : data[2];
	kfree(data);
	return ret;
}

/*
 * I2C message shim.  While i2c_talk_onoff == 1, messages go to the
 * hardware and lock/level/snr read-backs update the cached state; while
 * streaming, known status registers are answered from the cache instead
 * (any other register falls through to the hardware and re-enables talk).
 */
static int lme2510_msg(struct dvb_usb_device *d,
		u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
	int ret = 0;
	struct lme2510_state *st = d->priv;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	if (st->i2c_talk_onoff == 1) {

		ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);

		switch (st->tuner_config) {
		case TUNER_LG:
			if (wbuf[2] == 0x1c) {
				if (wbuf[3] == 0x0e) {
					st->signal_lock = rbuf[1];
					if ((st->stream_on & 1) &&
						(st->signal_lock & 0x10)) {
						lme2510_stream_restart(d);
						st->i2c_talk_onoff = 0;
					}
					msleep(80);
				}
			}
			break;
		case TUNER_S7395:
			if (wbuf[2] == 0xd0) {
				if (wbuf[3] == 0x24) {
					st->signal_lock = rbuf[1];
					if ((st->stream_on & 1) &&
						(st->signal_lock & 0x8)) {
						lme2510_stream_restart(d);
						st->i2c_talk_onoff = 0;
					}
				}
				if ((wbuf[3] != 0x6) & (wbuf[3] != 0x5))
					msleep(5);
			}
			break;
		case TUNER_S0194:
			if (wbuf[2] == 0xd0) {
				if (wbuf[3] == 0x1b) {
					st->signal_lock = rbuf[1];
					if ((st->stream_on & 1) &&
						(st->signal_lock & 0x8)) {
						lme2510_stream_restart(d);
						st->i2c_talk_onoff = 0;
					}
				}
			}
			break;
		default:
			break;
		}
	} else {
		/* answer known status registers from the interrupt cache */
		switch (st->tuner_config) {
		case TUNER_LG:
			switch (wbuf[3]) {
			case 0x0e:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_lock;
				break;
			case 0x43:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_level;
				break;
			case 0x1c:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_sn;
				break;
			case 0x15:
			case 0x16:
			case 0x17:
			case 0x18:
				rbuf[0] = 0x55;
				rbuf[1] = 0x00;
				break;
			default:
				lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
				st->i2c_talk_onoff = 1;
				break;
			}
			break;
		case TUNER_S7395:
			switch (wbuf[3]) {
			case 0x10:
				rbuf[0] = 0x55;
				rbuf[1] = (st->signal_level & 0x80)
						? 0 : (st->signal_level * 2);
				break;
			case 0x2d:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_sn;
				break;
			case 0x24:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_lock;
				break;
			case 0x2e:
			case 0x26:
			case 0x27:
				rbuf[0] = 0x55;
				rbuf[1] = 0x00;
				break;
			default:
				lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
				st->i2c_talk_onoff = 1;
				break;
			}
			break;
		case TUNER_S0194:
			switch (wbuf[3]) {
			case 0x18:
				rbuf[0] = 0x55;
				rbuf[1] = (st->signal_level & 0x80)
						? 0 : (st->signal_level * 2);
				break;
			case 0x24:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_sn;
				break;
			case 0x1b:
				rbuf[0] = 0x55;
				rbuf[1] = st->signal_lock;
				break;
			case 0x19:
			case 0x25:
			case 0x1e:
			case 0x1d:
				rbuf[0] = 0x55;
				rbuf[1] = 0x00;
				break;
			default:
				lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
				st->i2c_talk_onoff = 1;
				break;
			}
			break;
		default:
			break;
		}

		deb_info(4, "I2C From Interrupt Message out(%02x) in(%02x)",
				wbuf[3], rbuf[1]);
	}

	mutex_unlock(&d->i2c_mutex);

	return ret;
}

/*
 * i2c_algorithm master_xfer: packs up to two i2c messages (write or
 * write-then-read) into the device's gated command format and routes
 * them through lme2510_msg().
 */
static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
				 int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	struct lme2510_state *st = d->priv;
	static u8 obuf[64], ibuf[512];
	int i, read, read_o;
	u16 len;
	u8 gate = st->i2c_gate;

	if (gate == 0)
		gate = 5;

	if (num > 2)
		warn("more than 2 i2c messages"
			"at a time is not handled yet. TODO.");

	for (i = 0; i < num; i++) {
		read_o = 1 & (msg[i].flags & I2C_M_RD);
		read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
		read |= read_o;
		/* tuner accesses use a dedicated read/write gate */
		gate = (msg[i].addr == st->i2c_tuner_addr)
			? (read) ? st->i2c_tuner_gate_r
				 : st->i2c_tuner_gate_w
			: st->i2c_gate;
		obuf[0] = gate | (read << 7);

		if (gate == 5)
			obuf[1] = (read) ? 2 : msg[i].len + 1;
		else
			obuf[1] = msg[i].len + read + 1;

		obuf[2] = msg[i].addr;
		if (read) {
			if (read_o)
				len = 3;
			else {
				memcpy(&obuf[3], msg[i].buf, msg[i].len);
				obuf[msg[i].len+3] = msg[i+1].len;
				len = msg[i].len+4;
			}
		} else {
			memcpy(&obuf[3], msg[i].buf, msg[i].len);
			len = msg[i].len+3;
		}

		if (lme2510_msg(d, obuf, len, ibuf, 512) < 0) {
			deb_info(1, "i2c transfer failed.");
			return -EAGAIN;
		}

		if (read) {
			if (read_o)
				memcpy(msg[i].buf, &ibuf[1], msg[i].len);
			else {
				/* write+read pair consumed two messages */
				memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len);
				i++;
			}
		}
	}

	return i;
}

static u32 lme2510_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

static struct i2c_algorithm lme2510_i2c_algo = {
	.master_xfer   = lme2510_i2c_xfer,
	.functionality = lme2510_i2c_func,
};

/* Callbacks for DVB USB */

/* Device is always treated as warm; firmware handling is done in probe */
static int lme2510_identify_state(struct usb_device *udev,
		struct dvb_usb_device_properties *props,
		struct dvb_usb_device_description **desc,
		int *cold)
{
	*cold = 0;
	return 0;
}

/*
 * streaming_ctrl: the stream itself is started on FE_HAS_LOCK, so "on"
 * only records the state; "off" clears the PID table and re-enables
 * direct i2c talk.
 */
static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	struct lme2510_state *st = adap->dev->priv;
	static u8 clear_reg_3[] = LME_CLEAR_PID;
	static u8 rbuf[1];
	int ret = 0, rlen = sizeof(rbuf);

	deb_info(1, "STM (%02x)", onoff);

	/* Streaming is started by FE_HAS_LOCK */
	if (onoff == 1)
		st->stream_on = 1;
	else {
		deb_info(1, "STM Steam Off");
		/* mutex is here only to avoid collision with I2C */
		if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
			return -EAGAIN;

		ret = lme2510_usb_talk(adap->dev, clear_reg_3,
				sizeof(clear_reg_3), rbuf, rlen);
		st->stream_on = 0;
		st->i2c_talk_onoff = 1;

		mutex_unlock(&adap->dev->i2c_mutex);
	}

	return (ret < 0) ? -ENODEV : 0;
}

/* Simple additive checksum over len bytes, as expected by the firmware */
static u8 check_sum(u8 *p, u8 len)
{
	u8 sum = 0;
	while (len--)
		sum += *p++;
	return sum;
}

/*
 * Push a firmware image to the device in 0x31+1 byte packets over bulk
 * endpoint 1, in two stages (first 512 bytes, then the remainder), each
 * packet carrying an index, length and checksum.  The device answers
 * 0x88 per accepted packet.  Finishes with a reset command.
 */
static int lme2510_download_firmware(struct usb_device *dev,
					const struct firmware *fw)
{
	int ret = 0;
	u8 *data;
	u16 j, wlen, len_in, start, end;
	u8 packet_size, dlen, i;
	u8 *fw_data;

	packet_size = 0x31;
	len_in = 1;

	data = kzalloc(512, GFP_KERNEL);
	if (!data) {
		info("FRM Could not start Firmware Download (Buffer allocation failed)");
		return -ENOMEM;
	}

	info("FRM Starting Firmware Download");

	for (i = 1; i < 3; i++) {
		start = (i == 1) ? 0 : 512;
		end = (i == 1) ? 512 : fw->size;
		for (j = start; j < end; j += (packet_size+1)) {
			fw_data = (u8 *)(fw->data + j);
			if ((end - j) > packet_size) {
				data[0] = i;
				dlen = packet_size;
			} else {
				/* last packet of this stage */
				data[0] = i | 0x80;
				dlen = (u8)(end - j)-1;
			}
			data[1] = dlen;
			memcpy(&data[2], fw_data, dlen+1);
			wlen = (u8) dlen + 4;
			data[wlen-1] = check_sum(fw_data, dlen+1);
			deb_info(1, "Data S=%02x:E=%02x CS= %02x",
				data[3], data[dlen+2], data[dlen+3]);
			ret |= lme2510_bulk_write(dev, data, wlen, 1);
			ret |= lme2510_bulk_read(dev, data, len_in , 1);
			ret |= (data[0] == 0x88) ? 0 : -1;
		}
	}

	usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);

	data[0] = 0x8a;
	len_in = 1;
	msleep(2000);
	ret |= lme2510_bulk_write(dev, data , len_in, 1); /* Resetting */
	ret |= lme2510_bulk_read(dev, data, len_in, 1);
	msleep(400);

	if (ret < 0)
		info("FRM Firmware Download Failed (%04x)" , ret);
	else
		info("FRM Firmware Download Completed - Resetting Device");

	kfree(data);
	return (ret < 0) ? -ENODEV : 0;
}

/* Issue the cold-reset command (0x0a) over bulk endpoint 1 */
static void lme_coldreset(struct usb_device *dev)
{
	int ret = 0, len_in;
	u8 data[512] = {0};

	data[0] = 0x0a;
	len_in = 1;
	info("FRM Firmware Cold Reset");
	ret |= lme2510_bulk_write(dev, data , len_in, 1); /* Cold Resetting */
	ret |= lme2510_bulk_read(dev, data, len_in, 1);

	return;
}

/*
 * Select and (optionally) download the firmware matching the requested
 * tuner, falling back through the known variants if a file is missing.
 * When 'cold' is set, only a cold reset is issued (firmware takes effect
 * after re-enumeration) and -ENODEV is returned to abort the probe.
 */
static int lme_firmware_switch(struct usb_device *udev, int cold)
{
	const struct firmware *fw = NULL;
	const char fw_c_s7395[] = "dvb-usb-lme2510c-s7395.fw";
	const char fw_c_lg[] = "dvb-usb-lme2510c-lg.fw";
	const char fw_c_s0194[] = "dvb-usb-lme2510c-s0194.fw";
	const char fw_lg[] = "dvb-usb-lme2510-lg.fw";
	const char fw_s0194[] = "dvb-usb-lme2510-s0194.fw";
	const char *fw_lme;
	int ret, cold_fw;

	cold = (cold > 0) ? (cold & 1) : 0;

	cold_fw = !cold;

	/* 0x1122 = LME2510; anything else = LME2510C */
	if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
		switch (dvb_usb_lme2510_firmware) {
		default:
			dvb_usb_lme2510_firmware = TUNER_S0194;
			/* fall through to try S0194 first */
		case TUNER_S0194:
			fw_lme = fw_s0194;
			ret = request_firmware(&fw, fw_lme, &udev->dev);
			if (ret == 0) {
				cold = 0;
				break;
			}
			dvb_usb_lme2510_firmware = TUNER_LG;
			/* fall through */
		case TUNER_LG:
			fw_lme = fw_lg;
			ret = request_firmware(&fw, fw_lme, &udev->dev);
			if (ret == 0)
				break;
			info("FRM No Firmware Found - please install");
			dvb_usb_lme2510_firmware = TUNER_DEFAULT;
			cold = 0;
			cold_fw = 0;
			break;
		}
	} else {
		switch (dvb_usb_lme2510_firmware) {
		default:
			dvb_usb_lme2510_firmware = TUNER_S7395;
			/* fall through to try S7395 first */
		case TUNER_S7395:
			fw_lme = fw_c_s7395;
			ret = request_firmware(&fw, fw_lme, &udev->dev);
			if (ret == 0) {
				cold = 0;
				break;
			}
			dvb_usb_lme2510_firmware = TUNER_LG;
			/* fall through */
		case TUNER_LG:
			fw_lme = fw_c_lg;
			ret = request_firmware(&fw, fw_lme, &udev->dev);
			if (ret == 0)
				break;
			dvb_usb_lme2510_firmware = TUNER_S0194;
			/* fall through */
		case TUNER_S0194:
			fw_lme = fw_c_s0194;
			ret = request_firmware(&fw, fw_lme, &udev->dev);
			if (ret == 0)
				break;
			info("FRM No Firmware Found - please install");
			dvb_usb_lme2510_firmware = TUNER_DEFAULT;
			cold = 0;
			cold_fw = 0;
			break;
		}
	}

	if (cold_fw) {
		info("FRM Loading %s file", fw_lme);
		ret = lme2510_download_firmware(udev, fw);
	}

	release_firmware(fw);

	if (cold) {
		info("FRM Changing to %s firmware", fw_lme);
		lme_coldreset(udev);
		return -ENODEV;
	}

	return ret;
}

/* Kill every submitted streaming URB of the adapter */
static int lme2510_kill_urb(struct usb_data_stream *stream)
{
	int i;

	for (i = 0; i < stream->urbs_submitted; i++) {
		deb_info(3, "killing URB no. %d.", i);
		/* stop the URB */
		usb_kill_urb(stream->urb_list[i]);
	}
	stream->urbs_submitted = 0;

	return 0;
}

static struct tda10086_config tda10086_config = {
	.demod_address = 0x1c,
	.invert = 0,
	.diseqc_tone = 1,
	.xtal_freq = TDA10086_XTAL_16M,
};

static struct stv0288_config lme_config = {
	.demod_address = 0xd0,
	.min_delay_ms = 15,
	.inittab = s7395_inittab,
};

static struct ix2505v_config lme_tuner = {
	.tuner_address = 0xc0,
	.min_delay_ms = 100,
	.tuner_gain = 0x0,
	.tuner_chargepump = 0x3,
};

static struct stv0299_config sharp_z0194_config = {
	.demod_address = 0xd0,
	.inittab = sharp_z0194a_inittab,
	.mclk = 88000000UL,
	.invert = 0,
	.skip_reinit = 0,
	.lock_output = STV0299_LOCKOUTPUT_1,
	.volt13_op0_op1 = STV0299_VOLT13_OP1,
	.min_delay_ms = 100,
	.set_symbol_rate = sharp_z0194a_set_symbol_rate,
};

/* LNB voltage control: 18 V for horizontal, 13 V otherwise (incl. off) */
static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
					fe_sec_voltage_t voltage)
{
	struct dvb_usb_adapter *adap = fe->dvb->priv;
	static u8 voltage_low[] = LME_VOLTAGE_L;
	static u8 voltage_high[] = LME_VOLTAGE_H;
	static u8 rbuf[1];
	int ret = 0, len = 3, rlen = 1;

	if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
		return -EAGAIN;

	switch (voltage) {
	case SEC_VOLTAGE_18:
		ret |= lme2510_usb_talk(adap->dev,
			voltage_high, len, rbuf, rlen);
		break;
	case SEC_VOLTAGE_OFF:
	case SEC_VOLTAGE_13:
	default:
		ret |= lme2510_usb_talk(adap->dev,
				voltage_low, len, rbuf, rlen);
		break;
	}

	mutex_unlock(&adap->dev->i2c_mutex);

	return (ret < 0) ? -ENODEV : 0;
}

/* Build the frontend name from the device description + tuner variant */
static int lme_name(struct dvb_usb_adapter *adap)
{
	struct lme2510_state *st = adap->dev->priv;
	const char *desc = adap->dev->desc->name;
	char *fe_name[] = {"", " LG TDQY-P001F", " SHARP:BS2F7HZ7395",
				" SHARP:BS2F7HZ0194"};
	char *name = adap->fe->ops.info.name;

	strlcpy(name, desc, 128);
	strlcat(name, fe_name[st->tuner_config], 128);

	return 0;
}

/*
 * Probe the demodulator variants in order (TDA10086, STV0299, STV0288),
 * record the matching tuner gate/address configuration and, if the
 * loaded firmware does not match the detected hardware, trigger a
 * firmware switch (which cold-resets and fails the attach on purpose).
 */
static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct lme2510_state *st = adap->dev->priv;

	int ret = 0;

	st->i2c_talk_onoff = 1;

	st->i2c_gate = 4;
	adap->fe = dvb_attach(tda10086_attach, &tda10086_config,
		&adap->dev->i2c_adap);

	if (adap->fe) {
		info("TUN Found Frontend TDA10086");
		st->i2c_tuner_gate_w = 4;
		st->i2c_tuner_gate_r = 4;
		st->i2c_tuner_addr = 0xc0;
		st->tuner_config = TUNER_LG;
		if (dvb_usb_lme2510_firmware != TUNER_LG) {
			dvb_usb_lme2510_firmware = TUNER_LG;
			ret = lme_firmware_switch(adap->dev->udev, 1);
		}
		goto end;
	}

	st->i2c_gate = 4;
	adap->fe = dvb_attach(stv0299_attach, &sharp_z0194_config,
			&adap->dev->i2c_adap);
	if (adap->fe) {
		info("FE Found Stv0299");
		st->i2c_tuner_gate_w = 4;
		st->i2c_tuner_gate_r = 5;
		st->i2c_tuner_addr = 0xc0;
		st->tuner_config = TUNER_S0194;
		if (dvb_usb_lme2510_firmware != TUNER_S0194) {
			dvb_usb_lme2510_firmware = TUNER_S0194;
			ret = lme_firmware_switch(adap->dev->udev, 1);
		}
		goto end;
	}

	st->i2c_gate = 5;
	adap->fe = dvb_attach(stv0288_attach, &lme_config,
			&adap->dev->i2c_adap);

	if (adap->fe) {
		info("FE Found Stv0288");
		st->i2c_tuner_gate_w = 4;
		st->i2c_tuner_gate_r = 5;
		st->i2c_tuner_addr = 0xc0;
		st->tuner_config = TUNER_S7395;
		if (dvb_usb_lme2510_firmware != TUNER_S7395) {
			dvb_usb_lme2510_firmware = TUNER_S7395;
			ret = lme_firmware_switch(adap->dev->udev, 1);
		}
	} else {
		info("DM04 Not Supported");
		return -ENODEV;
	}

end:
	if (ret) {
		if (adap->fe) {
			dvb_frontend_detach(adap->fe);
			adap->fe = NULL;
		}
		adap->dev->props.rc.core.rc_codes = NULL;
		return -ENODEV;
	}

	adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
	ret = lme_name(adap);
	return ret;
}

/*
 * Attach the tuner matching the detected frontend and start the
 * interrupt service.  On failure, cold-reset the device.
 */
static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
{
	struct lme2510_state *st = adap->dev->priv;
	char *tun_msg[] = {"", "TDA8263", "IX2505V", "DVB_PLL_OPERA"};
	int ret = 0;

	switch (st->tuner_config) {
	case TUNER_LG:
		if (dvb_attach(tda826x_attach, adap->fe, 0xc0,
			&adap->dev->i2c_adap, 1))
			ret = st->tuner_config;
		break;
	case TUNER_S7395:
		if (dvb_attach(ix2505v_attach , adap->fe, &lme_tuner,
			&adap->dev->i2c_adap))
			ret = st->tuner_config;
		break;
	case TUNER_S0194:
		if (dvb_attach(dvb_pll_attach , adap->fe, 0xc0,
			&adap->dev->i2c_adap, DVB_PLL_OPERA1))
			ret = st->tuner_config;
		break;
	default:
		break;
	}

	if (ret)
		info("TUN Found %s tuner", tun_msg[ret]);
	else {
		info("TUN No tuner found --- reseting device");
		lme_coldreset(adap->dev->udev);
		return -ENODEV;
	}

	/* Start the Interrupt */
	ret = lme2510_int_read(adap);
	if (ret < 0) {
		info("INT Unable to start Interrupt Service");
		return -ENODEV;
	}

	return ret;
}

/* power_ctrl: switch the LNB supply on/off and re-enable i2c talk */
static int lme2510_powerup(struct dvb_usb_device *d, int onoff)
{
	struct lme2510_state *st = d->priv;
	static u8 lnb_on[] = LNB_ON;
	static u8 lnb_off[] = LNB_OFF;
	static u8 rbuf[1];
	int ret, len = 3, rlen = 1;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	if (onoff)
		ret = lme2510_usb_talk(d, lnb_on, len, rbuf, rlen);
	else
		ret = lme2510_usb_talk(d, lnb_off, len, rbuf, rlen);

	st->i2c_talk_onoff = 1;

	mutex_unlock(&d->i2c_mutex);

	return ret;
}

/* DVB USB Driver stuff */
static struct dvb_usb_device_properties lme2510_properties;
static struct dvb_usb_device_properties lme2510c_properties;

/*
 * USB probe: requires high-speed; status 0x44 means firmware still needs
 * loading (warm-load, then re-enumerate), otherwise register either the
 * LME2510 or LME2510C property set.
 */
static int lme2510_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret = 0;

	usb_reset_configuration(udev);

	usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1);

	if (udev->speed != USB_SPEED_HIGH) {
		ret = usb_reset_device(udev);
		info("DEV Failed to connect in HIGH SPEED mode");
		return -ENODEV;
	}

	if (lme2510_return_status(udev) == 0x44) {
		lme_firmware_switch(udev, 0);
		return -ENODEV;
	}

	if (0 == dvb_usb_device_init(intf, &lme2510_properties,
				     THIS_MODULE, NULL, adapter_nr)) {
		info("DEV registering device driver");
		return 0;
	}
	if (0 == dvb_usb_device_init(intf, &lme2510c_properties,
				     THIS_MODULE, NULL, adapter_nr)) {
		info("DEV registering device driver");
		return 0;
	}

	info("DEV lme2510 Error");
	return -ENODEV;
}

static struct usb_device_id lme2510_table[] = {
	{ USB_DEVICE(0x3344, 0x1122) },	/* LME2510 */
	{ USB_DEVICE(0x3344, 0x1120) },	/* LME2510C */
	{}	/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, lme2510_table);

static struct dvb_usb_device_properties lme2510_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.size_of_priv = sizeof(struct lme2510_state),
	.num_adapters = 1,
	.adapter = {
		{
			.caps = DVB_USB_ADAP_HAS_PID_FILTER|
				DVB_USB_ADAP_NEED_PID_FILTERING|
				DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
			.streaming_ctrl   = lme2510_streaming_ctrl,
			.pid_filter_count = 15,
			.pid_filter = lme2510_pid_filter,
			.pid_filter_ctrl  = lme2510_pid_filter_ctrl,
			.frontend_attach  = dm04_lme2510_frontend_attach,
			.tuner_attach = dm04_lme2510_tuner,
			/* parameter for the MPEG2-data transfer */
			.stream = {
				.type = USB_BULK,
				.count = 10,
				.endpoint = 0x06,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}
	},
	.rc.core = {
		.protocol	= RC_TYPE_NEC,
		.module_name	= "LME2510 Remote Control",
		.allowed_protos	= RC_TYPE_NEC,
		.rc_codes	= RC_MAP_LME2510,
	},
	.power_ctrl       = lme2510_powerup,
	.identify_state   = lme2510_identify_state,
	.i2c_algo         = &lme2510_i2c_algo,
	.generic_bulk_ctrl_endpoint = 0,
	.num_device_descs = 1,
	.devices = {
		{   "DM04_LME2510_DVB-S",
			{ &lme2510_table[0], NULL },
			},
	}
};

static struct dvb_usb_device_properties lme2510c_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.size_of_priv = sizeof(struct lme2510_state),
	.num_adapters = 1,
	.adapter = {
		{
			.caps = DVB_USB_ADAP_HAS_PID_FILTER|
				DVB_USB_ADAP_NEED_PID_FILTERING|
				DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
			.streaming_ctrl   = lme2510_streaming_ctrl,
			.pid_filter_count = 15,
			.pid_filter = lme2510_pid_filter,
			.pid_filter_ctrl  = lme2510_pid_filter_ctrl,
			.frontend_attach  = dm04_lme2510_frontend_attach,
			.tuner_attach = dm04_lme2510_tuner,
			/* parameter for the MPEG2-data transfer */
			.stream = {
				.type = USB_BULK,
				.count = 10,
				.endpoint = 0x8,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}
	},
	.rc.core = {
		.protocol	= RC_TYPE_NEC,
		.module_name	= "LME2510 Remote Control",
		.allowed_protos	= RC_TYPE_NEC,
		.rc_codes	= RC_MAP_LME2510,
	},
	.power_ctrl       = lme2510_powerup,
	.identify_state   = lme2510_identify_state,
	.i2c_algo         = &lme2510_i2c_algo,
	.generic_bulk_ctrl_endpoint = 0,
	.num_device_descs = 1,
	.devices = {
		{   "DM04_LME2510C_DVB-S",
			{ &lme2510_table[1], NULL },
			},
	}
};

/*
 * Tear down streaming and the interrupt URB; returns the usb bounce
 * buffer so the caller can free it after dvb_usb_device_exit().
 */
static void *lme2510_exit_int(struct dvb_usb_device *d)
{
	struct lme2510_state *st = d->priv;
	struct dvb_usb_adapter *adap = &d->adapter[0];
	void *buffer = NULL;

	if (adap != NULL) {
		lme2510_kill_urb(&adap->stream);
		adap->feedcount = 0;
	}

	if (st->usb_buffer != NULL) {
		st->i2c_talk_onoff = 1;
		st->signal_lock = 0;
		st->signal_level = 0;
		st->signal_sn = 0;
		buffer = st->usb_buffer;
	}

	if (st->lme_urb != NULL) {
		usb_kill_urb(st->lme_urb);
		usb_free_coherent(d->udev, 5000, st->buffer,
				  st->lme_urb->transfer_dma);
		info("Interrupt Service Stopped");
	}

	return buffer;
}

/* USB disconnect: free device state, then the bounce buffer */
static void lme2510_exit(struct usb_interface *intf)
{
	struct dvb_usb_device *d = usb_get_intfdata(intf);
	void *usb_buffer;

	if (d != NULL) {
		usb_buffer = lme2510_exit_int(d);
		dvb_usb_device_exit(intf);
		if (usb_buffer != NULL)
			kfree(usb_buffer);
	}
}

static struct usb_driver lme2510_driver = {
	.name = "LME2510C_DVB-S",
	.probe = lme2510_probe,
	.disconnect = lme2510_exit,
	.id_table = lme2510_table,
};

/* module stuff */
static int __init lme2510_module_init(void)
{
	int result = usb_register(&lme2510_driver);

	if (result) {
		err("usb_register failed. Error number %d", result);
		return result;
	}

	return 0;
}

static void __exit lme2510_module_exit(void)
{
	/* deregister this driver from the USB subsystem */
	usb_deregister(&lme2510_driver);
}

module_init(lme2510_module_init);
module_exit(lme2510_module_exit);

MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
MODULE_VERSION("1.88");
MODULE_LICENSE("GPL");
gpl-2.0
chrisch1974/htc8960-3.0
drivers/input/touchscreen/jornada720_ts.c
4051
4870
/*
 * drivers/input/touchscreen/jornada720_ts.c
 *
 * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
 *
 * Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl>
 *  based on HP Jornada 56x touchscreen driver by Alex Lange <chicken@handhelds.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * HP Jornada 710/720/729 Touchscreen Driver
 */

#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <mach/hardware.h>
#include <mach/jornada720.h>

MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
MODULE_LICENSE("GPL v2");

/* Driver state: the input device plus the raw sample buffers */
struct jornada_ts {
	struct input_dev *dev;
	int x_data[4];		/* X sample values (3 low words + high bits) */
	int y_data[4];		/* Y sample values (3 low words + high bits) */
};

/*
 * Clock the eight sample bytes out of the SSP in the order the
 * microcontroller sends them: three low-word X samples, three low-word
 * Y samples, then one byte of combined X high bits and one of Y.
 */
static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts)
{
	int s;

	/* 3 low word X samples */
	for (s = 0; s < 3; s++)
		jornada_ts->x_data[s] = jornada_ssp_byte(TXDUMMY);

	/* 3 low word Y samples */
	for (s = 0; s < 3; s++)
		jornada_ts->y_data[s] = jornada_ssp_byte(TXDUMMY);

	/* combined x samples bits */
	jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY);

	/* combined y samples bits */
	jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY);
}

/*
 * Rebuild the three 10-bit samples (coords[3] holds two high bits per
 * sample, sample k in bits 2k..2k+1) and return their mean.
 */
static int jornada720_ts_average(int coords[4])
{
	int high_bits = coords[3];
	int total = 0;
	int k;

	for (k = 0; k < 3; k++)
		total += coords[k] |
			 ((high_bits & (0x03 << (2 * k))) << (8 - 2 * k));

	return total / 3;
}

/*
 * Pen IRQ handler: GPIO9 high means pen-up; otherwise request a sample
 * burst over the SSP and report the averaged position.
 */
static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
	struct input_dev *input = jornada_ts->dev;
	int x, y;

	/* If GPIO_GPIO9 is set to high then report pen up */
	if (GPLR & GPIO_GPIO(9)) {
		input_report_key(input, BTN_TOUCH, 0);
		input_sync(input);
		return IRQ_HANDLED;
	}

	jornada_ssp_start();

	/* proper reply to request is always TXDUMMY */
	if (jornada_ssp_inout(GETTOUCHSAMPLES) == TXDUMMY) {
		jornada720_ts_collect_data(jornada_ts);

		x = jornada720_ts_average(jornada_ts->x_data);
		y = jornada720_ts_average(jornada_ts->y_data);

		input_report_key(input, BTN_TOUCH, 1);
		input_report_abs(input, ABS_X, x);
		input_report_abs(input, ABS_Y, y);
		input_sync(input);
	}

	jornada_ssp_end();

	return IRQ_HANDLED;
}

/* Allocate state + input device, declare capabilities, grab the pen IRQ */
static int __devinit jornada720_ts_probe(struct platform_device *pdev)
{
	struct jornada_ts *jornada_ts;
	struct input_dev *input_dev;
	int error;

	jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL);
	input_dev = input_allocate_device();

	if (!jornada_ts || !input_dev) {
		error = -ENOMEM;
		goto err_free;
	}

	platform_set_drvdata(pdev, jornada_ts);

	jornada_ts->dev = input_dev;

	input_dev->name = "HP Jornada 7xx Touchscreen";
	input_dev->phys = "jornadats/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);

	error = request_irq(IRQ_GPIO9,
			jornada720_ts_interrupt,
			IRQF_DISABLED | IRQF_TRIGGER_RISING,
			"HP7XX Touchscreen driver", pdev);
	if (error) {
		printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n");
		goto err_free;
	}

	error = input_register_device(jornada_ts->dev);
	if (error)
		goto err_irq;

	return 0;

err_irq:
	free_irq(IRQ_GPIO9, pdev);
err_free:
	platform_set_drvdata(pdev, NULL);
	input_free_device(input_dev);
	kfree(jornada_ts);
	return error;
}

/* Undo probe: release the IRQ, drop drvdata, unregister and free */
static int __devexit jornada720_ts_remove(struct platform_device *pdev)
{
	struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);

	free_irq(IRQ_GPIO9, pdev);
	platform_set_drvdata(pdev, NULL);
	input_unregister_device(jornada_ts->dev);
	kfree(jornada_ts);

	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:jornada_ts");

static struct platform_driver jornada720_ts_driver = {
	.probe		= jornada720_ts_probe,
	.remove		= __devexit_p(jornada720_ts_remove),
	.driver		= {
		.name	= "jornada_ts",
		.owner	= THIS_MODULE,
	},
};

static int __init jornada720_ts_init(void)
{
	return platform_driver_register(&jornada720_ts_driver);
}

static void __exit jornada720_ts_exit(void)
{
	platform_driver_unregister(&jornada720_ts_driver);
}

module_init(jornada720_ts_init);
module_exit(jornada720_ts_exit);
gpl-2.0
HomerSp/shooter_u-ics
drivers/media/dvb/frontends/ec100.c
4307
7388
/*
 * E3C EC100 demodulator driver
 *
 * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include "dvb_frontend.h"
#include "ec100_priv.h"
#include "ec100.h"

int ec100_debug;
module_param_named(debug, ec100_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

/* Demodulator instance state */
struct ec100_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend frontend;
	struct ec100_config config;

	u16 ber;	/* last raw BER counter value, for delta computation */
};

/* write single register */
static int ec100_write_reg(struct ec100_state *state, u8 reg, u8 val)
{
	u8 buf[2] = {reg, val};
	struct i2c_msg msg = {
		.addr = state->config.demod_address,
		.flags = 0, .len = 2, .buf = buf};

	if (i2c_transfer(state->i2c, &msg, 1) != 1) {
		warn("I2C write failed reg:%02x", reg);
		return -EREMOTEIO;
	}
	return 0;
}

/* read single register */
static int ec100_read_reg(struct ec100_state *state, u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{
			.addr = state->config.demod_address,
			.flags = 0,
			.len = 1,
			.buf = &reg
		}, {
			.addr = state->config.demod_address,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = val
		}
	};

	if (i2c_transfer(state->i2c, msg, 2) != 2) {
		warn("I2C read failed reg:%02x", reg);
		return -EREMOTEIO;
	}
	return 0;
}

/*
 * Tune: program the tuner, then write the demod's fixed init sequence,
 * the bandwidth-dependent register pair (0x1b/0x1c), IF registers, and
 * finally the "go" toggle on register 0x00.
 */
static int ec100_set_frontend(struct dvb_frontend *fe,
	struct dvb_frontend_parameters *params)
{
	struct ec100_state *state = fe->demodulator_priv;
	int ret;
	u8 tmp, tmp2;

	deb_info("%s: freq:%d bw:%d\n", __func__, params->frequency,
		params->u.ofdm.bandwidth);

	/* program tuner */
	if (fe->ops.tuner_ops.set_params)
		fe->ops.tuner_ops.set_params(fe, params);

	ret = ec100_write_reg(state, 0x04, 0x06);
	if (ret)
		goto error;
	ret = ec100_write_reg(state, 0x67, 0x58);
	if (ret)
		goto error;
	ret = ec100_write_reg(state, 0x05, 0x18);
	if (ret)
		goto error;

	/* reg/bw |   6  |   7  |   8
	   -------+------+------+------
	   A 0x1b | 0xa1 | 0xe7 | 0x2c
	   A 0x1c | 0x55 | 0x63 | 0x72
	   -------+------+------+------
	   B 0x1b | 0xb7 | 0x00 | 0x49
	   B 0x1c | 0x55 | 0x64 | 0x72 */

	switch (params->u.ofdm.bandwidth) {
	case BANDWIDTH_6_MHZ:
		tmp = 0xb7;
		tmp2 = 0x55;
		break;
	case BANDWIDTH_7_MHZ:
		tmp = 0x00;
		tmp2 = 0x64;
		break;
	case BANDWIDTH_8_MHZ:
	default:
		tmp = 0x49;
		tmp2 = 0x72;
	}

	ret = ec100_write_reg(state, 0x1b, tmp);
	if (ret)
		goto error;
	ret = ec100_write_reg(state, 0x1c, tmp2);
	if (ret)
		goto error;

	ret = ec100_write_reg(state, 0x0c, 0xbb); /* if freq */
	if (ret)
		goto error;
	ret = ec100_write_reg(state, 0x0d, 0x31); /* if freq */
	if (ret)
		goto error;

	ret = ec100_write_reg(state, 0x08, 0x24);
	if (ret)
		goto error;

	ret = ec100_write_reg(state, 0x00, 0x00); /* go */
	if (ret)
		goto error;
	ret = ec100_write_reg(state, 0x00, 0x20); /* go */
	if (ret)
		goto error;

	return ret;
error:
	deb_info("%s: failed:%d\n", __func__, ret);
	return ret;
}

static int ec100_get_tune_settings(struct dvb_frontend *fe,
	struct dvb_frontend_tune_settings *fesettings)
{
	fesettings->min_delay_ms = 300;
	fesettings->step_size = 0;
	fesettings->max_drift = 0;

	return 0;
}

/*
 * Lock status: reg 0x42 bit7 = full lock; otherwise reg 0x01 bit4 =
 * signal present, with bit0 clear meaning the signal looks valid.
 */
static int ec100_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct ec100_state *state = fe->demodulator_priv;
	int ret;
	u8 tmp;
	*status = 0;

	ret = ec100_read_reg(state, 0x42, &tmp);
	if (ret)
		goto error;

	if (tmp & 0x80) {
		/* bit7 set - have lock */
		*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
			FE_HAS_SYNC | FE_HAS_LOCK;
	} else {
		ret = ec100_read_reg(state, 0x01, &tmp);
		if (ret)
			goto error;

		if (tmp & 0x10) {
			/* bit4 set - have signal */
			*status |= FE_HAS_SIGNAL;
			if (!(tmp & 0x01)) {
				/* bit0 clear - have ~valid signal */
				*status |= FE_HAS_CARRIER | FE_HAS_VITERBI;
			}
		}
	}

	return ret;
error:
	deb_info("%s: failed:%d\n", __func__, ret);
	return ret;
}

/*
 * BER: read the 16-bit counter from regs 0x65/0x66 and return the delta
 * since the previous call (counter value itself after overflow/clear).
 */
static int ec100_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct ec100_state *state = fe->demodulator_priv;
	int ret;
	u8 tmp, tmp2;
	u16 ber2;

	*ber = 0;

	ret = ec100_read_reg(state, 0x65, &tmp);
	if (ret)
		goto error;
	ret = ec100_read_reg(state, 0x66, &tmp2);
	if (ret)
		goto error;

	ber2 = (tmp2 << 8) | tmp;

	/* if counter overflow or clear */
	if (ber2 < state->ber)
		*ber = ber2;
	else
		*ber = ber2 - state->ber;

	state->ber = ber2;

	return ret;
error:
	deb_info("%s: failed:%d\n", __func__, ret);
	return ret;
}

/* Signal strength: scale the 8-bit reg 0x24 value into the full u16 range */
static int ec100_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct ec100_state *state = fe->demodulator_priv;
	int ret;
	u8 tmp;

	ret = ec100_read_reg(state, 0x24, &tmp);
	if (ret) {
		*strength = 0;
		goto error;
	}

	*strength = ((tmp << 8) | tmp);

	return ret;
error:
	deb_info("%s: failed:%d\n", __func__, ret);
	return ret;
}

/* SNR reporting is not implemented by this chip driver */
static int ec100_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	*snr = 0;
	return 0;
}

/* Uncorrected-block reporting is not implemented by this chip driver */
static int ec100_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	*ucblocks = 0;
	return 0;
}

static void ec100_release(struct dvb_frontend *fe)
{
	struct ec100_state *state = fe->demodulator_priv;
	kfree(state);
}

static struct dvb_frontend_ops ec100_ops;

/*
 * Attach entry point: allocate state, verify the chip by reading reg
 * 0x33 (expected 0x0b) and hand back the embedded dvb_frontend, or NULL.
 */
struct dvb_frontend *ec100_attach(const struct ec100_config *config,
	struct i2c_adapter *i2c)
{
	int ret;
	struct ec100_state *state = NULL;
	u8 tmp;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct ec100_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state */
	state->i2c = i2c;
	memcpy(&state->config, config, sizeof(struct ec100_config));

	/* check if the demod is there */
	ret = ec100_read_reg(state, 0x33, &tmp);
	if (ret || tmp != 0x0b)
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &ec100_ops,
		sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;

	return &state->frontend;
error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(ec100_attach);

static struct dvb_frontend_ops ec100_ops = {
	.info = {
		.name = "E3C EC100 DVB-T",
		.type = FE_OFDM,
		.caps =
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK | FE_CAN_QAM_16 |
			FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO |
			FE_CAN_MUTE_TS
	},

	.release = ec100_release,
	.set_frontend = ec100_set_frontend,
	.get_tune_settings = ec100_get_tune_settings,
	.read_status = ec100_read_status,
	.read_ber = ec100_read_ber,
	.read_signal_strength = ec100_read_signal_strength,
	.read_snr = ec100_read_snr,
	.read_ucblocks = ec100_read_ucblocks,
};

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("E3C EC100 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
gpl-2.0
livlogik/Evil_Yummy_Gumdrop--Tmo-V10-Kernel
drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
4819
3728
/* * Medion X10 RF remote keytable (Digitainer variant) * * Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi> * * This keymap is for a variant that has a distinctive scrollwheel instead of * up/down buttons (tested with P/N 40009936 / 20018268), reportedly * originally shipped with Medion Digitainer but now sold separately simply as * an "X10" remote. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <media/rc-map.h> static struct rc_map_table medion_x10_digitainer[] = { { 0x02, KEY_POWER }, { 0x2c, KEY_TV }, { 0x2d, KEY_VIDEO }, { 0x04, KEY_DVD }, /* CD/DVD */ { 0x16, KEY_TEXT }, /* "teletext" icon, i.e. 
a screen with lines */ { 0x06, KEY_AUDIO }, { 0x2e, KEY_RADIO }, { 0x31, KEY_EPG }, /* a screen with an open book */ { 0x05, KEY_IMAGES }, /* Photo */ { 0x2f, KEY_INFO }, { 0x78, KEY_UP }, /* scrollwheel up 1 notch */ /* 0x79..0x7f: 2-8 notches, driver repeats 0x78 entry */ { 0x70, KEY_DOWN }, /* scrollwheel down 1 notch */ /* 0x71..0x77: 2-8 notches, driver repeats 0x70 entry */ { 0x19, KEY_MENU }, { 0x1d, KEY_LEFT }, { 0x1e, KEY_OK }, /* scrollwheel press */ { 0x1f, KEY_RIGHT }, { 0x20, KEY_BACK }, { 0x09, KEY_VOLUMEUP }, { 0x08, KEY_VOLUMEDOWN }, { 0x00, KEY_MUTE }, { 0x1b, KEY_SELECT }, /* also has "U" rotated 90 degrees CCW */ { 0x0b, KEY_CHANNELUP }, { 0x0c, KEY_CHANNELDOWN }, { 0x1c, KEY_LAST }, { 0x32, KEY_RED }, /* also Audio */ { 0x33, KEY_GREEN }, /* also Subtitle */ { 0x34, KEY_YELLOW }, /* also Angle */ { 0x35, KEY_BLUE }, /* also Title */ { 0x28, KEY_STOP }, { 0x29, KEY_PAUSE }, { 0x25, KEY_PLAY }, { 0x21, KEY_PREVIOUS }, { 0x18, KEY_CAMERA }, { 0x23, KEY_NEXT }, { 0x24, KEY_REWIND }, { 0x27, KEY_RECORD }, { 0x26, KEY_FORWARD }, { 0x0d, KEY_1 }, { 0x0e, KEY_2 }, { 0x0f, KEY_3 }, { 0x10, KEY_4 }, { 0x11, KEY_5 }, { 0x12, KEY_6 }, { 0x13, KEY_7 }, { 0x14, KEY_8 }, { 0x15, KEY_9 }, { 0x17, KEY_0 }, /* these do not actually exist on this remote, but these scancodes * exist on all other Medion X10 remotes and adding them here allows * such remotes to be adequately usable with this keymap in case * this keymap is wrongly used with them (which is quite possible as * there are lots of different Medion X10 remotes): */ { 0x1a, KEY_UP }, { 0x22, KEY_DOWN }, }; static struct rc_map_list medion_x10_digitainer_map = { .map = { .scan = medion_x10_digitainer, .size = ARRAY_SIZE(medion_x10_digitainer), .rc_type = RC_TYPE_OTHER, .name = RC_MAP_MEDION_X10_DIGITAINER, } }; static int __init init_rc_map_medion_x10_digitainer(void) { return rc_map_register(&medion_x10_digitainer_map); } static void __exit exit_rc_map_medion_x10_digitainer(void) { 
rc_map_unregister(&medion_x10_digitainer_map); } module_init(init_rc_map_medion_x10_digitainer) module_exit(exit_rc_map_medion_x10_digitainer) MODULE_DESCRIPTION("Medion X10 RF remote keytable (Digitainer variant)"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>"); MODULE_LICENSE("GPL");
gpl-2.0
KAsp3rd/android_kernel_lge_msm8992
drivers/firewire/core-iso.c
4819
10521
/* * Isochronous I/O functionality: * - Isochronous DMA context management * - Isochronous bus resource management (channels, bandwidth), client side * * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/byteorder.h> #include "core.h" /* * Isochronous DMA context management */ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) { int i; buffer->page_count = 0; buffer->page_count_mapped = 0; buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), GFP_KERNEL); if (buffer->pages == NULL) return -ENOMEM; for (i = 0; i < page_count; i++) { buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (buffer->pages[i] == NULL) break; } buffer->page_count = i; if (i < page_count) { fw_iso_buffer_destroy(buffer, NULL); return -ENOMEM; } return 0; } int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, enum dma_data_direction direction) { dma_addr_t address; int i; buffer->direction = direction; for (i = 0; i < buffer->page_count; 
i++) { address = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE, direction); if (dma_mapping_error(card->device, address)) break; set_page_private(buffer->pages[i], address); } buffer->page_count_mapped = i; if (i < buffer->page_count) return -ENOMEM; return 0; } int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction) { int ret; ret = fw_iso_buffer_alloc(buffer, page_count); if (ret < 0) return ret; ret = fw_iso_buffer_map_dma(buffer, card, direction); if (ret < 0) fw_iso_buffer_destroy(buffer, card); return ret; } EXPORT_SYMBOL(fw_iso_buffer_init); int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) { unsigned long uaddr; int i, err; uaddr = vma->vm_start; for (i = 0; i < buffer->page_count; i++) { err = vm_insert_page(vma, uaddr, buffer->pages[i]); if (err) return err; uaddr += PAGE_SIZE; } return 0; } void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card) { int i; dma_addr_t address; for (i = 0; i < buffer->page_count_mapped; i++) { address = page_private(buffer->pages[i]); dma_unmap_page(card->device, address, PAGE_SIZE, buffer->direction); } for (i = 0; i < buffer->page_count; i++) __free_page(buffer->pages[i]); kfree(buffer->pages); buffer->pages = NULL; buffer->page_count = 0; buffer->page_count_mapped = 0; } EXPORT_SYMBOL(fw_iso_buffer_destroy); /* Convert DMA address to offset into virtually contiguous buffer. 
*/ size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) { size_t i; dma_addr_t address; ssize_t offset; for (i = 0; i < buffer->page_count; i++) { address = page_private(buffer->pages[i]); offset = (ssize_t)completed - (ssize_t)address; if (offset > 0 && offset <= PAGE_SIZE) return (i << PAGE_SHIFT) + offset; } return 0; } struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, int channel, int speed, size_t header_size, fw_iso_callback_t callback, void *callback_data) { struct fw_iso_context *ctx; ctx = card->driver->allocate_iso_context(card, type, channel, header_size); if (IS_ERR(ctx)) return ctx; ctx->card = card; ctx->type = type; ctx->channel = channel; ctx->speed = speed; ctx->header_size = header_size; ctx->callback.sc = callback; ctx->callback_data = callback_data; return ctx; } EXPORT_SYMBOL(fw_iso_context_create); void fw_iso_context_destroy(struct fw_iso_context *ctx) { ctx->card->driver->free_iso_context(ctx); } EXPORT_SYMBOL(fw_iso_context_destroy); int fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) { return ctx->card->driver->start_iso(ctx, cycle, sync, tags); } EXPORT_SYMBOL(fw_iso_context_start); int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) { return ctx->card->driver->set_iso_channels(ctx, channels); } int fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_packet *packet, struct fw_iso_buffer *buffer, unsigned long payload) { return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); } EXPORT_SYMBOL(fw_iso_context_queue); void fw_iso_context_queue_flush(struct fw_iso_context *ctx) { ctx->card->driver->flush_queue_iso(ctx); } EXPORT_SYMBOL(fw_iso_context_queue_flush); int fw_iso_context_flush_completions(struct fw_iso_context *ctx) { return ctx->card->driver->flush_iso_completions(ctx); } EXPORT_SYMBOL(fw_iso_context_flush_completions); int fw_iso_context_stop(struct fw_iso_context *ctx) { return 
ctx->card->driver->stop_iso(ctx); } EXPORT_SYMBOL(fw_iso_context_stop); /* * Isochronous bus resource management (channels, bandwidth), client side */ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, int bandwidth, bool allocate) { int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; __be32 data[2]; /* * On a 1394a IRM with low contention, try < 1 is enough. * On a 1394-1995 IRM, we need at least try < 2. * Let's just do try < 5. */ for (try = 0; try < 5; try++) { new = allocate ? old - bandwidth : old + bandwidth; if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) return -EBUSY; data[0] = cpu_to_be32(old); data[1] = cpu_to_be32(new); switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, data, 8)) { case RCODE_GENERATION: /* A generation change frees all bandwidth. */ return allocate ? -EAGAIN : bandwidth; case RCODE_COMPLETE: if (be32_to_cpup(data) == old) return bandwidth; old = be32_to_cpup(data); /* Fall through. */ } } return -EIO; } static int manage_channel(struct fw_card *card, int irm_id, int generation, u32 channels_mask, u64 offset, bool allocate) { __be32 bit, all, old; __be32 data[2]; int channel, ret = -EIO, retry = 5; old = all = allocate ? cpu_to_be32(~0) : 0; for (channel = 0; channel < 32; channel++) { if (!(channels_mask & 1 << channel)) continue; ret = -EBUSY; bit = cpu_to_be32(1 << (31 - channel)); if ((old & bit) != (all & bit)) continue; data[0] = old; data[1] = old ^ bit; switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_id, generation, SCODE_100, offset, data, 8)) { case RCODE_GENERATION: /* A generation change frees all channels. */ return allocate ? -EAGAIN : channel; case RCODE_COMPLETE: if (data[0] == old) return channel; old = data[0]; /* Is the IRM 1394a-2000 compliant? */ if ((data[0] & bit) == (data[1] & bit)) continue; /* 1394-1995 IRM, fall through to retry. 
*/ default: if (retry) { retry--; channel--; } else { ret = -EIO; } } } return ret; } static void deallocate_channel(struct fw_card *card, int irm_id, int generation, int channel) { u32 mask; u64 offset; mask = channel < 32 ? 1 << channel : 1 << (channel - 32); offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; manage_channel(card, irm_id, generation, mask, offset, false); } /** * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth * * In parameters: card, generation, channels_mask, bandwidth, allocate * Out parameters: channel, bandwidth * This function blocks (sleeps) during communication with the IRM. * * Allocates or deallocates at most one channel out of channels_mask. * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for * channel 0 and LSB for channel 63.) * Allocates or deallocates as many bandwidth allocation units as specified. * * Returns channel < 0 if no channel was allocated or deallocated. * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. * * If generation is stale, deallocations succeed but allocations fail with * channel = -EAGAIN. * * If channel allocation fails, no bandwidth will be allocated either. * If bandwidth allocation fails, no channel will be allocated either. * But deallocations of channel and bandwidth are tried independently * of each other's success. 
*/ void fw_iso_resource_manage(struct fw_card *card, int generation, u64 channels_mask, int *channel, int *bandwidth, bool allocate) { u32 channels_hi = channels_mask; /* channels 31...0 */ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ int irm_id, ret, c = -EINVAL; spin_lock_irq(&card->lock); irm_id = card->irm_node->node_id; spin_unlock_irq(&card->lock); if (channels_hi) c = manage_channel(card, irm_id, generation, channels_hi, CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); if (channels_lo && c < 0) { c = manage_channel(card, irm_id, generation, channels_lo, CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); if (c >= 0) c += 32; } *channel = c; if (allocate && channels_mask != 0 && c < 0) *bandwidth = 0; if (*bandwidth == 0) return; ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); if (ret < 0) *bandwidth = 0; if (allocate && ret < 0) { if (c >= 0) deallocate_channel(card, irm_id, generation, c); *channel = ret; } } EXPORT_SYMBOL(fw_iso_resource_manage);
gpl-2.0
TimofeyFox/GT-S7270_kernel
sound/soc/blackfin/bf5xx-i2s-pcm.c
5075
8506
/* * File: sound/soc/blackfin/bf5xx-i2s-pcm.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Tue June 06 2008 * Description: DMA driver for i2s codec * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/dma.h> #include "bf5xx-i2s-pcm.h" #include "bf5xx-sport.h" static void bf5xx_dma_irq(void *data) { struct snd_pcm_substream *pcm = data; snd_pcm_period_elapsed(pcm); } static const struct snd_pcm_hardware bf5xx_pcm_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, .period_bytes_min = 32, .period_bytes_max = 0x10000, .periods_min = 1, .periods_max = PAGE_SIZE/32, .buffer_bytes_max = 0x20000, /* 128 kbytes */ .fifo_size = 16, }; static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { size_t size = 
bf5xx_pcm_hardware.buffer_bytes_max; snd_pcm_lib_malloc_pages(substream, size); return 0; } static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream) { snd_pcm_lib_free_pages(substream); return 0; } static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; int period_bytes = frames_to_bytes(runtime, runtime->period_size); pr_debug("%s enter\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { sport_set_tx_callback(sport, bf5xx_dma_irq, substream); sport_config_tx_dma(sport, runtime->dma_area, runtime->periods, period_bytes); } else { sport_set_rx_callback(sport, bf5xx_dma_irq, substream); sport_config_rx_dma(sport, runtime->dma_area, runtime->periods, period_bytes); } return 0; } static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; int ret = 0; pr_debug("%s enter\n", __func__); switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_tx_start(sport); else sport_rx_start(sport); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_tx_stop(sport); else sport_rx_stop(sport); break; default: ret = -EINVAL; } return ret; } static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; unsigned int diff; snd_pcm_uframes_t frames; pr_debug("%s enter\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { diff = sport_curr_offset_tx(sport); } else { diff = sport_curr_offset_rx(sport); } /* * TX at least can report one frame beyond the end of the * buffer if we hit the wraparound case - clamp to within the * buffer as the 
ALSA APIs require. */ if (diff == snd_pcm_lib_buffer_bytes(substream)) diff = 0; frames = bytes_to_frames(substream->runtime, diff); return frames; } static int bf5xx_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dma_buffer *buf = &substream->dma_buffer; int ret; pr_debug("%s enter\n", __func__); snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware); ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto out; if (sport_handle != NULL) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_handle->tx_buf = buf->area; else sport_handle->rx_buf = buf->area; runtime->private_data = sport_handle; } else { pr_err("sport_handle is NULL\n"); return -1; } return 0; out: return ret; } static int bf5xx_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; size_t size = vma->vm_end - vma->vm_start; vma->vm_start = (unsigned long)runtime->dma_area; vma->vm_end = vma->vm_start + size; vma->vm_flags |= VM_SHARED; return 0 ; } static struct snd_pcm_ops bf5xx_pcm_i2s_ops = { .open = bf5xx_pcm_open, .ioctl = snd_pcm_lib_ioctl, .hw_params = bf5xx_pcm_hw_params, .hw_free = bf5xx_pcm_hw_free, .prepare = bf5xx_pcm_prepare, .trigger = bf5xx_pcm_trigger, .pointer = bf5xx_pcm_pointer, .mmap = bf5xx_pcm_mmap, }; static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = bf5xx_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_coherent(pcm->card->dev, size, &buf->addr, GFP_KERNEL); if 
(!buf->area) { pr_err("Failed to allocate dma memory - Please increase uncached DMA memory region\n"); return -ENOMEM; } buf->bytes = size; pr_debug("%s, area:%p, size:0x%08lx\n", __func__, buf->area, buf->bytes); return 0; } static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_coherent(NULL, buf->bytes, buf->area, 0); buf->area = NULL; } } static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32); static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret = 0; pr_debug("%s enter\n", __func__); if (!card->dev->dma_mask) card->dev->dma_mask = &bf5xx_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = bf5xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) goto out; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = bf5xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) goto out; } out: return ret; } static struct snd_soc_platform_driver bf5xx_i2s_soc_platform = { .ops = &bf5xx_pcm_i2s_ops, .pcm_new = bf5xx_pcm_i2s_new, .pcm_free = bf5xx_pcm_free_dma_buffers, }; static int __devinit bfin_i2s_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform); } static int __devexit bfin_i2s_soc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver bfin_i2s_pcm_driver = { .driver = { .name = "bfin-i2s-pcm-audio", .owner = THIS_MODULE, }, .probe = bfin_i2s_soc_platform_probe, .remove = __devexit_p(bfin_i2s_soc_platform_remove), }; 
module_platform_driver(bfin_i2s_pcm_driver); MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("ADI Blackfin I2S PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
AndroidSymmetry/Old_Sparky
drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
7635
3619
/* msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
  Keycodes for remote on the MSI TV@nywhere Plus.  The controller IC on the
  card is marked "KS003".  The controller is I2C at address 0x30, but does
  not seem to respond to probes until a read is performed from a valid
  device.  I don't know why...

  Note: This remote may be of similar or identical design to the
  Pixelview remote (?).  The raw codes and duplicate button codes
  appear to be the same.

  Henry Wong <henry@stuffedcow.net>
  Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
*/

static struct rc_map_table msi_tvanywhere_plus[] = {

/*  ---- Remote Button Layout ----

    POWER   SOURCE  SCAN    MUTE
    TV/FM   1       2       3
    |>      4       5       6
    <|      7       8       9
    ^^UP    0       +       RECALL
    vvDN    RECORD  STOP    PLAY

    MINIMIZE          ZOOM

              CH+
    VOL-              VOL+
              CH-

    SNAPSHOT           MTS

     <<      FUNC    >>     RESET
*/

	{ 0x01, KEY_1 },		/* 1 */
	{ 0x0b, KEY_2 },		/* 2 */
	{ 0x1b, KEY_3 },		/* 3 */
	{ 0x05, KEY_4 },		/* 4 */
	{ 0x09, KEY_5 },		/* 5 */
	{ 0x15, KEY_6 },		/* 6 */
	{ 0x06, KEY_7 },		/* 7 */
	{ 0x0a, KEY_8 },		/* 8 */
	{ 0x12, KEY_9 },		/* 9 */
	{ 0x02, KEY_0 },		/* 0 */
	{ 0x10, KEY_KPPLUS },		/* + */
	{ 0x13, KEY_AGAIN },		/* Recall */

	{ 0x1e, KEY_POWER },		/* Power */
	{ 0x07, KEY_VIDEO },		/* Source */
	{ 0x1c, KEY_SEARCH },		/* Scan */
	{ 0x18, KEY_MUTE },		/* Mute */

	{ 0x03, KEY_RADIO },		/* TV/FM */

	/* The next four keys are duplicates that appear to send the
	   same IR code as Ch+, Ch-, >>, and << .  The raw code assigned
	   to them is the actual code + 0x20 - they will never be
	   detected as such unless some way is discovered to distinguish
	   these buttons from those that have the same code. */
	{ 0x3f, KEY_RIGHT },		/* |> and Ch+ */
	{ 0x37, KEY_LEFT },		/* <| and Ch- */
	{ 0x2c, KEY_UP },		/* ^^Up and >> */
	{ 0x24, KEY_DOWN },		/* vvDn and << */

	{ 0x00, KEY_RECORD },		/* Record */
	{ 0x08, KEY_STOP },		/* Stop */
	{ 0x11, KEY_PLAY },		/* Play */

	{ 0x0f, KEY_CLOSE },		/* Minimize */
	{ 0x19, KEY_ZOOM },		/* Zoom */
	{ 0x1a, KEY_CAMERA },		/* Snapshot */
	{ 0x0d, KEY_LANGUAGE },		/* MTS */

	{ 0x14, KEY_VOLUMEDOWN },	/* Vol- */
	{ 0x16, KEY_VOLUMEUP },		/* Vol+ */
	{ 0x17, KEY_CHANNELDOWN },	/* Ch- */
	{ 0x1f, KEY_CHANNELUP },	/* Ch+ */

	{ 0x04, KEY_REWIND },		/* << */
	{ 0x0e, KEY_MENU },		/* Function */
	{ 0x0c, KEY_FASTFORWARD },	/* >> */
	{ 0x1d, KEY_RESTART },		/* Reset */
};

static struct rc_map_list msi_tvanywhere_plus_map = {
	.map = {
		.scan    = msi_tvanywhere_plus,
		.size    = ARRAY_SIZE(msi_tvanywhere_plus),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_MSI_TVANYWHERE_PLUS,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_msi_tvanywhere_plus(void)
{
	return rc_map_register(&msi_tvanywhere_plus_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_msi_tvanywhere_plus(void)
{
	rc_map_unregister(&msi_tvanywhere_plus_map);
}

module_init(init_rc_map_msi_tvanywhere_plus)
module_exit(exit_rc_map_msi_tvanywhere_plus)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
kogone/android_kernel_lge_hammerhead
drivers/staging/comedi/drivers/dt2817.c
8147
4940
/*
    comedi/drivers/dt2817.c
    Hardware driver for Data Translation DT2817

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 1998 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: dt2817
Description: Data Translation DT2817
Author: ds
Status: complete
Devices: [Data Translation] DT2817 (dt2817)

A very simple digital I/O card.  Four banks of 8 lines, each bank
is configurable for input or output.  One wonders why it takes a
50 page manual to describe this thing.

The driver (which, btw, is much less than 50 pages) has 1 subdevice
with 32 channels, configurable in groups of 8.

Configuration options:
[0] - I/O port base base address
*/

#include "../comedidev.h"

#include <linux/ioport.h>

/* I/O region size: one config register plus four 8-bit data ports */
#define DT2817_SIZE 5

/* Register offsets from dev->iobase */
#define DT2817_CR 0		/* output-enable (bank direction) register */
#define DT2817_DATA 1		/* first of the four data ports */

static int dt2817_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int dt2817_detach(struct comedi_device *dev);
static struct comedi_driver driver_dt2817 = {
	.driver_name = "dt2817",
	.module = THIS_MODULE,
	.attach = dt2817_attach,
	.detach = dt2817_detach,
};

static int __init driver_dt2817_init_module(void)
{
	return comedi_driver_register(&driver_dt2817);
}

static void __exit driver_dt2817_cleanup_module(void)
{
	comedi_driver_unregister(&driver_dt2817);
}

module_init(driver_dt2817_init_module);
module_exit(driver_dt2817_cleanup_module);

/*
 * Configure the direction of the 8-channel bank containing the given
 * channel.  data[0] != 0 selects output, 0 selects input.  The output
 * enables of all four banks are recomputed from s->io_bits and written
 * to the config register in one go.
 */
static int dt2817_dio_insn_config(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn, unsigned int *data)
{
	int mask;
	int chan;
	int oe = 0;

	if (insn->n != 1)
		return -EINVAL;

	/* Map the channel to its bank's 8-bit mask in io_bits. */
	chan = CR_CHAN(insn->chanspec);
	if (chan < 8)
		mask = 0xff;
	else if (chan < 16)
		mask = 0xff00;
	else if (chan < 24)
		mask = 0xff0000;
	else
		mask = 0xff000000;

	if (data[0])
		s->io_bits |= mask;
	else
		s->io_bits &= ~mask;

	/* One output-enable bit per bank in the config register. */
	if (s->io_bits & 0x000000ff)
		oe |= 0x1;
	if (s->io_bits & 0x0000ff00)
		oe |= 0x2;
	if (s->io_bits & 0x00ff0000)
		oe |= 0x4;
	if (s->io_bits & 0xff000000)
		oe |= 0x8;

	outb(oe, dev->iobase + DT2817_CR);

	return 1;
}

/*
 * Write data[1] under mask data[0] to the output lines, then read all
 * 32 lines back into data[1].  Only data ports whose output bits
 * actually changed are rewritten, to keep the fast path short.
 */
static int dt2817_dio_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	unsigned int changed;

	/* It's questionable whether it is more important in
	 * a driver like this to be deterministic or fast.
	 * We choose fast. */

	if (data[0]) {
		changed = s->state;
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		changed ^= s->state;
		changed &= s->io_bits;	/* only output banks matter */
		if (changed & 0x000000ff)
			outb(s->state & 0xff, dev->iobase + DT2817_DATA + 0);
		if (changed & 0x0000ff00)
			outb((s->state >> 8) & 0xff,
			     dev->iobase + DT2817_DATA + 1);
		if (changed & 0x00ff0000)
			outb((s->state >> 16) & 0xff,
			     dev->iobase + DT2817_DATA + 2);
		if (changed & 0xff000000)
			outb((s->state >> 24) & 0xff,
			     dev->iobase + DT2817_DATA + 3);
	}

	data[1] = inb(dev->iobase + DT2817_DATA + 0);
	data[1] |= (inb(dev->iobase + DT2817_DATA + 1) << 8);
	data[1] |= (inb(dev->iobase + DT2817_DATA + 2) << 16);
	data[1] |= (inb(dev->iobase + DT2817_DATA + 3) << 24);

	return 2;
}

/*
 * Attach: claim the I/O region given in option [0], set up the single
 * 32-channel DIO subdevice and reset all banks to input.
 */
static int dt2817_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int ret;
	struct comedi_subdevice *s;
	unsigned long iobase;

	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: dt2817: 0x%04lx\n", dev->minor, iobase);
	if (!request_region(iobase, DT2817_SIZE, "dt2817")) {
		/* was a bare printk() with no log level */
		printk(KERN_WARNING "comedi%d: dt2817: I/O port conflict\n",
		       dev->minor);
		return -EIO;
	}
	dev->iobase = iobase;
	dev->board_name = "dt2817";

	ret = alloc_subdevices(dev, 1);
	if (ret < 0)
		return ret;

	s = dev->subdevices + 0;

	s->n_chan = 32;
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->range_table = &range_digital;
	s->maxdata = 1;
	s->insn_bits = dt2817_dio_insn_bits;
	s->insn_config = dt2817_dio_insn_config;

	s->state = 0;
	outb(0, dev->iobase + DT2817_CR);	/* all banks input */

	return 0;
}

/* Detach: release the I/O region if it was claimed. */
static int dt2817_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: dt2817: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, DT2817_SIZE);

	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
smarkwell/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/net/sunrpc/auth_null.c
10707
2582
/* * linux/net/sunrpc/auth_null.c * * AUTH_NULL authentication. Really :-) * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/module.h> #include <linux/sunrpc/clnt.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif static struct rpc_auth null_auth; static struct rpc_cred null_cred; static struct rpc_auth * nul_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) { atomic_inc(&null_auth.au_count); return &null_auth; } static void nul_destroy(struct rpc_auth *auth) { } /* * Lookup NULL creds for current process */ static struct rpc_cred * nul_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { return get_rpccred(&null_cred); } /* * Destroy cred handle. */ static void nul_destroy_cred(struct rpc_cred *cred) { } /* * Match cred handle against current process */ static int nul_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags) { return 1; } /* * Marshal credential. */ static __be32 * nul_marshal(struct rpc_task *task, __be32 *p) { *p++ = htonl(RPC_AUTH_NULL); *p++ = 0; *p++ = htonl(RPC_AUTH_NULL); *p++ = 0; return p; } /* * Refresh credential. 
This is a no-op for AUTH_NULL */ static int nul_refresh(struct rpc_task *task) { set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags); return 0; } static __be32 * nul_validate(struct rpc_task *task, __be32 *p) { rpc_authflavor_t flavor; u32 size; flavor = ntohl(*p++); if (flavor != RPC_AUTH_NULL) { printk("RPC: bad verf flavor: %u\n", flavor); return NULL; } size = ntohl(*p++); if (size != 0) { printk("RPC: bad verf size: %u\n", size); return NULL; } return p; } const struct rpc_authops authnull_ops = { .owner = THIS_MODULE, .au_flavor = RPC_AUTH_NULL, .au_name = "NULL", .create = nul_create, .destroy = nul_destroy, .lookup_cred = nul_lookup_cred, }; static struct rpc_auth null_auth = { .au_cslack = 4, .au_rslack = 2, .au_ops = &authnull_ops, .au_flavor = RPC_AUTH_NULL, .au_count = ATOMIC_INIT(0), }; static const struct rpc_credops null_credops = { .cr_name = "AUTH_NULL", .crdestroy = nul_destroy_cred, .crbind = rpcauth_generic_bind_cred, .crmatch = nul_match, .crmarshal = nul_marshal, .crrefresh = nul_refresh, .crvalidate = nul_validate, }; static struct rpc_cred null_cred = { .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), .cr_auth = &null_auth, .cr_ops = &null_credops, .cr_count = ATOMIC_INIT(1), .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, #ifdef RPC_DEBUG .cr_magic = RPCAUTH_CRED_MAGIC, #endif };
gpl-2.0
diego-ch/android_kernel_samsung_u8500
arch/x86/math-emu/errors.c
12243
18106
/*---------------------------------------------------------------------------+ | errors.c | | | | The error handling functions for wm-FPU-emu | | | | Copyright (C) 1992,1993,1994,1996 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@jacobi.maths.monash.edu.au | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Note: | | The file contains code which accesses user memory. | | Emulator static data may change when user memory is accessed, due to | | other processes using the emulator while swapping is in progress. | +---------------------------------------------------------------------------*/ #include <linux/signal.h> #include <asm/uaccess.h> #include "fpu_emu.h" #include "fpu_system.h" #include "exception.h" #include "status_w.h" #include "control_w.h" #include "reg_constant.h" #include "version.h" /* */ #undef PRINT_MESSAGES /* */ #if 0 void Un_impl(void) { u_char byte1, FPU_modrm; unsigned long address = FPU_ORIG_EIP; RE_ENTRANT_CHECK_OFF; /* No need to check access_ok(), we have previously fetched these bytes. */ printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address); if (FPU_CS == __USER_CS) { while (1) { FPU_get_user(byte1, (u_char __user *) address); if ((byte1 & 0xf8) == 0xd8) break; printk("[%02x]", byte1); address++; } printk("%02x ", byte1); FPU_get_user(FPU_modrm, 1 + (u_char __user *) address); if (FPU_modrm >= 0300) printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7); else printk("/%d\n", (FPU_modrm >> 3) & 7); } else { printk("cs selector = %04x\n", FPU_CS); } RE_ENTRANT_CHECK_ON; EXCEPTION(EX_Invalid); } #endif /* 0 */ /* Called for opcodes which are illegal and which are known to result in a SIGILL with a real 80486. 
*/ void FPU_illegal(void) { math_abort(FPU_info, SIGILL); } void FPU_printall(void) { int i; static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty", "DeNorm", "Inf", "NaN" }; u_char byte1, FPU_modrm; unsigned long address = FPU_ORIG_EIP; RE_ENTRANT_CHECK_OFF; /* No need to check access_ok(), we have previously fetched these bytes. */ printk("At %p:", (void *)address); if (FPU_CS == __USER_CS) { #define MAX_PRINTED_BYTES 20 for (i = 0; i < MAX_PRINTED_BYTES; i++) { FPU_get_user(byte1, (u_char __user *) address); if ((byte1 & 0xf8) == 0xd8) { printk(" %02x", byte1); break; } printk(" [%02x]", byte1); address++; } if (i == MAX_PRINTED_BYTES) printk(" [more..]\n"); else { FPU_get_user(FPU_modrm, 1 + (u_char __user *) address); if (FPU_modrm >= 0300) printk(" %02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7); else printk(" /%d, mod=%d rm=%d\n", (FPU_modrm >> 3) & 7, (FPU_modrm >> 6) & 3, FPU_modrm & 7); } } else { printk("%04x\n", FPU_CS); } partial_status = status_word(); #ifdef DEBUGGING if (partial_status & SW_Backward) printk("SW: backward compatibility\n"); if (partial_status & SW_C3) printk("SW: condition bit 3\n"); if (partial_status & SW_C2) printk("SW: condition bit 2\n"); if (partial_status & SW_C1) printk("SW: condition bit 1\n"); if (partial_status & SW_C0) printk("SW: condition bit 0\n"); if (partial_status & SW_Summary) printk("SW: exception summary\n"); if (partial_status & SW_Stack_Fault) printk("SW: stack fault\n"); if (partial_status & SW_Precision) printk("SW: loss of precision\n"); if (partial_status & SW_Underflow) printk("SW: underflow\n"); if (partial_status & SW_Overflow) printk("SW: overflow\n"); if (partial_status & SW_Zero_Div) printk("SW: divide by zero\n"); if (partial_status & SW_Denorm_Op) printk("SW: denormalized operand\n"); if (partial_status & SW_Invalid) printk("SW: invalid operation\n"); #endif /* DEBUGGING */ printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 
1 : 0, /* busy */ (partial_status & 0x3800) >> 11, /* stack top pointer */ partial_status & 0x80 ? 1 : 0, /* Error summary status */ partial_status & 0x40 ? 1 : 0, /* Stack flag */ partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0, /* cc */ partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0, /* cc */ partial_status & SW_Precision ? 1 : 0, partial_status & SW_Underflow ? 1 : 0, partial_status & SW_Overflow ? 1 : 0, partial_status & SW_Zero_Div ? 1 : 0, partial_status & SW_Denorm_Op ? 1 : 0, partial_status & SW_Invalid ? 1 : 0); printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d ef=%d%d%d%d%d%d\n", control_word & 0x1000 ? 1 : 0, (control_word & 0x800) >> 11, (control_word & 0x400) >> 10, (control_word & 0x200) >> 9, (control_word & 0x100) >> 8, control_word & 0x80 ? 1 : 0, control_word & SW_Precision ? 1 : 0, control_word & SW_Underflow ? 1 : 0, control_word & SW_Overflow ? 1 : 0, control_word & SW_Zero_Div ? 1 : 0, control_word & SW_Denorm_Op ? 1 : 0, control_word & SW_Invalid ? 1 : 0); for (i = 0; i < 8; i++) { FPU_REG *r = &st(i); u_char tagi = FPU_gettagi(i); switch (tagi) { case TAG_Empty: continue; break; case TAG_Zero: case TAG_Special: tagi = FPU_Special(r); case TAG_Valid: printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, getsign(r) ? '-' : '+', (long)(r->sigh >> 16), (long)(r->sigh & 0xFFFF), (long)(r->sigl >> 16), (long)(r->sigl & 0xFFFF), exponent(r) - EXP_BIAS + 1); break; default: printk("Whoops! 
Error in errors.c: tag%d is %d ", i, tagi); continue; break; } printk("%s\n", tag_desc[(int)(unsigned)tagi]); } RE_ENTRANT_CHECK_ON; } static struct { int type; const char *name; } exception_names[] = { { EX_StackOver, "stack overflow"}, { EX_StackUnder, "stack underflow"}, { EX_Precision, "loss of precision"}, { EX_Underflow, "underflow"}, { EX_Overflow, "overflow"}, { EX_ZeroDiv, "divide by zero"}, { EX_Denormal, "denormalized operand"}, { EX_Invalid, "invalid operation"}, { EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION}, { 0, NULL} }; /* EX_INTERNAL is always given with a code which indicates where the error was detected. Internal error types: 0x14 in fpu_etc.c 0x1nn in a *.c file: 0x101 in reg_add_sub.c 0x102 in reg_mul.c 0x104 in poly_atan.c 0x105 in reg_mul.c 0x107 in fpu_trig.c 0x108 in reg_compare.c 0x109 in reg_compare.c 0x110 in reg_add_sub.c 0x111 in fpe_entry.c 0x112 in fpu_trig.c 0x113 in errors.c 0x115 in fpu_trig.c 0x116 in fpu_trig.c 0x117 in fpu_trig.c 0x118 in fpu_trig.c 0x119 in fpu_trig.c 0x120 in poly_atan.c 0x121 in reg_compare.c 0x122 in reg_compare.c 0x123 in reg_compare.c 0x125 in fpu_trig.c 0x126 in fpu_entry.c 0x127 in poly_2xm1.c 0x128 in fpu_entry.c 0x129 in fpu_entry.c 0x130 in get_address.c 0x131 in get_address.c 0x132 in get_address.c 0x133 in get_address.c 0x140 in load_store.c 0x141 in load_store.c 0x150 in poly_sin.c 0x151 in poly_sin.c 0x160 in reg_ld_str.c 0x161 in reg_ld_str.c 0x162 in reg_ld_str.c 0x163 in reg_ld_str.c 0x164 in reg_ld_str.c 0x170 in fpu_tags.c 0x171 in fpu_tags.c 0x172 in fpu_tags.c 0x180 in reg_convert.c 0x2nn in an *.S file: 0x201 in reg_u_add.S 0x202 in reg_u_div.S 0x203 in reg_u_div.S 0x204 in reg_u_div.S 0x205 in reg_u_mul.S 0x206 in reg_u_sub.S 0x207 in wm_sqrt.S 0x208 in reg_div.S 0x209 in reg_u_sub.S 0x210 in reg_u_sub.S 0x211 in reg_u_sub.S 0x212 in reg_u_sub.S 0x213 in wm_sqrt.S 0x214 in wm_sqrt.S 0x215 in wm_sqrt.S 0x220 in reg_norm.S 0x221 in reg_norm.S 0x230 in reg_round.S 0x231 in reg_round.S 0x232 
in reg_round.S 0x233 in reg_round.S 0x234 in reg_round.S 0x235 in reg_round.S 0x236 in reg_round.S 0x240 in div_Xsig.S 0x241 in div_Xsig.S 0x242 in div_Xsig.S */ asmlinkage void FPU_exception(int n) { int i, int_type; int_type = 0; /* Needed only to stop compiler warnings */ if (n & EX_INTERNAL) { int_type = n - EX_INTERNAL; n = EX_INTERNAL; /* Set lots of exception bits! */ partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward); } else { /* Extract only the bits which we use to set the status word */ n &= (SW_Exc_Mask); /* Set the corresponding exception bit */ partial_status |= n; /* Set summary bits iff exception isn't masked */ if (partial_status & ~control_word & CW_Exceptions) partial_status |= (SW_Summary | SW_Backward); if (n & (SW_Stack_Fault | EX_Precision)) { if (!(n & SW_C1)) /* This bit distinguishes over- from underflow for a stack fault, and roundup from round-down for precision loss. */ partial_status &= ~SW_C1; } } RE_ENTRANT_CHECK_OFF; if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) { #ifdef PRINT_MESSAGES /* My message from the sponsor */ printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n"); #endif /* PRINT_MESSAGES */ /* Get a name string for error reporting */ for (i = 0; exception_names[i].type; i++) if ((exception_names[i].type & n) == exception_names[i].type) break; if (exception_names[i].type) { #ifdef PRINT_MESSAGES printk("FP Exception: %s!\n", exception_names[i].name); #endif /* PRINT_MESSAGES */ } else printk("FPU emulator: Unknown Exception: 0x%04x!\n", n); if (n == EX_INTERNAL) { printk("FPU emulator: Internal error type 0x%04x\n", int_type); FPU_printall(); } #ifdef PRINT_MESSAGES else FPU_printall(); #endif /* PRINT_MESSAGES */ /* * The 80486 generates an interrupt on the next non-control FPU * instruction. So we need some means of flagging it. * We use the ES (Error Summary) bit for this. 
*/ } RE_ENTRANT_CHECK_ON; #ifdef __DEBUG__ math_abort(FPU_info, SIGFPE); #endif /* __DEBUG__ */ } /* Real operation attempted on a NaN. */ /* Returns < 0 if the exception is unmasked */ int real_1op_NaN(FPU_REG *a) { int signalling, isNaN; isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000); /* The default result for the case of two "equal" NaNs (signs may differ) is chosen to reproduce 80486 behaviour */ signalling = isNaN && !(a->sigh & 0x40000000); if (!signalling) { if (!isNaN) { /* pseudo-NaN, or other unsupported? */ if (control_word & CW_Invalid) { /* Masked response */ reg_copy(&CONST_QNaN, a); } EXCEPTION(EX_Invalid); return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special; } return TAG_Special; } if (control_word & CW_Invalid) { /* The masked response */ if (!(a->sigh & 0x80000000)) { /* pseudo-NaN ? */ reg_copy(&CONST_QNaN, a); } /* ensure a Quiet NaN */ a->sigh |= 0x40000000; } EXCEPTION(EX_Invalid); return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special; } /* Real operation attempted on two operands, one a NaN. */ /* Returns < 0 if the exception is unmasked */ int real_2op_NaN(FPU_REG const *b, u_char tagb, int deststnr, FPU_REG const *defaultNaN) { FPU_REG *dest = &st(deststnr); FPU_REG const *a = dest; u_char taga = FPU_gettagi(deststnr); FPU_REG const *x; int signalling, unsupported; if (taga == TAG_Special) taga = FPU_Special(a); if (tagb == TAG_Special) tagb = FPU_Special(b); /* TW_NaN is also used for unsupported data types. */ unsupported = ((taga == TW_NaN) && !((exponent(a) == EXP_OVER) && (a->sigh & 0x80000000))) || ((tagb == TW_NaN) && !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000))); if (unsupported) { if (control_word & CW_Invalid) { /* Masked response */ FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr); } EXCEPTION(EX_Invalid); return (!(control_word & CW_Invalid) ? 
FPU_Exception : 0) | TAG_Special; } if (taga == TW_NaN) { x = a; if (tagb == TW_NaN) { signalling = !(a->sigh & b->sigh & 0x40000000); if (significand(b) > significand(a)) x = b; else if (significand(b) == significand(a)) { /* The default result for the case of two "equal" NaNs (signs may differ) is chosen to reproduce 80486 behaviour */ x = defaultNaN; } } else { /* return the quiet version of the NaN in a */ signalling = !(a->sigh & 0x40000000); } } else #ifdef PARANOID if (tagb == TW_NaN) #endif /* PARANOID */ { signalling = !(b->sigh & 0x40000000); x = b; } #ifdef PARANOID else { signalling = 0; EXCEPTION(EX_INTERNAL | 0x113); x = &CONST_QNaN; } #endif /* PARANOID */ if ((!signalling) || (control_word & CW_Invalid)) { if (!x) x = b; if (!(x->sigh & 0x80000000)) /* pseudo-NaN ? */ x = &CONST_QNaN; FPU_copy_to_regi(x, TAG_Special, deststnr); if (!signalling) return TAG_Special; /* ensure a Quiet NaN */ dest->sigh |= 0x40000000; } EXCEPTION(EX_Invalid); return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special; } /* Invalid arith operation on Valid registers */ /* Returns < 0 if the exception is unmasked */ asmlinkage int arith_invalid(int deststnr) { EXCEPTION(EX_Invalid); if (control_word & CW_Invalid) { /* The masked response */ FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr); } return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid; } /* Divide a finite number by zero */ asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign) { FPU_REG *dest = &st(deststnr); int tag = TAG_Valid; if (control_word & CW_ZeroDiv) { /* The masked response */ FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr); setsign(dest, sign); tag = TAG_Special; } EXCEPTION(EX_ZeroDiv); return (!(control_word & CW_ZeroDiv) ? 
FPU_Exception : 0) | tag; } /* This may be called often, so keep it lean */ int set_precision_flag(int flags) { if (control_word & CW_Precision) { partial_status &= ~(SW_C1 & flags); partial_status |= flags; /* The masked response */ return 0; } else { EXCEPTION(flags); return 1; } } /* This may be called often, so keep it lean */ asmlinkage void set_precision_flag_up(void) { if (control_word & CW_Precision) partial_status |= (SW_Precision | SW_C1); /* The masked response */ else EXCEPTION(EX_Precision | SW_C1); } /* This may be called often, so keep it lean */ asmlinkage void set_precision_flag_down(void) { if (control_word & CW_Precision) { /* The masked response */ partial_status &= ~SW_C1; partial_status |= SW_Precision; } else EXCEPTION(EX_Precision); } asmlinkage int denormal_operand(void) { if (control_word & CW_Denormal) { /* The masked response */ partial_status |= SW_Denorm_Op; return TAG_Special; } else { EXCEPTION(EX_Denormal); return TAG_Special | FPU_Exception; } } asmlinkage int arith_overflow(FPU_REG *dest) { int tag = TAG_Valid; if (control_word & CW_Overflow) { /* The masked response */ /* ###### The response here depends upon the rounding mode */ reg_copy(&CONST_INF, dest); tag = TAG_Special; } else { /* Subtract the magic number from the exponent */ addexponent(dest, (-3 * (1 << 13))); } EXCEPTION(EX_Overflow); if (control_word & CW_Overflow) { /* The overflow exception is masked. */ /* By definition, precision is lost. The roundup bit (C1) is also set because we have "rounded" upwards to Infinity. */ EXCEPTION(EX_Precision | SW_C1); return tag; } return tag; } asmlinkage int arith_underflow(FPU_REG *dest) { int tag = TAG_Valid; if (control_word & CW_Underflow) { /* The masked response */ if (exponent16(dest) <= EXP_UNDER - 63) { reg_copy(&CONST_Z, dest); partial_status &= ~SW_C1; /* Round down. */ tag = TAG_Zero; } else { stdexp(dest); } } else { /* Add the magic number to the exponent. 
*/ addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias); } EXCEPTION(EX_Underflow); if (control_word & CW_Underflow) { /* The underflow exception is masked. */ EXCEPTION(EX_Precision); return tag; } return tag; } void FPU_stack_overflow(void) { if (control_word & CW_Invalid) { /* The masked response */ top--; FPU_copy_to_reg0(&CONST_QNaN, TAG_Special); } EXCEPTION(EX_StackOver); return; } void FPU_stack_underflow(void) { if (control_word & CW_Invalid) { /* The masked response */ FPU_copy_to_reg0(&CONST_QNaN, TAG_Special); } EXCEPTION(EX_StackUnder); return; } void FPU_stack_underflow_i(int i) { if (control_word & CW_Invalid) { /* The masked response */ FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i); } EXCEPTION(EX_StackUnder); return; } void FPU_stack_underflow_pop(int i) { if (control_word & CW_Invalid) { /* The masked response */ FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i); FPU_pop(); } EXCEPTION(EX_StackUnder); return; }
gpl-2.0
shakalaca/ASUS_ZenFone_A500KL
kernel/drivers/macintosh/via-pmu-event.c
15571
2121
/* * via-pmu event device for reporting some events that come through the PMU * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/input.h> #include <linux/adb.h> #include <linux/pmu.h> #include "via-pmu-event.h" static struct input_dev *pmu_input_dev; static int __init via_pmu_event_init(void) { int err; /* do other models report button/lid status? 
*/ if (pmu_get_model() != PMU_KEYLARGO_BASED) return -ENODEV; pmu_input_dev = input_allocate_device(); if (!pmu_input_dev) return -ENOMEM; pmu_input_dev->name = "PMU"; pmu_input_dev->id.bustype = BUS_HOST; pmu_input_dev->id.vendor = 0x0001; pmu_input_dev->id.product = 0x0001; pmu_input_dev->id.version = 0x0100; set_bit(EV_KEY, pmu_input_dev->evbit); set_bit(EV_SW, pmu_input_dev->evbit); set_bit(KEY_POWER, pmu_input_dev->keybit); set_bit(SW_LID, pmu_input_dev->swbit); err = input_register_device(pmu_input_dev); if (err) input_free_device(pmu_input_dev); return err; } void via_pmu_event(int key, int down) { if (unlikely(!pmu_input_dev)) return; switch (key) { case PMU_EVT_POWER: input_report_key(pmu_input_dev, KEY_POWER, down); break; case PMU_EVT_LID: input_report_switch(pmu_input_dev, SW_LID, down); break; default: /* no such key handled */ return; } input_sync(pmu_input_dev); } late_initcall(via_pmu_event_init);
gpl-2.0
lilinj2000/linux-4.3.3
drivers/block/paride/ppc6lnx.c
15571
14693
/* ppc6lnx.c (c) 2001 Micro Solutions Inc. Released under the terms of the GNU General Public license ppc6lnx.c is a par of the protocol driver for the Micro Solutions "BACKPACK" parallel port IDE adapter (Works on Series 6 drives) */ //*************************************************************************** // PPC 6 Code in C sanitized for LINUX // Original x86 ASM by Ron, Converted to C by Clive //*************************************************************************** #define port_stb 1 #define port_afd 2 #define cmd_stb port_afd #define port_init 4 #define data_stb port_init #define port_sel 8 #define port_int 16 #define port_dir 0x20 #define ECR_EPP 0x80 #define ECR_BI 0x20 //*************************************************************************** // 60772 Commands #define ACCESS_REG 0x00 #define ACCESS_PORT 0x40 #define ACCESS_READ 0x00 #define ACCESS_WRITE 0x20 // 60772 Command Prefix #define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation #define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits #define PREFIX_IO16 0x01 // perform 16-bit wide I/O #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write #define PREFIX_BLK 0x08 // enable block transfer mode // 60772 Registers #define REG_STATUS 0x00 // status register #define STATUS_IRQA 0x01 // Peripheral IRQA line #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit #define REG_VERSION 0x01 // PPC version register (read) #define REG_HWCFG 0x02 // Hardware Config register #define REG_RAMSIZE 0x03 // Size of RAM Buffer #define RAMSIZE_128K 0x02 #define REG_EEPROM 0x06 // EEPROM control register #define EEPROM_SK 0x01 // eeprom SK bit #define EEPROM_DI 0x02 // eeprom DI bit #define EEPROM_CS 0x04 // eeprom CS bit #define EEPROM_EN 0x08 // eeprom output enable #define REG_BLKSIZE 0x08 // Block transfer len (24 bit) //*************************************************************************** typedef struct ppc_storage { u16 lpt_addr; // LPT base address 
u8 ppc_id; u8 mode; // operating mode // 0 = PPC Uni SW // 1 = PPC Uni FW // 2 = PPC Bi SW // 3 = PPC Bi FW // 4 = EPP Byte // 5 = EPP Word // 6 = EPP Dword u8 ppc_flags; u8 org_data; // original LPT data port contents u8 org_ctrl; // original LPT control port contents u8 cur_ctrl; // current control port contents } Interface; //*************************************************************************** // ppc_flags #define fifo_wait 0x10 //*************************************************************************** // DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES #define PPCMODE_UNI_SW 0 #define PPCMODE_UNI_FW 1 #define PPCMODE_BI_SW 2 #define PPCMODE_BI_FW 3 #define PPCMODE_EPP_BYTE 4 #define PPCMODE_EPP_WORD 5 #define PPCMODE_EPP_DWORD 6 //*************************************************************************** static int ppc6_select(Interface *ppc); static void ppc6_deselect(Interface *ppc); static void ppc6_send_cmd(Interface *ppc, u8 cmd); static void ppc6_wr_data_byte(Interface *ppc, u8 data); static u8 ppc6_rd_data_byte(Interface *ppc); static u8 ppc6_rd_port(Interface *ppc, u8 port); static void ppc6_wr_port(Interface *ppc, u8 port, u8 data); static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count); static void ppc6_wait_for_fifo(Interface *ppc); static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count); static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length); static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length); static void ppc6_wr_extout(Interface *ppc, u8 regdata); static int ppc6_open(Interface *ppc); static void ppc6_close(Interface *ppc); //*************************************************************************** static int ppc6_select(Interface *ppc) { u8 i, j, k; i = inb(ppc->lpt_addr + 1); if (i & 1) outb(i, ppc->lpt_addr + 1); ppc->org_data = inb(ppc->lpt_addr); ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl ppc->cur_ctrl = 
ppc->org_ctrl; ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); if (ppc->org_data == 'b') outb('x', ppc->lpt_addr); outb('b', ppc->lpt_addr); outb('p', ppc->lpt_addr); outb(ppc->ppc_id, ppc->lpt_addr); outb(~ppc->ppc_id,ppc->lpt_addr); ppc->cur_ctrl &= ~port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); i = ppc->mode & 0x0C; if (i == 0) i = (ppc->mode & 2) | 1; outb(i, ppc->lpt_addr); ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY ppc->cur_ctrl |= port_afd; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); j = ((i & 0x08) << 4) | ((i & 0x07) << 3); k = inb(ppc->lpt_addr + 1) & 0xB8; if (j == k) { ppc->cur_ctrl &= ~port_afd; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8; if (j == k) { if (i & 4) // EPP ppc->cur_ctrl &= ~(port_sel | port_init); else // PPC/ECP ppc->cur_ctrl &= ~port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); return(1); } } outb(ppc->org_ctrl, ppc->lpt_addr + 2); outb(ppc->org_data, ppc->lpt_addr); return(0); // FAIL } //*************************************************************************** static void ppc6_deselect(Interface *ppc) { if (ppc->mode & 4) // EPP ppc->cur_ctrl |= port_init; else // PPC/ECP ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); outb(ppc->org_data, ppc->lpt_addr); outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2); outb(ppc->org_ctrl, ppc->lpt_addr + 2); } //*************************************************************************** static void ppc6_send_cmd(Interface *ppc, u8 cmd) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : case PPCMODE_BI_SW : case PPCMODE_BI_FW : { outb(cmd, ppc->lpt_addr); ppc->cur_ctrl ^= cmd_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb(cmd, ppc->lpt_addr + 3); break; } } } 
//*************************************************************************** static void ppc6_wr_data_byte(Interface *ppc, u8 data) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : case PPCMODE_BI_SW : case PPCMODE_BI_FW : { outb(data, ppc->lpt_addr); ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb(data, ppc->lpt_addr + 4); break; } } } //*************************************************************************** static u8 ppc6_rd_data_byte(Interface *ppc) { u8 data = 0; switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : { ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY data = inb(ppc->lpt_addr + 1); data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY data |= inb(ppc->lpt_addr + 1) & 0xB8; break; } case PPCMODE_BI_SW : case PPCMODE_BI_FW : { ppc->cur_ctrl |= port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); data = inb(ppc->lpt_addr); ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl,ppc->lpt_addr + 2); ppc->cur_ctrl &= ~port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); data = inb(ppc->lpt_addr + 4); outb(ppc->cur_ctrl,ppc->lpt_addr + 2); break; } } return(data); } //*************************************************************************** static u8 ppc6_rd_port(Interface *ppc, u8 port) { ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ)); return(ppc6_rd_data_byte(ppc)); } //*************************************************************************** static void ppc6_wr_port(Interface *ppc, u8 port, u8 data) { ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | 
ACCESS_WRITE)); ppc6_wr_data_byte(ppc, data); } //*************************************************************************** static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : { while(count) { u8 d; ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY d = inb(ppc->lpt_addr + 1); d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY d |= inb(ppc->lpt_addr + 1) & 0xB8; *data++ = d; count--; } break; } case PPCMODE_BI_SW : case PPCMODE_BI_FW : { ppc->cur_ctrl |= port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl |= port_stb; while(count) { ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); *data++ = inb(ppc->lpt_addr); count--; } ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl &= ~port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : { outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); // DELAY while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_WORD : { outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); // DELAY while(count > 1) { *((u16 *)data) = inw(ppc->lpt_addr + 4); data += 2; count -= 2; } while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_DWORD : { outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); // DELAY while(count > 3) { *((u32 *)data) = inl(ppc->lpt_addr + 4); data += 4; count -= 4; } while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } } } //*************************************************************************** static void ppc6_wait_for_fifo(Interface *ppc) { int i; if (ppc->ppc_flags & fifo_wait) { for(i=0; i<20; i++) inb(ppc->lpt_addr + 1); } } 
//*************************************************************************** static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_BI_SW : { while(count--) { outb(*data++, ppc->lpt_addr); ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); } break; } case PPCMODE_UNI_FW : case PPCMODE_BI_FW : { u8 this, last; ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR)); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); last = *data; outb(last, ppc->lpt_addr); while(count) { this = *data++; count--; if (this == last) { ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); } else { outb(this, ppc->lpt_addr); last = this; } } ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR)); break; } case PPCMODE_EPP_BYTE : { while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } case PPCMODE_EPP_WORD : { while(count > 1) { outw(*((u16 *)data),ppc->lpt_addr + 4); data += 2; count -= 2; } while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } case PPCMODE_EPP_DWORD : { while(count > 3) { outl(*((u32 *)data),ppc->lpt_addr + 4); data += 4; count -= 4; } while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } } } //*************************************************************************** static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length) { length = length << 1; ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc,(u8)length); ppc6_wr_data_byte(ppc,(u8)(length >> 8)); ppc6_wr_data_byte(ppc,0); ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ)); ppc6_rd_data_blk(ppc, data, length); ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); } 
//*************************************************************************** static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length) { length = length << 1; ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc,(u8)length); ppc6_wr_data_byte(ppc,(u8)(length >> 8)); ppc6_wr_data_byte(ppc,0); ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE)); ppc6_wr_data_blk(ppc, data, length); ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); } //*************************************************************************** static void ppc6_wr_extout(Interface *ppc, u8 regdata) { ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6)); } //*************************************************************************** static int ppc6_open(Interface *ppc) { int ret; ret = ppc6_select(ppc); if (ret == 0) return(ret); ppc->ppc_flags &= ~fifo_wait; ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE)); ppc6_wr_data_byte(ppc, RAMSIZE_128K); ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION)); if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C) ppc->ppc_flags |= fifo_wait; return(ret); } //*************************************************************************** static void ppc6_close(Interface *ppc) { ppc6_deselect(ppc); } //***************************************************************************
gpl-2.0
visi0nary/android_kernel_elephone_p8000
drivers/block/paride/ppc6lnx.c
15571
14693
/* ppc6lnx.c (c) 2001 Micro Solutions Inc. Released under the terms of the GNU General Public license ppc6lnx.c is a par of the protocol driver for the Micro Solutions "BACKPACK" parallel port IDE adapter (Works on Series 6 drives) */ //*************************************************************************** // PPC 6 Code in C sanitized for LINUX // Original x86 ASM by Ron, Converted to C by Clive //*************************************************************************** #define port_stb 1 #define port_afd 2 #define cmd_stb port_afd #define port_init 4 #define data_stb port_init #define port_sel 8 #define port_int 16 #define port_dir 0x20 #define ECR_EPP 0x80 #define ECR_BI 0x20 //*************************************************************************** // 60772 Commands #define ACCESS_REG 0x00 #define ACCESS_PORT 0x40 #define ACCESS_READ 0x00 #define ACCESS_WRITE 0x20 // 60772 Command Prefix #define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation #define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits #define PREFIX_IO16 0x01 // perform 16-bit wide I/O #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write #define PREFIX_BLK 0x08 // enable block transfer mode // 60772 Registers #define REG_STATUS 0x00 // status register #define STATUS_IRQA 0x01 // Peripheral IRQA line #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit #define REG_VERSION 0x01 // PPC version register (read) #define REG_HWCFG 0x02 // Hardware Config register #define REG_RAMSIZE 0x03 // Size of RAM Buffer #define RAMSIZE_128K 0x02 #define REG_EEPROM 0x06 // EEPROM control register #define EEPROM_SK 0x01 // eeprom SK bit #define EEPROM_DI 0x02 // eeprom DI bit #define EEPROM_CS 0x04 // eeprom CS bit #define EEPROM_EN 0x08 // eeprom output enable #define REG_BLKSIZE 0x08 // Block transfer len (24 bit) //*************************************************************************** typedef struct ppc_storage { u16 lpt_addr; // LPT base address 
u8 ppc_id; u8 mode; // operating mode // 0 = PPC Uni SW // 1 = PPC Uni FW // 2 = PPC Bi SW // 3 = PPC Bi FW // 4 = EPP Byte // 5 = EPP Word // 6 = EPP Dword u8 ppc_flags; u8 org_data; // original LPT data port contents u8 org_ctrl; // original LPT control port contents u8 cur_ctrl; // current control port contents } Interface; //*************************************************************************** // ppc_flags #define fifo_wait 0x10 //*************************************************************************** // DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES #define PPCMODE_UNI_SW 0 #define PPCMODE_UNI_FW 1 #define PPCMODE_BI_SW 2 #define PPCMODE_BI_FW 3 #define PPCMODE_EPP_BYTE 4 #define PPCMODE_EPP_WORD 5 #define PPCMODE_EPP_DWORD 6 //*************************************************************************** static int ppc6_select(Interface *ppc); static void ppc6_deselect(Interface *ppc); static void ppc6_send_cmd(Interface *ppc, u8 cmd); static void ppc6_wr_data_byte(Interface *ppc, u8 data); static u8 ppc6_rd_data_byte(Interface *ppc); static u8 ppc6_rd_port(Interface *ppc, u8 port); static void ppc6_wr_port(Interface *ppc, u8 port, u8 data); static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count); static void ppc6_wait_for_fifo(Interface *ppc); static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count); static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length); static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length); static void ppc6_wr_extout(Interface *ppc, u8 regdata); static int ppc6_open(Interface *ppc); static void ppc6_close(Interface *ppc); //*************************************************************************** static int ppc6_select(Interface *ppc) { u8 i, j, k; i = inb(ppc->lpt_addr + 1); if (i & 1) outb(i, ppc->lpt_addr + 1); ppc->org_data = inb(ppc->lpt_addr); ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl ppc->cur_ctrl = 
ppc->org_ctrl; ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); if (ppc->org_data == 'b') outb('x', ppc->lpt_addr); outb('b', ppc->lpt_addr); outb('p', ppc->lpt_addr); outb(ppc->ppc_id, ppc->lpt_addr); outb(~ppc->ppc_id,ppc->lpt_addr); ppc->cur_ctrl &= ~port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); i = ppc->mode & 0x0C; if (i == 0) i = (ppc->mode & 2) | 1; outb(i, ppc->lpt_addr); ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY ppc->cur_ctrl |= port_afd; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); j = ((i & 0x08) << 4) | ((i & 0x07) << 3); k = inb(ppc->lpt_addr + 1) & 0xB8; if (j == k) { ppc->cur_ctrl &= ~port_afd; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8; if (j == k) { if (i & 4) // EPP ppc->cur_ctrl &= ~(port_sel | port_init); else // PPC/ECP ppc->cur_ctrl &= ~port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); return(1); } } outb(ppc->org_ctrl, ppc->lpt_addr + 2); outb(ppc->org_data, ppc->lpt_addr); return(0); // FAIL } //*************************************************************************** static void ppc6_deselect(Interface *ppc) { if (ppc->mode & 4) // EPP ppc->cur_ctrl |= port_init; else // PPC/ECP ppc->cur_ctrl |= port_sel; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); outb(ppc->org_data, ppc->lpt_addr); outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2); outb(ppc->org_ctrl, ppc->lpt_addr + 2); } //*************************************************************************** static void ppc6_send_cmd(Interface *ppc, u8 cmd) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : case PPCMODE_BI_SW : case PPCMODE_BI_FW : { outb(cmd, ppc->lpt_addr); ppc->cur_ctrl ^= cmd_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb(cmd, ppc->lpt_addr + 3); break; } } } 
//*************************************************************************** static void ppc6_wr_data_byte(Interface *ppc, u8 data) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : case PPCMODE_BI_SW : case PPCMODE_BI_FW : { outb(data, ppc->lpt_addr); ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb(data, ppc->lpt_addr + 4); break; } } } //*************************************************************************** static u8 ppc6_rd_data_byte(Interface *ppc) { u8 data = 0; switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : { ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY data = inb(ppc->lpt_addr + 1); data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY data |= inb(ppc->lpt_addr + 1) & 0xB8; break; } case PPCMODE_BI_SW : case PPCMODE_BI_FW : { ppc->cur_ctrl |= port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); data = inb(ppc->lpt_addr); ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl,ppc->lpt_addr + 2); ppc->cur_ctrl &= ~port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : case PPCMODE_EPP_WORD : case PPCMODE_EPP_DWORD : { outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); data = inb(ppc->lpt_addr + 4); outb(ppc->cur_ctrl,ppc->lpt_addr + 2); break; } } return(data); } //*************************************************************************** static u8 ppc6_rd_port(Interface *ppc, u8 port) { ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ)); return(ppc6_rd_data_byte(ppc)); } //*************************************************************************** static void ppc6_wr_port(Interface *ppc, u8 port, u8 data) { ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | 
ACCESS_WRITE)); ppc6_wr_data_byte(ppc, data); } //*************************************************************************** static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_UNI_FW : { while(count) { u8 d; ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY d = inb(ppc->lpt_addr + 1); d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); // DELAY d |= inb(ppc->lpt_addr + 1) & 0xB8; *data++ = d; count--; } break; } case PPCMODE_BI_SW : case PPCMODE_BI_FW : { ppc->cur_ctrl |= port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl |= port_stb; while(count) { ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); *data++ = inb(ppc->lpt_addr); count--; } ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc->cur_ctrl &= ~port_dir; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_BYTE : { outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); // DELAY while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_WORD : { outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); // DELAY while(count > 1) { *((u16 *)data) = inw(ppc->lpt_addr + 4); data += 2; count -= 2; } while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } case PPCMODE_EPP_DWORD : { outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); // DELAY while(count > 3) { *((u32 *)data) = inl(ppc->lpt_addr + 4); data += 4; count -= 4; } while(count) { *data++ = inb(ppc->lpt_addr + 4); count--; } outb(ppc->cur_ctrl, ppc->lpt_addr + 2); break; } } } //*************************************************************************** static void ppc6_wait_for_fifo(Interface *ppc) { int i; if (ppc->ppc_flags & fifo_wait) { for(i=0; i<20; i++) inb(ppc->lpt_addr + 1); } } 
//*************************************************************************** static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count) { switch(ppc->mode) { case PPCMODE_UNI_SW : case PPCMODE_BI_SW : { while(count--) { outb(*data++, ppc->lpt_addr); ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); } break; } case PPCMODE_UNI_FW : case PPCMODE_BI_FW : { u8 this, last; ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR)); ppc->cur_ctrl |= port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); last = *data; outb(last, ppc->lpt_addr); while(count) { this = *data++; count--; if (this == last) { ppc->cur_ctrl ^= data_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); } else { outb(this, ppc->lpt_addr); last = this; } } ppc->cur_ctrl &= ~port_stb; outb(ppc->cur_ctrl, ppc->lpt_addr + 2); ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR)); break; } case PPCMODE_EPP_BYTE : { while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } case PPCMODE_EPP_WORD : { while(count > 1) { outw(*((u16 *)data),ppc->lpt_addr + 4); data += 2; count -= 2; } while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } case PPCMODE_EPP_DWORD : { while(count > 3) { outl(*((u32 *)data),ppc->lpt_addr + 4); data += 4; count -= 4; } while(count) { outb(*data++,ppc->lpt_addr + 4); count--; } ppc6_wait_for_fifo(ppc); break; } } } //*************************************************************************** static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length) { length = length << 1; ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc,(u8)length); ppc6_wr_data_byte(ppc,(u8)(length >> 8)); ppc6_wr_data_byte(ppc,0); ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ)); ppc6_rd_data_blk(ppc, data, length); ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); } 
//*************************************************************************** static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length) { length = length << 1; ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc,(u8)length); ppc6_wr_data_byte(ppc,(u8)(length >> 8)); ppc6_wr_data_byte(ppc,0); ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE)); ppc6_wr_data_blk(ppc, data, length); ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); } //*************************************************************************** static void ppc6_wr_extout(Interface *ppc, u8 regdata) { ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE)); ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6)); } //*************************************************************************** static int ppc6_open(Interface *ppc) { int ret; ret = ppc6_select(ppc); if (ret == 0) return(ret); ppc->ppc_flags &= ~fifo_wait; ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE)); ppc6_wr_data_byte(ppc, RAMSIZE_128K); ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION)); if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C) ppc->ppc_flags |= fifo_wait; return(ret); } //*************************************************************************** static void ppc6_close(Interface *ppc) { ppc6_deselect(ppc); } //***************************************************************************
gpl-2.0
wanahmadzainie/linux-mainline
lib/win_minmax.c
212
3400
/** * lib/minmax.c: windowed min/max tracker * * Kathleen Nichols' algorithm for tracking the minimum (or maximum) * value of a data stream over some fixed time interval. (E.g., * the minimum RTT over the past five minutes.) It uses constant * space and constant time per update yet almost always delivers * the same minimum as an implementation that has to keep all the * data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of * the n'th best >= n-1'th best. It also makes sure that the three * values are widely separated in the time window since that bounds * the worse case error when that data is monotonically increasing * over the window. * * Upon getting a new min, we can forget everything earlier because * it has no value - the new min is <= everything else in the window * by definition and it's the most recent. So we restart fresh on * every new min and overwrites 2nd & 3rd choices. The same property * holds for 2nd & 3rd best. */ #include <linux/module.h> #include <linux/win_minmax.h> /* As time advances, update the 1st, 2nd, and 3rd choices. */ static u32 minmax_subwin_update(struct minmax *m, u32 win, const struct minmax_sample *val) { u32 dt = val->t - m->s[0].t; if (unlikely(dt > win)) { /* * Passed entire window without a new val so make 2nd * choice the new val & 3rd choice the new 2nd choice. * we may have to iterate this since our 2nd choice * may also be outside the window (we checked on entry * that the third choice was in the window). */ m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; if (unlikely(val->t - m->s[0].t > win)) { m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; } } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) { /* * We've passed a quarter of the window without a new val * so take a 2nd choice from the 2nd quarter of the window. 
*/ m->s[2] = m->s[1] = *val; } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) { /* * We've passed half the window without finding a new val * so take a 3rd choice from the last half of the window */ m->s[2] = *val; } return m->s[0].v; } /* Check if new measurement updates the 1st, 2nd or 3rd choice max. */ u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v >= m->s[0].v) || /* found new max? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ return minmax_reset(m, t, meas); /* forget earlier samples */ if (unlikely(val.v >= m->s[1].v)) m->s[2] = m->s[1] = val; else if (unlikely(val.v >= m->s[2].v)) m->s[2] = val; return minmax_subwin_update(m, win, &val); } EXPORT_SYMBOL(minmax_running_max); /* Check if new measurement updates the 1st, 2nd or 3rd choice min. */ u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v <= m->s[0].v) || /* found new min? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ return minmax_reset(m, t, meas); /* forget earlier samples */ if (unlikely(val.v <= m->s[1].v)) m->s[2] = m->s[1] = val; else if (unlikely(val.v <= m->s[2].v)) m->s[2] = val; return minmax_subwin_update(m, win, &val); }
gpl-2.0
squirrel20/linux-4.8.15
drivers/staging/iio/gyro/adis16060_core.c
468
5612
/* * ADIS16060 Wide Bandwidth Yaw Rate Gyroscope with SPI driver * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #define ADIS16060_GYRO 0x20 /* Measure Angular Rate (Gyro) */ #define ADIS16060_TEMP_OUT 0x10 /* Measure Temperature */ #define ADIS16060_AIN2 0x80 /* Measure AIN2 */ #define ADIS16060_AIN1 0x40 /* Measure AIN1 */ /** * struct adis16060_state - device instance specific data * @us_w: actual spi_device to write config * @us_r: actual spi_device to read back data * @buf: transmit or receive buffer * @buf_lock: mutex to protect tx and rx **/ struct adis16060_state { struct spi_device *us_w; struct spi_device *us_r; struct mutex buf_lock; u8 buf[3] ____cacheline_aligned; }; static struct iio_dev *adis16060_iio_dev; static int adis16060_spi_write(struct iio_dev *indio_dev, u8 val) { int ret; struct adis16060_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->buf[2] = val; /* The last 8 bits clocked in are latched */ ret = spi_write(st->us_w, st->buf, 3); mutex_unlock(&st->buf_lock); return ret; } static int adis16060_spi_read(struct iio_dev *indio_dev, u16 *val) { int ret; struct adis16060_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); ret = spi_read(st->us_r, st->buf, 3); /* The internal successive approximation ADC begins the * conversion process on the falling edge of MSEL1 and * starts to place data MSB first on the DOUT line at * the 6th falling edge of SCLK */ if (!ret) *val = ((st->buf[0] & 0x3) << 12) | (st->buf[1] << 4) | ((st->buf[2] >> 4) & 0xF); mutex_unlock(&st->buf_lock); return ret; } static int adis16060_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { u16 tval = 0; int ret; 
switch (mask) { case IIO_CHAN_INFO_RAW: /* Take the iio_dev status lock */ mutex_lock(&indio_dev->mlock); ret = adis16060_spi_write(indio_dev, chan->address); if (ret < 0) goto out_unlock; ret = adis16060_spi_read(indio_dev, &tval); if (ret < 0) goto out_unlock; mutex_unlock(&indio_dev->mlock); *val = tval; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: *val = -7; *val2 = 461117; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: *val = 0; *val2 = 34000; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; out_unlock: mutex_unlock(&indio_dev->mlock); return ret; } static const struct iio_info adis16060_info = { .read_raw = &adis16060_read_raw, .driver_module = THIS_MODULE, }; static const struct iio_chan_spec adis16060_channels[] = { { .type = IIO_ANGL_VEL, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .address = ADIS16060_GYRO, }, { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .address = ADIS16060_AIN1, }, { .type = IIO_VOLTAGE, .indexed = 1, .channel = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .address = ADIS16060_AIN2, }, { .type = IIO_TEMP, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .address = ADIS16060_TEMP_OUT, } }; static int adis16060_r_probe(struct spi_device *spi) { int ret; struct adis16060_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; /* this is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); st = iio_priv(indio_dev); st->us_r = spi; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &adis16060_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = adis16060_channels; indio_dev->num_channels = ARRAY_SIZE(adis16060_channels); ret = 
devm_iio_device_register(&spi->dev, indio_dev); if (ret) return ret; adis16060_iio_dev = indio_dev; return 0; } static int adis16060_w_probe(struct spi_device *spi) { int ret; struct iio_dev *indio_dev = adis16060_iio_dev; struct adis16060_state *st; if (!indio_dev) { ret = -ENODEV; goto error_ret; } st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); st->us_w = spi; return 0; error_ret: return ret; } static int adis16060_w_remove(struct spi_device *spi) { return 0; } static struct spi_driver adis16060_r_driver = { .driver = { .name = "adis16060_r", }, .probe = adis16060_r_probe, }; static struct spi_driver adis16060_w_driver = { .driver = { .name = "adis16060_w", }, .probe = adis16060_w_probe, .remove = adis16060_w_remove, }; static __init int adis16060_init(void) { int ret; ret = spi_register_driver(&adis16060_r_driver); if (ret < 0) return ret; ret = spi_register_driver(&adis16060_w_driver); if (ret < 0) { spi_unregister_driver(&adis16060_r_driver); return ret; } return 0; } module_init(adis16060_init); static __exit void adis16060_exit(void) { spi_unregister_driver(&adis16060_w_driver); spi_unregister_driver(&adis16060_r_driver); } module_exit(adis16060_exit); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16060 Yaw Rate Gyroscope Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Evervolv/android_kernel_samsung_tuna
arch/x86/xen/p2m.c
980
28073
/* * Xen leaves the responsibility for maintaining p2m mappings to the * guests themselves, but it must also access and update the p2m array * during suspend/resume when all the pages are reallocated. * * The p2m table is logically a flat array, but we implement it as a * three-level tree to allow the address space to be sparse. * * Xen * | * p2m_top p2m_top_mfn * / \ / \ * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn * / \ / \ / / * p2m p2m p2m p2m p2m p2m p2m ... * * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. * * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the * maximum representable pseudo-physical address space is: * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages * * P2M_PER_PAGE depends on the architecture, as a mfn is always * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to * 512 and 1024 entries respectively. * * In short, these structures contain the Machine Frame Number (MFN) of the PFN. * * However not all entries are filled with MFNs. Specifically for all other * leaf entries, or for the top root, or middle one, for which there is a void * entry, we assume it is "missing". So (for example) * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. * * We also have the possibility of setting 1-1 mappings on certain regions, so * that: * pfn_to_mfn(0xc0000)=0xc0000 * * The benefit of this is, that we can assume for non-RAM regions (think * PCI BARs, or ACPI spaces), we can create mappings easily b/c we * get the PFN value to match the MFN. * * For this to work efficiently we have one new page p2m_identity and * allocate (via reserved_brk) any other pages we need to cover the sides * (1GB or 4MB boundary violations). All entries in p2m_identity are set to * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, * no other fancy value). * * On lookup we spot that the entry points to p2m_identity and return the * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. 
* If the entry points to an allocated page, we just proceed as before and * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in * appropriate functions (pfn_to_mfn). * * The reason for having the IDENTITY_FRAME_BIT instead of just returning the * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a * non-identity pfn. To protect ourselves against we elect to set (and get) the * IDENTITY_FRAME_BIT on all identity mapped PFNs. * * This simplistic diagram is used to explain the more subtle piece of code. * There is also a digram of the P2M at the end that can help. * Imagine your E820 looking as so: * * 1GB 2GB * /-------------------+---------\/----\ /----------\ /---+-----\ * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | * \-------------------+---------/\----/ \----------/ \---+-----/ * ^- 1029MB ^- 2001MB * * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), * 2048MB = 524288 (0x80000)] * * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB * is actually not present (would have to kick the balloon driver to put it in). * * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start * of the PFN and the end PFN (263424 and 512256 respectively). The first step * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page * covers 512^2 of page estate (1GB) and in case the start or end PFN is not * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn * to end pfn. We reserve_brk top leaf pages if they are missing (means they * point to p2m_mid_missing). * * With the E820 example above, 263424 is not 1GB aligned so we allocate a * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. * Each entry in the allocate page is "missing" (points to p2m_missing). 
* * Next stage is to determine if we need to do a more granular boundary check * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. * We check if the start pfn and end pfn violate that boundary check, and if * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer * granularity of setting which PFNs are missing and which ones are identity. * In our example 263424 and 512256 both fail the check so we reserve_brk two * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" * values) and assign them to p2m[1][2] and p2m[1][488] respectively. * * At this point we would at minimum reserve_brk one page, but could be up to * three. Each call to set_phys_range_identity has at maximum a three page * cost. If we were to query the P2M at this stage, all those entries from * start PFN through end PFN (so 1029MB -> 2001MB) would return * INVALID_P2M_ENTRY ("missing"). * * The next step is to walk from the start pfn to the end pfn setting * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. * If we find that the middle leaf is pointing to p2m_missing we can swap it * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this * point we do not need to worry about boundary aligment (so no need to * reserve_brk a middle page, figure out which PFNs are "missing" and which * ones are identity), as that has been done earlier. If we find that the * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference * that page (which covers 512 PFNs) and set the appropriate PFN with * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we * set from p2m[1][2][256->511] and p2m[1][488][0->256] with * IDENTITY_FRAME_BIT set. * * All other regions that are void (or not filled) either point to p2m_missing * (considered missing) or have the default value of INVALID_P2M_ENTRY (also * considered missing). 
In our case, p2m[1][2][0->255] and p2m[1][488][257->511] * contain the INVALID_P2M_ENTRY value and are considered "missing." * * This is what the p2m ends up looking (for the E820 above) with this * fabulous drawing: * * p2m /--------------\ * /-----\ | &mfn_list[0],| /-----------------\ * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | * |-----| \ | [p2m_identity]+\\ | .... | * | 2 |--\ \-------------------->| ... | \\ \----------------/ * |-----| \ \---------------/ \\ * | 3 |\ \ \\ p2m_identity * |-----| \ \-------------------->/---------------\ /-----------------\ * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | * \-----/ / | [p2m_identity]+-->| ..., ~0 | * / /---------------\ | .... | \-----------------/ * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | * / | IDENTITY[@256]|<----/ \---------------/ * / | ~0, ~0, .... | * | \---------------/ * | * p2m_missing p2m_missing * /------------------\ /------------\ * | [p2m_mid_missing]+---->| ~0, ~0, ~0 | * | [p2m_mid_missing]+---->| ..., ~0 | * \------------------/ \------------/ * * where ~0 is INVALID_P2M_ENTRY. 
IDENTITY is (PFN | IDENTITY_BIT) */ #include <linux/init.h> #include <linux/module.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <asm/cache.h> #include <asm/setup.h> #include <asm/xen/page.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include "xen-ops.h" static void __init m2p_override_init(void); unsigned long xen_max_p2m_pfn __read_mostly; #define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) #define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) #define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) #define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) /* Placeholders for holes in the address space */ static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); /* We might hit two boundary violations at the start and end, at max each * boundary violation will require three middle nodes. 
*/ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); } static inline unsigned p2m_mid_index(unsigned long pfn) { return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; } static inline unsigned p2m_index(unsigned long pfn) { return pfn % P2M_PER_PAGE; } static void p2m_top_init(unsigned long ***top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = p2m_mid_missing; } static void p2m_top_mfn_init(unsigned long *top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = virt_to_mfn(p2m_mid_missing_mfn); } static void p2m_top_mfn_p_init(unsigned long **top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = p2m_mid_missing_mfn; } static void p2m_mid_init(unsigned long **mid) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) mid[i] = p2m_missing; } static void p2m_mid_mfn_init(unsigned long *mid) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) mid[i] = virt_to_mfn(p2m_missing); } static void p2m_init(unsigned long *p2m) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) p2m[i] = INVALID_P2M_ENTRY; } /* * Build the parallel p2m_top_mfn and p2m_mid_mfn structures * * This is called both at boot time, and after resuming from suspend: * - At boot time we're called very early, and must use extend_brk() * to allocate memory. * * - After resume we're called from within stop_machine, but the mfn * tree should alreay be completely allocated. 
*/ void __ref xen_build_mfn_list_list(void) { unsigned long pfn; /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(p2m_mid_missing_mfn); p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_p_init(p2m_top_mfn_p); p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ p2m_mid_mfn_init(p2m_mid_missing_mfn); } for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); unsigned long **mid; unsigned long *mid_mfn_p; mid = p2m_top[topidx]; mid_mfn_p = p2m_top_mfn_p[topidx]; /* Don't bother allocating any mfn mid levels if * they're just missing, just update the stored mfn, * since all could have changed over a migrate. */ if (mid == p2m_mid_missing) { BUG_ON(mididx); BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; continue; } if (mid_mfn_p == p2m_mid_missing_mfn) { /* * XXX boot-time only! We should never find * missing parts of the mfn tree after * runtime. extend_brk() will BUG if we call * it too late. 
*/ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(mid_mfn_p); p2m_top_mfn_p[topidx] = mid_mfn_p; } p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); } } void xen_setup_mfn_list_list(void) { BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; } /* Set up p2m_top to point to the domain-builder provided p2m pages */ void __init xen_build_dynamic_phys_to_machine(void) { unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); unsigned long pfn; xen_max_p2m_pfn = max_pfn; p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m_missing); p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(p2m_mid_missing); p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_init(p2m_top); p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m_identity); /* * The domain builder gives us a pre-constructed p2m array in * mfn_list for all the pages initially given to us, so we just * need to graft that into our tree structure. */ for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); if (p2m_top[topidx] == p2m_mid_missing) { unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid); p2m_top[topidx] = mid; } /* * As long as the mfn_list has enough entries to completely * fill a p2m page, pointing into the array is ok. But if * not the entries beyond the last pfn will be undefined. 
*/ if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) { unsigned long p2midx; p2midx = max_pfn % P2M_PER_PAGE; for ( ; p2midx < P2M_PER_PAGE; p2midx++) mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY; } p2m_top[topidx][mididx] = &mfn_list[pfn]; } m2p_override_init(); } unsigned long get_phys_to_machine(unsigned long pfn) { unsigned topidx, mididx, idx; if (unlikely(pfn >= MAX_P2M_PFN)) return INVALID_P2M_ENTRY; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); /* * The INVALID_P2M_ENTRY is filled in both p2m_*identity * and in p2m_*missing, so returning the INVALID_P2M_ENTRY * would be wrong. */ if (p2m_top[topidx][mididx] == p2m_identity) return IDENTITY_FRAME(pfn); return p2m_top[topidx][mididx][idx]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); static void *alloc_p2m_page(void) { return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); } static void free_p2m_page(void *p) { free_page((unsigned long)p); } /* * Fully allocate the p2m structure for a given pfn. We need to check * that both the top and mid levels are allocated, and make sure the * parallel mfn tree is kept in sync. We may race with other cpus, so * the new pages are installed with cmpxchg; if we lose the race then * simply free the page we allocated and use the one that's there. 
*/ static bool alloc_p2m(unsigned long pfn) { unsigned topidx, mididx; unsigned long ***top_p, **mid; unsigned long *top_mfn_p, *mid_mfn; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); top_p = &p2m_top[topidx]; mid = *top_p; if (mid == p2m_mid_missing) { /* Mid level is missing, allocate a new one */ mid = alloc_p2m_page(); if (!mid) return false; p2m_mid_init(mid); if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) free_p2m_page(mid); } top_mfn_p = &p2m_top_mfn[topidx]; mid_mfn = p2m_top_mfn_p[topidx]; BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); if (mid_mfn == p2m_mid_missing_mfn) { /* Separately check the mid mfn level */ unsigned long missing_mfn; unsigned long mid_mfn_mfn; mid_mfn = alloc_p2m_page(); if (!mid_mfn) return false; p2m_mid_mfn_init(mid_mfn); missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn); if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) free_p2m_page(mid_mfn); else p2m_top_mfn_p[topidx] = mid_mfn; } if (p2m_top[topidx][mididx] == p2m_identity || p2m_top[topidx][mididx] == p2m_missing) { /* p2m leaf page is missing */ unsigned long *p2m; unsigned long *p2m_orig = p2m_top[topidx][mididx]; p2m = alloc_p2m_page(); if (!p2m) return false; p2m_init(p2m); if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) free_p2m_page(p2m); else mid_mfn[mididx] = virt_to_mfn(p2m); } return true; } static bool __init __early_alloc_p2m(unsigned long pfn) { unsigned topidx, mididx, idx; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); /* Pfff.. No boundary cross-over, lets get out. */ if (!idx) return false; WARN(p2m_top[topidx][mididx] == p2m_identity, "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", topidx, mididx); /* * Could be done by xen_build_dynamic_phys_to_machine.. 
	 */
	if (p2m_top[topidx][mididx] != p2m_missing)
		return false;

	/* Boundary cross-over for the edges: */
	if (idx) {
		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
		unsigned long *mid_mfn_p;

		p2m_init(p2m);

		p2m_top[topidx][mididx] = p2m;

		/* For save/restore we need the MFN of the P2M saved */
		mid_mfn_p = p2m_top_mfn_p[topidx];
		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
			"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
			topidx, mididx);
		mid_mfn_p[mididx] = virt_to_mfn(p2m);
	}
	return idx != 0;
}

/*
 * Mark [pfn_s, pfn_e) as identity-mapped (MFN == PFN) in the p2m.
 * Returns the number of PFNs actually converted.
 */
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
				      unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
		return 0;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	/* Walk whole mid-level-sized chunks covering the range. */
	for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
		pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
	{
		unsigned topidx = p2m_top_index(pfn);
		unsigned long *mid_mfn_p;
		unsigned long **mid;

		mid = p2m_top[topidx];
		mid_mfn_p = p2m_top_mfn_p[topidx];
		if (mid == p2m_mid_missing) {
			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);

			p2m_mid_init(mid);

			p2m_top[topidx] = mid;

			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
		}
		/* And the save/restore P2M tables.. */
		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
			/* Note: we don't set mid_mfn_p[midix] here,
			 * look in __early_alloc_p2m */
		}
	}

	/* Handle partial leaf pages at both edges of the range. */
	__early_alloc_p2m(pfn_s);
	__early_alloc_p2m(pfn_e);

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
			break;

	if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
		"Identity mapping failed. We are %ld short of 1-1 mappings!\n",
		(pfn_e - pfn_s) - (pfn - pfn_s)))
		printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);

	return pfn - pfn_s;
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return true;
	}
	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/* For sparse holes where the p2m leaf has real PFN along with
	 * PCI holes, stick in the PFN as the MFN value.
	 */
	if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
		if (p2m_top[topidx][mididx] == p2m_identity)
			return true;

		/* Swap over from MISSING to IDENTITY if needed. */
		if (p2m_top[topidx][mididx] == p2m_missing) {
			WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
				p2m_identity) != p2m_missing);
			return true;
		}
	}

	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}

/* As __set_phys_to_machine, but allocate intermediate levels on demand. */
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;

		if (!__set_phys_to_machine(pfn, mfn))
			return false;
	}

	return true;
}

#define M2P_OVERRIDE_HASH_SHIFT	10
#define M2P_OVERRIDE_HASH	(1 << M2P_OVERRIDE_HASH_SHIFT)

static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
static DEFINE_SPINLOCK(m2p_override_lock);

/* Initialise the m2p override hash buckets (boot time only). */
static void __init m2p_override_init(void)
{
	unsigned i;

	m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
				   sizeof(unsigned long));

	for (i = 0; i < M2P_OVERRIDE_HASH; i++)
		INIT_LIST_HEAD(&m2p_overrides[i]);
}

static unsigned long mfn_hash(unsigned long mfn)
{
	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}

/* Add an MFN override for a particular page */
int
m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) { unsigned long flags; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; int ret = 0; pfn = page_to_pfn(page); if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } page->private = mfn; page->index = pfn_to_mfn(pfn); if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) return -ENOMEM; if (clear_pte && !PageHighMem(page)) /* Just zap old mapping for now */ pte_clear(&init_mm, address, ptep); spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other * pfn so that the following mfn_to_pfn(mfn) calls will return the * pfn from the m2p_override (the backend pfn) instead. * We need to do this because the pages shared by the frontend * (xen-blkfront) can be already locked (lock_page, called by * do_read_cache_page); when the userspace backend tries to use them * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so * do_blockdev_direct_IO is going to try to lock the same pages * again resulting in a deadlock. * As a side effect get_user_pages_fast might not be safe on the * frontend pages while they are being shared with the backend, * because mfn_to_pfn (that ends up being called by GUPF) will * return the backend pfn rather than the frontend pfn. 
*/ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); if (ret == 0 && get_phys_to_machine(pfn) == mfn) set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); int m2p_remove_override(struct page *page, bool clear_pte) { unsigned long flags; unsigned long mfn; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; int ret = 0; pfn = page_to_pfn(page); mfn = get_phys_to_machine(pfn); if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) return -EINVAL; if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_remove_override: pfn %lx not mapped", pfn)) return -EINVAL; } spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); set_phys_to_machine(pfn, page->index); if (clear_pte && !PageHighMem(page)) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); /* No tlb flush necessary because the caller already * left the pte unmapped. */ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present * somewhere in this domain, even before being added to the * m2p_override (see comment above in m2p_add_override). * If there are no other entries in the m2p_override corresponding * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for * the original pfn (the one shared by the frontend): the backend * cannot do any IO on this page anymore because it has been * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of * the original pfn causes mfn_to_pfn(mfn) to return the frontend * pfn again. 
*/ mfn &= ~FOREIGN_FRAME_BIT; ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && m2p_find_override(mfn) == NULL) set_phys_to_machine(pfn, mfn); return 0; } EXPORT_SYMBOL_GPL(m2p_remove_override); struct page *m2p_find_override(unsigned long mfn) { unsigned long flags; struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)]; struct page *p, *ret; ret = NULL; spin_lock_irqsave(&m2p_override_lock, flags); list_for_each_entry(p, bucket, lru) { if (p->private == mfn) { ret = p; break; } } spin_unlock_irqrestore(&m2p_override_lock, flags); return ret; } unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) { struct page *p = m2p_find_override(mfn); unsigned long ret = pfn; if (p) ret = page_to_pfn(p); return ret; } EXPORT_SYMBOL_GPL(m2p_find_override_pfn); #ifdef CONFIG_XEN_DEBUG_FS int p2m_dump_show(struct seq_file *m, void *v) { static const char * const level_name[] = { "top", "middle", "entry", "abnormal" }; static const char * const type_name[] = { "identity", "missing", "pfn", "abnormal"}; #define TYPE_IDENTITY 0 #define TYPE_MISSING 1 #define TYPE_PFN 2 #define TYPE_UNKNOWN 3 unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; unsigned int uninitialized_var(prev_level); unsigned int uninitialized_var(prev_type); if (!p2m_top) return 0; for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); unsigned idx = p2m_index(pfn); unsigned lvl, type; lvl = 4; type = TYPE_UNKNOWN; if (p2m_top[topidx] == p2m_mid_missing) { lvl = 0; type = TYPE_MISSING; } else if (p2m_top[topidx] == NULL) { lvl = 0; type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx] == NULL) { lvl = 1; type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx] == p2m_identity) { lvl = 1; type = TYPE_IDENTITY; } else if (p2m_top[topidx][mididx] == p2m_missing) { lvl = 1; type = TYPE_MISSING; } else if (p2m_top[topidx][mididx][idx] == 0) { lvl = 2; 
type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { lvl = 2; type = TYPE_IDENTITY; } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { lvl = 2; type = TYPE_MISSING; } else if (p2m_top[topidx][mididx][idx] == pfn) { lvl = 2; type = TYPE_PFN; } else if (p2m_top[topidx][mididx][idx] != pfn) { lvl = 2; type = TYPE_PFN; } if (pfn == 0) { prev_level = lvl; prev_type = type; } if (pfn == MAX_DOMAIN_PAGES-1) { lvl = 3; type = TYPE_UNKNOWN; } if (prev_type != type) { seq_printf(m, " [0x%lx->0x%lx] %s\n", prev_pfn_type, pfn, type_name[prev_type]); prev_pfn_type = pfn; prev_type = type; } if (prev_level != lvl) { seq_printf(m, " [0x%lx->0x%lx] level %s\n", prev_pfn_level, pfn, level_name[prev_level]); prev_pfn_level = pfn; prev_level = lvl; } } return 0; #undef TYPE_IDENTITY #undef TYPE_MISSING #undef TYPE_PFN #undef TYPE_UNKNOWN } #endif
gpl-2.0
spleef/android_kernel_samsung_trlte
drivers/mtd/ubi/attach.c
1236
47992
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * UBI attaching sub-system.
 *
 * This sub-system is responsible for attaching MTD devices and it also
 * implements flash media scanning.
 *
 * The attaching information is represented by a &struct ubi_attach_info
 * object. Information about volumes is represented by &struct ubi_ainf_volume
 * objects which are kept in volume RB-tree with root at the @volumes field.
 * The RB-tree is indexed by the volume ID.
 *
 * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
 * objects are kept in per-volume RB-trees with the root at the corresponding
 * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
 * per-volume objects and each of these objects is the root of RB-tree of
 * per-LEB objects.
 *
 * Corrupted physical eraseblocks are put to the @corr list, free physical
 * eraseblocks are put to the @free list and the physical eraseblock to be
 * erased are put to the @erase list.
 *
 * About corruptions
 * ~~~~~~~~~~~~~~~~~
 *
 * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
 * whether the headers are corrupted or not.
Sometimes UBI also protects the * data with CRC-32, e.g., when it executes the atomic LEB change operation, or * when it moves the contents of a PEB for wear-leveling purposes. * * UBI tries to distinguish between 2 types of corruptions. * * 1. Corruptions caused by power cuts. These are expected corruptions and UBI * tries to handle them gracefully, without printing too many warnings and * error messages. The idea is that we do not lose important data in these * cases - we may lose only the data which were being written to the media just * before the power cut happened, and the upper layers (e.g., UBIFS) are * supposed to handle such data losses (e.g., by using the FS journal). * * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like * the reason is a power cut, UBI puts this PEB to the @erase list, and all * PEBs in the @erase list are scheduled for erasure later. * * 2. Unexpected corruptions which are not caused by power cuts. During * attaching, such PEBs are put to the @corr list and UBI preserves them. * Obviously, this lessens the amount of available PEBs, and if at some point * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs * about such PEBs every time the MTD device is attached. * * However, it is difficult to reliably distinguish between these types of * corruptions and UBI's strategy is as follows (in case of attaching by * scanning). UBI assumes corruption type 2 if the VID header is corrupted and * the data area does not contain all 0xFFs, and there were no bit-flips or * integrity errors (e.g., ECC errors in case of NAND) while reading the data * area. Otherwise UBI assumes corruption type 1. So the decision criteria * are as follows. * o If the data area contains only 0xFFs, there are no data, and it is safe * to just erase this PEB - this is corruption type 1. 
* o If the data area has bit-flips or data integrity errors (ECC errors on * NAND), it is probably a PEB which was being erased when power cut * happened, so this is corruption type 1. However, this is just a guess, * which might be wrong. * o Otherwise this is corruption type 2. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/math64.h> #include <linux/random.h> #include "ubi.h" static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); /* Temporary variables used during scanning */ static struct ubi_ec_hdr *ech; static struct ubi_vid_hdr *vidh; /** * add_to_list - add physical eraseblock to a list. * @ai: attaching information * @pnum: physical eraseblock number to add * @vol_id: the last used volume id for the PEB * @lnum: the last used LEB number for the PEB * @ec: erase counter of the physical eraseblock * @to_head: if not zero, add to the head of the list * @list: the list to add to * * This function allocates a 'struct ubi_ainf_peb' object for physical * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists. * It stores the @lnum and @vol_id alongside, which can both be * %UBI_UNKNOWN if they are not available, not readable, or not assigned. * If @to_head is not zero, PEB will be added to the head of the list, which * basically means it will be processed first later. E.g., we add corrupted * PEBs (corrupted due to power cuts) to the head of the erase list to make * sure we erase them first and get rid of corruptions ASAP. This function * returns zero in case of success and a negative error code in case of * failure. 
*/ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id, int lnum, int ec, int to_head, struct list_head *list) { struct ubi_ainf_peb *aeb; if (list == &ai->free) { dbg_bld("add to free: PEB %d, EC %d", pnum, ec); } else if (list == &ai->erase) { dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); } else if (list == &ai->alien) { dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); ai->alien_peb_count += 1; } else BUG(); aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return -ENOMEM; aeb->pnum = pnum; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->ec = ec; if (to_head) list_add(&aeb->u.list, list); else list_add_tail(&aeb->u.list, list); return 0; } /** * add_corrupted - add a corrupted physical eraseblock. * @ai: attaching information * @pnum: physical eraseblock number to add * @ec: erase counter of the physical eraseblock * * This function allocates a 'struct ubi_ainf_peb' object for a corrupted * physical eraseblock @pnum and adds it to the 'corr' list. The corruption * was presumably not caused by a power cut. Returns zero in case of success * and a negative error code in case of failure. */ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) { struct ubi_ainf_peb *aeb; dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return -ENOMEM; ai->corr_peb_count += 1; aeb->pnum = pnum; aeb->ec = ec; list_add(&aeb->u.list, &ai->corr); return 0; } /** * validate_vid_hdr - check volume identifier header. * @vid_hdr: the volume identifier header to check * @av: information about the volume this logical eraseblock belongs to * @pnum: physical eraseblock number the VID header came from * * This function checks that data stored in @vid_hdr is consistent. Returns * non-zero if an inconsistency was found and zero if not. * * Note, UBI does sanity check of everything it reads from the flash media. * Most of the checks are done in the I/O sub-system. 
Here we check that the * information in the VID header is consistent to the information in other VID * headers of the same volume. */ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, const struct ubi_ainf_volume *av, int pnum) { int vol_type = vid_hdr->vol_type; int vol_id = be32_to_cpu(vid_hdr->vol_id); int used_ebs = be32_to_cpu(vid_hdr->used_ebs); int data_pad = be32_to_cpu(vid_hdr->data_pad); if (av->leb_count != 0) { int av_vol_type; /* * This is not the first logical eraseblock belonging to this * volume. Ensure that the data in its VID header is consistent * to the data in previous logical eraseblock headers. */ if (vol_id != av->vol_id) { ubi_err("inconsistent vol_id"); goto bad; } if (av->vol_type == UBI_STATIC_VOLUME) av_vol_type = UBI_VID_STATIC; else av_vol_type = UBI_VID_DYNAMIC; if (vol_type != av_vol_type) { ubi_err("inconsistent vol_type"); goto bad; } if (used_ebs != av->used_ebs) { ubi_err("inconsistent used_ebs"); goto bad; } if (data_pad != av->data_pad) { ubi_err("inconsistent data_pad"); goto bad; } } return 0; bad: ubi_err("inconsistent VID header at PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); ubi_dump_av(av); return -EINVAL; } /** * add_volume - add volume to the attaching information. * @ai: attaching information * @vol_id: ID of the volume to add * @pnum: physical eraseblock number * @vid_hdr: volume identifier header * * If the volume corresponding to the @vid_hdr logical eraseblock is already * present in the attaching information, this function does nothing. Otherwise * it adds corresponding volume to the attaching information. Returns a pointer * to the allocated "av" object in case of success and a negative error code in * case of failure. 
*/ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, int vol_id, int pnum, const struct ubi_vid_hdr *vid_hdr) { struct ubi_ainf_volume *av; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); /* Walk the volume RB-tree to look if this volume is already present */ while (*p) { parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); if (vol_id == av->vol_id) return av; if (vol_id > av->vol_id) p = &(*p)->rb_left; else p = &(*p)->rb_right; } /* The volume is absent - add it */ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); if (!av) return ERR_PTR(-ENOMEM); av->highest_lnum = av->leb_count = 0; av->vol_id = vol_id; av->root = RB_ROOT; av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); av->data_pad = be32_to_cpu(vid_hdr->data_pad); av->compat = vid_hdr->compat; av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; if (vol_id > ai->highest_vol_id) ai->highest_vol_id = vol_id; rb_link_node(&av->rb, parent, p); rb_insert_color(&av->rb, &ai->volumes); ai->vols_found += 1; dbg_bld("added volume %d", vol_id); return av; } /** * ubi_compare_lebs - find out which logical eraseblock is newer. * @ubi: UBI device description object * @aeb: first logical eraseblock to compare * @pnum: physical eraseblock number of the second logical eraseblock to * compare * @vid_hdr: volume identifier header of the second logical eraseblock * * This function compares 2 copies of a LEB and informs which one is newer. In * case of success this function returns a positive value, in case of failure, a * negative error code is returned. 
The success return codes use the following * bits: * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the * second PEB (described by @pnum and @vid_hdr); * o bit 0 is set: the second PEB is newer; * o bit 1 is cleared: no bit-flips were detected in the newer LEB; * o bit 1 is set: bit-flips were detected in the newer LEB; * o bit 2 is cleared: the older LEB is not corrupted; * o bit 2 is set: the older LEB is corrupted. */ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, int pnum, const struct ubi_vid_hdr *vid_hdr) { int len, err, second_is_newer, bitflips = 0, corrupted = 0; uint32_t data_crc, crc; struct ubi_vid_hdr *vh = NULL; unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); if (sqnum2 == aeb->sqnum) { /* * This must be a really ancient UBI image which has been * created before sequence numbers support has been added. At * that times we used 32-bit LEB versions stored in logical * eraseblocks. That was before UBI got into mainline. We do not * support these images anymore. Well, those images still work, * but only if no unclean reboots happened. */ ubi_err("unsupported on-flash UBI format"); return -EINVAL; } /* Obviously the LEB with lower sequence counter is older */ second_is_newer = (sqnum2 > aeb->sqnum); /* * Now we know which copy is newer. If the copy flag of the PEB with * newer version is not set, then we just return, otherwise we have to * check data CRC. For the second PEB we already have the VID header, * for the first one - we'll need to re-read it from flash. * * Note: this may be optimized so that we wouldn't read twice. 
*/ if (second_is_newer) { if (!vid_hdr->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("second PEB %d is newer, copy_flag is unset", pnum); return 1; } } else { if (!aeb->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("first PEB %d is newer, copy_flag is unset", pnum); return bitflips << 1; } vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) return -ENOMEM; pnum = aeb->pnum; err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (err) { if (err == UBI_IO_BITFLIPS) bitflips = 1; else { ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d", pnum, err); if (err > 0) err = -EIO; goto out_free_vidh; } } vid_hdr = vh; } /* Read the data of the copy and check the CRC */ len = be32_to_cpu(vid_hdr->data_size); mutex_lock(&ubi->buf_mutex); err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len); if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto out_unlock; data_crc = be32_to_cpu(vid_hdr->data_crc); crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len); if (crc != data_crc) { dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", pnum, crc, data_crc); corrupted = 1; bitflips = 0; second_is_newer = !second_is_newer; } else { dbg_bld("PEB %d CRC is OK", pnum); bitflips = !!err; } mutex_unlock(&ubi->buf_mutex); ubi_free_vid_hdr(ubi, vh); if (second_is_newer) dbg_bld("second PEB %d is newer, copy_flag is set", pnum); else dbg_bld("first PEB %d is newer, copy_flag is set", pnum); return second_is_newer | (bitflips << 1) | (corrupted << 2); out_unlock: mutex_unlock(&ubi->buf_mutex); out_free_vidh: ubi_free_vid_hdr(ubi, vh); return err; } /** * ubi_add_to_av - add used physical eraseblock to the attaching information. 
* @ubi: UBI device description object * @ai: attaching information * @pnum: the physical eraseblock number * @ec: erase counter * @vid_hdr: the volume identifier header * @bitflips: if bit-flips were detected when this physical eraseblock was read * * This function adds information about a used physical eraseblock to the * 'used' tree of the corresponding volume. The function is rather complex * because it has to handle cases when this is not the first physical * eraseblock belonging to the same logical eraseblock, and the newer one has * to be picked, while the older one has to be dropped. This function returns * zero in case of success and a negative error code in case of failure. */ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips) { int err, vol_id, lnum; unsigned long long sqnum; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb; struct rb_node **p, *parent = NULL; vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); sqnum = be64_to_cpu(vid_hdr->sqnum); dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d", pnum, vol_id, lnum, ec, sqnum, bitflips); av = add_volume(ai, vol_id, pnum, vid_hdr); if (IS_ERR(av)) return PTR_ERR(av); if (ai->max_sqnum < sqnum) ai->max_sqnum = sqnum; /* * Walk the RB-tree of logical eraseblocks of volume @vol_id to look * if this is the first instance of this logical eraseblock or not. */ p = &av->root.rb_node; while (*p) { int cmp_res; parent = *p; aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); if (lnum != aeb->lnum) { if (lnum < aeb->lnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; continue; } /* * There is already a physical eraseblock describing the same * logical eraseblock present. */ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d", aeb->pnum, aeb->sqnum, aeb->ec); /* * Make sure that the logical eraseblocks have different * sequence numbers. Otherwise the image is bad. 
* * However, if the sequence number is zero, we assume it must * be an ancient UBI image from the era when UBI did not have * sequence numbers. We still can attach these images, unless * there is a need to distinguish between old and new * eraseblocks, in which case we'll refuse the image in * 'ubi_compare_lebs()'. In other words, we attach old clean * images, but refuse attaching old images with duplicated * logical eraseblocks because there was an unclean reboot. */ if (aeb->sqnum == sqnum && sqnum != 0) { ubi_err("two LEBs with same sequence number %llu", sqnum); ubi_dump_aeb(aeb, 0); ubi_dump_vid_hdr(vid_hdr); return -EINVAL; } /* * Now we have to drop the older one and preserve the newer * one. */ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr); if (cmp_res < 0) return cmp_res; if (cmp_res & 1) { /* * This logical eraseblock is newer than the one * found earlier. */ err = validate_vid_hdr(vid_hdr, av, pnum); if (err) return err; err = add_to_list(ai, aeb->pnum, aeb->vol_id, aeb->lnum, aeb->ec, cmp_res & 4, &ai->erase); if (err) return err; aeb->ec = ec; aeb->pnum = pnum; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = ((cmp_res & 2) || bitflips); aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum == lnum) av->last_data_size = be32_to_cpu(vid_hdr->data_size); return 0; } else { /* * This logical eraseblock is older than the one found * previously. */ return add_to_list(ai, pnum, vol_id, lnum, ec, cmp_res & 4, &ai->erase); } } /* * We've met this logical eraseblock for the first time, add it to the * attaching information. 
*/ err = validate_vid_hdr(vid_hdr, av, pnum); if (err) return err; aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return -ENOMEM; aeb->ec = ec; aeb->pnum = pnum; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = bitflips; aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum <= lnum) { av->highest_lnum = lnum; av->last_data_size = be32_to_cpu(vid_hdr->data_size); } av->leb_count += 1; rb_link_node(&aeb->u.rb, parent, p); rb_insert_color(&aeb->u.rb, &av->root); return 0; } /** * ubi_find_av - find volume in the attaching information. * @ai: attaching information * @vol_id: the requested volume ID * * This function returns a pointer to the volume description or %NULL if there * are no data about this volume in the attaching information. */ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, int vol_id) { struct ubi_ainf_volume *av; struct rb_node *p = ai->volumes.rb_node; while (p) { av = rb_entry(p, struct ubi_ainf_volume, rb); if (vol_id == av->vol_id) return av; if (vol_id > av->vol_id) p = p->rb_left; else p = p->rb_right; } return NULL; } /** * ubi_remove_av - delete attaching information about a volume. * @ai: attaching information * @av: the volume attaching information to delete */ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) { struct rb_node *rb; struct ubi_ainf_peb *aeb; dbg_bld("remove attaching information about volume %d", av->vol_id); while ((rb = rb_first(&av->root))) { aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); rb_erase(&aeb->u.rb, &av->root); list_add_tail(&aeb->u.list, &ai->erase); } rb_erase(&av->rb, &ai->volumes); kfree(av); ai->vols_found -= 1; } /** * early_erase_peb - erase a physical eraseblock. 
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: physical eraseblock number to erase;
 * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
 *
 * This function erases physical eraseblock 'pnum', and writes the erase
 * counter header to it. This function should only be used on UBI device
 * initialization stages, when the EBA sub-system had not been yet initialized.
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int early_erase_peb(struct ubi_device *ubi,
			   const struct ubi_attach_info *ai, int pnum, int ec)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;

	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
		return -EINVAL;
	}

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_sync_erase(ubi, pnum, 0);
	if (err < 0)
		goto out_free;

	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * ubi_early_get_peb - get a free physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns a free physical eraseblock. It is supposed to be
 * called on the UBI initialization stages when the wear-leveling sub-system is
 * not initialized yet. This function picks a physical eraseblocks from one of
 * the lists, writes the EC header if it is needed, and removes it from the
 * list.
 *
 * This function returns a pointer to the "aeb" of the found free PEB in case
 * of success and an error code in case of failure.
 */
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
				       struct ubi_attach_info *ai)
{
	int err = 0;
	struct ubi_ainf_peb *aeb, *tmp_aeb;

	/* Fast path: a known-free PEB needs no erase. */
	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	/*
	 * We try to erase the first physical eraseblock from the erase list
	 * and pick it if we succeed, or try to erase the next one if not. And
	 * so forth. We don't want to take care about bad eraseblocks here -
	 * they'll be handled later.
	 */
	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
		if (err)
			continue;

		aeb->ec += 1;
		list_del(&aeb->u.list);
		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	ubi_err("no free eraseblocks");
	return ERR_PTR(-ENOSPC);
}

/**
 * check_corruption - check the data area of PEB.
 * @ubi: UBI device description object
 * @vid_hdr: the (corrupted) VID header of this PEB
 * @pnum: the physical eraseblock number to check
 *
 * This is a helper function which is used to distinguish between VID header
 * corruptions caused by power cuts and other reasons. If the PEB contains only
 * 0xFF bytes in the data area, the VID header is most probably corrupted
 * because of a power cut (%0 is returned in this case). Otherwise, it was
 * probably corrupted for some other reasons (%1 is returned in this case). A
 * negative error code is returned if a read error occurred.
 *
 * If the corruption reason was a power cut, UBI can safely erase this PEB.
 * Otherwise, it should preserve it to avoid possibly destroying important
 * information.
 */
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
			    int pnum)
{
	int err;

	/* ubi->peb_buf is a shared scratch buffer, hence the mutex. */
	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf, 0x00, ubi->leb_size);

	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
			  ubi->leb_size);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or integrity errors while reading the data area.
		 * It is difficult to say for sure what type of corruption is
		 * this, but presumably a power cut happened while this PEB was
		 * erased, so it became unstable and corrupted, and should be
		 * erased.
		 */
		err = 0;
		goto out_unlock;
	}

	if (err)
		goto out_unlock;

	/* All-0xFF data area: classic power-cut signature, safe to erase. */
	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
		goto out_unlock;

	ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
		pnum);
	ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
	ubi_dump_vid_hdr(vid_hdr);
	pr_err("hexdump of PEB %d offset %d, length %d",
	       pnum, ubi->leb_start, ubi->leb_size);
	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       ubi->peb_buf, ubi->leb_size, 1);
	err = 1;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}

/**
 * scan_peb - scan and process UBI headers of a PEB.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @vid: The volume ID of the found volume will be stored in this pointer
 * @sqnum: The sqnum of the found volume will be stored in this pointer
 *
 * This function reads UBI headers of PEB @pnum, checks them, and adds
 * information about this PEB to the corresponding list or RB-tree in the
 * "attaching info" structure. Returns zero if the physical eraseblock was
 * successfully handled and a negative error code in case of failure.
 */
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int pnum, int *vid, unsigned long long *sqnum)
{
	long long uninitialized_var(ec);
	int err, bitflips = 0, vol_id = -1, ec_err = 0;

	dbg_bld("scan PEB %d", pnum);

	/* Skip bad physical eraseblocks */
	err = ubi_io_is_bad(ubi, pnum);
	if (err < 0)
		return err;
	else if (err) {
		ai->bad_peb_count += 1;
		return 0;
	}

	/*
	 * NOTE(review): 'ech' and 'vidh' appear to be file-scope scratch
	 * buffers allocated by the callers (scan_all()/scan_fast()); their
	 * declarations are outside this chunk - confirm.
	 */
	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_FF:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 0, &ai->erase);
	case UBI_IO_FF_BITFLIPS:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 1, &ai->erase);
	case UBI_IO_BAD_HDR_EBADMSG:
	case UBI_IO_BAD_HDR:
		/*
		 * We have to also look at the VID header, possibly it is not
		 * corrupted. Set %bitflips flag in order to make this PEB be
		 * moved and EC be re-created.
		 */
		ec_err = err;
		ec = UBI_UNKNOWN;
		bitflips = 1;
		break;
	default:
		ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	if (!ec_err) {
		int image_seq;

		/* Make sure UBI version is OK */
		if (ech->version != UBI_VERSION) {
			ubi_err("this UBI version is %d, image version is %d",
				UBI_VERSION, (int)ech->version);
			return -EINVAL;
		}

		ec = be64_to_cpu(ech->ec);
		if (ec > UBI_MAX_ERASECOUNTER) {
			/*
			 * Erase counter overflow. The EC headers have 64 bits
			 * reserved, but we anyway make use of only 31 bit
			 * values, as this seems to be enough for any existing
			 * flash. Upgrade UBI and use 64-bit erase counters
			 * internally.
			 */
			ubi_err("erase counter overflow, max is %d",
				UBI_MAX_ERASECOUNTER);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}

		/*
		 * Make sure that all PEBs have the same image sequence number.
		 * This allows us to detect situations when users flash UBI
		 * images incorrectly, so that the flash has the new UBI image
		 * and leftovers from the old one. This feature was added
		 * relatively recently, and the sequence number was always
		 * zero, because old UBI implementations always set it to zero.
		 * For this reasons, we do not panic if some PEBs have zero
		 * sequence number, while other PEBs have non-zero sequence
		 * number.
		 */
		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq && image_seq)
			ubi->image_seq = image_seq;
		if (ubi->image_seq && image_seq &&
		    ubi->image_seq != image_seq) {
			ubi_err("bad image sequence number %d in PEB %d, expected %d",
				image_seq, pnum, ubi->image_seq);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}
	}

	/* OK, we've done with the EC header, let's look at the VID header */

	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_BAD_HDR_EBADMSG:
		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
			/*
			 * Both EC and VID headers are corrupted and were read
			 * with data integrity error, probably this is a bad
			 * PEB, bit it is not marked as bad yet. This may also
			 * be a result of power cut during erasure.
			 */
			ai->maybe_bad_peb_count += 1;
		/* fall through */
	case UBI_IO_BAD_HDR:
		if (ec_err)
			/*
			 * Both headers are corrupted. There is a possibility
			 * that this a valid UBI PEB which has corresponding
			 * LEB, but the headers are corrupted. However, it is
			 * impossible to distinguish it from a PEB which just
			 * contains garbage because of a power cut during erase
			 * operation. So we just schedule this PEB for erasure.
			 *
			 * Besides, in case of NOR flash, we deliberately
			 * corrupt both headers because NOR flash erasure is
			 * slow and can start from the end.
			 */
			err = 0;
		else
			/*
			 * The EC was OK, but the VID header is corrupted. We
			 * have to check what is in the data area.
			 */
			err = check_corruption(ubi, vidh, pnum);

		if (err < 0)
			return err;
		else if (!err)
			/* This corruption is caused by a power cut */
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			/* This is an unexpected corruption */
			err = add_corrupted(ai, pnum, ec);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF_BITFLIPS:
		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				  ec, 1, &ai->erase);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF:
		if (ec_err || bitflips)
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 0, &ai->free);
		if (err)
			return err;
		goto adjust_mean_ec;
	default:
		ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	vol_id = be32_to_cpu(vidh->vol_id);
	if (vid)
		*vid = vol_id;
	if (sqnum)
		*sqnum = be64_to_cpu(vidh->sqnum);
	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
		int lnum = be32_to_cpu(vidh->lnum);

		/* Unsupported internal volume */
		switch (vidh->compat) {
		case UBI_COMPAT_DELETE:
			if (vol_id != UBI_FM_SB_VOLUME_ID &&
			    vol_id != UBI_FM_DATA_VOLUME_ID) {
				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
					vol_id, lnum);
			}
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 1, &ai->erase);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_RO:
			ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
				vol_id, lnum);
			ubi->ro_mode = 1;
			break;

		case UBI_COMPAT_PRESERVE:
			ubi_msg("\"preserve\" compatible internal volume %d:%d found",
				vol_id, lnum);
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 0, &ai->alien);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_REJECT:
			ubi_err("incompatible internal volume %d:%d found",
				vol_id, lnum);
			return -EINVAL;
		}
	}

	if (ec_err)
		ubi_warn("valid VID header but corrupted EC header at PEB %d",
			 pnum);
	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
	if (err)
		return err;

adjust_mean_ec:
	/* Only PEBs with a trusted EC header feed the EC statistics. */
	if (!ec_err) {
		ai->ec_sum += ec;
		ai->ec_count += 1;
		if (ec > ai->max_ec)
			ai->max_ec = ec;
		if (ec < ai->min_ec)
			ai->min_ec = ec;
	}

	return 0;
}

/**
 * late_analysis - analyze the overall situation with PEB.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function which takes a look what PEBs we have after we
 * gather information about all of them ("ai" is compete). It decides whether
 * the flash is empty and should be formatted of whether there are too many
 * corrupted PEBs and we should not attach this MTD device. Returns zero if we
 * should proceed with attaching the MTD device, and %-EINVAL if we should not.
 */
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	int max_corr, peb_count;

	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
	/* Tolerate up to 5% corrupted PEBs, but never fewer than 8. */
	max_corr = peb_count / 20 ?: 8;

	/*
	 * Few corrupted PEBs is not a problem and may be just a result of
	 * unclean reboots. However, many of them may indicate some problems
	 * with the flash HW or driver.
	 */
	if (ai->corr_peb_count) {
		ubi_err("%d PEBs are corrupted and preserved",
			ai->corr_peb_count);
		pr_err("Corrupted PEBs are:");
		list_for_each_entry(aeb, &ai->corr, u.list)
			pr_cont(" %d", aeb->pnum);
		pr_cont("\n");

		/*
		 * If too many PEBs are corrupted, we refuse attaching,
		 * otherwise, only print a warning.
		 */
		if (ai->corr_peb_count >= max_corr) {
			ubi_err("too many corrupted PEBs, refusing");
			return -EINVAL;
		}
	}

	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
		/*
		 * All PEBs are empty, or almost all - a couple PEBs look like
		 * they may be bad PEBs which were not marked as bad yet.
		 *
		 * This piece of code basically tries to distinguish between
		 * the following situations:
		 *
		 * 1. Flash is empty, but there are few bad PEBs, which are not
		 *    marked as bad so far, and which were read with error. We
		 *    want to go ahead and format this flash. While formatting,
		 *    the faulty PEBs will probably be marked as bad.
		 *
		 * 2. Flash contains non-UBI data and we do not want to format
		 *    it and destroy possibly important information.
		 */
		if (ai->maybe_bad_peb_count <= 2) {
			ai->is_empty = 1;
			ubi_msg("empty MTD device detected");
			get_random_bytes(&ubi->image_seq,
					 sizeof(ubi->image_seq));
		} else {
			ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * destroy_av - free volume attaching information.
 * @av: volume attaching information
 * @ai: attaching information
 *
 * This function destroys the volume attaching information.
 */
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	/*
	 * Free the tree bottom-up without rb_erase(): descend to a leaf,
	 * detach it from its parent by hand, free it, and repeat.
	 */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}

/**
 * destroy_ai - destroy attaching information.
 * @ai: attaching information
 */
static void destroy_ai(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb, *aeb_tmp;
	struct ubi_ainf_volume *av;
	struct rb_node *rb;

	/* Drain the four PEB lists, freeing each element. */
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}

	/* Destroy the volume RB-tree */
	rb = ai->volumes.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			av = rb_entry(rb, struct ubi_ainf_volume, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &av->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			destroy_av(ai, av);
		}
	}

	if (ai->aeb_slab_cache)
		kmem_cache_destroy(ai->aeb_slab_cache);

	kfree(ai);
}

/**
 * scan_all - scan entire MTD device.
 * @ubi: UBI device description object
 * @ai: attach info object
 * @start: start scanning at this PEB
 *
 * This function does full scanning of an MTD device and returns complete
 * information about it in form of a "struct ubi_attach_info" object. In case
 * of failure, an error code is returned.
 */
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int start)
{
	int err, pnum;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;

	err = -ENOMEM;

	/*
	 * NOTE(review): 'ech' and 'vidh' look like file-scope scratch buffers
	 * shared with scan_peb(); declarations are outside this chunk.
	 */
	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return err;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = start; pnum < ubi->peb_count; pnum++) {
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, NULL, NULL);
		if (err < 0)
			goto out_vidh;
	}

	ubi_msg("scanning is finished");

	/* Calculate mean erase counter */
	if (ai->ec_count)
		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);

	err = late_analysis(ubi, ai);
	if (err)
		goto out_vidh;

	/*
	 * In case of unknown erase counter we use the mean erase counter
	 * value.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			if (aeb->ec == UBI_UNKNOWN)
				aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->corr, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	list_for_each_entry(aeb, &ai->erase, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	err = self_check_ai(ubi, ai);
	if (err)
		goto out_vidh;

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	return 0;

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP

/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info object
 *
 * Returns 0 on success, negative return values indicate an internal
 * error.
 * UBI_NO_FASTMAP denotes that no fastmap was found.
 * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai) { int err, pnum, fm_anchor = -1; unsigned long long max_sqnum = 0; err = -ENOMEM; ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) goto out; vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vidh) goto out_ech; for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { int vol_id = -1; unsigned long long sqnum = -1; cond_resched(); dbg_gen("process PEB %d", pnum); err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum); if (err < 0) goto out_vidh; if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) { max_sqnum = sqnum; fm_anchor = pnum; } } ubi_free_vid_hdr(ubi, vidh); kfree(ech); if (fm_anchor < 0) return UBI_NO_FASTMAP; return ubi_scan_fastmap(ubi, ai, fm_anchor); out_vidh: ubi_free_vid_hdr(ubi, vidh); out_ech: kfree(ech); out: return err; } #endif static struct ubi_attach_info *alloc_ai(const char *slab_name) { struct ubi_attach_info *ai; ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL); if (!ai) return ai; INIT_LIST_HEAD(&ai->corr); INIT_LIST_HEAD(&ai->free); INIT_LIST_HEAD(&ai->erase); INIT_LIST_HEAD(&ai->alien); ai->volumes = RB_ROOT; ai->aeb_slab_cache = kmem_cache_create(slab_name, sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { kfree(ai); ai = NULL; } return ai; } /** * ubi_attach - attach an MTD device. * @ubi: UBI device descriptor * @force_scan: if set to non-zero attach by scanning * * This function returns zero in case of success and a negative error code in * case of failure. */ int ubi_attach(struct ubi_device *ubi, int force_scan) { int err; struct ubi_attach_info *ai; ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; #ifdef CONFIG_MTD_UBI_FASTMAP /* On small flash devices we disable fastmap in any case. 
*/ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) { ubi->fm_disabled = 1; force_scan = 1; } if (force_scan) err = scan_all(ubi, ai, 0); else { err = scan_fast(ubi, ai); if (err > 0) { if (err != UBI_NO_FASTMAP) { destroy_ai(ai); ai = alloc_ai("ubi_aeb_slab_cache2"); if (!ai) return -ENOMEM; } err = scan_all(ubi, ai, UBI_FM_MAX_START); } } #else err = scan_all(ubi, ai, 0); #endif if (err) goto out_ai; ubi->bad_peb_count = ai->bad_peb_count; ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; ubi->corr_peb_count = ai->corr_peb_count; ubi->max_ec = ai->max_ec; ubi->mean_ec = ai->mean_ec; dbg_gen("max. sequence number: %llu", ai->max_sqnum); err = ubi_read_volume_table(ubi, ai); if (err) goto out_ai; err = ubi_wl_init(ubi, ai); if (err) goto out_vtbl; err = ubi_eba_init(ubi, ai); if (err) goto out_wl; #ifdef CONFIG_MTD_UBI_FASTMAP if (ubi->fm && ubi_dbg_chk_gen(ubi)) { struct ubi_attach_info *scan_ai; scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache"); if (!scan_ai) goto out_wl; err = scan_all(ubi, scan_ai, 0); if (err) { destroy_ai(scan_ai); goto out_wl; } err = self_check_eba(ubi, ai, scan_ai); destroy_ai(scan_ai); if (err) goto out_wl; } #endif destroy_ai(ai); return 0; out_wl: ubi_wl_close(ubi); out_vtbl: ubi_free_internal_volumes(ubi); vfree(ubi->vtbl); out_ai: destroy_ai(ai); return err; } /** * self_check_ai - check the attaching information. * @ubi: UBI device description object * @ai: attaching information * * This function returns zero if the attaching information is all right, and a * negative error code if not or if an error occurred. */ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) { int pnum, err, vols_found = 0; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *last_aeb; uint8_t *buf; if (!ubi_dbg_chk_gen(ubi)) return 0; /* * At first, check that attaching information is OK. 
*/ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { int leb_count = 0; cond_resched(); vols_found += 1; if (ai->is_empty) { ubi_err("bad is_empty flag"); goto bad_av; } if (av->vol_id < 0 || av->highest_lnum < 0 || av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || av->data_pad < 0 || av->last_data_size < 0) { ubi_err("negative values"); goto bad_av; } if (av->vol_id >= UBI_MAX_VOLUMES && av->vol_id < UBI_INTERNAL_VOL_START) { ubi_err("bad vol_id"); goto bad_av; } if (av->vol_id > ai->highest_vol_id) { ubi_err("highest_vol_id is %d, but vol_id %d is there", ai->highest_vol_id, av->vol_id); goto out; } if (av->vol_type != UBI_DYNAMIC_VOLUME && av->vol_type != UBI_STATIC_VOLUME) { ubi_err("bad vol_type"); goto bad_av; } if (av->data_pad > ubi->leb_size / 2) { ubi_err("bad data_pad"); goto bad_av; } last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { cond_resched(); last_aeb = aeb; leb_count += 1; if (aeb->pnum < 0 || aeb->ec < 0) { ubi_err("negative values"); goto bad_aeb; } if (aeb->ec < ai->min_ec) { ubi_err("bad ai->min_ec (%d), %d found", ai->min_ec, aeb->ec); goto bad_aeb; } if (aeb->ec > ai->max_ec) { ubi_err("bad ai->max_ec (%d), %d found", ai->max_ec, aeb->ec); goto bad_aeb; } if (aeb->pnum >= ubi->peb_count) { ubi_err("too high PEB number %d, total PEBs %d", aeb->pnum, ubi->peb_count); goto bad_aeb; } if (av->vol_type == UBI_STATIC_VOLUME) { if (aeb->lnum >= av->used_ebs) { ubi_err("bad lnum or used_ebs"); goto bad_aeb; } } else { if (av->used_ebs != 0) { ubi_err("non-zero used_ebs"); goto bad_aeb; } } if (aeb->lnum > av->highest_lnum) { ubi_err("incorrect highest_lnum or lnum"); goto bad_aeb; } } if (av->leb_count != leb_count) { ubi_err("bad leb_count, %d objects in the tree", leb_count); goto bad_av; } if (!last_aeb) continue; aeb = last_aeb; if (aeb->lnum != av->highest_lnum) { ubi_err("bad highest_lnum"); goto bad_aeb; } } if (vols_found != ai->vols_found) { ubi_err("bad ai->vols_found %d, should be %d", ai->vols_found, 
vols_found); goto out; } /* Check that attaching information is correct */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { int vol_type; cond_resched(); last_aeb = aeb; err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); if (err && err != UBI_IO_BITFLIPS) { ubi_err("VID header is not OK (%d)", err); if (err > 0) err = -EIO; return err; } vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; if (av->vol_type != vol_type) { ubi_err("bad vol_type"); goto bad_vid_hdr; } if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { ubi_err("bad sqnum %llu", aeb->sqnum); goto bad_vid_hdr; } if (av->vol_id != be32_to_cpu(vidh->vol_id)) { ubi_err("bad vol_id %d", av->vol_id); goto bad_vid_hdr; } if (av->compat != vidh->compat) { ubi_err("bad compat %d", vidh->compat); goto bad_vid_hdr; } if (aeb->lnum != be32_to_cpu(vidh->lnum)) { ubi_err("bad lnum %d", aeb->lnum); goto bad_vid_hdr; } if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { ubi_err("bad used_ebs %d", av->used_ebs); goto bad_vid_hdr; } if (av->data_pad != be32_to_cpu(vidh->data_pad)) { ubi_err("bad data_pad %d", av->data_pad); goto bad_vid_hdr; } } if (!last_aeb) continue; if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { ubi_err("bad highest_lnum %d", av->highest_lnum); goto bad_vid_hdr; } if (av->last_data_size != be32_to_cpu(vidh->data_size)) { ubi_err("bad last_data_size %d", av->last_data_size); goto bad_vid_hdr; } } /* * Make sure that all the physical eraseblocks are in one of the lists * or trees. 
*/ buf = kzalloc(ubi->peb_count, GFP_KERNEL); if (!buf) return -ENOMEM; for (pnum = 0; pnum < ubi->peb_count; pnum++) { err = ubi_io_is_bad(ubi, pnum); if (err < 0) { kfree(buf); return err; } else if (err) buf[pnum] = 1; } ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->free, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->corr, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->erase, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->alien, u.list) buf[aeb->pnum] = 1; err = 0; for (pnum = 0; pnum < ubi->peb_count; pnum++) if (!buf[pnum]) { ubi_err("PEB %d is not referred", pnum); err = 1; } kfree(buf); if (err) goto out; return 0; bad_aeb: ubi_err("bad attaching information about LEB %d", aeb->lnum); ubi_dump_aeb(aeb, 0); ubi_dump_av(av); goto out; bad_av: ubi_err("bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); goto out; bad_vid_hdr: ubi_err("bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); ubi_dump_vid_hdr(vidh); out: dump_stack(); return -EINVAL; }
gpl-2.0
supercairos/android_kernel_doro_msm8916_2
drivers/staging/iio/adc/ad7606_par.c
2260
3808
/* * AD7606 Parallel Interface ADC driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/err.h> #include <linux/io.h> #include <linux/iio/iio.h> #include "ad7606.h" static int ad7606_par16_read_block(struct device *dev, int count, void *buf) { struct platform_device *pdev = to_platform_device(dev); struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct ad7606_state *st = iio_priv(indio_dev); insw((unsigned long) st->base_address, buf, count); return 0; } static const struct ad7606_bus_ops ad7606_par16_bops = { .read_block = ad7606_par16_read_block, }; static int ad7606_par8_read_block(struct device *dev, int count, void *buf) { struct platform_device *pdev = to_platform_device(dev); struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct ad7606_state *st = iio_priv(indio_dev); insb((unsigned long) st->base_address, buf, count * 2); return 0; } static const struct ad7606_bus_ops ad7606_par8_bops = { .read_block = ad7606_par8_read_block, }; static int ad7606_par_probe(struct platform_device *pdev) { struct resource *res; struct iio_dev *indio_dev; void __iomem *addr; resource_size_t remap_size; int ret, irq; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; remap_size = resource_size(res); /* Request the regions */ if (!request_mem_region(res->start, remap_size, "iio-ad7606")) { ret = -EBUSY; goto out1; } addr = ioremap(res->start, remap_size); if (!addr) { ret = -ENOMEM; goto out1; } indio_dev = ad7606_probe(&pdev->dev, irq, addr, platform_get_device_id(pdev)->driver_data, remap_size > 1 ? 
&ad7606_par16_bops : &ad7606_par8_bops); if (IS_ERR(indio_dev)) { ret = PTR_ERR(indio_dev); goto out2; } platform_set_drvdata(pdev, indio_dev); return 0; out2: iounmap(addr); out1: release_mem_region(res->start, remap_size); return ret; } static int ad7606_par_remove(struct platform_device *pdev) { struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct resource *res; struct ad7606_state *st = iio_priv(indio_dev); ad7606_remove(indio_dev, platform_get_irq(pdev, 0)); iounmap(st->base_address); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int ad7606_par_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); ad7606_suspend(indio_dev); return 0; } static int ad7606_par_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); ad7606_resume(indio_dev); return 0; } static const struct dev_pm_ops ad7606_pm_ops = { .suspend = ad7606_par_suspend, .resume = ad7606_par_resume, }; #define AD7606_PAR_PM_OPS (&ad7606_pm_ops) #else #define AD7606_PAR_PM_OPS NULL #endif /* CONFIG_PM */ static struct platform_device_id ad7606_driver_ids[] = { { .name = "ad7606-8", .driver_data = ID_AD7606_8, }, { .name = "ad7606-6", .driver_data = ID_AD7606_6, }, { .name = "ad7606-4", .driver_data = ID_AD7606_4, }, { } }; MODULE_DEVICE_TABLE(platform, ad7606_driver_ids); static struct platform_driver ad7606_driver = { .probe = ad7606_par_probe, .remove = ad7606_par_remove, .id_table = ad7606_driver_ids, .driver = { .name = "ad7606", .owner = THIS_MODULE, .pm = AD7606_PAR_PM_OPS, }, }; module_platform_driver(ad7606_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7606 ADC"); MODULE_LICENSE("GPL v2");
gpl-2.0
dmeadows013/furry-hipster
drivers/hwmon/k8temp.c
3028
9588
/* * k8temp.c - Linux kernel module for hardware monitoring * * Copyright (C) 2006 Rudolf Marek <r.marek@assembler.cz> * * Inspired from the w83785 and amd756 drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/pci.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <asm/processor.h> #define TEMP_FROM_REG(val) (((((val) >> 16) & 0xff) - 49) * 1000) #define REG_TEMP 0xe4 #define SEL_PLACE 0x40 #define SEL_CORE 0x04 struct k8temp_data { struct device *hwmon_dev; struct mutex update_lock; const char *name; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* registers values */ u8 sensorsp; /* sensor presence bits - SEL_CORE & SEL_PLACE */ u32 temp[2][2]; /* core, place */ u8 swap_core_select; /* meaning of SEL_CORE is inverted */ u32 temp_offset; }; static struct k8temp_data *k8temp_update_device(struct device *dev) { struct k8temp_data *data = dev_get_drvdata(dev); struct pci_dev *pdev = to_pci_dev(dev); u8 tmp; mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_updated + HZ)) { pci_read_config_byte(pdev, REG_TEMP, &tmp); tmp &= 
~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */ pci_write_config_byte(pdev, REG_TEMP, tmp); pci_read_config_dword(pdev, REG_TEMP, &data->temp[0][0]); if (data->sensorsp & SEL_PLACE) { tmp |= SEL_PLACE; /* Select sensor 1, core0 */ pci_write_config_byte(pdev, REG_TEMP, tmp); pci_read_config_dword(pdev, REG_TEMP, &data->temp[0][1]); } if (data->sensorsp & SEL_CORE) { tmp &= ~SEL_PLACE; /* Select sensor 0, core1 */ tmp |= SEL_CORE; pci_write_config_byte(pdev, REG_TEMP, tmp); pci_read_config_dword(pdev, REG_TEMP, &data->temp[1][0]); if (data->sensorsp & SEL_PLACE) { tmp |= SEL_PLACE; /* Select sensor 1, core1 */ pci_write_config_byte(pdev, REG_TEMP, tmp); pci_read_config_dword(pdev, REG_TEMP, &data->temp[1][1]); } } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Sysfs stuff */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct k8temp_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); int core = attr->nr; int place = attr->index; int temp; struct k8temp_data *data = k8temp_update_device(dev); if (data->swap_core_select && (data->sensorsp & SEL_CORE)) core = core ? 
0 : 1; temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset; return sprintf(buf, "%d\n", temp); } /* core, place */ static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0); static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1); static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0); static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static const struct pci_device_id k8temp_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, k8temp_ids); static int __devinit is_rev_g_desktop(u8 model) { u32 brandidx; if (model < 0x69) return 0; if (model == 0xc1 || model == 0x6c || model == 0x7c) return 0; /* * Differentiate between AM2 and ASB1. * See "Constructing the processor Name String" in "Revision * Guide for AMD NPT Family 0Fh Processors" (33610). */ brandidx = cpuid_ebx(0x80000001); brandidx = (brandidx >> 9) & 0x1f; /* Single core */ if ((model == 0x6f || model == 0x7f) && (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc)) return 0; /* Dual core */ if (model == 0x6b && (brandidx == 0xb || brandidx == 0xc)) return 0; return 1; } static int __devinit k8temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; u8 scfg; u32 temp; u8 model, stepping; struct k8temp_data *data; if (!(data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } model = boot_cpu_data.x86_model; stepping = boot_cpu_data.x86_mask; /* feature available since SH-C0, exclude older revisions */ if (((model == 4) && (stepping == 0)) || ((model == 5) && (stepping <= 1))) { err = -ENODEV; goto exit_free; } /* * AMD NPT family 0fh, i.e. RevF and RevG: * meaning of SEL_CORE bit is inverted */ if (model >= 0x40) { data->swap_core_select = 1; dev_warn(&pdev->dev, "Temperature readouts might be wrong - " "check erratum #141\n"); } /* * RevG desktop CPUs (i.e. 
no socket S1G1 or ASB1 parts) need * additional offset, otherwise reported temperature is below * ambient temperature */ if (is_rev_g_desktop(model)) data->temp_offset = 21000; pci_read_config_byte(pdev, REG_TEMP, &scfg); scfg &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */ pci_write_config_byte(pdev, REG_TEMP, scfg); pci_read_config_byte(pdev, REG_TEMP, &scfg); if (scfg & (SEL_PLACE | SEL_CORE)) { dev_err(&pdev->dev, "Configuration bit(s) stuck at 1!\n"); err = -ENODEV; goto exit_free; } scfg |= (SEL_PLACE | SEL_CORE); pci_write_config_byte(pdev, REG_TEMP, scfg); /* now we know if we can change core and/or sensor */ pci_read_config_byte(pdev, REG_TEMP, &data->sensorsp); if (data->sensorsp & SEL_PLACE) { scfg &= ~SEL_CORE; /* Select sensor 1, core0 */ pci_write_config_byte(pdev, REG_TEMP, scfg); pci_read_config_dword(pdev, REG_TEMP, &temp); scfg |= SEL_CORE; /* prepare for next selection */ if (!((temp >> 16) & 0xff)) /* if temp is 0 -49C is not likely */ data->sensorsp &= ~SEL_PLACE; } if (data->sensorsp & SEL_CORE) { scfg &= ~SEL_PLACE; /* Select sensor 0, core1 */ pci_write_config_byte(pdev, REG_TEMP, scfg); pci_read_config_dword(pdev, REG_TEMP, &temp); if (!((temp >> 16) & 0xff)) /* if temp is 0 -49C is not likely */ data->sensorsp &= ~SEL_CORE; } data->name = "k8temp"; mutex_init(&data->update_lock); pci_set_drvdata(pdev, data); /* Register sysfs hooks */ err = device_create_file(&pdev->dev, &sensor_dev_attr_temp1_input.dev_attr); if (err) goto exit_remove; /* sensor can be changed and reports something */ if (data->sensorsp & SEL_PLACE) { err = device_create_file(&pdev->dev, &sensor_dev_attr_temp2_input.dev_attr); if (err) goto exit_remove; } /* core can be changed and reports something */ if (data->sensorsp & SEL_CORE) { err = device_create_file(&pdev->dev, &sensor_dev_attr_temp3_input.dev_attr); if (err) goto exit_remove; if (data->sensorsp & SEL_PLACE) { err = device_create_file(&pdev->dev, &sensor_dev_attr_temp4_input. 
dev_attr); if (err) goto exit_remove; } } err = device_create_file(&pdev->dev, &dev_attr_name); if (err) goto exit_remove; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp2_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp3_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp4_input.dev_attr); device_remove_file(&pdev->dev, &dev_attr_name); exit_free: pci_set_drvdata(pdev, NULL); kfree(data); exit: return err; } static void __devexit k8temp_remove(struct pci_dev *pdev) { struct k8temp_data *data = pci_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp2_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp3_input.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp4_input.dev_attr); device_remove_file(&pdev->dev, &dev_attr_name); pci_set_drvdata(pdev, NULL); kfree(data); } static struct pci_driver k8temp_driver = { .name = "k8temp", .id_table = k8temp_ids, .probe = k8temp_probe, .remove = __devexit_p(k8temp_remove), }; static int __init k8temp_init(void) { return pci_register_driver(&k8temp_driver); } static void __exit k8temp_exit(void) { pci_unregister_driver(&k8temp_driver); } MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>"); MODULE_DESCRIPTION("AMD K8 core temperature monitor"); MODULE_LICENSE("GPL"); module_init(k8temp_init) module_exit(k8temp_exit)
gpl-2.0
ThePlayground/android_kernel_htc_shooter
drivers/isdn/mISDN/layer2.c
3028
50933
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/mISDNif.h> #include <linux/slab.h> #include "core.h" #include "fsm.h" #include "layer2.h" static u_int *debug; static struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL}; static char *strL2State[] = { "ST_L2_1", "ST_L2_2", "ST_L2_3", "ST_L2_4", "ST_L2_5", "ST_L2_6", "ST_L2_7", "ST_L2_8", }; enum { EV_L2_UI, EV_L2_SABME, EV_L2_DISC, EV_L2_DM, EV_L2_UA, EV_L2_FRMR, EV_L2_SUPER, EV_L2_I, EV_L2_DL_DATA, EV_L2_ACK_PULL, EV_L2_DL_UNITDATA, EV_L2_DL_ESTABLISH_REQ, EV_L2_DL_RELEASE_REQ, EV_L2_MDL_ASSIGN, EV_L2_MDL_REMOVE, EV_L2_MDL_ERROR, EV_L1_DEACTIVATE, EV_L2_T200, EV_L2_T203, EV_L2_SET_OWN_BUSY, EV_L2_CLEAR_OWN_BUSY, EV_L2_FRAME_ERROR, }; #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1) static char *strL2Event[] = { "EV_L2_UI", "EV_L2_SABME", "EV_L2_DISC", "EV_L2_DM", "EV_L2_UA", "EV_L2_FRMR", "EV_L2_SUPER", "EV_L2_I", "EV_L2_DL_DATA", "EV_L2_ACK_PULL", "EV_L2_DL_UNITDATA", "EV_L2_DL_ESTABLISH_REQ", "EV_L2_DL_RELEASE_REQ", "EV_L2_MDL_ASSIGN", "EV_L2_MDL_REMOVE", "EV_L2_MDL_ERROR", "EV_L1_DEACTIVATE", "EV_L2_T200", "EV_L2_T203", "EV_L2_SET_OWN_BUSY", "EV_L2_CLEAR_OWN_BUSY", "EV_L2_FRAME_ERROR", }; static void l2m_debug(struct FsmInst *fi, char *fmt, ...) 
{ struct layer2 *l2 = fi->userdata; struct va_format vaf; va_list va; if (!(*debug & DEBUG_L2_FSM)) return; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n", l2->sapi, l2->tei, &vaf); va_end(va); } inline u_int l2headersize(struct layer2 *l2, int ui) { return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) + (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1); } inline u_int l2addrsize(struct layer2 *l2) { return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1; } static u_int l2_newid(struct layer2 *l2) { u_int id; id = l2->next_id++; if (id == 0x7fff) l2->next_id = 1; id <<= 16; id |= l2->tei << 8; id |= l2->sapi; return id; } static void l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb) { int err; if (!l2->up) return; mISDN_HEAD_PRIM(skb) = prim; mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; err = l2->up->send(l2->up, skb); if (err) { printk(KERN_WARNING "%s: err=%d\n", __func__, err); dev_kfree_skb(skb); } } static void l2up_create(struct layer2 *l2, u_int prim, int len, void *arg) { struct sk_buff *skb; struct mISDNhead *hh; int err; if (!l2->up) return; skb = mI_alloc_skb(len, GFP_ATOMIC); if (!skb) return; hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = (l2->ch.nr << 16) | l2->ch.addr; if (len) memcpy(skb_put(skb, len), arg, len); err = l2->up->send(l2->up, skb); if (err) { printk(KERN_WARNING "%s: err=%d\n", __func__, err); dev_kfree_skb(skb); } } static int l2down_skb(struct layer2 *l2, struct sk_buff *skb) { int ret; ret = l2->ch.recv(l2->ch.peer, skb); if (ret && (*debug & DEBUG_L2_RECV)) printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret); return ret; } static int l2down_raw(struct layer2 *l2, struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); if (hh->prim == PH_DATA_REQ) { if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) { skb_queue_tail(&l2->down_queue, skb); return 0; } l2->down_id = mISDN_HEAD_ID(skb); } return l2down_skb(l2, skb); } static int l2down(struct layer2 *l2, u_int prim, u_int id, 
struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; return l2down_raw(l2, skb); } static int l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg) { struct sk_buff *skb; int err; struct mISDNhead *hh; skb = mI_alloc_skb(len, GFP_ATOMIC); if (!skb) return -ENOMEM; hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; if (len) memcpy(skb_put(skb, len), arg, len); err = l2down_raw(l2, skb); if (err) dev_kfree_skb(skb); return err; } static int ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) { struct sk_buff *nskb = skb; int ret = -EAGAIN; if (test_bit(FLG_L1_NOTREADY, &l2->flag)) { if (hh->id == l2->down_id) { nskb = skb_dequeue(&l2->down_queue); if (nskb) { l2->down_id = mISDN_HEAD_ID(nskb); if (l2down_skb(l2, nskb)) { dev_kfree_skb(nskb); l2->down_id = MISDN_ID_NONE; } } else l2->down_id = MISDN_ID_NONE; if (ret) { dev_kfree_skb(skb); ret = 0; } if (l2->down_id == MISDN_ID_NONE) { test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); } } } if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) { nskb = skb_dequeue(&l2->down_queue); if (nskb) { l2->down_id = mISDN_HEAD_ID(nskb); if (l2down_skb(l2, nskb)) { dev_kfree_skb(nskb); l2->down_id = MISDN_ID_NONE; test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); } } else test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); } return ret; } static int l2mgr(struct layer2 *l2, u_int prim, void *arg) { long c = (long)arg; printk(KERN_WARNING "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c); if (test_bit(FLG_LAPD, &l2->flag) && !test_bit(FLG_FIXED_TEI, &l2->flag)) { switch (c) { case 'C': case 'D': case 'G': case 'H': l2_tei(l2, prim, (u_long)arg); break; } } return 0; } static void set_peer_busy(struct layer2 *l2) { test_and_set_bit(FLG_PEER_BUSY, &l2->flag); if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue)) test_and_set_bit(FLG_L2BLOCK, &l2->flag); } static void clear_peer_busy(struct 
layer2 *l2) { if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag)) test_and_clear_bit(FLG_L2BLOCK, &l2->flag); } static void InitWin(struct layer2 *l2) { int i; for (i = 0; i < MAX_WINDOW; i++) l2->windowar[i] = NULL; } static int freewin(struct layer2 *l2) { int i, cnt = 0; for (i = 0; i < MAX_WINDOW; i++) { if (l2->windowar[i]) { cnt++; dev_kfree_skb(l2->windowar[i]); l2->windowar[i] = NULL; } } return cnt; } static void ReleaseWin(struct layer2 *l2) { int cnt = freewin(l2); if (cnt) printk(KERN_WARNING "isdnl2 freed %d skbuffs in release\n", cnt); } inline unsigned int cansend(struct layer2 *l2) { unsigned int p1; if (test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag); } inline void clear_exception(struct layer2 *l2) { test_and_clear_bit(FLG_ACK_PEND, &l2->flag); test_and_clear_bit(FLG_REJEXC, &l2->flag); test_and_clear_bit(FLG_OWN_BUSY, &l2->flag); clear_peer_busy(l2); } static int sethdraddr(struct layer2 *l2, u_char *header, int rsp) { u_char *ptr = header; int crbit = rsp; if (test_bit(FLG_LAPD, &l2->flag)) { if (test_bit(FLG_LAPD_NET, &l2->flag)) crbit = !crbit; *ptr++ = (l2->sapi << 2) | (crbit ? 
2 : 0); *ptr++ = (l2->tei << 1) | 1; return 2; } else { if (test_bit(FLG_ORIG, &l2->flag)) crbit = !crbit; if (crbit) *ptr++ = l2->addr.B; else *ptr++ = l2->addr.A; return 1; } } static inline void enqueue_super(struct layer2 *l2, struct sk_buff *skb) { if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb)) dev_kfree_skb(skb); } static inline void enqueue_ui(struct layer2 *l2, struct sk_buff *skb) { if (l2->tm) l2_tei(l2, MDL_STATUS_UI_IND, 0); if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb)) dev_kfree_skb(skb); } inline int IsUI(u_char *data) { return (data[0] & 0xef) == UI; } inline int IsUA(u_char *data) { return (data[0] & 0xef) == UA; } inline int IsDM(u_char *data) { return (data[0] & 0xef) == DM; } inline int IsDISC(u_char *data) { return (data[0] & 0xef) == DISC; } inline int IsRR(u_char *data, struct layer2 *l2) { if (test_bit(FLG_MOD128, &l2->flag)) return data[0] == RR; else return (data[0] & 0xf) == 1; } inline int IsSFrame(u_char *data, struct layer2 *l2) { register u_char d = *data; if (!test_bit(FLG_MOD128, &l2->flag)) d &= 0xf; return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c); } inline int IsSABME(u_char *data, struct layer2 *l2) { u_char d = data[0] & ~0x10; return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM; } inline int IsREJ(u_char *data, struct layer2 *l2) { return test_bit(FLG_MOD128, &l2->flag) ? data[0] == REJ : (data[0] & 0xf) == REJ; } inline int IsFRMR(u_char *data) { return (data[0] & 0xef) == FRMR; } inline int IsRNR(u_char *data, struct layer2 *l2) { return test_bit(FLG_MOD128, &l2->flag) ? data[0] == RNR : (data[0] & 0xf) == RNR; } static int iframe_error(struct layer2 *l2, struct sk_buff *skb) { u_int i; int rsp = *skb->data & 0x2; i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 
2 : 1); if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len < i) return 'N'; if ((skb->len - i) > l2->maxlen) return 'O'; return 0; } static int super_error(struct layer2 *l2, struct sk_buff *skb) { if (skb->len != l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1)) return 'N'; return 0; } static int unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp) { int rsp = (*skb->data & 0x2) >> 1; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp != wantrsp) return 'L'; if (skb->len != l2addrsize(l2) + 1) return 'N'; return 0; } static int UI_error(struct layer2 *l2, struct sk_buff *skb) { int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len > l2->maxlen + l2addrsize(l2) + 1) return 'O'; return 0; } static int FRMR_error(struct layer2 *l2, struct sk_buff *skb) { u_int headers = l2addrsize(l2) + 1; u_char *datap = skb->data + headers; int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (!rsp) return 'L'; if (test_bit(FLG_MOD128, &l2->flag)) { if (skb->len < headers + 5) return 'N'; else if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "FRMR information %2x %2x %2x %2x %2x", datap[0], datap[1], datap[2], datap[3], datap[4]); } else { if (skb->len < headers + 3) return 'N'; else if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "FRMR information %2x %2x %2x", datap[0], datap[1], datap[2]); } return 0; } static unsigned int legalnr(struct layer2 *l2, unsigned int nr) { if (test_bit(FLG_MOD128, &l2->flag)) return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128); else return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8); } static void setva(struct layer2 *l2, unsigned int nr) { struct sk_buff *skb; while (l2->va != nr) { l2->va++; if (test_bit(FLG_MOD128, &l2->flag)) l2->va %= 128; else l2->va %= 8; if (l2->windowar[l2->sow]) { skb_trim(l2->windowar[l2->sow], 0); skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]); l2->windowar[l2->sow] = NULL; } l2->sow 
= (l2->sow + 1) % l2->window; } skb = skb_dequeue(&l2->tmp_queue); while (skb) { dev_kfree_skb(skb); skb = skb_dequeue(&l2->tmp_queue); } } static void send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr) { u_char tmp[MAX_L2HEADER_LEN]; int i; i = sethdraddr(l2, tmp, cr); tmp[i++] = cmd; if (skb) skb_trim(skb, 0); else { skb = mI_alloc_skb(i, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "%s: can't alloc skbuff\n", __func__); return; } } memcpy(skb_put(skb, i), tmp, i); enqueue_super(l2, skb); } inline u_char get_PollFlag(struct layer2 *l2, struct sk_buff *skb) { return skb->data[l2addrsize(l2)] & 0x10; } inline u_char get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb) { u_char PF; PF = get_PollFlag(l2, skb); dev_kfree_skb(skb); return PF; } inline void start_t200(struct layer2 *l2, int i) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &l2->flag); } inline void restart_t200(struct layer2 *l2, int i) { mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &l2->flag); } inline void stop_t200(struct layer2 *l2, int i) { if (test_and_clear_bit(FLG_T200_RUN, &l2->flag)) mISDN_FsmDelTimer(&l2->t200, i); } inline void st5_dl_release_l2l3(struct layer2 *l2) { int pr; if (test_and_clear_bit(FLG_PEND_REL, &l2->flag)) pr = DL_RELEASE_CNF; else pr = DL_RELEASE_IND; l2up_create(l2, pr, 0, NULL); } inline void lapb_dl_release_l2l3(struct layer2 *l2, int f) { if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); l2up_create(l2, f, 0, NULL); } static void establishlink(struct FsmInst *fi) { struct layer2 *l2 = fi->userdata; u_char cmd; clear_exception(l2); l2->rc = 0; cmd = (test_bit(FLG_MOD128, &l2->flag) ? 
SABME : SABM) | 0x10; send_uframe(l2, NULL, cmd, CMD); mISDN_FsmDelTimer(&l2->t203, 1); restart_t200(l2, 1); test_and_clear_bit(FLG_PEND_REL, &l2->flag); freewin(l2); mISDN_FsmChangeState(fi, ST_L2_5); } static void l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'C'); else l2mgr(l2, MDL_ERROR_IND, (void *) 'D'); } static void l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'B'); else { l2mgr(l2, MDL_ERROR_IND, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } } static void l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'B'); else l2mgr(l2, MDL_ERROR_IND, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } static void l2_go_st3(struct FsmInst *fi, int event, void *arg) { dev_kfree_skb((struct sk_buff *)arg); mISDN_FsmChangeState(fi, ST_L2_3); } static void l2_mdl_assign(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; mISDN_FsmChangeState(fi, ST_L2_3); dev_kfree_skb((struct sk_buff *)arg); l2_tei(l2, MDL_ASSIGN_IND, 0); } static void l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); mISDN_FsmChangeState(fi, ST_L2_2); l2_tei(l2, MDL_ASSIGN_IND, 0); } static void l2_queue_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); } static void tx_ui(struct layer2 *l2) { struct sk_buff *skb; u_char header[MAX_L2HEADER_LEN]; int i; i = sethdraddr(l2, header, CMD); if 
(test_bit(FLG_LAPD_NET, &l2->flag)) header[1] = 0xff; /* tei 127 */ header[i++] = UI; while ((skb = skb_dequeue(&l2->ui_queue))) { memcpy(skb_push(skb, i), header, i); enqueue_ui(l2, skb); } } static void l2_send_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); tx_ui(l2); } static void l2_got_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2headersize(l2, 1)); /* * in states 1-3 for broadcast */ if (l2->tm) l2_tei(l2, MDL_STATUS_UI_IND, 0); l2up(l2, DL_UNITDATA_IND, skb); } static void l2_establish(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); dev_kfree_skb(skb); } static void l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); test_and_set_bit(FLG_L3_INIT, &l2->flag); test_and_clear_bit(FLG_PEND_REL, &l2->flag); dev_kfree_skb(skb); } static void l2_l3_reestablish(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); dev_kfree_skb(skb); } static void l2_release(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_trim(skb, 0); l2up(l2, DL_RELEASE_CNF, skb); } static void l2_pend_rel(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; test_and_set_bit(FLG_PEND_REL, &l2->flag); dev_kfree_skb(skb); } static void l2_disconnect(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); freewin(l2); mISDN_FsmChangeState(fi, ST_L2_6); l2->rc = 0; send_uframe(l2, 
NULL, DISC | 0x10, CMD); mISDN_FsmDelTimer(&l2->t203, 1); restart_t200(l2, 2); if (skb) dev_kfree_skb(skb); } static void l2_start_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; l2->vs = 0; l2->va = 0; l2->vr = 0; l2->sow = 0; clear_exception(l2); send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP); mISDN_FsmChangeState(fi, ST_L2_7); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3); skb_trim(skb, 0); l2up(l2, DL_ESTABLISH_IND, skb); if (l2->tm) l2_tei(l2, MDL_STATUS_UP_IND, 0); } static void l2_send_UA(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); } static void l2_send_DM(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP); } static void l2_restart_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int est = 0; send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); l2mgr(l2, MDL_ERROR_IND, (void *) 'F'); if (l2->vs != l2->va) { skb_queue_purge(&l2->i_queue); est = 1; } clear_exception(l2); l2->vs = 0; l2->va = 0; l2->vr = 0; l2->sow = 0; mISDN_FsmChangeState(fi, ST_L2_7); stop_t200(l2, 3); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3); if (est) l2up_create(l2, DL_ESTABLISH_IND, 0, NULL); /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST, * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED, * 0, NULL, 0); */ if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } static void l2_stop_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; mISDN_FsmChangeState(fi, ST_L2_4); mISDN_FsmDelTimer(&l2->t203, 3); stop_t200(l2, 4); send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); 
skb_queue_purge(&l2->i_queue); freewin(l2); lapb_dl_release_l2l3(l2, DL_RELEASE_IND); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_connected(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int pr = -1; if (!get_PollFlag(l2, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); if (test_and_clear_bit(FLG_PEND_REL, &l2->flag)) l2_disconnect(fi, event, NULL); if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) { pr = DL_ESTABLISH_CNF; } else if (l2->vs != l2->va) { skb_queue_purge(&l2->i_queue); pr = DL_ESTABLISH_IND; } stop_t200(l2, 5); l2->vr = 0; l2->vs = 0; l2->va = 0; l2->sow = 0; mISDN_FsmChangeState(fi, ST_L2_7); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4); if (pr != -1) l2up_create(l2, pr, 0, NULL); if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); if (l2->tm) l2_tei(l2, MDL_STATUS_UP_IND, 0); } static void l2_released(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlag(l2, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); stop_t200(l2, 6); lapb_dl_release_l2l3(l2, DL_RELEASE_CNF); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_reestablish(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlagFree(l2, skb)) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); } } static void l2_st5_dm_release(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(l2, skb)) { stop_t200(l2, 7); if (!test_bit(FLG_L3_INIT, &l2->flag)) skb_queue_purge(&l2->i_queue); if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, 
MDL_STATUS_DOWN_IND, 0); } } static void l2_st6_dm_release(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(l2, skb)) { stop_t200(l2, 8); lapb_dl_release_l2l3(l2, DL_RELEASE_CNF); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } } static void enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf) { struct sk_buff *skb; u_char tmp[MAX_L2HEADER_LEN]; int i; i = sethdraddr(l2, tmp, cr); if (test_bit(FLG_MOD128, &l2->flag)) { tmp[i++] = typ; tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0); } else tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0); skb = mI_alloc_skb(i, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "isdnl2 can't alloc sbbuff for enquiry_cr\n"); return; } memcpy(skb_put(skb, i), tmp, i); enqueue_super(l2, skb); } inline void enquiry_response(struct layer2 *l2) { if (test_bit(FLG_OWN_BUSY, &l2->flag)) enquiry_cr(l2, RNR, RSP, 1); else enquiry_cr(l2, RR, RSP, 1); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } inline void transmit_enquiry(struct layer2 *l2) { if (test_bit(FLG_OWN_BUSY, &l2->flag)) enquiry_cr(l2, RNR, CMD, 1); else enquiry_cr(l2, RR, CMD, 1); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); start_t200(l2, 9); } static void nrerrorrecovery(struct FsmInst *fi) { struct layer2 *l2 = fi->userdata; l2mgr(l2, MDL_ERROR_IND, (void *) 'J'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } static void invoke_retransmission(struct layer2 *l2, unsigned int nr) { u_int p1; if (l2->vs != nr) { while (l2->vs != nr) { (l2->vs)--; if (test_bit(FLG_MOD128, &l2->flag)) { l2->vs %= 128; p1 = (l2->vs - l2->va) % 128; } else { l2->vs %= 8; p1 = (l2->vs - l2->va) % 8; } p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) skb_queue_head(&l2->i_queue, l2->windowar[p1]); else printk(KERN_WARNING "%s: windowar[%d] is NULL\n", __func__, p1); l2->windowar[p1] = NULL; } mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); } } static void 
l2_st7_got_super(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, typ = RR; unsigned int nr; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, l2)) { set_peer_busy(l2); typ = RNR; } else clear_peer_busy(l2); if (IsREJ(skb->data, l2)) typ = REJ; if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (PollFlag) { if (rsp) l2mgr(l2, MDL_ERROR_IND, (void *) 'A'); else enquiry_response(l2); } if (legalnr(l2, nr)) { if (typ == REJ) { setva(l2, nr); invoke_retransmission(l2, nr); stop_t200(l2, 10); if (mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 6)) l2m_debug(&l2->l2m, "Restart T203 ST7 REJ"); } else if ((nr == l2->vs) && (typ == RR)) { setva(l2, nr); stop_t200(l2, 11); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 7); } else if ((l2->va != nr) || (typ == RNR)) { setva(l2, nr); if (typ != RR) mISDN_FsmDelTimer(&l2->t203, 9); restart_t200(l2, 12); } if (skb_queue_len(&l2->i_queue) && (typ == RR)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } else nrerrorrecovery(fi); } static void l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_bit(FLG_L3_INIT, &l2->flag)) skb_queue_tail(&l2->i_queue, skb); else dev_kfree_skb(skb); } static void l2_feed_i_pull(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->i_queue, skb); mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } static void l2_feed_iqueue(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->i_queue, skb); } static void l2_got_iframe(struct FsmInst *fi, int event, void *arg) { struct layer2 
*l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, i; u_int ns, nr; i = l2addrsize(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = ((skb->data[i + 1] & 0x1) == 0x1); ns = skb->data[i] >> 1; nr = (skb->data[i + 1] >> 1) & 0x7f; } else { PollFlag = (skb->data[i] & 0x10); ns = (skb->data[i] >> 1) & 0x7; nr = (skb->data[i] >> 5) & 0x7; } if (test_bit(FLG_OWN_BUSY, &l2->flag)) { dev_kfree_skb(skb); if (PollFlag) enquiry_response(l2); } else { if (l2->vr == ns) { l2->vr++; if (test_bit(FLG_MOD128, &l2->flag)) l2->vr %= 128; else l2->vr %= 8; test_and_clear_bit(FLG_REJEXC, &l2->flag); if (PollFlag) enquiry_response(l2); else test_and_set_bit(FLG_ACK_PEND, &l2->flag); skb_pull(skb, l2headersize(l2, 0)); l2up(l2, DL_DATA_IND, skb); } else { /* n(s)!=v(r) */ dev_kfree_skb(skb); if (test_and_set_bit(FLG_REJEXC, &l2->flag)) { if (PollFlag) enquiry_response(l2); } else { enquiry_cr(l2, REJ, RSP, PollFlag); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } } } if (legalnr(l2, nr)) { if (!test_bit(FLG_PEER_BUSY, &l2->flag) && (fi->state == ST_L2_7)) { if (nr == l2->vs) { stop_t200(l2, 13); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 7); } else if (nr != l2->va) restart_t200(l2, 14); } setva(l2, nr); } else { nrerrorrecovery(fi); return; } if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag)) enquiry_cr(l2, RR, RSP, 0); } static void l2_got_tei(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; u_int info; l2->tei = (signed char)(long)arg; set_channel_address(&l2->ch, l2->sapi, l2->tei); info = DL_INFO_L2_CONNECT; l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info); if (fi->state == ST_L2_3) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); } else mISDN_FsmChangeState(fi, ST_L2_4); if (skb_queue_len(&l2->ui_queue)) tx_ui(l2); } static void l2_st5_tout_200(struct FsmInst *fi, int event, void *arg) { struct layer2 
*l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); } else if (l2->rc == l2->N200) { mISDN_FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &l2->flag); skb_queue_purge(&l2->i_queue); l2mgr(l2, MDL_ERROR_IND, (void *) 'G'); if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); st5_dl_release_l2l3(l2); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } else { l2->rc++; mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10, CMD); } } static void l2_st6_tout_200(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); } else if (l2->rc == l2->N200) { mISDN_FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &l2->flag); l2mgr(l2, MDL_ERROR_IND, (void *) 'H'); lapb_dl_release_l2l3(l2, DL_RELEASE_CNF); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } else { l2->rc++; mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); send_uframe(l2, NULL, DISC | 0x10, CMD); } } static void l2_st7_tout_200(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &l2->flag); l2->rc = 0; mISDN_FsmChangeState(fi, ST_L2_8); transmit_enquiry(l2); l2->rc++; } static void l2_st8_tout_200(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &l2->flag); if (l2->rc == l2->N200) { 
l2mgr(l2, MDL_ERROR_IND, (void *) 'I'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } else { transmit_enquiry(l2); l2->rc++; } } static void l2_st7_tout_203(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9); return; } mISDN_FsmChangeState(fi, ST_L2_8); transmit_enquiry(l2); l2->rc = 0; } static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb, *nskb, *oskb; u_char header[MAX_L2HEADER_LEN]; u_int i, p1; if (!cansend(l2)) return; skb = skb_dequeue(&l2->i_queue); if (!skb) return; if (test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) { printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", p1); dev_kfree_skb(l2->windowar[p1]); } l2->windowar[p1] = skb; i = sethdraddr(l2, header, CMD); if (test_bit(FLG_MOD128, &l2->flag)) { header[i++] = l2->vs << 1; header[i++] = l2->vr << 1; l2->vs = (l2->vs + 1) % 128; } else { header[i++] = (l2->vr << 5) | (l2->vs << 1); l2->vs = (l2->vs + 1) % 8; } nskb = skb_clone(skb, GFP_ATOMIC); p1 = skb_headroom(nskb); if (p1 >= i) memcpy(skb_push(nskb, i), header, i); else { printk(KERN_WARNING "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); oskb = nskb; nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); if (!nskb) { dev_kfree_skb(oskb); printk(KERN_WARNING "%s: no skb mem\n", __func__); return; } memcpy(skb_put(nskb, i), header, i); memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len); dev_kfree_skb(oskb); } l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) { mISDN_FsmDelTimer(&l2->t203, 13); mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11); } } static void 
l2_st8_got_super(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, rnr = 0; unsigned int nr; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, l2)) { set_peer_busy(l2); rnr = 1; } else clear_peer_busy(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (rsp && PollFlag) { if (legalnr(l2, nr)) { if (rnr) { restart_t200(l2, 15); } else { stop_t200(l2, 16); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 5); setva(l2, nr); } invoke_retransmission(l2, nr); mISDN_FsmChangeState(fi, ST_L2_7); if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } else nrerrorrecovery(fi); } else { if (!rsp && PollFlag) enquiry_response(l2); if (legalnr(l2, nr)) setva(l2, nr); else nrerrorrecovery(fi); } } static void l2_got_FRMR(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2addrsize(l2) + 1); if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */ (IsUA(skb->data) && (fi->state == ST_L2_7))) { l2mgr(l2, MDL_ERROR_IND, (void *) 'K'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } dev_kfree_skb(skb); } static void l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; l2up_create(l2, DL_RELEASE_IND, 0, NULL); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; 
skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); l2->tei = GROUP_TEI; stop_t200(l2, 17); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; stop_t200(l2, 18); l2up_create(l2, DL_RELEASE_IND, 0, NULL); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); l2->tei = GROUP_TEI; stop_t200(l2, 17); mISDN_FsmDelTimer(&l2->t203, 19); l2up_create(l2, DL_RELEASE_IND, 0, NULL); /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST, * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED, * 0, NULL, 0); */ mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag)) l2up(l2, DL_RELEASE_IND, skb); else dev_kfree_skb(skb); } static void l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); stop_t200(l2, 19); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); dev_kfree_skb(skb); } static void l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->ui_queue); stop_t200(l2, 20); l2up(l2, DL_RELEASE_CNF, skb); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = 
arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); stop_t200(l2, 19); mISDN_FsmDelTimer(&l2->t203, 19); l2up(l2, DL_RELEASE_IND, skb); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_set_own_busy(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) { enquiry_cr(l2, RNR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } if (skb) dev_kfree_skb(skb); } static void l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) { enquiry_cr(l2, RR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } if (skb) dev_kfree_skb(skb); } static void l2_frame_error(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; l2mgr(l2, MDL_ERROR_IND, arg); } static void l2_frame_error_reest(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; l2mgr(l2, MDL_ERROR_IND, arg); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } static struct FsmNode L2FnList[] = { {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign}, {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3}, {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish}, {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3}, {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release}, {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel}, {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest}, {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull}, {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue}, {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign}, {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui}, {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui}, {ST_L2_4, 
EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove}, {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove}, {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove}, {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove}, {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove}, {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_4, EV_L2_SABME, l2_start_multi}, {ST_L2_5, EV_L2_SABME, l2_send_UA}, {ST_L2_6, EV_L2_SABME, l2_send_DM}, {ST_L2_7, EV_L2_SABME, l2_restart_multi}, {ST_L2_8, EV_L2_SABME, l2_restart_multi}, {ST_L2_4, EV_L2_DISC, l2_send_DM}, {ST_L2_5, EV_L2_DISC, l2_send_DM}, {ST_L2_6, EV_L2_DISC, l2_send_UA}, {ST_L2_7, EV_L2_DISC, l2_stop_multi}, {ST_L2_8, EV_L2_DISC, l2_stop_multi}, {ST_L2_4, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_5, EV_L2_UA, l2_connected}, {ST_L2_6, EV_L2_UA, l2_released}, {ST_L2_7, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_8, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_4, EV_L2_DM, l2_reestablish}, {ST_L2_5, EV_L2_DM, l2_st5_dm_release}, {ST_L2_6, EV_L2_DM, l2_st6_dm_release}, {ST_L2_7, EV_L2_DM, l2_mdl_error_dm}, {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm}, {ST_L2_1, EV_L2_UI, l2_got_ui}, {ST_L2_2, EV_L2_UI, l2_got_ui}, {ST_L2_3, EV_L2_UI, l2_got_ui}, {ST_L2_4, EV_L2_UI, l2_got_ui}, {ST_L2_5, EV_L2_UI, l2_got_ui}, {ST_L2_6, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_UI, l2_got_ui}, {ST_L2_8, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_8, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_7, EV_L2_SUPER, l2_st7_got_super}, {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, {ST_L2_7, EV_L2_I, l2_got_iframe}, {ST_L2_8, EV_L2_I, l2_got_iframe}, {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, {ST_L2_7, EV_L2_T200, 
l2_st7_tout_200}, {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da}, {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da}, {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da}, {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da}, }; static int ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) { u_char *datap = skb->data; int ret = -EINVAL; int psapi, ptei; u_int l; int c = 0; l = l2addrsize(l2); if (skb->len <= l) { mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N'); return ret; } if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */ psapi = *datap++; ptei = *datap++; if ((psapi & 1) || !(ptei & 1)) { printk(KERN_WARNING "l2 D-channel frame wrong EA0/EA1\n"); return ret; } psapi >>= 2; ptei >>= 1; if (psapi != l2->sapi) { /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n", __func__, psapi, l2->sapi); dev_kfree_skb(skb); return 0; } if ((ptei != l2->tei) && (ptei != GROUP_TEI)) { /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: tei %d/%d mismatch\n", __func__, ptei, l2->tei); dev_kfree_skb(skb); return 0; } } else datap += l; if (!(*datap & 1)) { /* I-Frame */ c = iframe_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, 
EV_L2_I, skb); } else if (IsSFrame(datap, l2)) { /* S-Frame */ c = super_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb); } else if (IsUI(datap)) { c = UI_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb); } else if (IsSABME(datap, l2)) { c = unnum_error(l2, skb, CMD); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb); } else if (IsUA(datap)) { c = unnum_error(l2, skb, RSP); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb); } else if (IsDISC(datap)) { c = unnum_error(l2, skb, CMD); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb); } else if (IsDM(datap)) { c = unnum_error(l2, skb, RSP); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb); } else if (IsFRMR(datap)) { c = FRMR_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb); } else c = 'L'; if (c) { printk(KERN_WARNING "l2 D-channel frame error %c\n", c); mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); } return ret; } static int l2_send(struct mISDNchannel *ch, struct sk_buff *skb) { struct layer2 *l2 = container_of(ch, struct layer2, ch); struct mISDNhead *hh = mISDN_HEAD_P(skb); int ret = -EINVAL; if (*debug & DEBUG_L2_RECV) printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n", __func__, hh->prim, hh->id, l2->sapi, l2->tei); switch (hh->prim) { case PH_DATA_IND: ret = ph_data_indication(l2, hh, skb); break; case PH_DATA_CNF: ret = ph_data_confirm(l2, hh, skb); break; case PH_ACTIVATE_IND: test_and_set_bit(FLG_L1_ACTIV, &l2->flag); l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL); if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag)) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_ESTABLISH_REQ, skb); break; case PH_DEACTIVATE_IND: test_and_clear_bit(FLG_L1_ACTIV, &l2->flag); l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL); ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb); break; case MPH_INFORMATION_IND: if (!l2->up) break; ret = l2->up->send(l2->up, skb); break; case DL_DATA_REQ: ret = mISDN_FsmEvent(&l2->l2m, 
EV_L2_DL_DATA, skb); break; case DL_UNITDATA_REQ: ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb); break; case DL_ESTABLISH_REQ: if (test_bit(FLG_LAPB, &l2->flag)) test_and_set_bit(FLG_ORIG, &l2->flag); if (test_bit(FLG_L1_ACTIV, &l2->flag)) { if (test_bit(FLG_LAPD, &l2->flag) || test_bit(FLG_ORIG, &l2->flag)) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_ESTABLISH_REQ, skb); } else { if (test_bit(FLG_LAPD, &l2->flag) || test_bit(FLG_ORIG, &l2->flag)) { test_and_set_bit(FLG_ESTAB_PEND, &l2->flag); } ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2), skb); } break; case DL_RELEASE_REQ: if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ, skb); break; default: if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "l2 unknown pr %04x", hh->prim); } if (ret) { dev_kfree_skb(skb); ret = 0; } return ret; } int tei_l2(struct layer2 *l2, u_int cmd, u_long arg) { int ret = -EINVAL; if (*debug & DEBUG_L2_TEI) printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); switch (cmd) { case (MDL_ASSIGN_REQ): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); break; case (MDL_REMOVE_REQ): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL); break; case (MDL_ERROR_IND): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); break; case (MDL_ERROR_RSP): /* ETS 300-125 5.3.2.1 Test: TC13010 */ printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); break; } return ret; } static void release_l2(struct layer2 *l2) { mISDN_FsmDelTimer(&l2->t200, 21); mISDN_FsmDelTimer(&l2->t203, 16); skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); skb_queue_purge(&l2->down_queue); ReleaseWin(l2); if (test_bit(FLG_LAPD, &l2->flag)) { TEIrelease(l2); if (l2->ch.st) l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, CLOSE_CHANNEL, NULL); } kfree(l2); } static int l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct layer2 *l2 = container_of(ch, struct 
layer2, ch); u_int info; if (*debug & DEBUG_L2_CTRL) printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); switch (cmd) { case OPEN_CHANNEL: if (test_bit(FLG_LAPD, &l2->flag)) { set_channel_address(&l2->ch, l2->sapi, l2->tei); info = DL_INFO_L2_CONNECT; l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info); } break; case CLOSE_CHANNEL: if (l2->ch.peer) l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL); release_l2(l2); break; } return 0; } struct layer2 * create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, int sapi) { struct layer2 *l2; struct channel_req rq; l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL); if (!l2) { printk(KERN_ERR "kzalloc layer2 failed\n"); return NULL; } l2->next_id = 1; l2->down_id = MISDN_ID_NONE; l2->up = ch; l2->ch.st = ch->st; l2->ch.send = l2_send; l2->ch.ctrl = l2_ctrl; switch (protocol) { case ISDN_P_LAPD_NT: test_and_set_bit(FLG_LAPD, &l2->flag); test_and_set_bit(FLG_LAPD_NET, &l2->flag); test_and_set_bit(FLG_MOD128, &l2->flag); l2->sapi = sapi; l2->maxlen = MAX_DFRAME_LEN; if (test_bit(OPTION_L2_PMX, &options)) l2->window = 7; else l2->window = 1; if (test_bit(OPTION_L2_PTP, &options)) test_and_set_bit(FLG_PTP, &l2->flag); if (test_bit(OPTION_L2_FIXEDTEI, &options)) test_and_set_bit(FLG_FIXED_TEI, &l2->flag); l2->tei = tei; l2->T200 = 1000; l2->N200 = 3; l2->T203 = 10000; if (test_bit(OPTION_L2_PMX, &options)) rq.protocol = ISDN_P_NT_E1; else rq.protocol = ISDN_P_NT_S0; rq.adr.channel = 0; l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); break; case ISDN_P_LAPD_TE: test_and_set_bit(FLG_LAPD, &l2->flag); test_and_set_bit(FLG_MOD128, &l2->flag); test_and_set_bit(FLG_ORIG, &l2->flag); l2->sapi = sapi; l2->maxlen = MAX_DFRAME_LEN; if (test_bit(OPTION_L2_PMX, &options)) l2->window = 7; else l2->window = 1; if (test_bit(OPTION_L2_PTP, &options)) test_and_set_bit(FLG_PTP, &l2->flag); if (test_bit(OPTION_L2_FIXEDTEI, &options)) test_and_set_bit(FLG_FIXED_TEI, &l2->flag); l2->tei = tei; l2->T200 = 1000; 
l2->N200 = 3; l2->T203 = 10000; if (test_bit(OPTION_L2_PMX, &options)) rq.protocol = ISDN_P_TE_E1; else rq.protocol = ISDN_P_TE_S0; rq.adr.channel = 0; l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); break; case ISDN_P_B_X75SLP: test_and_set_bit(FLG_LAPB, &l2->flag); l2->window = 7; l2->maxlen = MAX_DATA_SIZE; l2->T200 = 1000; l2->N200 = 4; l2->T203 = 5000; l2->addr.A = 3; l2->addr.B = 1; break; default: printk(KERN_ERR "layer2 create failed prt %x\n", protocol); kfree(l2); return NULL; } skb_queue_head_init(&l2->i_queue); skb_queue_head_init(&l2->ui_queue); skb_queue_head_init(&l2->down_queue); skb_queue_head_init(&l2->tmp_queue); InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || test_bit(FLG_PTP, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else l2->l2m.state = ST_L2_1; l2->l2m.debug = *debug; l2->l2m.userdata = l2; l2->l2m.userint = 0; l2->l2m.printdebug = l2m_debug; mISDN_FsmInitTimer(&l2->l2m, &l2->t200); mISDN_FsmInitTimer(&l2->l2m, &l2->t203); return l2; } static int x75create(struct channel_req *crq) { struct layer2 *l2; if (crq->protocol != ISDN_P_B_X75SLP) return -EPROTONOSUPPORT; l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0); if (!l2) return -ENOMEM; crq->ch = &l2->ch; crq->protocol = ISDN_P_B_HDLC; return 0; } static struct Bprotocol X75SLP = { .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)), .name = "X75SLP", .create = x75create }; int Isdnl2_Init(u_int *deb) { debug = deb; mISDN_register_Bprotocol(&X75SLP); l2fsm.state_count = L2_STATE_COUNT; l2fsm.event_count = L2_EVENT_COUNT; l2fsm.strEvent = strL2Event; l2fsm.strState = strL2State; mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); TEIInit(deb); return 0; } void Isdnl2_cleanup(void) { mISDN_unregister_Bprotocol(&X75SLP); TEIFree(); mISDN_FsmFree(&l2fsm); }
gpl-2.0
hallovveen31/ICED_COLD_Hercules_JB_Kernel
net/llc/llc_output.c
4052
2424
/* * llc_output.c - LLC minimal output path * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License version 2 as published by the Free Software * Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License version 2 for more details. */ #include <linux/if_arp.h> #include <linux/if_tr.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <linux/skbuff.h> #include <net/llc.h> #include <net/llc_pdu.h> /** * llc_mac_hdr_init - fills MAC header fields * @skb: Address of the frame to initialize its MAC header * @sa: The MAC source address * @da: The MAC destination address * * Fills MAC header fields, depending on MAC type. Returns 0, If MAC type * is a valid type and initialization completes correctly 1, otherwise. */ int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa, const unsigned char *da) { int rc = -EINVAL; switch (skb->dev->type) { case ARPHRD_IEEE802_TR: case ARPHRD_ETHER: case ARPHRD_LOOPBACK: rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, skb->len); if (rc > 0) rc = 0; break; default: WARN(1, "device type not supported: %d\n", skb->dev->type); } return rc; } /** * llc_build_and_send_ui_pkt - unitdata request interface for upper layers * @sap: sap to use * @skb: packet to send * @dmac: destination mac address * @dsap: destination sap * * Upper layers calls this function when upper layer wants to send data * using connection-less mode communication (UI pdu). 
* * Accept data frame from network layer to be sent using connection- * less mode communication; timeout/retries handled by network layer; * package primitive as an event and send to SAP event handler */ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, unsigned char *dmac, unsigned char dsap) { int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } EXPORT_SYMBOL(llc_mac_hdr_init); EXPORT_SYMBOL(llc_build_and_send_ui_pkt);
gpl-2.0
Split-Screen/android_kernel_huawei_msm8928
kernel/debug/kdb/kdb_main.c
4308
71380
/* * Kernel Debugger Architecture Independent Main Code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com> * Xscale (R) modifications copyright (C) 2003 Intel Corporation. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/ctype.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/sysrq.h> #include <linux/smp.h> #include <linux/utsname.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/notifier.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nmi.h> #include <linux/time.h> #include <linux/ptrace.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/kdebug.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <linux/slab.h> #include "kdb_private.h" #define GREP_LEN 256 char kdb_grep_string[GREP_LEN]; int kdb_grepping_flag; EXPORT_SYMBOL(kdb_grepping_flag); int kdb_grep_leading; int kdb_grep_trailing; /* * Kernel debugger state flags */ int kdb_flags; atomic_t kdb_event; /* * kdb_lock protects updates to kdb_initial_cpu. Used to * single thread processors through the kernel debugger. 
*/ int kdb_initial_cpu = -1; /* cpu number that owns kdb */ int kdb_nextline = 1; int kdb_state; /* General KDB state */ struct task_struct *kdb_current_task; EXPORT_SYMBOL(kdb_current_task); struct pt_regs *kdb_current_regs; const char *kdb_diemsg; static int kdb_go_count; #ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC static unsigned int kdb_continue_catastrophic = CONFIG_KDB_CONTINUE_CATASTROPHIC; #else static unsigned int kdb_continue_catastrophic; #endif /* kdb_commands describes the available commands. */ static kdbtab_t *kdb_commands; #define KDB_BASE_CMD_MAX 50 static int kdb_max_commands = KDB_BASE_CMD_MAX; static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX]; #define for_each_kdbcmd(cmd, num) \ for ((cmd) = kdb_base_commands, (num) = 0; \ num < kdb_max_commands; \ num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++) typedef struct _kdbmsg { int km_diag; /* kdb diagnostic */ char *km_msg; /* Corresponding message text */ } kdbmsg_t; #define KDBMSG(msgnum, text) \ { KDB_##msgnum, text } static kdbmsg_t kdbmsgs[] = { KDBMSG(NOTFOUND, "Command Not Found"), KDBMSG(ARGCOUNT, "Improper argument count, see usage."), KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, " "8 is only allowed on 64 bit systems"), KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"), KDBMSG(NOTENV, "Cannot find environment variable"), KDBMSG(NOENVVALUE, "Environment variable should have value"), KDBMSG(NOTIMP, "Command not implemented"), KDBMSG(ENVFULL, "Environment full"), KDBMSG(ENVBUFFULL, "Environment buffer full"), KDBMSG(TOOMANYBPT, "Too many breakpoints defined"), #ifdef CONFIG_CPU_XSCALE KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"), #else KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"), #endif KDBMSG(DUPBPT, "Duplicate breakpoint address"), KDBMSG(BPTNOTFOUND, "Breakpoint not found"), KDBMSG(BADMODE, "Invalid IDMODE"), KDBMSG(BADINT, "Illegal numeric value"), KDBMSG(INVADDRFMT, "Invalid symbolic address 
format"), KDBMSG(BADREG, "Invalid register name"), KDBMSG(BADCPUNUM, "Invalid cpu number"), KDBMSG(BADLENGTH, "Invalid length field"), KDBMSG(NOBP, "No Breakpoint exists"), KDBMSG(BADADDR, "Invalid address"), }; #undef KDBMSG static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t); /* * Initial environment. This is all kept static and local to * this file. We don't want to rely on the memory allocation * mechanisms in the kernel, so we use a very limited allocate-only * heap for new and altered environment variables. The entire * environment is limited to a fixed number of entries (add more * to __env[] if required) and a fixed amount of heap (add more to * KDB_ENVBUFSIZE if required). */ static char *__env[] = { #if defined(CONFIG_SMP) "PROMPT=[%d]kdb> ", "MOREPROMPT=[%d]more> ", #else "PROMPT=kdb> ", "MOREPROMPT=more> ", #endif "RADIX=16", "MDCOUNT=8", /* lines of md output */ KDB_PLATFORM_ENV, "DTABCOUNT=30", "NOSECT=1", (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, }; static const int __nenv = (sizeof(__env) / sizeof(char *)); struct task_struct *kdb_curr_task(int cpu) { struct task_struct *p = curr_task(cpu); #ifdef _TIF_MCA_INIT if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu)) p = krp->p; #endif return p; } /* * kdbgetenv - This function will return the character string value of * an environment variable. * Parameters: * match A character string representing an environment variable. * Returns: * NULL No environment variable matches 'match' * char* Pointer to string value of environment variable. 
*/ char *kdbgetenv(const char *match) { char **ep = __env; int matchlen = strlen(match); int i; for (i = 0; i < __nenv; i++) { char *e = *ep++; if (!e) continue; if ((strncmp(match, e, matchlen) == 0) && ((e[matchlen] == '\0') || (e[matchlen] == '='))) { char *cp = strchr(e, '='); return cp ? ++cp : ""; } } return NULL; } /* * kdballocenv - This function is used to allocate bytes for * environment entries. * Parameters: * match A character string representing a numeric value * Outputs: * *value the unsigned long representation of the env variable 'match' * Returns: * Zero on success, a kdb diagnostic on failure. * Remarks: * We use a static environment buffer (envbuffer) to hold the values * of dynamically generated environment variables (see kdb_set). Buffer * space once allocated is never free'd, so over time, the amount of space * (currently 512 bytes) will be exhausted if env variables are changed * frequently. */ static char *kdballocenv(size_t bytes) { #define KDB_ENVBUFSIZE 512 static char envbuffer[KDB_ENVBUFSIZE]; static int envbufsize; char *ep = NULL; if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) { ep = &envbuffer[envbufsize]; envbufsize += bytes; } return ep; } /* * kdbgetulenv - This function will return the value of an unsigned * long-valued environment variable. * Parameters: * match A character string representing a numeric value * Outputs: * *value the unsigned long represntation of the env variable 'match' * Returns: * Zero on success, a kdb diagnostic on failure. */ static int kdbgetulenv(const char *match, unsigned long *value) { char *ep; ep = kdbgetenv(match); if (!ep) return KDB_NOTENV; if (strlen(ep) == 0) return KDB_NOENVVALUE; *value = simple_strtoul(ep, NULL, 0); return 0; } /* * kdbgetintenv - This function will return the value of an * integer-valued environment variable. 
* Parameters:
 *	match	A character string representing an integer-valued env variable
 * Outputs:
 *	*value  the integer representation of the environment variable 'match'
 * Returns:
 *	Zero on success, a kdb diagnostic on failure.
 */
int kdbgetintenv(const char *match, int *value)
{
	unsigned long val;
	int diag;

	diag = kdbgetulenv(match, &val);
	if (!diag)
		*value = (int) val;
	return diag;
}

/*
 * kdbgetularg - This function will convert a numeric string into an
 *	unsigned long value.
 * Parameters:
 *	arg	A character string representing a numeric value
 * Outputs:
 *	*value  the unsigned long representation of arg.
 * Returns:
 *	Zero on success, a kdb diagnostic on failure.
 */
int kdbgetularg(const char *arg, unsigned long *value)
{
	char *endp;
	unsigned long val;

	val = simple_strtoul(arg, &endp, 0);

	if (endp == arg) {
		/*
		 * Also try base 16, for us folks too lazy to type the
		 * leading 0x...
		 */
		val = simple_strtoul(arg, &endp, 16);
		if (endp == arg)
			return KDB_BADINT;
	}

	*value = val;

	return 0;
}

/*
 * kdbgetu64arg - 64-bit counterpart of kdbgetularg(); converts a
 *	numeric string into a u64 with the same base-10 then base-16
 *	fallback parsing.
 * Returns:
 *	Zero on success, KDB_BADINT if arg is not numeric.
 */
int kdbgetu64arg(const char *arg, u64 *value)
{
	char *endp;
	u64 val;

	val = simple_strtoull(arg, &endp, 0);

	if (endp == arg) {
		val = simple_strtoull(arg, &endp, 16);
		if (endp == arg)
			return KDB_BADINT;
	}

	*value = val;

	return 0;
}

/*
 * kdb_set - This function implements the 'set' command.  Alter an
 *	existing environment variable or create a new one.
 */
int kdb_set(int argc, const char **argv)
{
	int i;
	char *ep;
	size_t varlen, vallen;

	/*
	 * we can be invoked two ways:
	 *   set var=value    argv[1]="var", argv[2]="value"
	 *   set var = value  argv[1]="var", argv[2]="=", argv[3]="value"
	 * - if the latter, shift 'em down.
	 */
	if (argc == 3) {
		argv[2] = argv[3];
		argc--;
	}

	if (argc != 2)
		return KDB_ARGCOUNT;

	/*
	 * Check for internal variables
	 */
	if (strcmp(argv[1], "KDBDEBUG") == 0) {
		unsigned int debugflags;
		char *cp;

		debugflags = simple_strtoul(argv[2], &cp, 0);
		if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
			kdb_printf("kdb: illegal debug flags '%s'\n",
				    argv[2]);
			return 0;
		}
		kdb_flags = (kdb_flags &
			     ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
			| (debugflags << KDB_DEBUG_FLAG_SHIFT);

		return 0;
	}

	/*
	 * Tokenizer squashed the '=' sign.  argv[1] is variable
	 * name, argv[2] = value.
	 */
	varlen = strlen(argv[1]);
	vallen = strlen(argv[2]);
	/* +2: one byte for the '=', one for the terminating NUL. */
	ep = kdballocenv(varlen + vallen + 2);
	if (ep == (char *)0)
		return KDB_ENVBUFFULL;

	sprintf(ep, "%s=%s", argv[1], argv[2]);

	ep[varlen+vallen+1] = '\0';

	/* Replace an existing entry of the same name, if any. */
	for (i = 0; i < __nenv; i++) {
		if (__env[i]
		 && ((strncmp(__env[i], argv[1], varlen) == 0)
		   && ((__env[i][varlen] == '\0')
		    || (__env[i][varlen] == '=')))) {
			__env[i] = ep;
			return 0;
		}
	}

	/*
	 * Wasn't existing variable.  Fit into slot.
	 */
	for (i = 0; i < __nenv-1; i++) {
		if (__env[i] == (char *)0) {
			__env[i] = ep;
			return 0;
		}
	}

	return KDB_ENVFULL;
}

/*
 * kdb_check_regs - Diagnose use of register commands when no saved
 *	register state is available for the current task.
 * Returns:
 *	Zero if kdb_current_regs is valid, KDB_BADREG otherwise.
 */
static int kdb_check_regs(void)
{
	if (!kdb_current_regs) {
		kdb_printf("No current kdb registers."
			   " You may need to select another task\n");
		return KDB_BADREG;
	}
	return 0;
}

/*
 * kdbgetaddrarg - This function is responsible for parsing an
 *	address-expression and returning the value of the expression,
 *	symbol name, and offset to the caller.
 *
 *	The argument may consist of a numeric value (decimal or
 *	hexadecimal), a symbol name, a register name (preceded by the
 *	percent sign), an environment variable with a numeric value
 *	(preceded by a dollar sign) or a simple arithmetic expression
 *	consisting of a symbol name, +/-, and a numeric constant value
 *	(offset).
* Parameters:
 *	argc	- count of arguments in argv
 *	argv	- argument vector
 *	*nextarg - index to next unparsed argument in argv[]
 *	regs	- Register state at time of KDB entry
 * Outputs:
 *	*value	- receives the value of the address-expression
 *	*offset - receives the offset specified, if any
 *	*name   - receives the symbol name, if any
 *	*nextarg - index to next unparsed argument in argv[]
 * Returns:
 *	zero is returned on success, a kdb diagnostic code is
 *      returned on error.
 */
int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
		  unsigned long *value,  long *offset,
		  char **name)
{
	unsigned long addr;
	unsigned long off = 0;
	int positive;
	int diag;
	int found = 0;
	char *symname;
	char symbol = '\0';
	char *cp;
	kdb_symtab_t symtab;

	/*
	 * Process arguments which follow the following syntax:
	 *
	 *  symbol | numeric-address [+/- numeric-offset]
	 *  %register
	 *  $environment-variable
	 */

	if (*nextarg > argc)
		return KDB_ARGCOUNT;

	symname = (char *)argv[*nextarg];

	/*
	 * If there is no whitespace between the symbol
	 * or address and the '+' or '-' symbols, we
	 * remember the character and replace it with a
	 * null so the symbol/value can be properly parsed
	 */
	cp = strpbrk(symname, "+-");
	if (cp != NULL) {
		symbol = *cp;
		*cp++ = '\0';
	}

	if (symname[0] == '$') {
		diag = kdbgetulenv(&symname[1], &addr);
		if (diag)
			return diag;
	} else if (symname[0] == '%') {
		diag = kdb_check_regs();
		if (diag)
			return diag;
		/* Implement register values with % at a later time as it is
		 * arch optional.
		 */
		return KDB_NOTIMP;
	} else {
		found = kdbgetsymval(symname, &symtab);
		if (found) {
			addr = symtab.sym_start;
		} else {
			diag = kdbgetularg(argv[*nextarg], &addr);
			if (diag)
				return diag;
		}
	}

	/* Fill symtab for a numeric address so *offset below is valid. */
	if (!found)
		found = kdbnearsym(addr, &symtab);

	(*nextarg)++;

	if (name)
		*name = symname;
	if (value)
		*value = addr;
	if (offset && name && *name)
		*offset = addr - symtab.sym_start;

	if ((*nextarg > argc)
	 && (symbol == '\0'))
		return 0;

	/*
	 * check for +/- and offset
	 */

	if (symbol == '\0') {
		if ((argv[*nextarg][0] != '+')
		 && (argv[*nextarg][0] != '-')) {
			/*
			 * Not our argument.  Return.
			 */
			return 0;
		} else {
			positive = (argv[*nextarg][0] == '+');
			(*nextarg)++;
		}
	} else
		positive = (symbol == '+');

	/*
	 * Now there must be an offset!
	 */
	if ((*nextarg > argc)
	 && (symbol == '\0')) {
		return KDB_INVADDRFMT;
	}

	/* When symbol is set, cp already points just past the +/- that
	 * was squashed out of the symbol token above. */
	if (!symbol) {
		cp = (char *)argv[*nextarg];
		(*nextarg)++;
	}

	diag = kdbgetularg(cp, &off);
	if (diag)
		return diag;

	if (!positive)
		off = -off;

	if (offset)
		*offset += off;

	if (value)
		*value += off;

	return 0;
}

/*
 * kdb_cmderror - Print the diagnostic message text for a kdb
 *	diagnostic code (negative), or note that no error occurred.
 */
static void kdb_cmderror(int diag)
{
	int i;

	if (diag >= 0) {
		kdb_printf("no error detected (diagnostic is %d)\n", diag);
		return;
	}

	for (i = 0; i < __nkdb_err; i++) {
		if (kdbmsgs[i].km_diag == diag) {
			kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
			return;
		}
	}

	kdb_printf("Unknown diag %d\n", -diag);
}

/*
 * kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
 *	command which defines one command as a set of other commands,
 *	terminated by endefcmd.  kdb_defcmd processes the initial
 *	'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
 *	the following commands until 'endefcmd'.
* Inputs:
 *	argc	argument count
 *	argv	argument vector
 * Returns:
 *	zero for success, a kdb diagnostic if error
 */
struct defcmd_set {
	int count;	/* number of commands recorded so far */
	int usable;	/* cleared if any allocation for this set failed */
	char *name;
	char *usage;
	char *help;
	char **command;	/* the recorded command strings */
};
static struct defcmd_set *defcmd_set;
static int defcmd_set_count;
static int defcmd_in_progress;

/* Forward references */
static int kdb_exec_defcmd(int argc, const char **argv);

/* Record one line of an in-progress defcmd body, or finish the
 * definition and register it when the line is "endefcmd". */
static int kdb_defcmd2(const char *cmdstr, const char *argv0)
{
	struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
	char **save_command = s->command;
	if (strcmp(argv0, "endefcmd") == 0) {
		defcmd_in_progress = 0;
		if (!s->count)
			s->usable = 0;
		if (s->usable)
			kdb_register(s->name, kdb_exec_defcmd,
				     s->usage, s->help, 0);
		return 0;
	}
	if (!s->usable)
		return KDB_NOTIMP;
	/* Grow the command table by one entry (allocate-copy-free). */
	s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
	if (!s->command) {
		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
			   cmdstr);
		s->usable = 0;
		return KDB_NOTIMP;
	}
	memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
	s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
	kfree(save_command);
	return 0;
}

/* Start a new defcmd definition (or, with no arguments, list all
 * existing definitions). */
static int kdb_defcmd(int argc, const char **argv)
{
	struct defcmd_set *save_defcmd_set = defcmd_set, *s;
	if (defcmd_in_progress) {
		kdb_printf("kdb: nested defcmd detected, assuming missing "
			   "endefcmd\n");
		kdb_defcmd2("endefcmd", "endefcmd");
	}
	if (argc == 0) {
		int i;
		for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
			kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
				   s->usage, s->help);
			for (i = 0; i < s->count; ++i)
				kdb_printf("%s", s->command[i]);
			kdb_printf("endefcmd\n");
		}
		return 0;
	}
	if (argc != 3)
		return KDB_ARGCOUNT;
	/* Grow the set table by one entry (allocate-copy-free). */
	defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
			     GFP_KDB);
	if (!defcmd_set) {
		kdb_printf("Could not allocate new defcmd_set entry for %s\n",
			   argv[1]);
		defcmd_set = save_defcmd_set;
		return KDB_NOTIMP;
	}
	memcpy(defcmd_set, save_defcmd_set,
	       defcmd_set_count * sizeof(*defcmd_set));
	kfree(save_defcmd_set);
	s = defcmd_set + defcmd_set_count;
	memset(s, 0, sizeof(*s));
	s->usable = 1;
	s->name = kdb_strdup(argv[1], GFP_KDB);
	s->usage = kdb_strdup(argv[2], GFP_KDB);
	s->help = kdb_strdup(argv[3], GFP_KDB);
	/* Strip surrounding double quotes from usage and help strings. */
	if (s->usage[0] == '"') {
		strcpy(s->usage, s->usage+1);
		s->usage[strlen(s->usage)-1] = '\0';
	}
	if (s->help[0] == '"') {
		strcpy(s->help, s->help+1);
		s->help[strlen(s->help)-1] = '\0';
	}
	++defcmd_set_count;
	defcmd_in_progress = 1;
	return 0;
}

/*
 * kdb_exec_defcmd - Execute the set of commands associated with this
 *	defcmd name.
 * Inputs:
 *	argc	argument count
 *	argv	argument vector
 * Returns:
 *	zero for success, a kdb diagnostic if error
 */
static int kdb_exec_defcmd(int argc, const char **argv)
{
	int i, ret;
	struct defcmd_set *s;
	if (argc != 0)
		return KDB_ARGCOUNT;
	for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
		if (strcmp(s->name, argv[0]) == 0)
			break;
	}
	if (i == defcmd_set_count) {
		kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
			   argv[0]);
		return KDB_NOTIMP;
	}
	for (i = 0; i < s->count; ++i) {
		/* Recursive use of kdb_parse, do not use argv after
		 * this point */
		argv = NULL;
		kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
		ret = kdb_parse(s->command[i]);
		if (ret)
			return ret;
	}
	return 0;
}

/* Command history */
#define KDB_CMD_HISTORY_COUNT	32
#define CMD_BUFLEN		200	/* kdb_printf: max printline size == 256 */
static unsigned int cmd_head, cmd_tail;
static unsigned int cmdptr;
static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
static char cmd_cur[CMD_BUFLEN];

/*
 * The "str" argument may point to something like  | grep xyz
 */
static void parse_grep(const char *str)
{
	int	len;
	char	*cp = (char *)str, *cp2;

	/* sanity check: we should have been called with the \ first */
	if (*cp != '|')
		return;
	cp++;
	while (isspace(*cp))
		cp++;
	if (strncmp(cp, "grep ", 5)) {
		kdb_printf("invalid 'pipe', see grephelp\n");
		return;
	}
	cp += 5;
	while (isspace(*cp))
		cp++;
	cp2 = strchr(cp, '\n');
	if (cp2)
		*cp2 = '\0'; /* remove the trailing newline */
	len = strlen(cp);
	if (len ==
0) {
		kdb_printf("invalid 'pipe', see grephelp\n");
		return;
	}
	/* now cp points to a nonzero length search string */
	if (*cp == '"') {
		/* allow it be "x y z" by removing the "'s - there must
		   be two of them */
		cp++;
		cp2 = strchr(cp, '"');
		if (!cp2) {
			kdb_printf("invalid quoted string, see grephelp\n");
			return;
		}
		*cp2 = '\0'; /* end the string where the 2nd " was */
	}
	kdb_grep_leading = 0;
	if (*cp == '^') {
		kdb_grep_leading = 1;
		cp++;
	}
	len = strlen(cp);
	kdb_grep_trailing = 0;
	if (*(cp+len-1) == '$') {
		kdb_grep_trailing = 1;
		*(cp+len-1) = '\0';
	}
	len = strlen(cp);
	if (!len)
		return;
	if (len >= GREP_LEN) {
		kdb_printf("search string too long\n");
		return;
	}
	strcpy(kdb_grep_string, cp);
	kdb_grepping_flag++;
	return;
}

/*
 * kdb_parse - Parse the command line, search the command table for a
 *	matching command and invoke the command function.  This
 *	function may be called recursively, if it is, the second call
 *	will overwrite argv and cbuf.  It is the caller's
 *	responsibility to save their argv if they recursively call
 *	kdb_parse().
 * Parameters:
 *      cmdstr	The input command line to be parsed.
 *	regs	The registers at the time kdb was entered.
 * Returns:
 *	Zero for success, a kdb diagnostic if failure.
 * Remarks:
 *	Limited to 20 tokens.
 *
 *	Real rudimentary tokenization. Basically only whitespace
 *	is considered a token delimeter (but special consideration
 *	is taken of the '=' sign as used by the 'set' command).
 *
 *	The algorithm used to tokenize the input string relies on
 *	there being at least one whitespace (or otherwise useless)
 *	character between tokens as the character immediately following
 *	the token is altered in-place to a null-byte to terminate the
 *	token string.
 */

#define MAXARGC	20

int kdb_parse(const char *cmdstr)
{
	static char *argv[MAXARGC];
	static int argc;
	static char cbuf[CMD_BUFLEN+2];
	char *cp;
	char *cpp, quoted;
	kdbtab_t *tp;
	int i, escaped, ignore_errors = 0, check_grep;

	/*
	 * First tokenize the command string.
	 */
	cp = (char *)cmdstr;
	kdb_grepping_flag = check_grep = 0;

	if (KDB_FLAG(CMD_INTERRUPT)) {
		/* Previous command was interrupted, newline must not
		 * repeat the command */
		KDB_FLAG_CLEAR(CMD_INTERRUPT);
		KDB_STATE_SET(PAGER);
		argc = 0;	/* no repeat */
	}

	if (*cp != '\n' && *cp != '\0') {
		argc = 0;
		cpp = cbuf;
		while (*cp) {
			/* skip whitespace */
			while (isspace(*cp))
				cp++;
			if ((*cp == '\0') || (*cp == '\n') ||
			    (*cp == '#' && !defcmd_in_progress))
				break;
			/* special case: check for | grep pattern */
			if (*cp == '|') {
				check_grep++;
				break;
			}
			if (cpp >= cbuf + CMD_BUFLEN) {
				kdb_printf("kdb_parse: command buffer "
					   "overflow, command ignored\n%s\n",
					   cmdstr);
				return KDB_NOTFOUND;
			}
			if (argc >= MAXARGC - 1) {
				kdb_printf("kdb_parse: too many arguments, "
					   "command ignored\n%s\n", cmdstr);
				return KDB_NOTFOUND;
			}
			argv[argc++] = cpp;
			escaped = 0;
			quoted = '\0';
			/* Copy to next unquoted and unescaped
			 * whitespace or '=' */
			while (*cp && *cp != '\n' &&
			       (escaped || quoted || !isspace(*cp))) {
				if (cpp >= cbuf + CMD_BUFLEN)
					break;
				if (escaped) {
					escaped = 0;
					*cpp++ = *cp++;
					continue;
				}
				if (*cp == '\\') {
					escaped = 1;
					++cp;
					continue;
				}
				if (*cp == quoted)
					quoted = '\0';
				else if (*cp == '\'' || *cp == '"')
					quoted = *cp;
				*cpp = *cp++;
				if (*cpp == '=' && !quoted)
					break;
				++cpp;
			}
			*cpp++ = '\0';	/* Squash a ws or '=' character */
		}
	}
	if (!argc)
		return 0;
	if (check_grep)
		parse_grep(cp);
	if (defcmd_in_progress) {
		int result = kdb_defcmd2(cmdstr, argv[0]);
		if (!defcmd_in_progress) {
			argc = 0;	/* avoid repeat on endefcmd */
			*(argv[0]) = '\0';
		}
		return result;
	}
	/* A leading '-' (not followed by a digit) means: run the command
	 * but ignore any error diagnostic it returns. */
	if (argv[0][0] == '-' && argv[0][1] &&
	    (argv[0][1] < '0' || argv[0][1] > '9')) {
		ignore_errors = 1;
		++argv[0];
	}

	for_each_kdbcmd(tp, i) {
		if (tp->cmd_name) {
			/*
			 * If this command is allowed to be abbreviated,
			 * check to see if this is it.
			 */
			if (tp->cmd_minlen
			 && (strlen(argv[0]) <= tp->cmd_minlen)) {
				if (strncmp(argv[0],
					    tp->cmd_name,
					    tp->cmd_minlen) == 0) {
					break;
				}
			}

			if (strcmp(argv[0], tp->cmd_name) == 0)
				break;
		}
	}

	/*
	 * If we don't find a command by this name, see if the first
	 * few characters of this match any of the known commands.
	 * e.g., md1c20 should match md.
	 */
	if (i == kdb_max_commands) {
		for_each_kdbcmd(tp, i) {
			if (tp->cmd_name) {
				if (strncmp(argv[0],
					    tp->cmd_name,
					    strlen(tp->cmd_name)) == 0) {
					break;
				}
			}
		}
	}

	if (i < kdb_max_commands) {
		int result;
		KDB_STATE_SET(CMD);
		result = (*tp->cmd_func)(argc-1, (const char **)argv);
		if (result && ignore_errors && result > KDB_CMD_GO)
			result = 0;
		KDB_STATE_CLEAR(CMD);
		/* Decide how a bare newline repeats this command. */
		switch (tp->cmd_repeat) {
		case KDB_REPEAT_NONE:
			argc = 0;
			if (argv[0])
				*(argv[0]) = '\0';
			break;
		case KDB_REPEAT_NO_ARGS:
			argc = 1;
			if (argv[1])
				*(argv[1]) = '\0';
			break;
		case KDB_REPEAT_WITH_ARGS:
			break;
		}
		return result;
	}

	/*
	 * If the input with which we were presented does not
	 * map to an existing command, attempt to parse it as an
	 * address argument and display the result.   Useful for
	 * obtaining the address of a variable, or the nearest symbol
	 * to an address contained in a register.
	 */
	{
		unsigned long value;
		char *name = NULL;
		long offset;
		int nextarg = 0;

		if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
				  &value, &offset, &name)) {
			return KDB_NOTFOUND;
		}

		kdb_printf("%s = ", argv[0]);
		kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
		kdb_printf("\n");
		return 0;
	}
}

/*
 * handle_ctrl_cmd - Step backward (ctrl-p) or forward (ctrl-n)
 *	through the command history ring into cmd_cur.
 * Returns:
 *	1 if the control character was consumed, 0 otherwise.
 */
static int handle_ctrl_cmd(char *cmd)
{
#define CTRL_P	16
#define CTRL_N	14

	/* initial situation */
	if (cmd_head == cmd_tail)
		return 0;
	switch (*cmd) {
	case CTRL_P:
		if (cmdptr != cmd_tail)
			cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
		strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
		return 1;
	case CTRL_N:
		if (cmdptr != cmd_head)
			cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
		strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
		return 1;
	}
	return 0;
}

/*
 * kdb_reboot - This function implements the 'reboot' command.
Reboot * the system immediately, or loop for ever on failure. */ static int kdb_reboot(int argc, const char **argv) { emergency_restart(); kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n"); while (1) cpu_relax(); /* NOTREACHED */ return 0; } static void kdb_dumpregs(struct pt_regs *regs) { int old_lvl = console_loglevel; console_loglevel = 15; kdb_trap_printk++; show_regs(regs); kdb_trap_printk--; kdb_printf("\n"); console_loglevel = old_lvl; } void kdb_set_current_task(struct task_struct *p) { kdb_current_task = p; if (kdb_task_has_cpu(p)) { kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p)); return; } kdb_current_regs = NULL; } /* * kdb_local - The main code for kdb. This routine is invoked on a * specific processor, it is not global. The main kdb() routine * ensures that only one processor at a time is in this routine. * This code is called with the real reason code on the first * entry to a kdb session, thereafter it is called with reason * SWITCH, even if the user goes back to the original cpu. * Inputs: * reason The reason KDB was invoked * error The hardware-defined error code * regs The exception frame at time of fault/breakpoint. * db_result Result code from the break or debug point. * Returns: * 0 KDB was invoked for an event which it wasn't responsible * 1 KDB handled the event for which it was invoked. * KDB_CMD_GO User typed 'go'. * KDB_CMD_CPU User switched to another cpu. * KDB_CMD_SS Single step. * KDB_CMD_SSB Single step until branch. */ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_dbtrap_t db_result) { char *cmdbuf; int diag; struct task_struct *kdb_current = kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ } else { kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", kdb_current, kdb_current ? 
kdb_current->pid : 0); #if defined(CONFIG_SMP) kdb_printf("on processor %d ", raw_smp_processor_id()); #endif } switch (reason) { case KDB_REASON_DEBUG: { /* * If re-entering kdb after a single step * command, don't print the message. */ switch (db_result) { case KDB_DB_BPT: kdb_printf("\nEntering kdb (0x%p, pid %d) ", kdb_current, kdb_current->pid); #if defined(CONFIG_SMP) kdb_printf("on processor %d ", raw_smp_processor_id()); #endif kdb_printf("due to Debug @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); break; case KDB_DB_SSB: /* * In the midst of ssb command. Just return. */ KDB_DEBUG_STATE("kdb_local 3", reason); return KDB_CMD_SSB; /* Continue with SSB command */ break; case KDB_DB_SS: break; case KDB_DB_SSBPT: KDB_DEBUG_STATE("kdb_local 4", reason); return 1; /* kdba_db_trap did the work */ default: kdb_printf("kdb: Bad result from kdba_db_trap: %d\n", db_result); break; } } break; case KDB_REASON_ENTER: if (KDB_STATE(KEYBOARD)) kdb_printf("due to Keyboard Entry\n"); else kdb_printf("due to KDB_ENTER()\n"); break; case KDB_REASON_KEYBOARD: KDB_STATE_SET(KEYBOARD); kdb_printf("due to Keyboard Entry\n"); break; case KDB_REASON_ENTER_SLAVE: /* drop through, slaves only get released via cpu switch */ case KDB_REASON_SWITCH: kdb_printf("due to cpu switch\n"); break; case KDB_REASON_OOPS: kdb_printf("Oops: %s\n", kdb_diemsg); kdb_printf("due to oops @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); kdb_dumpregs(regs); break; case KDB_REASON_NMI: kdb_printf("due to NonMaskable Interrupt @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); kdb_dumpregs(regs); break; case KDB_REASON_SSTEP: case KDB_REASON_BREAK: kdb_printf("due to %s @ " kdb_machreg_fmt "\n", reason == KDB_REASON_BREAK ? "Breakpoint" : "SS trap", instruction_pointer(regs)); /* * Determine if this breakpoint is one that we * are interested in. 
*/ if (db_result != KDB_DB_BPT) { kdb_printf("kdb: error return from kdba_bp_trap: %d\n", db_result); KDB_DEBUG_STATE("kdb_local 6", reason); return 0; /* Not for us, dismiss it */ } break; case KDB_REASON_RECURSE: kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); break; default: kdb_printf("kdb: unexpected reason code: %d\n", reason); KDB_DEBUG_STATE("kdb_local 8", reason); return 0; /* Not for us, dismiss it */ } while (1) { /* * Initialize pager context. */ kdb_nextline = 1; KDB_STATE_CLEAR(SUPPRESS); cmdbuf = cmd_cur; *cmdbuf = '\0'; *(cmd_hist[cmd_head]) = '\0'; if (KDB_FLAG(ONLY_DO_DUMP)) { /* kdb is off but a catastrophic error requires a dump. * Take the dump and reboot. * Turn on logging so the kdb output appears in the log * buffer in the dump. */ const char *setargs[] = { "set", "LOGGING", "1" }; kdb_set(2, setargs); kdb_reboot(0, NULL); /*NOTREACHED*/ } do_full_getstr: #if defined(CONFIG_SMP) snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), raw_smp_processor_id()); #else snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT")); #endif if (defcmd_in_progress) strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN); /* * Fetch command from keyboard */ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str); if (*cmdbuf != '\n') { if (*cmdbuf < 32) { if (cmdptr == cmd_head) { strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); *(cmd_hist[cmd_head] + strlen(cmd_hist[cmd_head])-1) = '\0'; } if (!handle_ctrl_cmd(cmdbuf)) *(cmd_cur+strlen(cmd_cur)-1) = '\0'; cmdbuf = cmd_cur; goto do_full_getstr; } else { strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); } cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT; if (cmd_head == cmd_tail) cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT; } cmdptr = cmd_head; diag = kdb_parse(cmdbuf); if (diag == KDB_NOTFOUND) { kdb_printf("Unknown kdb command: '%s'\n", cmdbuf); diag = 0; } if (diag == KDB_CMD_GO || diag == KDB_CMD_CPU || diag == KDB_CMD_SS || diag == KDB_CMD_SSB || diag == KDB_CMD_KGDB) 
break; if (diag) kdb_cmderror(diag); } KDB_DEBUG_STATE("kdb_local 9", diag); return diag; } /* * kdb_print_state - Print the state data for the current processor * for debugging. * Inputs: * text Identifies the debug point * value Any integer value to be printed, e.g. reason code. */ void kdb_print_state(const char *text, int value) { kdb_printf("state: %s cpu %d value %d initial %d state %x\n", text, raw_smp_processor_id(), value, kdb_initial_cpu, kdb_state); } /* * kdb_main_loop - After initial setup and assignment of the * controlling cpu, all cpus are in this loop. One cpu is in * control and will issue the kdb prompt, the others will spin * until 'go' or cpu switch. * * To get a consistent view of the kernel stacks for all * processes, this routine is invoked from the main kdb code via * an architecture specific routine. kdba_main_loop is * responsible for making the kernel stacks consistent for all * processes, there should be no difference between a blocked * process and a running process as far as kdb is concerned. * Inputs: * reason The reason KDB was invoked * error The hardware-defined error code * reason2 kdb's current reason code. * Initially error but can change * according to kdb state. * db_result Result code from break or debug point. * regs The exception frame at time of fault/breakpoint. * should always be valid. * Returns: * 0 KDB was invoked for an event which it wasn't responsible * 1 KDB handled the event for which it was invoked. */ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error, kdb_dbtrap_t db_result, struct pt_regs *regs) { int result = 1; /* Stay in kdb() until 'go', 'ss[b]' or an error */ while (1) { /* * All processors except the one that is in control * will spin here. */ KDB_DEBUG_STATE("kdb_main_loop 1", reason); while (KDB_STATE(HOLD_CPU)) { /* state KDB is turned off by kdb_cpu to see if the * other cpus are still live, each cpu in this loop * turns it back on. 
*/
			if (!KDB_STATE(KDB))
				KDB_STATE_SET(KDB);
		}

		KDB_STATE_CLEAR(SUPPRESS);
		KDB_DEBUG_STATE("kdb_main_loop 2", reason);
		if (KDB_STATE(LEAVING))
			break;	/* Another cpu said 'go' */
		/* Still using kdb, this processor is in control */
		result = kdb_local(reason2, error, regs, db_result);
		KDB_DEBUG_STATE("kdb_main_loop 3", result);

		if (result == KDB_CMD_CPU)
			break;

		if (result == KDB_CMD_SS) {
			KDB_STATE_SET(DOING_SS);
			break;
		}

		if (result == KDB_CMD_SSB) {
			KDB_STATE_SET(DOING_SS);
			KDB_STATE_SET(DOING_SSB);
			break;
		}

		if (result == KDB_CMD_KGDB) {
			if (!KDB_STATE(DOING_KGDB))
				kdb_printf("Entering please attach debugger "
					   "or use $D#44+ or $3#33\n");
			break;
		}
		if (result && result != 1 && result != KDB_CMD_GO)
			kdb_printf("\nUnexpected kdb_local return code %d\n",
				   result);
		KDB_DEBUG_STATE("kdb_main_loop 4", reason);
		break;
	}
	if (KDB_STATE(DOING_SS))
		KDB_STATE_CLEAR(SSBPT);

	/* Clean up any keyboard devices before leaving */
	kdb_kbd_cleanup_state();

	return result;
}

/*
 * kdb_mdr - This function implements the guts of the 'mdr', memory
 * read command.
 *	mdr  <addr arg>,<byte count>
 * Inputs:
 *	addr	Start address
 *	count	Number of bytes
 * Returns:
 *	Always 0.  Any errors are detected and printed by kdb_getarea.
 */
static int kdb_mdr(unsigned long addr, unsigned int count)
{
	unsigned char c;
	while (count--) {
		/* kdb_getarea reports the fault itself; just stop. */
		if (kdb_getarea(c, addr))
			return 0;
		kdb_printf("%02x", c);
		addr++;
	}
	kdb_printf("\n");
	return 0;
}

/*
 * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
 *	'md8' 'mdr' and 'mds' commands.
 *
 *	md|mds [<addr arg> [<line count> [<radix>]]]
 *	mdWcN [<addr arg> [<line count> [<radix>]]]
 *		where W = is the width (1, 2, 4 or 8) and N is the count.
 *		for eg., md1c20 reads 20 bytes, 1 at a time.
*	mdr  <addr arg>,<byte count>
 */

/* Print one formatted line of memory: address, words, then either
 * symbol information (mds) or a printable-character dump. */
static void kdb_md_line(const char *fmtstr, unsigned long addr,
			int symbolic, int nosect, int bytesperword,
			int num, int repeat, int phys)
{
	/* print just one line of data */
	kdb_symtab_t symtab;
	char cbuf[32];
	char *c = cbuf;
	int i;
	unsigned long word;

	memset(cbuf, '\0', sizeof(cbuf));
	if (phys)
		kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
	else
		kdb_printf(kdb_machreg_fmt0 " ", addr);

	for (i = 0; i < num && repeat--; i++) {
		if (phys) {
			if (kdb_getphysword(&word, addr, bytesperword))
				break;
		} else if (kdb_getword(&word, addr, bytesperword))
			break;
		kdb_printf(fmtstr, word);
		if (symbolic)
			kdbnearsym(word, &symtab);
		else
			memset(&symtab, 0, sizeof(symtab));
		if (symtab.sym_name) {
			kdb_symbol_print(word, &symtab, 0);
			if (!nosect) {
				kdb_printf("\n");
				kdb_printf(" %s %s " kdb_machreg_fmt " "
					   kdb_machreg_fmt " "
					   kdb_machreg_fmt, symtab.mod_name,
					   symtab.sec_name, symtab.sec_start,
					   symtab.sym_start, symtab.sym_end);
			}
			addr += bytesperword;
		} else {
			union {
				u64 word;
				unsigned char c[8];
			} wc;
			unsigned char *cp;
#ifdef	__BIG_ENDIAN
			cp = wc.c + 8 - bytesperword;
#else
			cp = wc.c;
#endif
			wc.word = word;
#define printable_char(c) \
	({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
			/* Deliberate fallthrough: each case emits the extra
			 * characters its width adds over the next case. */
			switch (bytesperword) {
			case 8:
				*c++ = printable_char(*cp++);
				*c++ = printable_char(*cp++);
				*c++ = printable_char(*cp++);
				*c++ = printable_char(*cp++);
				addr += 4;
				/* fall through */
			case 4:
				*c++ = printable_char(*cp++);
				*c++ = printable_char(*cp++);
				addr += 2;
				/* fall through */
			case 2:
				*c++ = printable_char(*cp++);
				addr++;
				/* fall through */
			case 1:
				*c++ = printable_char(*cp++);
				addr++;
				break;
			}
#undef printable_char
		}
	}
	kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
		   " ", cbuf);
}

static int kdb_md(int argc, const char **argv)
{
	static unsigned long last_addr;
	static int last_radix, last_bytesperword, last_repeat;
	int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
	int nosect = 0;
	char fmtchar, fmtstr[64];
	unsigned long addr;
	unsigned long word;
	long offset = 0;
	int symbolic = 0;
	int valid = 0;
	int phys = 0;

	kdbgetintenv("MDCOUNT", &mdcount);
	kdbgetintenv("RADIX", &radix);
	kdbgetintenv("BYTESPERWORD", &bytesperword);

	/* Assume 'md <addr>' and start with environment values */
	repeat = mdcount * 16 / bytesperword;

	if (strcmp(argv[0], "mdr") == 0) {
		if (argc != 2)
			return KDB_ARGCOUNT;
		valid = 1;
	} else if (isdigit(argv[0][2])) {
		/* mdW or mdWcN: parse width and optional count suffix. */
		bytesperword = (int)(argv[0][2] - '0');
		if (bytesperword == 0) {
			bytesperword = last_bytesperword;
			if (bytesperword == 0)
				bytesperword = 4;
		}
		last_bytesperword = bytesperword;
		repeat = mdcount * 16 / bytesperword;
		if (!argv[0][3])
			valid = 1;
		else if (argv[0][3] == 'c' && argv[0][4]) {
			char *p;
			repeat = simple_strtoul(argv[0] + 4, &p, 10);
			mdcount = ((repeat * bytesperword) + 15) / 16;
			valid = !*p;
		}
		last_repeat = repeat;
	} else if (strcmp(argv[0], "md") == 0)
		valid = 1;
	else if (strcmp(argv[0], "mds") == 0)
		valid = 1;
	else if (strcmp(argv[0], "mdp") == 0) {
		phys = valid = 1;
	}
	if (!valid)
		return KDB_NOTFOUND;

	if (argc == 0) {
		/* Bare repeat: continue from where the last md stopped. */
		if (last_addr == 0)
			return KDB_ARGCOUNT;
		addr = last_addr;
		radix = last_radix;
		bytesperword = last_bytesperword;
		repeat = last_repeat;
		mdcount = ((repeat * bytesperword) + 15) / 16;
	}

	if (argc) {
		unsigned long val;
		int diag, nextarg
= 1;
		diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
				     &offset, NULL);
		if (diag)
			return diag;
		if (argc > nextarg+2)
			return KDB_ARGCOUNT;

		if (argc >= nextarg) {
			diag = kdbgetularg(argv[nextarg], &val);
			if (!diag) {
				mdcount = (int) val;
				repeat = mdcount * 16 / bytesperword;
			}
		}
		if (argc >= nextarg+1) {
			diag = kdbgetularg(argv[nextarg+1], &val);
			if (!diag)
				radix = (int) val;
		}
	}

	if (strcmp(argv[0], "mdr") == 0)
		return kdb_mdr(addr, mdcount);

	switch (radix) {
	case 10:
		fmtchar = 'd';
		break;
	case 16:
		fmtchar = 'x';
		break;
	case 8:
		fmtchar = 'o';
		break;
	default:
		return KDB_BADRADIX;
	}

	last_radix = radix;

	if (bytesperword > KDB_WORD_SIZE)
		return KDB_BADWIDTH;

	switch (bytesperword) {
	case 8:
		sprintf(fmtstr, "%%16.16l%c ", fmtchar);
		break;
	case 4:
		sprintf(fmtstr, "%%8.8l%c ", fmtchar);
		break;
	case 2:
		sprintf(fmtstr, "%%4.4l%c ", fmtchar);
		break;
	case 1:
		sprintf(fmtstr, "%%2.2l%c ", fmtchar);
		break;
	default:
		return KDB_BADWIDTH;
	}

	last_repeat = repeat;
	last_bytesperword = bytesperword;

	if (strcmp(argv[0], "mds") == 0) {
		symbolic = 1;
		/* Do not save these changes as last_*, they are temporary mds
		 * overrides.
		 */
		bytesperword = KDB_WORD_SIZE;
		repeat = mdcount;
		kdbgetintenv("NOSECT", &nosect);
	}

	/* Round address down modulo BYTESPERWORD */
	addr &= ~(bytesperword-1);

	while (repeat > 0) {
		unsigned long a;
		int n, z, num = (symbolic ? 1 : (16 / bytesperword));

		if (KDB_FLAG(CMD_INTERRUPT))
			return 0;
		/* Scan ahead for the extent of all-zero words so runs of
		 * zeroes can be suppressed below. */
		for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
			if (phys) {
				if (kdb_getphysword(&word, a, bytesperword)
						|| word)
					break;
			} else if (kdb_getword(&word, a, bytesperword) || word)
				break;
		}
		n = min(num, repeat);
		kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
			    num, repeat, phys);
		addr += bytesperword * n;
		repeat -= n;
		z = (z + num - 1) / num;
		if (z > 2) {
			int s = num * (z-2);
			kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
				   " zero suppressed\n",
				addr, addr + bytesperword * s - 1);
			addr += bytesperword * s;
			repeat -= s;
		}
	}
	last_addr = addr;

	return 0;
}

/*
 * kdb_mm - This function implements the 'mm' command.
 *	mm address-expression new-value
 * Remarks:
 *	mm works on machine words, mmW works on bytes.
 */
static int kdb_mm(int argc, const char **argv)
{
	int diag;
	unsigned long addr;
	long offset = 0;
	unsigned long contents;
	int nextarg;
	int width;

	if (argv[0][2] && !isdigit(argv[0][2]))
		return KDB_NOTFOUND;

	if (argc < 2)
		return KDB_ARGCOUNT;

	nextarg = 1;
	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
	if (diag)
		return diag;

	if (nextarg > argc)
		return KDB_ARGCOUNT;
	diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
	if (diag)
		return diag;

	if (nextarg != argc + 1)
		return KDB_ARGCOUNT;

	width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
	diag = kdb_putword(addr, contents, width);
	if (diag)
		return diag;

	kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);

	return 0;
}

/*
 * kdb_go - This function implements the 'go' command.
*	go [address-expression]
 */
static int kdb_go(int argc, const char **argv)
{
	unsigned long addr;
	int diag;
	int nextarg;
	long offset;

	if (raw_smp_processor_id() != kdb_initial_cpu) {
		kdb_printf("go must execute on the entry cpu, "
			   "please use \"cpu %d\" and then execute go\n",
			   kdb_initial_cpu);
		return KDB_BADCPUNUM;
	}
	if (argc == 1) {
		nextarg = 1;
		diag = kdbgetaddrarg(argc, argv, &nextarg,
				     &addr, &offset, NULL);
		if (diag)
			return diag;
	} else if (argc) {
		return KDB_ARGCOUNT;
	}

	diag = KDB_CMD_GO;
	if (KDB_FLAG(CATASTROPHIC)) {
		kdb_printf("Catastrophic error detected\n");
		kdb_printf("kdb_continue_catastrophic=%d, ",
			kdb_continue_catastrophic);
		if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
			kdb_printf("type go a second time if you really want "
				   "to continue\n");
			return 0;
		}
		if (kdb_continue_catastrophic == 2) {
			kdb_printf("forcing reboot\n");
			kdb_reboot(0, NULL);
		}
		kdb_printf("attempting to continue\n");
	}
	return diag;
}

/*
 * kdb_rd - This function implements the 'rd' command.  Display all
 *	registers known to the generic debug core, wrapping output
 *	lines at roughly 80 columns.
 */
static int kdb_rd(int argc, const char **argv)
{
	int len = kdb_check_regs();
#if DBG_MAX_REG_NUM > 0
	int i;
	char *rname;
	int rsize;
	u64 reg64;
	u32 reg32;
	u16 reg16;
	u8 reg8;

	if (len)
		return len;

	for (i = 0; i < DBG_MAX_REG_NUM; i++) {
		rsize = dbg_reg_def[i].size * 2;
		if (rsize > 16)
			rsize = 2;
		/* Start a new output line if this register won't fit. */
		if (len + strlen(dbg_reg_def[i].name) + 4 + rsize > 80) {
			len = 0;
			kdb_printf("\n");
		}
		if (len)
			len += kdb_printf(" ");
		switch(dbg_reg_def[i].size * 8) {
		case 8:
			rname = dbg_get_reg(i, &reg8, kdb_current_regs);
			if (!rname)
				break;
			len += kdb_printf("%s: %02x", rname, reg8);
			break;
		case 16:
			rname = dbg_get_reg(i, &reg16, kdb_current_regs);
			if (!rname)
				break;
			len += kdb_printf("%s: %04x", rname, reg16);
			break;
		case 32:
			rname = dbg_get_reg(i, &reg32, kdb_current_regs);
			if (!rname)
				break;
			len += kdb_printf("%s: %08x", rname, reg32);
			break;
		case 64:
			rname = dbg_get_reg(i, &reg64, kdb_current_regs);
			if (!rname)
				break;
			len += kdb_printf("%s: %016llx", rname, reg64);
			break;
		default:
			len += kdb_printf("%s: ??", dbg_reg_def[i].name);
		}
	}
	kdb_printf("\n");
#else
	if (len)
		return len;

	kdb_dumpregs(kdb_current_regs);
#endif
	return 0;
}

/*
 * kdb_rm - This function implements the 'rm' (register modify)  command.
 *	rm register-name new-contents
 * Remarks:
 *	Allows register modification with the same restrictions as gdb
 */
static int kdb_rm(int argc, const char **argv)
{
#if DBG_MAX_REG_NUM > 0
	int diag;
	const char *rname;
	int i;
	u64 reg64;
	u32 reg32;
	u16 reg16;
	u8 reg8;

	if (argc != 2)
		return KDB_ARGCOUNT;
	/*
	 * Allow presence or absence of leading '%' symbol.
	 */
	rname = argv[1];
	if (*rname == '%')
		rname++;

	diag = kdbgetu64arg(argv[2], &reg64);
	if (diag)
		return diag;

	diag = kdb_check_regs();
	if (diag)
		return diag;

	diag = KDB_BADREG;
	for (i = 0; i < DBG_MAX_REG_NUM; i++) {
		if (strcmp(rname, dbg_reg_def[i].name) == 0) {
			diag = 0;
			break;
		}
	}
	if (!diag) {
		/* Narrow the parsed 64-bit value to the register's width. */
		switch(dbg_reg_def[i].size * 8) {
		case 8:
			reg8 = reg64;
			dbg_set_reg(i, &reg8, kdb_current_regs);
			break;
		case 16:
			reg16 = reg64;
			dbg_set_reg(i, &reg16, kdb_current_regs);
			break;
		case 32:
			reg32 = reg64;
			dbg_set_reg(i, &reg32, kdb_current_regs);
			break;
		case 64:
			dbg_set_reg(i, &reg64, kdb_current_regs);
			break;
		}
	}
	return diag;
#else
	kdb_printf("ERROR: Register set currently not implemented\n");
	return 0;
#endif
}

#if defined(CONFIG_MAGIC_SYSRQ)
/*
 * kdb_sr - This function implements the 'sr' (SYSRQ key) command
 *	which interfaces to the soi-disant MAGIC SYSRQ functionality.
 *	sr <magic-sysrq-code>
 */
static int kdb_sr(int argc, const char **argv)
{
	if (argc != 1)
		return KDB_ARGCOUNT;
	kdb_trap_printk++;
	__handle_sysrq(*argv[1], false);
	kdb_trap_printk--;

	return 0;
}
#endif	/* CONFIG_MAGIC_SYSRQ */

/*
 * kdb_ef - This function implements the 'regs' (display exception
 *	frame) command.  This command takes an address and expects to
 *	find an exception frame at that address, formats and prints
 *	it.
 *	regs address-expression
 * Remarks:
 *	Not done yet.
*/ static int kdb_ef(int argc, const char **argv) { int diag; unsigned long addr; long offset; int nextarg; if (argc != 1) return KDB_ARGCOUNT; nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; show_regs((struct pt_regs *)addr); return 0; } #if defined(CONFIG_MODULES) /* * kdb_lsmod - This function implements the 'lsmod' command. Lists * currently loaded kernel modules. * Mostly taken from userland lsmod. */ static int kdb_lsmod(int argc, const char **argv) { struct module *mod; if (argc != 0) return KDB_ARGCOUNT; kdb_printf("Module Size modstruct Used by\n"); list_for_each_entry(mod, kdb_modules, list) { kdb_printf("%-20s%8u 0x%p ", mod->name, mod->core_size, (void *)mod); #ifdef CONFIG_MODULE_UNLOAD kdb_printf("%4ld ", module_refcount(mod)); #endif if (mod->state == MODULE_STATE_GOING) kdb_printf(" (Unloading)"); else if (mod->state == MODULE_STATE_COMING) kdb_printf(" (Loading)"); else kdb_printf(" (Live)"); kdb_printf(" 0x%p", mod->module_core); #ifdef CONFIG_MODULE_UNLOAD { struct module_use *use; kdb_printf(" [ "); list_for_each_entry(use, &mod->source_list, source_list) kdb_printf("%s ", use->target->name); kdb_printf("]\n"); } #endif } return 0; } #endif /* CONFIG_MODULES */ /* * kdb_env - This function implements the 'env' command. Display the * current environment variables. */ static int kdb_env(int argc, const char **argv) { int i; for (i = 0; i < __nenv; i++) { if (__env[i]) kdb_printf("%s\n", __env[i]); } if (KDB_DEBUG(MASK)) kdb_printf("KDBFLAGS=0x%x\n", kdb_flags); return 0; } #ifdef CONFIG_PRINTK /* * kdb_dmesg - This function implements the 'dmesg' command to display * the contents of the syslog buffer. 
* dmesg [lines] [adjust] */ static int kdb_dmesg(int argc, const char **argv) { char *syslog_data[4], *start, *end, c = '\0', *p; int diag, logging, logsize, lines = 0, adjust = 0, n; if (argc > 2) return KDB_ARGCOUNT; if (argc) { char *cp; lines = simple_strtol(argv[1], &cp, 0); if (*cp) lines = 0; if (argc > 1) { adjust = simple_strtoul(argv[2], &cp, 0); if (*cp || adjust < 0) adjust = 0; } } /* disable LOGGING if set */ diag = kdbgetintenv("LOGGING", &logging); if (!diag && logging) { const char *setargs[] = { "set", "LOGGING", "0" }; kdb_set(2, setargs); } /* syslog_data[0,1] physical start, end+1. syslog_data[2,3] * logical start, end+1. */ kdb_syslog_data(syslog_data); if (syslog_data[2] == syslog_data[3]) return 0; logsize = syslog_data[1] - syslog_data[0]; start = syslog_data[2]; end = syslog_data[3]; #define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0]) for (n = 0, p = start; p < end; ++p) { c = *KDB_WRAP(p); if (c == '\n') ++n; } if (c != '\n') ++n; if (lines < 0) { if (adjust >= n) kdb_printf("buffer only contains %d lines, nothing " "printed\n", n); else if (adjust - lines >= n) kdb_printf("buffer only contains %d lines, last %d " "lines printed\n", n, n - adjust); if (adjust) { for (; start < end && adjust; ++start) { if (*KDB_WRAP(start) == '\n') --adjust; } if (start < end) ++start; } for (p = start; p < end && lines; ++p) { if (*KDB_WRAP(p) == '\n') ++lines; } end = p; } else if (lines > 0) { int skip = n - (adjust + lines); if (adjust >= n) { kdb_printf("buffer only contains %d lines, " "nothing printed\n", n); skip = n; } else if (skip < 0) { lines += skip; skip = 0; kdb_printf("buffer only contains %d lines, first " "%d lines printed\n", n, lines); } for (; start < end && skip; ++start) { if (*KDB_WRAP(start) == '\n') --skip; } for (p = start; p < end && lines; ++p) { if (*KDB_WRAP(p) == '\n') --lines; } end = p; } /* Do a line at a time (max 200 chars) to reduce protocol overhead */ c = '\n'; while (start != end) { char 
buf[201]; p = buf; if (KDB_FLAG(CMD_INTERRUPT)) return 0; while (start < end && (c = *KDB_WRAP(start)) && (p - buf) < sizeof(buf)-1) { ++start; *p++ = c; if (c == '\n') break; } *p = '\0'; kdb_printf("%s", buf); } if (c != '\n') kdb_printf("\n"); return 0; } #endif /* CONFIG_PRINTK */ /* * kdb_cpu - This function implements the 'cpu' command. * cpu [<cpunum>] * Returns: * KDB_CMD_CPU for success, a kdb diagnostic if error */ static void kdb_cpu_status(void) { int i, start_cpu, first_print = 1; char state, prev_state = '?'; kdb_printf("Currently on cpu %d\n", raw_smp_processor_id()); kdb_printf("Available cpus: "); for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) { state = 'F'; /* cpu is offline */ } else { state = ' '; /* cpu is responding to kdb */ if (kdb_task_state_char(KDB_TSK(i)) == 'I') state = 'I'; /* idle task */ } if (state != prev_state) { if (prev_state != '?') { if (!first_print) kdb_printf(", "); first_print = 0; kdb_printf("%d", start_cpu); if (start_cpu < i-1) kdb_printf("-%d", i-1); if (prev_state != ' ') kdb_printf("(%c)", prev_state); } prev_state = state; start_cpu = i; } } /* print the trailing cpus, ignoring them if they are all offline */ if (prev_state != 'F') { if (!first_print) kdb_printf(", "); kdb_printf("%d", start_cpu); if (start_cpu < i-1) kdb_printf("-%d", i-1); if (prev_state != ' ') kdb_printf("(%c)", prev_state); } kdb_printf("\n"); } static int kdb_cpu(int argc, const char **argv) { unsigned long cpunum; int diag; if (argc == 0) { kdb_cpu_status(); return 0; } if (argc != 1) return KDB_ARGCOUNT; diag = kdbgetularg(argv[1], &cpunum); if (diag) return diag; /* * Validate cpunum */ if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) return KDB_BADCPUNUM; dbg_switch_cpu = cpunum; /* * Switch to other cpu */ return KDB_CMD_CPU; } /* The user may not realize that ps/bta with no parameters does not print idle * or sleeping system daemon processes, so tell them how many were suppressed. 
*/ void kdb_ps_suppressed(void) { int idle = 0, daemon = 0; unsigned long mask_I = kdb_task_state_string("I"), mask_M = kdb_task_state_string("M"); unsigned long cpu; const struct task_struct *p, *g; for_each_online_cpu(cpu) { p = kdb_curr_task(cpu); if (kdb_task_state(p, mask_I)) ++idle; } kdb_do_each_thread(g, p) { if (kdb_task_state(p, mask_M)) ++daemon; } kdb_while_each_thread(g, p); if (idle || daemon) { if (idle) kdb_printf("%d idle process%s (state I)%s\n", idle, idle == 1 ? "" : "es", daemon ? " and " : ""); if (daemon) kdb_printf("%d sleeping system daemon (state M) " "process%s", daemon, daemon == 1 ? "" : "es"); kdb_printf(" suppressed,\nuse 'ps A' to see all.\n"); } } /* * kdb_ps - This function implements the 'ps' command which shows a * list of the active processes. * ps [DRSTCZEUIMA] All processes, optionally filtered by state */ void kdb_ps1(const struct task_struct *p) { int cpu; unsigned long tmp; if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long))) return; cpu = kdb_process_cpu(p); kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n", (void *)p, p->pid, p->parent->pid, kdb_task_has_cpu(p), kdb_process_cpu(p), kdb_task_state_char(p), (void *)(&p->thread), p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ', p->comm); if (kdb_task_has_cpu(p)) { if (!KDB_TSK(cpu)) { kdb_printf(" Error: no saved data for this cpu\n"); } else { if (KDB_TSK(cpu) != p) kdb_printf(" Error: does not match running " "process table (0x%p)\n", KDB_TSK(cpu)); } } } static int kdb_ps(int argc, const char **argv) { struct task_struct *g, *p; unsigned long mask, cpu; if (argc == 0) kdb_ps_suppressed(); kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n", (int)(2*sizeof(void *))+2, "Task Addr", (int)(2*sizeof(void *))+2, "Thread"); mask = kdb_task_state_string(argc ? 
argv[1] : NULL); /* Run the active tasks first */ for_each_online_cpu(cpu) { if (KDB_FLAG(CMD_INTERRUPT)) return 0; p = kdb_curr_task(cpu); if (kdb_task_state(p, mask)) kdb_ps1(p); } kdb_printf("\n"); /* Now the real tasks */ kdb_do_each_thread(g, p) { if (KDB_FLAG(CMD_INTERRUPT)) return 0; if (kdb_task_state(p, mask)) kdb_ps1(p); } kdb_while_each_thread(g, p); return 0; } /* * kdb_pid - This function implements the 'pid' command which switches * the currently active process. * pid [<pid> | R] */ static int kdb_pid(int argc, const char **argv) { struct task_struct *p; unsigned long val; int diag; if (argc > 1) return KDB_ARGCOUNT; if (argc) { if (strcmp(argv[1], "R") == 0) { p = KDB_TSK(kdb_initial_cpu); } else { diag = kdbgetularg(argv[1], &val); if (diag) return KDB_BADINT; p = find_task_by_pid_ns((pid_t)val, &init_pid_ns); if (!p) { kdb_printf("No task with pid=%d\n", (pid_t)val); return 0; } } kdb_set_current_task(p); } kdb_printf("KDB current process is %s(pid=%d)\n", kdb_current_task->comm, kdb_current_task->pid); return 0; } /* * kdb_ll - This function implements the 'll' command which follows a * linked list and executes an arbitrary command for each * element. */ static int kdb_ll(int argc, const char **argv) { int diag = 0; unsigned long addr; long offset = 0; unsigned long va; unsigned long linkoffset; int nextarg; const char *command; if (argc != 3) return KDB_ARGCOUNT; nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; diag = kdbgetularg(argv[2], &linkoffset); if (diag) return diag; /* * Using the starting address as * the first element in the list, and assuming that * the list ends with a null pointer. 
*/ va = addr; command = kdb_strdup(argv[3], GFP_KDB); if (!command) { kdb_printf("%s: cannot duplicate command\n", __func__); return 0; } /* Recursive use of kdb_parse, do not use argv after this point */ argv = NULL; while (va) { char buf[80]; if (KDB_FLAG(CMD_INTERRUPT)) goto out; sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va); diag = kdb_parse(buf); if (diag) goto out; addr = va + linkoffset; if (kdb_getword(&va, addr, sizeof(va))) goto out; } out: kfree(command); return diag; } static int kdb_kgdb(int argc, const char **argv) { return KDB_CMD_KGDB; } /* * kdb_help - This function implements the 'help' and '?' commands. */ static int kdb_help(int argc, const char **argv) { kdbtab_t *kt; int i; kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description"); kdb_printf("-----------------------------" "-----------------------------\n"); for_each_kdbcmd(kt, i) { if (kt->cmd_name) kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name, kt->cmd_usage, kt->cmd_help); if (KDB_FLAG(CMD_INTERRUPT)) return 0; } return 0; } /* * kdb_kill - This function implements the 'kill' commands. */ static int kdb_kill(int argc, const char **argv) { long sig, pid; char *endp; struct task_struct *p; struct siginfo info; if (argc != 2) return KDB_ARGCOUNT; sig = simple_strtol(argv[1], &endp, 0); if (*endp) return KDB_BADINT; if (sig >= 0) { kdb_printf("Invalid signal parameter.<-signal>\n"); return 0; } sig = -sig; pid = simple_strtol(argv[2], &endp, 0); if (*endp) return KDB_BADINT; if (pid <= 0) { kdb_printf("Process ID must be large than 0.\n"); return 0; } /* Find the process. 
*/ p = find_task_by_pid_ns(pid, &init_pid_ns); if (!p) { kdb_printf("The specified process isn't found.\n"); return 0; } p = p->group_leader; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = pid; /* same capabilities as process being signalled */ info.si_uid = 0; /* kdb has root authority */ kdb_send_sig_info(p, &info); return 0; } struct kdb_tm { int tm_sec; /* seconds */ int tm_min; /* minutes */ int tm_hour; /* hours */ int tm_mday; /* day of the month */ int tm_mon; /* month */ int tm_year; /* year */ }; static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm) { /* This will work from 1970-2099, 2100 is not a leap year */ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; memset(tm, 0, sizeof(*tm)); tm->tm_sec = tv->tv_sec % (24 * 60 * 60); tm->tm_mday = tv->tv_sec / (24 * 60 * 60) + (2 * 365 + 1); /* shift base from 1970 to 1968 */ tm->tm_min = tm->tm_sec / 60 % 60; tm->tm_hour = tm->tm_sec / 60 / 60; tm->tm_sec = tm->tm_sec % 60; tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1)); tm->tm_mday %= (4*365+1); mon_day[1] = 29; while (tm->tm_mday >= mon_day[tm->tm_mon]) { tm->tm_mday -= mon_day[tm->tm_mon]; if (++tm->tm_mon == 12) { tm->tm_mon = 0; ++tm->tm_year; mon_day[1] = 28; } } ++tm->tm_mday; } /* * Most of this code has been lifted from kernel/timer.c::sys_sysinfo(). * I cannot call that code directly from kdb, it has an unconditional * cli()/sti() and calls routines that take locks which can stop the debugger. */ static void kdb_sysinfo(struct sysinfo *val) { struct timespec uptime; do_posix_clock_monotonic_gettime(&uptime); memset(val, 0, sizeof(*val)); val->uptime = uptime.tv_sec; val->loads[0] = avenrun[0]; val->loads[1] = avenrun[1]; val->loads[2] = avenrun[2]; val->procs = nr_threads-1; si_meminfo(val); return; } /* * kdb_summary - This function implements the 'summary' command. 
*/ static int kdb_summary(int argc, const char **argv) { struct timespec now; struct kdb_tm tm; struct sysinfo val; if (argc) return KDB_ARGCOUNT; kdb_printf("sysname %s\n", init_uts_ns.name.sysname); kdb_printf("release %s\n", init_uts_ns.name.release); kdb_printf("version %s\n", init_uts_ns.name.version); kdb_printf("machine %s\n", init_uts_ns.name.machine); kdb_printf("nodename %s\n", init_uts_ns.name.nodename); kdb_printf("domainname %s\n", init_uts_ns.name.domainname); kdb_printf("ccversion %s\n", __stringify(CCVERSION)); now = __current_kernel_time(); kdb_gmtime(&now, &tm); kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d " "tz_minuteswest %d\n", 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, sys_tz.tz_minuteswest); kdb_sysinfo(&val); kdb_printf("uptime "); if (val.uptime > (24*60*60)) { int days = val.uptime / (24*60*60); val.uptime %= (24*60*60); kdb_printf("%d day%s ", days, days == 1 ? "" : "s"); } kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60); /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */ #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n", LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); #undef LOAD_INT #undef LOAD_FRAC /* Display in kilobytes */ #define K(x) ((x) << (PAGE_SHIFT - 10)) kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" "Buffers: %8lu kB\n", val.totalram, val.freeram, val.bufferram); return 0; } /* * kdb_per_cpu - This function implements the 'per_cpu' command. 
*/ static int kdb_per_cpu(int argc, const char **argv) { char fmtstr[64]; int cpu, diag, nextarg = 1; unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL; if (argc < 1 || argc > 3) return KDB_ARGCOUNT; diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL); if (diag) return diag; if (argc >= 2) { diag = kdbgetularg(argv[2], &bytesperword); if (diag) return diag; } if (!bytesperword) bytesperword = KDB_WORD_SIZE; else if (bytesperword > KDB_WORD_SIZE) return KDB_BADWIDTH; sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword)); if (argc >= 3) { diag = kdbgetularg(argv[3], &whichcpu); if (diag) return diag; if (!cpu_online(whichcpu)) { kdb_printf("cpu %ld is not online\n", whichcpu); return KDB_BADCPUNUM; } } /* Most architectures use __per_cpu_offset[cpu], some use * __per_cpu_offset(cpu), smp has no __per_cpu_offset. */ #ifdef __per_cpu_offset #define KDB_PCU(cpu) __per_cpu_offset(cpu) #else #ifdef CONFIG_SMP #define KDB_PCU(cpu) __per_cpu_offset[cpu] #else #define KDB_PCU(cpu) 0 #endif #endif for_each_online_cpu(cpu) { if (KDB_FLAG(CMD_INTERRUPT)) return 0; if (whichcpu != ~0UL && whichcpu != cpu) continue; addr = symaddr + KDB_PCU(cpu); diag = kdb_getword(&val, addr, bytesperword); if (diag) { kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to " "read, diag=%d\n", cpu, addr, diag); continue; } kdb_printf("%5d ", cpu); kdb_md_line(fmtstr, addr, bytesperword == KDB_WORD_SIZE, 1, bytesperword, 1, 1, 0); } #undef KDB_PCU return 0; } /* * display help for the use of cmd | grep pattern */ static int kdb_grep_help(int argc, const char **argv) { kdb_printf("Usage of cmd args | grep pattern:\n"); kdb_printf(" Any command's output may be filtered through an "); kdb_printf("emulated 'pipe'.\n"); kdb_printf(" 'grep' is just a key word.\n"); kdb_printf(" The pattern may include a very limited set of " "metacharacters:\n"); kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n"); kdb_printf(" And if there are spaces in the pattern, you may " "quote 
it:\n"); kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\"" " or \"^pat tern$\"\n"); return 0; } /* * kdb_register_repeat - This function is used to register a kernel * debugger command. * Inputs: * cmd Command name * func Function to execute the command * usage A simple usage string showing arguments * help A simple help string describing command * repeat Does the command auto repeat on enter? * Returns: * zero for success, one if a duplicate command. */ #define kdb_command_extend 50 /* arbitrary */ int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, char *help, short minlen, kdb_repeat_t repeat) { int i; kdbtab_t *kp; /* * Brute force method to determine duplicates */ for_each_kdbcmd(kp, i) { if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { kdb_printf("Duplicate kdb command registered: " "%s, func %p help %s\n", cmd, func, help); return 1; } } /* * Insert command into first available location in table */ for_each_kdbcmd(kp, i) { if (kp->cmd_name == NULL) break; } if (i >= kdb_max_commands) { kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX + kdb_command_extend) * sizeof(*new), GFP_KDB); if (!new) { kdb_printf("Could not allocate new kdb_command " "table\n"); return 1; } if (kdb_commands) { memcpy(new, kdb_commands, (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new)); kfree(kdb_commands); } memset(new + kdb_max_commands, 0, kdb_command_extend * sizeof(*new)); kdb_commands = new; kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX; kdb_max_commands += kdb_command_extend; } kp->cmd_name = cmd; kp->cmd_func = func; kp->cmd_usage = usage; kp->cmd_help = help; kp->cmd_flags = 0; kp->cmd_minlen = minlen; kp->cmd_repeat = repeat; return 0; } EXPORT_SYMBOL_GPL(kdb_register_repeat); /* * kdb_register - Compatibility register function for commands that do * not need to specify a repeat state. Equivalent to * kdb_register_repeat with KDB_REPEAT_NONE. 
* Inputs: * cmd Command name * func Function to execute the command * usage A simple usage string showing arguments * help A simple help string describing command * Returns: * zero for success, one if a duplicate command. */ int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return kdb_register_repeat(cmd, func, usage, help, minlen, KDB_REPEAT_NONE); } EXPORT_SYMBOL_GPL(kdb_register); /* * kdb_unregister - This function is used to unregister a kernel * debugger command. It is generally called when a module which * implements kdb commands is unloaded. * Inputs: * cmd Command name * Returns: * zero for success, one command not registered. */ int kdb_unregister(char *cmd) { int i; kdbtab_t *kp; /* * find the command. */ for_each_kdbcmd(kp, i) { if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { kp->cmd_name = NULL; return 0; } } /* Couldn't find it. */ return 1; } EXPORT_SYMBOL_GPL(kdb_unregister); /* Initialize the kdb command table. */ static void __init kdb_inittab(void) { int i; kdbtab_t *kp; for_each_kdbcmd(kp, i) kp->cmd_name = NULL; kdb_register_repeat("md", kdb_md, "<vaddr>", "Display Memory Contents, also mdWcN, e.g. 
md8c1", 1, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mds", kdb_md, "<vaddr>", "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("go", kdb_go, "[<vaddr>]", "Continue Execution", 1, KDB_REPEAT_NONE); kdb_register_repeat("rd", kdb_rd, "", "Display Registers", 0, KDB_REPEAT_NONE); kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", "Modify Registers", 0, KDB_REPEAT_NONE); kdb_register_repeat("ef", kdb_ef, "<vaddr>", "Display exception frame", 0, KDB_REPEAT_NONE); kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", "Stack traceback", 1, KDB_REPEAT_NONE); kdb_register_repeat("btp", kdb_bt, "<pid>", "Display stack for process <pid>", 0, KDB_REPEAT_NONE); kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]", "Display stack all processes", 0, KDB_REPEAT_NONE); kdb_register_repeat("btc", kdb_bt, "", "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); kdb_register_repeat("btt", kdb_bt, "<vaddr>", "Backtrace process given its struct task address", 0, KDB_REPEAT_NONE); kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>", "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE); kdb_register_repeat("env", kdb_env, "", "Show environment variables", 0, KDB_REPEAT_NONE); kdb_register_repeat("set", kdb_set, "", "Set environment variables", 0, KDB_REPEAT_NONE); kdb_register_repeat("help", kdb_help, "", "Display Help Message", 1, KDB_REPEAT_NONE); kdb_register_repeat("?", kdb_help, "", "Display Help Message", 0, KDB_REPEAT_NONE); kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", "Switch to new cpu", 0, KDB_REPEAT_NONE); kdb_register_repeat("kgdb", kdb_kgdb, "", "Enter kgdb mode", 0, KDB_REPEAT_NONE); 
kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", "Display active task list", 0, KDB_REPEAT_NONE); kdb_register_repeat("pid", kdb_pid, "<pidnum>", "Switch to another task", 0, KDB_REPEAT_NONE); kdb_register_repeat("reboot", kdb_reboot, "", "Reboot the machine immediately", 0, KDB_REPEAT_NONE); #if defined(CONFIG_MODULES) kdb_register_repeat("lsmod", kdb_lsmod, "", "List loaded kernel modules", 0, KDB_REPEAT_NONE); #endif #if defined(CONFIG_MAGIC_SYSRQ) kdb_register_repeat("sr", kdb_sr, "<key>", "Magic SysRq key", 0, KDB_REPEAT_NONE); #endif #if defined(CONFIG_PRINTK) kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", "Display syslog buffer", 0, KDB_REPEAT_NONE); #endif kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", "Send a signal to a process", 0, KDB_REPEAT_NONE); kdb_register_repeat("summary", kdb_summary, "", "Summarize the system", 4, KDB_REPEAT_NONE); kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", "Display per_cpu variables", 3, KDB_REPEAT_NONE); kdb_register_repeat("grephelp", kdb_grep_help, "", "Display help on | grep", 0, KDB_REPEAT_NONE); } /* Execute any commands defined in kdb_cmds. 
*/ static void __init kdb_cmd_init(void) { int i, diag; for (i = 0; kdb_cmds[i]; ++i) { diag = kdb_parse(kdb_cmds[i]); if (diag) kdb_printf("kdb command %s failed, kdb diag %d\n", kdb_cmds[i], diag); } if (defcmd_in_progress) { kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n"); kdb_parse("endefcmd"); } } /* Initialize kdb_printf, breakpoint tables and kdb state */ void __init kdb_init(int lvl) { static int kdb_init_lvl = KDB_NOT_INITIALIZED; int i; if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl) return; for (i = kdb_init_lvl; i < lvl; i++) { switch (i) { case KDB_NOT_INITIALIZED: kdb_inittab(); /* Initialize Command Table */ kdb_initbptab(); /* Initialize Breakpoints */ break; case KDB_INIT_EARLY: kdb_cmd_init(); /* Build kdb_cmds tables */ break; } } kdb_init_lvl = lvl; }
gpl-2.0
SmartisanTech/T1Kernel
arch/x86/xen/pci-swiotlb-xen.c
4820
1818
/* Glue code to lib/swiotlb-xen.c */ #include <linux/dma-mapping.h> #include <linux/pci.h> #include <xen/swiotlb-xen.h> #include <asm/xen/hypervisor.h> #include <xen/xen.h> #include <asm/iommu_table.h> int xen_swiotlb __read_mostly; static struct dma_map_ops xen_swiotlb_dma_ops = { .mapping_error = xen_swiotlb_dma_mapping_error, .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, .sync_single_for_device = xen_swiotlb_sync_single_for_device, .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, .map_sg = xen_swiotlb_map_sg_attrs, .unmap_sg = xen_swiotlb_unmap_sg_attrs, .map_page = xen_swiotlb_map_page, .unmap_page = xen_swiotlb_unmap_page, .dma_supported = xen_swiotlb_dma_supported, }; /* * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary * * This returns non-zero if we are forced to use xen_swiotlb (by the boot * option). */ int __init pci_xen_swiotlb_detect(void) { /* If running as PV guest, either iommu=soft, or swiotlb=force will * activate this IOMMU. If running as PV privileged, activate it * irregardless. */ if ((xen_initial_domain() || swiotlb || swiotlb_force) && (xen_pv_domain())) xen_swiotlb = 1; /* If we are running under Xen, we MUST disable the native SWIOTLB. * Don't worry about swiotlb_force flag activating the native, as * the 'swiotlb' flag is the only one turning it on. */ if (xen_pv_domain()) swiotlb = 0; return xen_swiotlb; } void __init pci_xen_swiotlb_init(void) { if (xen_swiotlb) { xen_swiotlb_init(1); dma_ops = &xen_swiotlb_dma_ops; /* Make sure ACS will be enabled */ pci_request_acs(); } } IOMMU_INIT_FINISH(pci_xen_swiotlb_detect, 0, pci_xen_swiotlb_init, 0);
gpl-2.0
ajfink/android_kernel_lge_e7lte
drivers/net/wan/sbni.c
7380
42919
/* sbni.c: Granch SBNI12 leased line adapters driver for linux * * Written 2001 by Denis I.Timofeev (timofeev@granch.ru) * * Previous versions were written by Yaroslav Polyakov, * Alexey Zverev and Max Khon. * * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and * double-channel, PCI and ISA modifications. * More info and useful utilities to work with SBNI12 cards you can find * at http://www.granch.com (English) or http://www.granch.ru (Russian) * * This software may be used and distributed according to the terms * of the GNU General Public License. * * * 5.0.1 Jun 22 2001 * - Fixed bug in probe * 5.0.0 Jun 06 2001 * - Driver was completely redesigned by Denis I.Timofeev, * - now PCI/Dual, ISA/Dual (with single interrupt line) models are * - supported * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000 * - PCI cards support * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999 * - Completely rebuilt all the packet storage system * - to work in Ethernet-like style. * 3.1.1 just fixed some bugs (5 aug 1999) * 3.1.0 added balancing feature (26 apr 1999) * 3.0.1 just fixed some bugs (14 apr 1999). * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999) * - added pre-calculation for CRC, fixed bug with "len-2" frames, * - removed outbound fragmentation (MTU=1000), written CRC-calculation * - on asm, added work with hard_headers and now we have our own cache * - for them, optionally supported word-interchange on some chipsets, * * Known problem: this driver wasn't tested on multiprocessor machine. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/fcntl.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/delay.h> #include <net/net_namespace.h> #include <net/arp.h> #include <asm/io.h> #include <asm/types.h> #include <asm/byteorder.h> #include <asm/irq.h> #include <asm/uaccess.h> #include "sbni.h" /* device private data */ struct net_local { struct timer_list watchdog; spinlock_t lock; struct sk_buff *rx_buf_p; /* receive buffer ptr */ struct sk_buff *tx_buf_p; /* transmit buffer ptr */ unsigned int framelen; /* current frame length */ unsigned int maxframe; /* maximum valid frame length */ unsigned int state; unsigned int inppos, outpos; /* positions in rx/tx buffers */ /* transmitting frame number - from frames qty to 1 */ unsigned int tx_frameno; /* expected number of next receiving frame */ unsigned int wait_frameno; /* count of failed attempts to frame send - 32 attempts do before error - while receiver tunes on opposite side of wire */ unsigned int trans_errors; /* idle time; send pong when limit exceeded */ unsigned int timer_ticks; /* fields used for receive level autoselection */ int delta_rxl; unsigned int cur_rxl_index, timeout_rxl; unsigned long cur_rxl_rcvd, prev_rxl_rcvd; struct sbni_csr1 csr1; /* current value of CSR1 */ struct sbni_in_stats in_stats; /* internal statistics */ struct net_device *second; /* for ISA/dual cards */ #ifdef CONFIG_SBNI_MULTILINE struct net_device *master; struct net_device *link; #endif }; static int sbni_card_probe( unsigned long ); static int sbni_pci_probe( struct net_device * ); static struct net_device *sbni_probe1(struct net_device *, unsigned long, int); static int sbni_open( struct net_device * 
); static int sbni_close( struct net_device * ); static netdev_tx_t sbni_start_xmit(struct sk_buff *, struct net_device * ); static int sbni_ioctl( struct net_device *, struct ifreq *, int ); static void set_multicast_list( struct net_device * ); static irqreturn_t sbni_interrupt( int, void * ); static void handle_channel( struct net_device * ); static int recv_frame( struct net_device * ); static void send_frame( struct net_device * ); static int upload_data( struct net_device *, unsigned, unsigned, unsigned, u32 ); static void download_data( struct net_device *, u32 * ); static void sbni_watchdog( unsigned long ); static void interpret_ack( struct net_device *, unsigned ); static int append_frame_to_pkt( struct net_device *, unsigned, u32 ); static void indicate_pkt( struct net_device * ); static void card_start( struct net_device * ); static void prepare_to_send( struct sk_buff *, struct net_device * ); static void drop_xmit_queue( struct net_device * ); static void send_frame_header( struct net_device *, u32 * ); static int skip_tail( unsigned int, unsigned int, u32 ); static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * ); static void change_level( struct net_device * ); static void timeout_change_level( struct net_device * ); static u32 calc_crc32( u32, u8 *, u32 ); static struct sk_buff * get_rx_buf( struct net_device * ); static int sbni_init( struct net_device * ); #ifdef CONFIG_SBNI_MULTILINE static int enslave( struct net_device *, struct net_device * ); static int emancipate( struct net_device * ); #endif #ifdef __i386__ #define ASM_CRC 1 #endif static const char version[] = "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n"; static bool skip_pci_probe __initdata = false; static int scandone __initdata = 0; static int num __initdata = 0; static unsigned char rxl_tab[]; static u32 crc32tab[]; /* A list of all installed devices, for removing the driver module. 
*/ static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ]; /* Lists of device's parameters */ static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata = { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 }; static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata; static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata; static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata = { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 }; static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata; #ifndef MODULE typedef u32 iarr[]; static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac }; #endif /* A zero-terminated list of I/O addresses to be probed on ISA bus */ static unsigned int netcard_portlist[ ] __initdata = { 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254, 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4, 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4, 0 }; #define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock) /* * Look for SBNI card which addr stored in dev->base_addr, if nonzero. * Otherwise, look through PCI bus. If none PCI-card was found, scan ISA. 
*/ static inline int __init sbni_isa_probe( struct net_device *dev ) { if( dev->base_addr > 0x1ff && request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) && sbni_probe1( dev, dev->base_addr, dev->irq ) ) return 0; else { pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n", dev->base_addr); return -ENODEV; } } static const struct net_device_ops sbni_netdev_ops = { .ndo_open = sbni_open, .ndo_stop = sbni_close, .ndo_start_xmit = sbni_start_xmit, .ndo_set_rx_mode = set_multicast_list, .ndo_do_ioctl = sbni_ioctl, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static void __init sbni_devsetup(struct net_device *dev) { ether_setup( dev ); dev->netdev_ops = &sbni_netdev_ops; } int __init sbni_probe(int unit) { struct net_device *dev; int err; dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup); if (!dev) return -ENOMEM; dev->netdev_ops = &sbni_netdev_ops; sprintf(dev->name, "sbni%d", unit); netdev_boot_setup_check(dev); err = sbni_init(dev); if (err) { free_netdev(dev); return err; } err = register_netdev(dev); if (err) { release_region( dev->base_addr, SBNI_IO_EXTENT ); free_netdev(dev); return err; } pr_info_once("%s", version); return 0; } static int __init sbni_init(struct net_device *dev) { int i; if( dev->base_addr ) return sbni_isa_probe( dev ); /* otherwise we have to perform search our adapter */ if( io[ num ] != -1 ) dev->base_addr = io[ num ], dev->irq = irq[ num ]; else if( scandone || io[ 0 ] != -1 ) return -ENODEV; /* if io[ num ] contains non-zero address, then that is on ISA bus */ if( dev->base_addr ) return sbni_isa_probe( dev ); /* ...otherwise - scan PCI first */ if( !skip_pci_probe && !sbni_pci_probe( dev ) ) return 0; if( io[ num ] == -1 ) { /* Auto-scan will be stopped when first ISA card were found */ scandone = 1; if( num > 0 ) return -ENODEV; } for( i = 0; netcard_portlist[ i ]; ++i ) { int ioaddr = netcard_portlist[ i ]; if( request_region( 
ioaddr, SBNI_IO_EXTENT, dev->name ) && sbni_probe1( dev, ioaddr, 0 )) return 0; } return -ENODEV; } static int __init sbni_pci_probe( struct net_device *dev ) { struct pci_dev *pdev = NULL; while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev )) != NULL ) { int pci_irq_line; unsigned long pci_ioaddr; if( pdev->vendor != SBNI_PCI_VENDOR && pdev->device != SBNI_PCI_DEVICE ) continue; pci_ioaddr = pci_resource_start( pdev, 0 ); pci_irq_line = pdev->irq; /* Avoid already found cards from previous calls */ if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) { if (pdev->subsystem_device != 2) continue; /* Dual adapter is present */ if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT, dev->name ) ) continue; } if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs) pr_warn( "WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n" "You should use the PCI BIOS setup to assign a valid IRQ line.\n", pci_irq_line ); /* avoiding re-enable dual adapters */ if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) { release_region( pci_ioaddr, SBNI_IO_EXTENT ); pci_dev_put( pdev ); return -EIO; } if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) { SET_NETDEV_DEV(dev, &pdev->dev); /* not the best thing to do, but this is all messed up for hotplug systems anyway... 
*/ pci_dev_put( pdev ); return 0; } } return -ENODEV; } static struct net_device * __init sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq ) { struct net_local *nl; if( sbni_card_probe( ioaddr ) ) { release_region( ioaddr, SBNI_IO_EXTENT ); return NULL; } outb( 0, ioaddr + CSR0 ); if( irq < 2 ) { unsigned long irq_mask; irq_mask = probe_irq_on(); outb( EN_INT | TR_REQ, ioaddr + CSR0 ); outb( PR_RES, ioaddr + CSR1 ); mdelay(50); irq = probe_irq_off(irq_mask); outb( 0, ioaddr + CSR0 ); if( !irq ) { pr_err("%s: can't detect device irq!\n", dev->name); release_region( ioaddr, SBNI_IO_EXTENT ); return NULL; } } else if( irq == 2 ) irq = 9; dev->irq = irq; dev->base_addr = ioaddr; /* Fill in sbni-specific dev fields. */ nl = netdev_priv(dev); if( !nl ) { pr_err("%s: unable to get memory!\n", dev->name); release_region( ioaddr, SBNI_IO_EXTENT ); return NULL; } memset( nl, 0, sizeof(struct net_local) ); spin_lock_init( &nl->lock ); /* store MAC address (generate if that isn't known) */ *(__be16 *)dev->dev_addr = htons( 0x00ff ); *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 | ((mac[num] ? mac[num] : (u32)((long)netdev_priv(dev))) & 0x00ffffff)); /* store link settings (speed, receive level ) */ nl->maxframe = DEFAULT_FRAME_LEN; nl->csr1.rate = baud[ num ]; if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) /* autotune rxl */ nl->cur_rxl_index = DEF_RXL, nl->delta_rxl = DEF_RXL_DELTA; else nl->delta_rxl = 0; nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ]; if( inb( ioaddr + CSR0 ) & 0x01 ) nl->state |= FL_SLOW_MODE; pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n", dev->name, dev->base_addr, dev->irq, ((u8 *)dev->dev_addr)[3], ((u8 *)dev->dev_addr)[4], ((u8 *)dev->dev_addr)[5]); pr_notice("%s: speed %d", dev->name, ((nl->state & FL_SLOW_MODE) ? 
500000 : 2000000) / (1 << nl->csr1.rate)); if( nl->delta_rxl == 0 ) pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index); else pr_cont(", receive level (auto)\n"); #ifdef CONFIG_SBNI_MULTILINE nl->master = dev; nl->link = NULL; #endif sbni_cards[ num++ ] = dev; return dev; } /* -------------------------------------------------------------------------- */ #ifdef CONFIG_SBNI_MULTILINE static netdev_tx_t sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) { struct net_device *p; netif_stop_queue( dev ); /* Looking for idle device in the list */ for( p = dev; p; ) { struct net_local *nl = netdev_priv(p); spin_lock( &nl->lock ); if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) { p = nl->link; spin_unlock( &nl->lock ); } else { /* Idle dev is found */ prepare_to_send( skb, p ); spin_unlock( &nl->lock ); netif_start_queue( dev ); return NETDEV_TX_OK; } } return NETDEV_TX_BUSY; } #else /* CONFIG_SBNI_MULTILINE */ static netdev_tx_t sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) { struct net_local *nl = netdev_priv(dev); netif_stop_queue( dev ); spin_lock( &nl->lock ); prepare_to_send( skb, dev ); spin_unlock( &nl->lock ); return NETDEV_TX_OK; } #endif /* CONFIG_SBNI_MULTILINE */ /* -------------------------------------------------------------------------- */ /* interrupt handler */ /* * SBNI12D-10, -11/ISA boards within "common interrupt" mode could not * be looked as two independent single-channel devices. Every channel seems * as Ethernet interface but interrupt handler must be common. Really, first * channel ("master") driver only registers the handler. In its struct net_local * it has got pointer to "slave" channel's struct net_local and handles that's * interrupts too. * dev of successfully attached ISA SBNI boards is linked to list. * While next board driver is initialized, it scans this list. If one * has found dev with same irq and ioaddr different by 4 then it assumes * this board to be "master". 
 */

/*
 * Common interrupt handler.  For dual-channel ISA cards only the "master"
 * channel registers the handler; if a "second" channel was attached
 * (nl->second, set up in sbni_open), its lock is taken and it is serviced
 * here as well.  Keeps polling both channels until neither reports
 * RC_RDY/TR_RDY in CSR0, so no event is lost between iterations.
 */
static irqreturn_t
sbni_interrupt( int  irq,  void  *dev_id )
{
	struct net_device  *dev = dev_id;
	struct net_local   *nl  = netdev_priv(dev);
	int	repeat;

	spin_lock( &nl->lock );
	if( nl->second )
		spin_lock(&NET_LOCAL_LOCK(nl->second));

	do {
		repeat = 0;
		if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
			handle_channel( dev ),
			repeat = 1;
		if( nl->second  &&	/* second channel present */
		    (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
			handle_channel( nl->second ),
			repeat = 1;
	} while( repeat );

	if( nl->second )
		spin_unlock(&NET_LOCAL_LOCK(nl->second));
	spin_unlock( &nl->lock );
	return IRQ_HANDLED;
}


/*
 * Service one channel while we own the wire marker: mask the channel
 * interrupt (clear EN_INT) and raise TR_REQ, then alternately receive
 * and transmit frames until CSR0 shows neither RC_RDY nor TR_RDY.
 * EN_INT is re-enabled on exit.
 */
static void
handle_channel( struct net_device  *dev )
{
	struct net_local  *nl     = netdev_priv(dev);
	unsigned long	   ioaddr = dev->base_addr;

	int  req_ans;
	unsigned char  csr0;

#ifdef CONFIG_SBNI_MULTILINE
	/* Lock the master device because we are going to change its local data */
	if( nl->state & FL_SLAVE )
		spin_lock(&NET_LOCAL_LOCK(nl->master));
#endif

	outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	for(;;) {
		csr0 = inb( ioaddr + CSR0 );
		if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
			break;

		/* ask for an ACK only if the previous frame was bad */
		req_ans = !(nl->state & FL_PREV_OK);

		if( csr0 & RC_RDY )
			req_ans = recv_frame( dev );

		/*
		 * TR_RDY always equals 1 here because we own the marker,
		 * and we set TR_REQ while interrupts were disabled
		 */
		csr0 = inb( ioaddr + CSR0 );
		if( !(csr0 & TR_RDY)  ||  (csr0 & RC_RDY) )
			netdev_err(dev, "internal error!\n");

		/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
		if( req_ans  ||  nl->tx_frameno != 0 )
			send_frame( dev );
		else
			/* send marker without any data */
			outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
	}

	outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		spin_unlock(&NET_LOCAL_LOCK(nl->master));
#endif
}


/*
 * Returns 1 when the received frame must be acknowledged.
 * An empty frame received without errors is not acknowledged.
 */

/*
 * Receive one frame from the adapter data port: validate the header via
 * check_fhdr(), then either store the payload (upload_data) or just burn
 * through it while checking the CRC (skip_tail) for empty frames.
 * Updates the FL_PREV_OK state bit and the rx statistics, and triggers
 * receive-level renegotiation (change_level) on a bad frame.
 */
static int
recv_frame( struct net_device  *dev )
{
	struct net_local  *nl     = netdev_priv(dev);
	unsigned long	   ioaddr = dev->base_addr;

	u32  crc = CRC32_INITIAL;

	unsigned  framelen = 0, frameno, ack;
	unsigned  is_first, frame_ok = 0;

	if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
		frame_ok = framelen > 4
			?  upload_data( dev, framelen, frameno, is_first, crc )
			:  skip_tail( ioaddr, framelen, crc );
		if( frame_ok )
			interpret_ack( dev, ack );
	}

	outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
	if( frame_ok ) {
		nl->state |= FL_PREV_OK;
		if( framelen > 4 )
			nl->in_stats.all_rx_number++;
	} else
		nl->state &= ~FL_PREV_OK,
		change_level( dev ),
		nl->in_stats.all_rx_number++,
		nl->in_stats.bad_rx_number++;

	return  !frame_ok  ||  framelen > 4;
}


/*
 * Transmit the current frame (or a resend of it).  On too many failed
 * attempts (trans_errors exhausted) the whole transmit queue is dropped
 * and, for multiline slaves/masters, the line is marked down.
 */
static void
send_frame( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	u32  crc = CRC32_INITIAL;

	if( nl->state & FL_NEED_RESEND ) {

		/* if frame was sent but not ACK'ed - resend it */
		if( nl->trans_errors ) {
			--nl->trans_errors;
			if( nl->framelen != 0 )
				nl->in_stats.resend_tx_number++;
		} else {
			/* cannot xmit with many attempts */
#ifdef CONFIG_SBNI_MULTILINE
			if( (nl->state & FL_SLAVE)  ||  nl->link )
#endif
			nl->state |= FL_LINE_DOWN;
			drop_xmit_queue( dev );
			goto  do_send;
		}
	} else
		nl->trans_errors = TR_ERROR_COUNT;

	send_frame_header( dev, &crc );
	nl->state |= FL_NEED_RESEND;
	/*
	 * FL_NEED_RESEND will be cleared after ACK; if an empty frame
	 * was sent, it is cleared in prepare_to_send for the next frame
	 */

	if( nl->framelen ) {
		download_data( dev, &crc );
		nl->in_stats.all_tx_number++;
		nl->state |= FL_WAIT_ACK;
	}

	/* frame trailer is the accumulated CRC, least significant byte first */
	outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );

do_send:
	outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );

	if( nl->tx_frameno )
		/* next frame exists - we request card to send it */
		outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
		      dev->base_addr + CSR0 );
}


/*
 * Write the frame data into adapter's buffer memory, and calculate CRC.
 * Do padding if necessary.
 */

/*
 * Copy the next chunk of the queued skb (at most nl->framelen bytes,
 * starting at nl->outpos) to the adapter data port, folding the bytes
 * into *crc_p; when the remaining packet tail is shorter than framelen,
 * pad the frame with zero bytes (also CRC'ed).
 */
static void
download_data( struct net_device  *dev,  u32  *crc_p )
{
	struct net_local  *nl  = netdev_priv(dev);
	struct sk_buff	  *skb = nl->tx_buf_p;

	unsigned  len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);

	outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
	*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );

	/* if packet too short we should write some more bytes to pad */
	for( len = nl->framelen - len;  len--; )
		outb( 0, dev->base_addr + DAT ),
		*crc_p = CRC32( 0, *crc_p );
}


/*
 * Receive a non-empty frame's payload.  Frames are accepted only when
 * their number matches the expected wait_frameno sequence; out-of-order
 * or broken frames are skipped (CRC still verified) and the partially
 * assembled packet is dropped with the rx error counters bumped.
 * Returns non-zero when the frame arrived intact.
 */
static int
upload_data( struct net_device  *dev,  unsigned  framelen,  unsigned  frameno,
	     unsigned  is_first,  u32  crc )
{
	struct net_local  *nl = netdev_priv(dev);

	int  frame_ok;

	if( is_first )
		nl->wait_frameno = frameno,
		nl->inppos = 0;

	if( nl->wait_frameno == frameno ) {

		if( nl->inppos + framelen  <=  ETHER_MAX_LEN )
			frame_ok = append_frame_to_pkt( dev, framelen, crc );

		/*
		 * if CRC is right but framelen incorrect then transmitter
		 * error was occurred... drop entire packet
		 */
		else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
			 != 0 )
			nl->wait_frameno = 0,
			nl->inppos = 0,
#ifdef CONFIG_SBNI_MULTILINE
			nl->master->stats.rx_errors++,
			nl->master->stats.rx_missed_errors++;
#else
			dev->stats.rx_errors++,
			dev->stats.rx_missed_errors++;
#endif
			/* now skip all frames until is_first != 0 */
	} else
		frame_ok = skip_tail( dev->base_addr, framelen, crc );

	if( is_first  &&  !frame_ok )
		/*
		 * Frame has been broken, but we had already stored
		 * is_first... Drop entire packet.
		 */
		nl->wait_frameno = 0,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.rx_errors++,
		nl->master->stats.rx_crc_errors++;
#else
		dev->stats.rx_errors++,
		dev->stats.rx_crc_errors++;
#endif

	return  frame_ok;
}


/*
 * Account a fully transmitted packet on the (master) device statistics,
 * free the skb and reset the per-packet transmit bookkeeping.
 */
static inline void
send_complete( struct net_device *dev )
{
	struct net_local  *nl = netdev_priv(dev);

#ifdef CONFIG_SBNI_MULTILINE
	nl->master->stats.tx_packets++;
	nl->master->stats.tx_bytes += nl->tx_buf_p->len;
#else
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += nl->tx_buf_p->len;
#endif
	dev_kfree_skb_irq( nl->tx_buf_p );

	nl->tx_buf_p = NULL;

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->framelen   = 0;
}


/*
 * Process the ACK field of a received frame header: on FRAME_SENT_OK
 * advance to the next outgoing frame, or finish the packet and wake the
 * transmit queue when the last frame was acknowledged.
 */
static void
interpret_ack( struct net_device  *dev,  unsigned  ack )
{
	struct net_local  *nl = netdev_priv(dev);

	if( ack == FRAME_SENT_OK ) {
		nl->state &= ~FL_NEED_RESEND;

		if( nl->state & FL_WAIT_ACK ) {
			nl->outpos += nl->framelen;

			if( --nl->tx_frameno )
				nl->framelen = min_t(unsigned int,
						     nl->maxframe,
						     nl->tx_buf_p->len - nl->outpos);
			else
				send_complete( dev ),
#ifdef CONFIG_SBNI_MULTILINE
				netif_wake_queue( nl->master );
#else
				netif_wake_queue( dev );
#endif
		}
	}

	nl->state &= ~FL_WAIT_ACK;
}


/*
 * Glue received frame with previous fragments of packet.
 * Indicate packet when last frame would be accepted.
 */
static int
append_frame_to_pkt( struct net_device  *dev,  unsigned  framelen,  u32  crc )
{
	struct net_local  *nl = netdev_priv(dev);

	u8  *p;

	if( nl->inppos + framelen  >  ETHER_MAX_LEN )
		return  0;

	if( !nl->rx_buf_p  &&  !(nl->rx_buf_p = get_rx_buf( dev )) )
		return  0;

	p = nl->rx_buf_p->data + nl->inppos;
	insb( dev->base_addr + DAT, p, framelen );
	if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
		return  0;

	/* the 4 trailing bytes are the frame CRC - exclude them from the packet */
	nl->inppos += framelen - 4;
	if( --nl->wait_frameno == 0 )		/* last frame received */
		indicate_pkt( dev );

	return  1;
}


/*
 * Prepare to start output on adapter.
 * Transmitter will be actually activated when marker is accepted.
 */

/*
 * Queue skb for transmission: split it into tx_frameno frames of at most
 * nl->maxframe bytes (padding short packets to SBNI_MIN_LEN) and raise
 * TR_REQ so the card asks for data.  Caller holds nl->lock.
 */
static void
prepare_to_send( struct sk_buff  *skb,  struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	unsigned int  len;

	/* nl->tx_buf_p == NULL here! */
	if( nl->tx_buf_p )
		netdev_err(dev, "memory leak!\n");

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);

	len = skb->len;
	if( len < SBNI_MIN_LEN )
		len = SBNI_MIN_LEN;

	nl->tx_buf_p	= skb;
	nl->tx_frameno	= DIV_ROUND_UP(len, nl->maxframe);
	nl->framelen	= len < nl->maxframe  ?  len  :  nl->maxframe;

	outb( inb( dev->base_addr + CSR0 ) | TR_REQ,  dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
	nl->master->trans_start = jiffies;
#else
	dev->trans_start = jiffies;
#endif
}


/*
 * Abort the current transmit: free the pending skb (counting it as a
 * carrier error), reset all transmit state and restart the queue.
 */
static void
drop_xmit_queue( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	if( nl->tx_buf_p )
		dev_kfree_skb_any( nl->tx_buf_p ),
		nl->tx_buf_p = NULL,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.tx_errors++,
		nl->master->stats.tx_carrier_errors++;
#else
		dev->stats.tx_errors++,
		dev->stats.tx_carrier_errors++;
#endif

	nl->tx_frameno	= 0;
	nl->framelen	= 0;
	nl->outpos	= 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
	netif_start_queue( nl->master );
	nl->master->trans_start = jiffies;
#else
	netif_start_queue( dev );
	dev->trans_start = jiffies;
#endif
}


/*
 * Emit the frame header: signature byte, 16-bit length/flags field
 * (low byte first), frame number and a reserved zero byte, folding all
 * but the signature into the running CRC in *crc_p.
 */
static void
send_frame_header( struct net_device  *dev,  u32  *crc_p )
{
	struct net_local  *nl = netdev_priv(dev);

	u32  crc = *crc_p;
	u32  len_field = nl->framelen + 6;	/* CRC + frameno + reserved */
	u8   value;

	if( nl->state & FL_NEED_RESEND )
		len_field |= FRAME_RETRY;	/* non-first attempt... */

	if( nl->outpos == 0 )
		len_field |= FRAME_FIRST;

	len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
	outb( SBNI_SIG, dev->base_addr + DAT );

	value = (u8) len_field;
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );
	value = (u8) (len_field >> 8);
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );

	outb( nl->tx_frameno, dev->base_addr + DAT );
	crc = CRC32( nl->tx_frameno, crc );
	outb( 0, dev->base_addr + DAT );
	crc = CRC32( 0, crc );
	*crc_p = crc;
}


/*
 * if frame tail not needed (incorrect number or received twice),
 * it won't be stored, but CRC will still be calculated
 */
static int
skip_tail( unsigned int  ioaddr,  unsigned int  tail_len,  u32 crc )
{
	while( tail_len-- )
		crc = CRC32( inb( ioaddr + DAT ), crc );

	return  crc == CRC32_REMAINDER;
}


/*
 * Preliminary checks if frame header is correct, calculates its CRC
 * and splits it to simple fields
 */
static int
check_fhdr( u32  ioaddr,  u32  *framelen,  u32  *frameno,  u32  *ack,
	    u32  *is_first,  u32  *crc_p )
{
	u32  crc = *crc_p;
	u8   value;

	if( inb( ioaddr + DAT ) != SBNI_SIG )
		return  0;

	value = inb( ioaddr + DAT );
	*framelen = (u32)value;
	crc = CRC32( value, crc );
	value = inb( ioaddr + DAT );
	*framelen |= ((u32)value) << 8;
	crc = CRC32( value, crc );

	*ack = *framelen & FRAME_ACK_MASK;
	*is_first = (*framelen & FRAME_FIRST) != 0;

	if( (*framelen &= FRAME_LEN_MASK) < 6
	    ||  *framelen > SBNI_MAX_FRAME - 3 )
		return  0;

	value = inb( ioaddr + DAT );
	*frameno = (u32)value;
	crc = CRC32( value, crc );

	crc = CRC32( inb( ioaddr + DAT ), crc );	/* reserved byte */
	*framelen -= 2;

	*crc_p = crc;
	return  1;
}


/* Allocate a receive skb, reserving 2 bytes so the IP header lands aligned. */
static struct sk_buff *
get_rx_buf( struct net_device  *dev )
{
	/* +2 is to compensate for the alignment fixup below */
	struct sk_buff  *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
	if( !skb )
		return  NULL;

	skb_reserve( skb, 2 );		/* Align IP on longword boundaries */
	return  skb;
}


/* Hand the fully reassembled packet in nl->rx_buf_p up to the stack. */
static void
indicate_pkt( struct net_device  *dev )
{
	struct net_local  *nl  = netdev_priv(dev);
	struct sk_buff    *skb = nl->rx_buf_p;

	skb_put( skb, nl->inppos );

#ifdef CONFIG_SBNI_MULTILINE
	skb->protocol = eth_type_trans( skb,
					nl->master );
	netif_rx( skb );
	++nl->master->stats.rx_packets;
	nl->master->stats.rx_bytes += nl->inppos;
#else
	skb->protocol = eth_type_trans( skb, dev );
	netif_rx( skb );
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += nl->inppos;
#endif
	nl->rx_buf_p = NULL;	/* protocol driver will clear this sk_buff */
}


/* -------------------------------------------------------------------------- */

/*
 * Routine checks periodically wire activity and regenerates marker if
 * connect was inactive for a long time.
 */
static void
sbni_watchdog( unsigned long  arg )
{
	struct net_device  *dev = (struct net_device *) arg;
	struct net_local   *nl  = netdev_priv(dev);
	struct timer_list  *w   = &nl->watchdog;
	unsigned long	   flags;
	unsigned char	   csr0;

	spin_lock_irqsave( &nl->lock, flags );

	csr0 = inb( dev->base_addr + CSR0 );
	if( csr0 & RC_CHK ) {

		if( nl->timer_ticks ) {
			if( csr0 & (RC_RDY | BU_EMP) )
				/* receiving not active */
				nl->timer_ticks--;
		} else {
			/* idle limit exceeded - reset the adapter (PR_RES)
			 * to regenerate the marker */
			nl->in_stats.timeout_number++;
			if( nl->delta_rxl )
				timeout_change_level( dev );

			outb( *(u_char *)&nl->csr1 | PR_RES,
			      dev->base_addr + CSR1 );
			csr0 = inb( dev->base_addr + CSR0 );
		}
	} else
		nl->state &= ~FL_LINE_DOWN;

	outb( csr0 | RC_CHK, dev->base_addr + CSR0 );

	/* re-arm ourselves */
	init_timer( w );
	w->expires	= jiffies + SBNI_TIMEOUT;
	w->data		= arg;
	w->function	= sbni_watchdog;
	add_timer( w );

	spin_unlock_irqrestore( &nl->lock, flags );
}


/* hardware receive-level codes indexed by cur_rxl_index (0..15) */
static unsigned char  rxl_tab[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
	0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
};

#define SIZE_OF_TIMEOUT_RXL_TAB 4
static unsigned char  timeout_rxl_tab[] = {
	0x03, 0x05, 0x08, 0x0b
};

/* -------------------------------------------------------------------------- */

/*
 * Reset the adapter (PR_RES via CSR1) and bring the channel's software
 * state to its initial values, then enable interrupts.
 */
static void
card_start( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->state |= FL_PREV_OK;

	nl->inppos = nl->outpos = 0;
	nl->wait_frameno = 0;
	nl->tx_frameno	 = 0;
	nl->framelen	 = 0;

	outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
	outb( EN_INT, dev->base_addr + CSR0 );
}

/* -------------------------------------------------------------------------- */

/*
 * Receive level auto-selection: walk cur_rxl_index up/down by delta_rxl,
 * reversing direction when fewer good frames were received than in the
 * previous period.  No-op when rxl was fixed by the user (delta_rxl == 0).
 */
static void
change_level( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	if( nl->delta_rxl == 0 )	/* do not auto-negotiate RxL */
		return;

	if( nl->cur_rxl_index == 0 )
		nl->delta_rxl = 1;
	else if( nl->cur_rxl_index == 15 )
		nl->delta_rxl = -1;
	else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
		nl->delta_rxl = -nl->delta_rxl;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
	inb( dev->base_addr + CSR0 );	/* needed for PCI cards */
	outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}


/* On watchdog timeout, cycle through the timeout_rxl_tab fallback levels. */
static void
timeout_change_level( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
	if( ++nl->timeout_rxl >= 4 )
		nl->timeout_rxl = 0;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
	inb( dev->base_addr + CSR0 );
	outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}

/* -------------------------------------------------------------------------- */

/*
 * Open/initialize the board.
 */
static int
sbni_open( struct net_device  *dev )
{
	struct net_local	*nl = netdev_priv(dev);
	struct timer_list	*w  = &nl->watchdog;

	/*
	 * For double ISA adapters within "common irq" mode, we have to
	 * determine whether primary or secondary channel is initialized,
	 * and set the irq handler only in first case.
*/ if( dev->base_addr < 0x400 ) { /* ISA only */ struct net_device **p = sbni_cards; for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p ) if( (*p)->irq == dev->irq && ((*p)->base_addr == dev->base_addr + 4 || (*p)->base_addr == dev->base_addr - 4) && (*p)->flags & IFF_UP ) { ((struct net_local *) (netdev_priv(*p))) ->second = dev; netdev_notice(dev, "using shared irq with %s\n", (*p)->name); nl->state |= FL_SECONDARY; goto handler_attached; } } if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) { netdev_err(dev, "unable to get IRQ %d\n", dev->irq); return -EAGAIN; } handler_attached: spin_lock( &nl->lock ); memset( &dev->stats, 0, sizeof(struct net_device_stats) ); memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); card_start( dev ); netif_start_queue( dev ); /* set timer watchdog */ init_timer( w ); w->expires = jiffies + SBNI_TIMEOUT; w->data = (unsigned long) dev; w->function = sbni_watchdog; add_timer( w ); spin_unlock( &nl->lock ); return 0; } static int sbni_close( struct net_device *dev ) { struct net_local *nl = netdev_priv(dev); if( nl->second && nl->second->flags & IFF_UP ) { netdev_notice(dev, "Secondary channel (%s) is active!\n", nl->second->name); return -EBUSY; } #ifdef CONFIG_SBNI_MULTILINE if( nl->state & FL_SLAVE ) emancipate( dev ); else while( nl->link ) /* it's master device! 
*/ emancipate( nl->link ); #endif spin_lock( &nl->lock ); nl->second = NULL; drop_xmit_queue( dev ); netif_stop_queue( dev ); del_timer( &nl->watchdog ); outb( 0, dev->base_addr + CSR0 ); if( !(nl->state & FL_SECONDARY) ) free_irq( dev->irq, dev ); nl->state &= FL_SECONDARY; spin_unlock( &nl->lock ); return 0; } /* Valid combinations in CSR0 (for probing): VALID_DECODER 0000,0011,1011,1010 ; 0 ; - TR_REQ ; 1 ; + TR_RDY ; 2 ; - TR_RDY TR_REQ ; 3 ; + BU_EMP ; 4 ; + BU_EMP TR_REQ ; 5 ; + BU_EMP TR_RDY ; 6 ; - BU_EMP TR_RDY TR_REQ ; 7 ; + RC_RDY ; 8 ; + RC_RDY TR_REQ ; 9 ; + RC_RDY TR_RDY ; 10 ; - RC_RDY TR_RDY TR_REQ ; 11 ; - RC_RDY BU_EMP ; 12 ; - RC_RDY BU_EMP TR_REQ ; 13 ; - RC_RDY BU_EMP TR_RDY ; 14 ; - RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; - */ #define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200) static int sbni_card_probe( unsigned long ioaddr ) { unsigned char csr0; csr0 = inb( ioaddr + CSR0 ); if( csr0 != 0xff && csr0 != 0x00 ) { csr0 &= ~EN_INT; if( csr0 & BU_EMP ) csr0 |= EN_INT; if( VALID_DECODER & (1 << (csr0 >> 4)) ) return 0; } return -ENODEV; } /* -------------------------------------------------------------------------- */ static int sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) { struct net_local *nl = netdev_priv(dev); struct sbni_flags flags; int error = 0; #ifdef CONFIG_SBNI_MULTILINE struct net_device *slave_dev; char slave_name[ 8 ]; #endif switch( cmd ) { case SIOCDEVGETINSTATS : if (copy_to_user( ifr->ifr_data, &nl->in_stats, sizeof(struct sbni_in_stats) )) error = -EFAULT; break; case SIOCDEVRESINSTATS : if (!capable(CAP_NET_ADMIN)) return -EPERM; memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); break; case SIOCDEVGHWSTATE : flags.mac_addr = *(u32 *)(dev->dev_addr + 3); flags.rate = nl->csr1.rate; flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0; flags.rxl = nl->cur_rxl_index; flags.fixed_rxl = nl->delta_rxl == 0; if (copy_to_user( ifr->ifr_data, &flags, sizeof flags )) error = -EFAULT; break; case 
SIOCDEVSHWSTATE : if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock( &nl->lock ); flags = *(struct sbni_flags*) &ifr->ifr_ifru; if( flags.fixed_rxl ) nl->delta_rxl = 0, nl->cur_rxl_index = flags.rxl; else nl->delta_rxl = DEF_RXL_DELTA, nl->cur_rxl_index = DEF_RXL; nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ]; nl->csr1.rate = flags.rate; outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 ); spin_unlock( &nl->lock ); break; #ifdef CONFIG_SBNI_MULTILINE case SIOCDEVENSLAVE : if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) return -EFAULT; slave_dev = dev_get_by_name(&init_net, slave_name ); if( !slave_dev || !(slave_dev->flags & IFF_UP) ) { netdev_err(dev, "trying to enslave non-active device %s\n", slave_name); return -EPERM; } return enslave( dev, slave_dev ); case SIOCDEVEMANSIPATE : if (!capable(CAP_NET_ADMIN)) return -EPERM; return emancipate( dev ); #endif /* CONFIG_SBNI_MULTILINE */ default : return -EOPNOTSUPP; } return error; } #ifdef CONFIG_SBNI_MULTILINE static int enslave( struct net_device *dev, struct net_device *slave_dev ) { struct net_local *nl = netdev_priv(dev); struct net_local *snl = netdev_priv(slave_dev); if( nl->state & FL_SLAVE ) /* This isn't master or free device */ return -EBUSY; if( snl->state & FL_SLAVE ) /* That was already enslaved */ return -EBUSY; spin_lock( &nl->lock ); spin_lock( &snl->lock ); /* append to list */ snl->link = nl->link; nl->link = slave_dev; snl->master = dev; snl->state |= FL_SLAVE; /* Summary statistics of MultiLine operation will be stored in master's counters */ memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) ); netif_stop_queue( slave_dev ); netif_wake_queue( dev ); /* Now we are able to transmit */ spin_unlock( &snl->lock ); spin_unlock( &nl->lock ); netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name); return 0; } static int emancipate( struct net_device *dev ) { struct net_local *snl = netdev_priv(dev); struct 
net_device *p = snl->master; struct net_local *nl = netdev_priv(p); if( !(snl->state & FL_SLAVE) ) return -EINVAL; spin_lock( &nl->lock ); spin_lock( &snl->lock ); drop_xmit_queue( dev ); /* exclude from list */ for(;;) { /* must be in list */ struct net_local *t = netdev_priv(p); if( t->link == dev ) { t->link = snl->link; break; } p = t->link; } snl->link = NULL; snl->master = dev; snl->state &= ~FL_SLAVE; netif_start_queue( dev ); spin_unlock( &snl->lock ); spin_unlock( &nl->lock ); dev_put( dev ); return 0; } #endif static void set_multicast_list( struct net_device *dev ) { return; /* sbni always operate in promiscuos mode */ } #ifdef MODULE module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(baud, int, NULL, 0); module_param_array(rxl, int, NULL, 0); module_param_array(mac, int, NULL, 0); module_param(skip_pci_probe, bool, 0); MODULE_LICENSE("GPL"); int __init init_module( void ) { struct net_device *dev; int err; while( num < SBNI_MAX_NUM_CARDS ) { dev = alloc_netdev(sizeof(struct net_local), "sbni%d", sbni_devsetup); if( !dev) break; sprintf( dev->name, "sbni%d", num ); err = sbni_init(dev); if (err) { free_netdev(dev); break; } if( register_netdev( dev ) ) { release_region( dev->base_addr, SBNI_IO_EXTENT ); free_netdev( dev ); break; } } return *sbni_cards ? 
0 : -ENODEV; } void cleanup_module(void) { int i; for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) { struct net_device *dev = sbni_cards[i]; if (dev != NULL) { unregister_netdev(dev); release_region(dev->base_addr, SBNI_IO_EXTENT); free_netdev(dev); } } } #else /* MODULE */ static int __init sbni_setup( char *p ) { int n, parm; if( *p++ != '(' ) goto bad_param; for( n = 0, parm = 0; *p && n < 8; ) { (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 ); if( !*p || *p == ')' ) return 1; if( *p == ';' ) ++p, ++n, parm = 0; else if( *p++ != ',' ) break; else if( ++parm >= 5 ) break; } bad_param: pr_err("Error in sbni kernel parameter!\n"); return 0; } __setup( "sbni=", sbni_setup ); #endif /* MODULE */ /* -------------------------------------------------------------------------- */ #ifdef ASM_CRC static u32 calc_crc32( u32 crc, u8 *p, u32 len ) { register u32 _crc; _crc = crc; __asm__ __volatile__ ( "xorl %%ebx, %%ebx\n" "movl %2, %%esi\n" "movl %3, %%ecx\n" "movl $crc32tab, %%edi\n" "shrl $2, %%ecx\n" "jz 1f\n" ".align 4\n" "0:\n" "movb %%al, %%bl\n" "movl (%%esi), %%edx\n" "shrl $8, %%eax\n" "xorb %%dl, %%bl\n" "shrl $8, %%edx\n" "xorl (%%edi,%%ebx,4), %%eax\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb %%dl, %%bl\n" "shrl $8, %%edx\n" "xorl (%%edi,%%ebx,4), %%eax\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb %%dl, %%bl\n" "movb %%dh, %%dl\n" "xorl (%%edi,%%ebx,4), %%eax\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb %%dl, %%bl\n" "addl $4, %%esi\n" "xorl (%%edi,%%ebx,4), %%eax\n" "decl %%ecx\n" "jnz 0b\n" "1:\n" "movl %3, %%ecx\n" "andl $3, %%ecx\n" "jz 2f\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb (%%esi), %%bl\n" "xorl (%%edi,%%ebx,4), %%eax\n" "decl %%ecx\n" "jz 2f\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb 1(%%esi), %%bl\n" "xorl (%%edi,%%ebx,4), %%eax\n" "decl %%ecx\n" "jz 2f\n" "movb %%al, %%bl\n" "shrl $8, %%eax\n" "xorb 2(%%esi), %%bl\n" "xorl (%%edi,%%ebx,4), %%eax\n" "2:\n" : "=a" (_crc) : "0" (_crc), "g" (p), "g" (len) : "bx", "cx", "dx", "si", "di" ); 
return _crc; } #else /* ASM_CRC */ static u32 calc_crc32( u32 crc, u8 *p, u32 len ) { while( len-- ) crc = CRC32( *p++, crc ); return crc; } #endif /* ASM_CRC */ static u32 crc32tab[] __attribute__ ((aligned(8))) = { 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605, 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C, 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53, 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A, 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661, 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278, 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF, 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6, 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD, 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4, 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B, 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82, 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9, 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0, 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7, 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE, 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795, 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C, 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3, 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA, 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1, 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8, 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F, 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76, 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D, 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344, 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B, 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12, 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739, 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320, 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17, 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E, 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525, 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C, 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73, 
0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A, 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541, 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158, 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF, 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6, 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED, 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4, 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB, 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2, 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589, 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190, 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87, 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E, 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5, 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC, 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3, 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA, 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1, 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8, 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F, 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856, 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D, 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064, 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B, 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832, 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419, 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000 };
gpl-2.0
q-li/linux-sunxi
arch/mips/cavium-octeon/executive/cvmx-helper-npi.c
9428
3446
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Functions for NPI initialization, configuration, * and monitoring. */ #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-config.h> #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-pip-defs.h> /** * Probe a NPI interface and determine the number of ports * connected to it. The NPI interface should still be down * after this call. * * @interface: Interface to probe * * Returns Number of ports on the interface. Zero to disable. 
 */
int __cvmx_helper_npi_probe(int interface)
{
#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
	/* Models with usable packet engines report 4 ports. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
#if 0
	/*
	 * Technically CN30XX, CN31XX, and CN50XX contain packet
	 * engines, but nobody ever uses them. Since this is the case,
	 * we disable them here.
	 */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
		 || OCTEON_IS_MODEL(OCTEON_CN50XX))
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		return 1;
#endif
#endif
	/* No PCI PKO queues configured, or an unrecognized model:
	 * the interface stays disabled. */
	return 0;
}

/**
 * Bringup and enable a NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure
 * (the current implementation has no failure path and always
 * returns zero).
 */
int __cvmx_helper_npi_enable(int interface)
{
	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packet < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		for (port = 0; port < num_ports; port++) {
			union cvmx_pip_prt_cfgx port_cfg;
			int ipd_port =
			    cvmx_helper_get_ipd_port(interface, port);
			/* Read-modify-write the per-port PIP config CSR:
			 * clear both the max- and min-length error enables. */
			port_cfg.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
			port_cfg.s.maxerr_en = 0;
			port_cfg.s.minerr_en = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
				       port_cfg.u64);
		}
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
gpl-2.0
felipesanches/linux-media
drivers/gpu/drm/gma500/mdfld_output.c
10452
2343
/*
 * Copyright (c) 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Thomas Eaton <thomas.g.eaton@intel.com>
 * Scott Rowe <scott.m.rowe@intel.com>
 */

#include "mdfld_output.h"
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_output.h"
#include "tc35876x-dsi-lvds.h"

/* Report the panel fitted on this platform; @pipe is currently unused. */
int mdfld_get_panel_type(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *priv = dev->dev_private;

	return priv->mdfld_panel_id;
}

/* Register the DSI output hooks appropriate for the given panel type. */
static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
			     int p_type)
{
	if (p_type == TPO_VID) {
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
	} else if (p_type == TC35876X) {
		/* The TC35876X bridge needs its own init before the
		 * generic DSI output setup. */
		tc35876x_init(dev);
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
	} else if (p_type == TMD_VID) {
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
	} else if (p_type == HDMI) {
		/* HDMI bring-up is not wired in yet; the original code
		 * carried a disabled call to mdfld_hdmi_init() here. */
	}
}

/* Probe-time output setup: one MIPI panel on pipe 0, HDMI on pipe 1. */
int mdfld_output_init(struct drm_device *dev)
{
	struct drm_psb_private *priv = dev->dev_private;

	/* FIXME: hardcoded for now */
	priv->mdfld_panel_id = TC35876X;

	/* MIPI panel 1 */
	mdfld_init_panel(dev, 0, priv->mdfld_panel_id);

	/* HDMI panel */
	mdfld_init_panel(dev, 1, HDMI);

	return 0;
}
gpl-2.0
Howpathetic/ShooterU_kernel
arch/parisc/lib/checksum.c
13524
3607
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * MIPS specific IP/TCP/UDP checksumming routines * * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <net/checksum.h> #include <asm/byteorder.h> #include <asm/string.h> #include <asm/uaccess.h> #define addc(_t,_r) \ __asm__ __volatile__ ( \ " add %0, %1, %0\n" \ " addc %0, %%r0, %0\n" \ : "=r"(_t) \ : "r"(_r), "0"(_t)); static inline unsigned short from32to16(unsigned int x) { /* 32 bits --> 16 bits + carry */ x = (x & 0xffff) + (x >> 16); /* 16 bits + carry --> 16 bits including carry */ x = (x & 0xffff) + (x >> 16); return (unsigned short)x; } static inline unsigned int do_csum(const unsigned char * buff, int len) { int odd, count; unsigned int result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = be16_to_cpu(*buff); len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. 
*/ if (count) { while (count >= 4) { unsigned int r1, r2, r3, r4; r1 = *(unsigned int *)(buff + 0); r2 = *(unsigned int *)(buff + 4); r3 = *(unsigned int *)(buff + 8); r4 = *(unsigned int *)(buff + 12); addc(result, r1); addc(result, r2); addc(result, r3); addc(result, r4); count -= 4; buff += 16; } while (count) { unsigned int w = *(unsigned int *) buff; count--; buff += 4; addc(result, w); } result = (result & 0xffff) + (result >> 16); } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += le16_to_cpu(*buff); result = from32to16(result); if (odd) result = swab16(result); out: return result; } /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* * why bother folding? */ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); addc(result, sum); return (__force __wsum)from32to16(result); } EXPORT_SYMBOL(csum_partial); /* * copy while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { /* * It's 2:30 am and I don't feel like doing it real ... * This is lots slower than the real thing (tm) */ sum = csum_partial(src, len, sum); memcpy(dst, src, len); return sum; } EXPORT_SYMBOL(csum_partial_copy_nocheck); /* * Copy from userspace and compute checksum. If we catch an exception * then zero the rest of the buffer. */ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { int missing; missing = copy_from_user(dst, src, len); if (missing) { memset(dst + len - missing, 0, missing); *err_ptr = -EFAULT; } return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_from_user);
gpl-2.0
h2o64/android_kernel_motorola_msm8226
arch/parisc/lib/checksum.c
13524
3607
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * MIPS specific IP/TCP/UDP checksumming routines * * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <net/checksum.h> #include <asm/byteorder.h> #include <asm/string.h> #include <asm/uaccess.h> #define addc(_t,_r) \ __asm__ __volatile__ ( \ " add %0, %1, %0\n" \ " addc %0, %%r0, %0\n" \ : "=r"(_t) \ : "r"(_r), "0"(_t)); static inline unsigned short from32to16(unsigned int x) { /* 32 bits --> 16 bits + carry */ x = (x & 0xffff) + (x >> 16); /* 16 bits + carry --> 16 bits including carry */ x = (x & 0xffff) + (x >> 16); return (unsigned short)x; } static inline unsigned int do_csum(const unsigned char * buff, int len) { int odd, count; unsigned int result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = be16_to_cpu(*buff); len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. 
*/ if (count) { while (count >= 4) { unsigned int r1, r2, r3, r4; r1 = *(unsigned int *)(buff + 0); r2 = *(unsigned int *)(buff + 4); r3 = *(unsigned int *)(buff + 8); r4 = *(unsigned int *)(buff + 12); addc(result, r1); addc(result, r2); addc(result, r3); addc(result, r4); count -= 4; buff += 16; } while (count) { unsigned int w = *(unsigned int *) buff; count--; buff += 4; addc(result, w); } result = (result & 0xffff) + (result >> 16); } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += le16_to_cpu(*buff); result = from32to16(result); if (odd) result = swab16(result); out: return result; } /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* * why bother folding? */ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); addc(result, sum); return (__force __wsum)from32to16(result); } EXPORT_SYMBOL(csum_partial); /* * copy while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { /* * It's 2:30 am and I don't feel like doing it real ... * This is lots slower than the real thing (tm) */ sum = csum_partial(src, len, sum); memcpy(dst, src, len); return sum; } EXPORT_SYMBOL(csum_partial_copy_nocheck); /* * Copy from userspace and compute checksum. If we catch an exception * then zero the rest of the buffer. */ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { int missing; missing = copy_from_user(dst, src, len); if (missing) { memset(dst + len - missing, 0, missing); *err_ptr = -EFAULT; } return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_from_user);
gpl-2.0
NKMSKV/kernel-salsa
arch/x86/kernel/early-quirks.c
213
7388
/* Various workarounds for chipset bugs.
   This code runs very early and can't use the regular PCI subsystem
   The entries are keyed to PCI bridges which usually identify chipsets
   uniquely.
   This is only for whole classes of chipsets with specific problems which
   need early invasive action (e.g. before the timers are initialized).
   Most PCI device specific workarounds can be done later and should be
   in standard PCI quirks
   Mainboard specific bugs should be handled by DMI entries.
   CPU specific bugs in setup.c */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/iommu.h>
#include <asm/gart.h>

/*
 * AMD K8 northbridge fixup: if the BIOS enabled extended APIC IDs
 * (bit 18 of HT config register 0x68) it must also enable extended
 * APIC interrupt broadcast (bit 17).  Set bit 17 and warn when the
 * BIOS forgot.
 */
static void __init fix_hypertransport_config(int num, int slot, int func)
{
	u32 htcfg;
	/*
	 * we found a hypertransport bus
	 * make sure that we are broadcasting
	 * interrupts to all cpus on the ht bus
	 * if we're using extended apic ids
	 */
	htcfg = read_pci_config(num, slot, func, 0x68);
	if (htcfg & (1 << 18)) {
		printk(KERN_INFO "Detected use of extended apic ids "
				 "on hypertransport bus\n");
		if ((htcfg & (1 << 17)) == 0) {
			printk(KERN_INFO "Enabling hypertransport extended "
					 "apic interrupt broadcast\n");
			printk(KERN_INFO "Note this is a bios bug, "
					 "please contact your hw vendor\n");
			htcfg |= (1 << 17);
			write_pci_config(num, slot, func, 0x68, htcfg);
		}
	}
}

/*
 * VIA chipsets with more than 4GB of RAM (or forced IOMMU) get the
 * GART aperture disabled unless the user explicitly allowed it on the
 * command line.
 */
static void __init via_bugs(int num, int slot, int func)
{
#ifdef CONFIG_GART_IOMMU
	if ((max_pfn > MAX_DMA32_PFN || force_iommu) &&
	    !gart_iommu_aperture_allowed) {
		printk(KERN_INFO
		       "Looks like a VIA chipset. Disabling IOMMU."
		       " Override with iommu=allowed\n");
		gart_iommu_aperture_disabled = 1;
	}
#endif
}

#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
/*
 * acpi_table_parse() handler for the HPET table.  The table contents
 * are irrelevant here; the caller only cares whether the table exists.
 */
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
	return 0;
}
#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */

static void __init nvidia_bugs(int num, int slot, int func)
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
	/*
	 * All timer overrides on Nvidia are
	 * wrong unless HPET is enabled.
	 * Unfortunately that's not true on many Asus boards.
	 * We don't know yet how to detect this automatically, but
	 * at least allow a command line override.
	 */
	if (acpi_use_timer_override)
		return;

	/* NOTE(review): a non-zero acpi_table_parse() return is treated
	 * here as "no HPET table present" — confirm against the
	 * acpi_table_parse() contract before relying on it. */
	if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
		acpi_skip_timer_override = 1;
		printk(KERN_INFO "Nvidia board "
		       "detected. Ignoring ACPI "
		       "timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
#endif
#endif
	/* RED-PEN skip them on mptables too? */

}

#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)

/*
 * Read the ATI IXP4x0 revision: pokes config registers 0xac and 0x70
 * before reading the low byte of register 0x8 (presumably an unlock
 * sequence for the revision register; the pokes are not undone).
 */
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
	u32 d;
	u8  b;

	b = read_pci_config_byte(num, slot, func, 0xac);
	b &= ~(1<<5);
	write_pci_config_byte(num, slot, func, 0xac, b);

	d = read_pci_config(num, slot, func, 0x70);
	d |= 1<<8;
	write_pci_config(num, slot, func, 0x70, d);

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	return d;
}

/*
 * ATI SB4x0: skip the ACPI timer override when the chip revision is
 * below 0x82, or when the IRQ0 "interrupt swap" bit (index 0x72,
 * bit 1, via I/O ports 0xcd6/0xcd7) is clear.
 */
static void __init ati_bugs(int num, int slot, int func)
{
	u32 d;
	u8  b;

	if (acpi_use_timer_override)
		return;

	d = ati_ixp4x0_rev(num, slot, func);
	if (d < 0x82)
		acpi_skip_timer_override = 1;
	else {
		/* check for IRQ0 interrupt swap */
		outb(0x72, 0xcd6); b = inb(0xcd7);
		if (!(b & 0x2))
			acpi_skip_timer_override = 1;
	}

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}

/*
 * Read the ATI SBx00 revision.  Unlike ati_ixp4x0_rev(), the original
 * value of config register 0x70 is restored afterwards.
 */
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
	u32 old, d;

	d = read_pci_config(num, slot, func, 0x70);
	old = d;
	d &= ~(1<<8);
	write_pci_config(num, slot, func, 0x70, d);
	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	write_pci_config(num, slot, func, 0x70, old);

	return d;
}

/*
 * ATI SB600: revisions <= 0x13 with the IRQ0 swap bit (config reg
 * 0x64, bit 14) clear get the ACPI timer override ignored.
 */
static void __init ati_bugs_contd(int num, int slot, int func)
{
	u32 d, rev;

	if (acpi_use_timer_override)
		return;

	rev = ati_sbx00_rev(num, slot, func);
	if (rev > 0x13)
		return;

	/* check for IRQ0 interrupt swap */
	d = read_pci_config(num, slot, func, 0x64);
	if (!(d & (1<<14)))
		acpi_skip_timer_override = 1;

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB600 revision 0x%x\n", rev);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}
#else
/* Stubs when ACPI/IOAPIC support is compiled out. */
static void __init ati_bugs(int num, int slot, int func)
{
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
}
#endif

/* Quirk bookkeeping flags: APPLY_ONCE marks entries that must not run
 * for every matching device; APPLIED records that they already ran. */
#define QFLAG_APPLY_ONCE 	0x1
#define QFLAG_APPLIED		0x2
#define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)

/* One early-quirk table entry: PCI match keys plus the fixup to run. */
struct chipset {
	u32 vendor;
	u32 device;
	u32 class;
	u32 class_mask;
	u32 flags;
	void (*f)(int num, int slot, int func);
};

/*
 * Only works for devices on the root bus. If you add any devices
 * not on bus 0 readd another loop level in early_quirks(). But
 * be careful because at least the Nvidia quirk here relies on
 * only matching on bus 0.
 */
static struct chipset early_qrk[] __initdata = {
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
	{ PCI_VENDOR_ID_VIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
	{}
};

/**
 * check_dev_quirk - apply early quirks to a given PCI device
 * @num: bus number
 * @slot: slot number
 * @func: PCI function
 *
 * Check the vendor & device ID against the early quirks table.
 *
 * If the device is single function, let early_quirks() know so we don't
 * poke at this device again.
 *
 * Returns -1 when the caller should not probe further functions of
 * this slot (no device present, or a single-function device), 0 when
 * the remaining functions should still be scanned.
 */
static int __init check_dev_quirk(int num, int slot, int func)
{
	u16 class;
	u16 vendor;
	u16 device;
	u8 type;
	int i;

	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);

	if (class == 0xffff)
		return -1; /* no class, treat as single function */

	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

	for (i = 0; early_qrk[i].f != NULL; i++) {
		if (((early_qrk[i].vendor == PCI_ANY_ID) ||
			(early_qrk[i].vendor == vendor)) &&
			((early_qrk[i].device == PCI_ANY_ID) ||
			(early_qrk[i].device == device)) &&
			(!((early_qrk[i].class ^ class) &
			    early_qrk[i].class_mask))) {
				if ((early_qrk[i].flags &
				     QFLAG_DONE) != QFLAG_DONE)
					early_qrk[i].f(num, slot, func);
				early_qrk[i].flags |= QFLAG_APPLIED;
			}
	}

	/* bit 7 of the header type marks multi-function devices */
	type = read_pci_config_byte(num, slot, func,
				    PCI_HEADER_TYPE);
	if (!(type & 0x80))
		return -1;

	return 0;
}

/* Scan the root bus (only) with direct config accesses and apply any
 * matching entries from early_qrk[]. */
void __init early_quirks(void)
{
	int slot, func;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery */
	/* Only scan the root bus */
	for (slot = 0; slot < 32; slot++)
		for (func = 0; func < 8; func++) {
			/* Only probe function 0 on single fn devices */
			if (check_dev_quirk(0, slot, func))
				break;
		}
}
gpl-2.0
T-J-Teru/synopsys-gcc
gcc/testsuite/gcc.c-torture/execute/ieee/fp-cmp-8.c
213
3221
#ifndef FLOAT
#define FLOAT double
#endif

/* Like fp-cmp-4.c, but test that the cmove patterns are correct.  */

/* Torture tests deliberately avoid library headers, but C99 removed
   implicit function declarations, so declare the two libc entry
   points we use.  */
extern void abort (void);
extern void exit (int);

/* One wrapper per __builtin_is* predicate plus its negation, so both
   legs of each conditional move pattern get exercised.  */

static FLOAT test_isunordered(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_isunordered(x, y) ? a : b;
}

static FLOAT test_not_isunordered(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_isunordered(x, y) ? a : b;
}

static FLOAT test_isless(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_isless(x, y) ? a : b;
}

static FLOAT test_not_isless(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_isless(x, y) ? a : b;
}

static FLOAT test_islessequal(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_islessequal(x, y) ? a : b;
}

static FLOAT test_not_islessequal(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_islessequal(x, y) ? a : b;
}

static FLOAT test_isgreater(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_isgreater(x, y) ? a : b;
}

static FLOAT test_not_isgreater(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_isgreater(x, y) ? a : b;
}

static FLOAT test_isgreaterequal(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_isgreaterequal(x, y) ? a : b;
}

static FLOAT test_not_isgreaterequal(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_isgreaterequal(x, y) ? a : b;
}

static FLOAT test_islessgreater(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return __builtin_islessgreater(x, y) ? a : b;
}

static FLOAT test_not_islessgreater(FLOAT x, FLOAT y, FLOAT a, FLOAT b)
{
  return !__builtin_islessgreater(x, y) ? a : b;
}

/* Check that POS selects its first argument exactly when EXPECTED is
   true, and that NEG (the negated predicate) selects its second.  */
static void
one_test(FLOAT x, FLOAT y, int expected,
	 FLOAT (*pos) (FLOAT, FLOAT, FLOAT, FLOAT),
	 FLOAT (*neg) (FLOAT, FLOAT, FLOAT, FLOAT))
{
  if (((*pos)(x, y, 1.0, 2.0) == 1.0) != expected)
    abort ();
  if (((*neg)(x, y, 3.0, 4.0) == 4.0) != expected)
    abort ();
}

#define NAN (0.0 / 0.0)
#define INF (1.0 / 0.0)

int main()
{
  /* result[] entries are in tests[] order: isunordered, isless,
     islessequal, isgreater, isgreaterequal, islessgreater; its length
     must stay equal to the number of tests[] entries.  */
  struct try
  {
    FLOAT x, y;
    int result[6];
  };

  static struct try const data[] =
  {
    { NAN, NAN, { 1, 0, 0, 0, 0, 0 } },
    { 0.0, NAN, { 1, 0, 0, 0, 0, 0 } },
    { NAN, 0.0, { 1, 0, 0, 0, 0, 0 } },
    { 0.0, 0.0, { 0, 0, 1, 0, 1, 0 } },
    { 1.0, 2.0, { 0, 1, 1, 0, 0, 1 } },
    { 2.0, 1.0, { 0, 0, 0, 1, 1, 1 } },
    { INF, 0.0, { 0, 0, 0, 1, 1, 1 } },
    { 1.0, INF, { 0, 1, 1, 0, 0, 1 } },
    { INF, INF, { 0, 0, 1, 0, 1, 0 } },
    { 0.0, -INF, { 0, 0, 0, 1, 1, 1 } },
    { -INF, 1.0, { 0, 1, 1, 0, 0, 1 } },
    { -INF, -INF, { 0, 0, 1, 0, 1, 0 } },
    { INF, -INF, { 0, 0, 0, 1, 1, 1 } },
    { -INF, INF, { 0, 1, 1, 0, 0, 1 } },
  };

  struct test
  {
    FLOAT (*pos)(FLOAT, FLOAT, FLOAT, FLOAT);
    FLOAT (*neg)(FLOAT, FLOAT, FLOAT, FLOAT);
  };

  static struct test const tests[] =
  {
    { test_isunordered, test_not_isunordered },
    { test_isless, test_not_isless },
    { test_islessequal, test_not_islessequal },
    { test_isgreater, test_not_isgreater },
    { test_isgreaterequal, test_not_isgreaterequal },
    { test_islessgreater, test_not_islessgreater }
  };

  /* Compute both loop bounds from the arrays instead of hard-coding
     "6", so adding a predicate cannot silently skip rows.  */
  const int n = sizeof(data) / sizeof(data[0]);
  const int m = sizeof(tests) / sizeof(tests[0]);
  int i, j;

  for (i = 0; i < n; ++i)
    for (j = 0; j < m; ++j)
      one_test (data[i].x, data[i].y, data[i].result[j],
		tests[j].pos, tests[j].neg);

  exit (0);
}
gpl-2.0
ApisSys/linux-analogdevicesinc-ap6
drivers/watchdog/ebc-c384_wdt.c
213
4305
/*
 * Watchdog timer driver for the WinSystems EBC-C384
 * Copyright (C) 2016 William Breathitt Gray
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/watchdog.h>

#define MODULE_NAME		"ebc-c384_wdt"
#define WATCHDOG_TIMEOUT	60
/*
 * The timeout value in minutes must fit in a single byte when sent to
 * the watchdog timer; the maximum timeout possible is 15300 (255 * 60)
 * seconds.
 */
#define WATCHDOG_MAX_TIMEOUT	15300
#define BASE_ADDR		0x564
#define ADDR_EXTENT		5
#define CFG_ADDR		(BASE_ADDR + 1)
#define PET_ADDR		(BASE_ADDR + 2)

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
	"Watchdog timeout in seconds (default="
	__MODULE_STRING(WATCHDOG_TIMEOUT) ")");

/* Arm (or pet) the timer by writing the countdown value to PET_ADDR. */
static int ebc_c384_wdt_start(struct watchdog_device *wdev)
{
	unsigned period = wdev->timeout;

	/* past 255 seconds the hardware counts whole minutes instead */
	if (period > 255)
		period = DIV_ROUND_UP(period, 60);

	outb(period, PET_ADDR);

	return 0;
}

/* A zero countdown disarms the hardware timer. */
static int ebc_c384_wdt_stop(struct watchdog_device *wdev)
{
	outb(0x00, PET_ADDR);

	return 0;
}

/* Select seconds vs. minutes granularity and record the timeout. */
static int ebc_c384_wdt_set_timeout(struct watchdog_device *wdev, unsigned t)
{
	if (t <= 255) {
		/* set watchdog timer for seconds */
		outb(0x80, CFG_ADDR);
		wdev->timeout = t;
		return 0;
	}

	/* set watchdog timer for minutes; round the second-resolution
	 * request up to a full minute */
	outb(0x00, CFG_ADDR);
	wdev->timeout = roundup(t, 60);

	return 0;
}

static const struct watchdog_ops ebc_c384_wdt_ops = {
	.start = ebc_c384_wdt_start,
	.stop = ebc_c384_wdt_stop,
	.set_timeout = ebc_c384_wdt_set_timeout
};

static const struct watchdog_info ebc_c384_wdt_info = {
	.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT,
	.identity = MODULE_NAME
};

/* Claim the I/O region, populate a watchdog_device, and register it. */
static int ebc_c384_wdt_probe(struct device *dev, unsigned int id)
{
	struct watchdog_device *wdd;

	if (!devm_request_region(dev, BASE_ADDR, ADDR_EXTENT, dev_name(dev))) {
		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
			BASE_ADDR, BASE_ADDR + ADDR_EXTENT);
		return -EBUSY;
	}

	wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->info = &ebc_c384_wdt_info;
	wdd->ops = &ebc_c384_wdt_ops;
	wdd->timeout = WATCHDOG_TIMEOUT;
	wdd->min_timeout = 1;
	wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;

	watchdog_set_nowayout(wdd, nowayout);

	if (watchdog_init_timeout(wdd, timeout, dev))
		dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n",
			timeout, WATCHDOG_TIMEOUT);

	dev_set_drvdata(dev, wdd);

	return watchdog_register_device(wdd);
}

static int ebc_c384_wdt_remove(struct device *dev, unsigned int id)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	watchdog_unregister_device(wdd);

	return 0;
}

static struct isa_driver ebc_c384_wdt_driver = {
	.probe = ebc_c384_wdt_probe,
	.driver = {
		.name = MODULE_NAME
	},
	.remove = ebc_c384_wdt_remove
};

/* Only bind on the matching DMI board name. */
static int __init ebc_c384_wdt_init(void)
{
	return dmi_match(DMI_BOARD_NAME, "EBC-C384 SBC") ?
		isa_register_driver(&ebc_c384_wdt_driver, 1) : -ENODEV;
}

static void __exit ebc_c384_wdt_exit(void)
{
	isa_unregister_driver(&ebc_c384_wdt_driver);
}

module_init(ebc_c384_wdt_init);
module_exit(ebc_c384_wdt_exit);

MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("WinSystems EBC-C384 watchdog timer driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("isa:" MODULE_NAME);
gpl-2.0
morfes/kernel_ideos_usb_host
arch/arm/mach-ixp4xx/dsmg600-pci.c
725
1898
/* * DSM-G600 board-level PCI initialization * * Copyright (C) 2006 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * based on ixdp425-pci.c: * Copyright (C) 2002 Intel Corporation. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * Maintainer: http://www.nslu2-linux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> void __init dsmg600_pci_preinit(void) { set_irq_type(IRQ_DSMG600_PCI_INTA, IRQ_TYPE_LEVEL_LOW); set_irq_type(IRQ_DSMG600_PCI_INTB, IRQ_TYPE_LEVEL_LOW); set_irq_type(IRQ_DSMG600_PCI_INTC, IRQ_TYPE_LEVEL_LOW); set_irq_type(IRQ_DSMG600_PCI_INTD, IRQ_TYPE_LEVEL_LOW); set_irq_type(IRQ_DSMG600_PCI_INTE, IRQ_TYPE_LEVEL_LOW); set_irq_type(IRQ_DSMG600_PCI_INTF, IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init dsmg600_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { static int pci_irq_table[DSMG600_PCI_MAX_DEV][DSMG600_PCI_IRQ_LINES] = { { IRQ_DSMG600_PCI_INTE, -1, -1 }, { IRQ_DSMG600_PCI_INTA, -1, -1 }, { IRQ_DSMG600_PCI_INTB, IRQ_DSMG600_PCI_INTC, IRQ_DSMG600_PCI_INTD }, { IRQ_DSMG600_PCI_INTF, -1, -1 }, }; int irq = -1; if (slot >= 1 && slot <= DSMG600_PCI_MAX_DEV && pin >= 1 && pin <= DSMG600_PCI_IRQ_LINES) irq = pci_irq_table[slot-1][pin-1]; return irq; } struct hw_pci __initdata dsmg600_pci = { .nr_controllers = 1, .preinit = dsmg600_pci_preinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = dsmg600_map_irq, }; int __init dsmg600_pci_init(void) { if (machine_is_dsmg600()) pci_common_init(&dsmg600_pci); return 0; } subsys_initcall(dsmg600_pci_init);
gpl-2.0
meyskld/samsung-kernel-galaxysii
arch/sparc/kernel/pci_sun4v.c
725
24404
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

/* Version of the HV_GRP_PCI hypervisor API group negotiated at probe time. */
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

/* Number of u64 physical-page addresses that fit in one per-cpu batch page. */
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

/* Per-cpu staging area used to batch IOMMU map requests into a single
 * pci_sun4v_iommu_map() hypervisor call.  Only touched with local
 * interrupts disabled, so no locking is needed.
 */
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
/* Set once the per-cpu pglist pages have been allocated in probe. */
static int iommu_batch_initialized;

/* Begin a new batch for @dev starting at IOTSB index @entry with
 * protection bits @prot.  Interrupts must be disabled.
 */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Push the accumulated page list to the hypervisor.  The map call may
 * consume fewer pages than requested, so loop until all are mapped.
 * Returns 0 on success, -1 on hypervisor failure.
 * Interrupts must be disabled.
 */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Retarget the current cpu's batch at IOTSB index @entry, flushing any
 * pending pages first unless @entry simply continues the current run.
 * A sentinel entry of ~0UL (set by dma_4v_map_sg) means "nothing
 * batched yet", so no flush is needed.
 */
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Queue one physical page for mapping, flushing automatically when the
 * batch page fills up.  Interrupts must be disabled.
 */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Flush whatever remains in the current cpu's batch.
 * Interrupts must be disabled.
 */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

/* dma_map_ops alloc_coherent: allocate @size bytes of zeroed,
 * IOMMU-mapped memory and return both the CPU virtual address and (via
 * @dma_addrp) the bus address.
 */
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	/* Allocate on the device's NUMA node for locality. */
	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled (local_irq_save above), so take the
	 * lock plain and release it with irqrestore to undo that save.
	 */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

/* dma_map_ops free_coherent: unmap and free memory from
 * dma_4v_alloc_coherent().
 */
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	/* demap may process fewer pages per call than asked; iterate. */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

/* dma_map_ops map_page: create a streaming DMA mapping for @sz bytes at
 * @offset within @page, returning the bus address or DMA_ERROR_CODE.
 */
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	/* Preserve the intra-page offset in the returned handle. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled; see dma_4v_alloc_coherent. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

/* dma_map_ops unmap_page: tear down a mapping made by dma_4v_map_page. */
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* dma_map_ops map_sg: map a scatterlist, merging physically adjacent
 * segments when the resulting DMA range stays contiguous and within the
 * device's segment size/boundary constraints.  Returns the number of
 * merged segments, or 0 on failure.
 */
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	/* ~0UL marks the batch as empty; see iommu_batch_new_entry. */
	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		/* Terminate the merged list for the caller. */
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	/* Back out every range allocated so far. */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* dma_map_ops unmap_sg: tear down mappings made by dma_4v_map_sg. */
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		/* Zero length marks the end of the merged list. */
		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
};

/* Record the PBM's 66MHz capability from the device tree and scan its
 * PCI bus.
 */
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

/* Walk the IOTSB for mappings left behind by OBP.  Mappings to pages
 * that are in the physical-memory-available list are demapped; the rest
 * are marked used in the arena bitmap so we never hand them out.
 * Returns the number of entries preserved.
 */
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

/* Initialize the software IOMMU state for one PBM from the
 * "virtual-dma" property (or a 2GB default window).
 */
static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	};

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map (one bit per TSB
	 * entry, rounded up to a multiple of 8 bytes).
	 */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

/* Read the current head offset of MSI queue @msiqid and sanity check it
 * against the queue size.
 */
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

/* Pop one MSI entry at byte offset *head from queue @msiqid.
 * Returns 1 with *msi set if an MSI was dequeued, 0 if the queue entry
 * is empty, or a negative errno.
 */
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	/* Advance head, wrapping at the end of the queue. */
	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

/* Write back the new head offset for queue @msiqid. */
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

/* Bind MSI @msi to queue @msiqid and make it valid. */
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

/* Invalidate MSI @msi (its queue binding is looked up but unused). */
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

/* Allocate one contiguous region holding all MSI queues for the PBM and
 * register each queue's physical base with the hypervisor, reading the
 * configuration back to verify it.
 */
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

/* Unregister all MSI queues from the hypervisor and free their pages. */
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

/* Build a virtual IRQ for MSI queue @msiqid and mark the queue idle and
 * valid.  Returns the virq or a negative errno.
 */
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Initialize one PCI bus module: resources, IOMMU, MSI, and bus scan.
 * Links the PBM into the global pci_pbm_root list on success.
 */
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct of_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

/* OF driver probe: negotiate the hypervisor PCI API (once), allocate
 * the per-cpu batch pages (once), then allocate and initialize the PBM.
 */
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);
gpl-2.0
theme/linux
drivers/clk/clk-efm32gg.c
981
3248
/*
 * Copyright (C) 2013 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/efm32-cmu.h>

#define CMU_HFPERCLKEN0		0x44

static struct clk *clk[37];
static struct clk_onecell_data clk_data = {
	.clks = clk,
	.clk_num = ARRAY_SIZE(clk),
};

/* One HFPERCLK gate: slot in clk[], clock name, enable bit in
 * CMU_HFPERCLKEN0.  All gates are children of the 48 MHz HFXO.
 */
struct efm32gg_gate {
	unsigned int idx;
	const char *name;
	u8 bit;
};

static const struct efm32gg_gate efm32gg_gates[] = {
	{ clk_HFPERCLKUSART0, "HFPERCLK.USART0",  0 },
	{ clk_HFPERCLKUSART1, "HFPERCLK.USART1",  1 },
	{ clk_HFPERCLKUSART2, "HFPERCLK.USART2",  2 },
	{ clk_HFPERCLKUART0,  "HFPERCLK.UART0",   3 },
	{ clk_HFPERCLKUART1,  "HFPERCLK.UART1",   4 },
	{ clk_HFPERCLKTIMER0, "HFPERCLK.TIMER0",  5 },
	{ clk_HFPERCLKTIMER1, "HFPERCLK.TIMER1",  6 },
	{ clk_HFPERCLKTIMER2, "HFPERCLK.TIMER2",  7 },
	{ clk_HFPERCLKTIMER3, "HFPERCLK.TIMER3",  8 },
	{ clk_HFPERCLKACMP0,  "HFPERCLK.ACMP0",   9 },
	{ clk_HFPERCLKACMP1,  "HFPERCLK.ACMP1",  10 },
	{ clk_HFPERCLKI2C0,   "HFPERCLK.I2C0",   11 },
	{ clk_HFPERCLKI2C1,   "HFPERCLK.I2C1",   12 },
	{ clk_HFPERCLKGPIO,   "HFPERCLK.GPIO",   13 },
	{ clk_HFPERCLKVCMP,   "HFPERCLK.VCMP",   14 },
	{ clk_HFPERCLKPRS,    "HFPERCLK.PRS",    15 },
	{ clk_HFPERCLKADC0,   "HFPERCLK.ADC0",   16 },
	{ clk_HFPERCLKDAC0,   "HFPERCLK.DAC0",   17 },
};

/* Register the efm32gg CMU clocks and publish them as a onecell clock
 * provider for the "efm32gg,cmu" device-tree node.
 */
static void __init efm32gg_cmu_init(struct device_node *np)
{
	unsigned int n;
	void __iomem *regs;

	/* Unregistered slots must report -ENOENT to consumers. */
	for (n = 0; n < ARRAY_SIZE(clk); ++n)
		clk[n] = ERR_PTR(-ENOENT);

	regs = of_iomap(np, 0);
	if (!regs) {
		pr_warn("Failed to map address range for efm32gg,cmu node\n");
		return;
	}

	/* Root oscillator: fixed-rate 48 MHz HFXO. */
	clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
			CLK_IS_ROOT, 48000000);

	/* Peripheral clock gates, all driven by CMU_HFPERCLKEN0 bits. */
	for (n = 0; n < ARRAY_SIZE(efm32gg_gates); ++n)
		clk[efm32gg_gates[n].idx] = clk_register_gate(NULL,
				efm32gg_gates[n].name, "HFXO", 0,
				regs + CMU_HFPERCLKEN0,
				efm32gg_gates[n].bit, 0, NULL);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
gpl-2.0
DIGImend/linux
arch/arm/mach-shmobile/smp-emev2.c
981
1622
/* * SMP support for Emma Mobile EV2 * * Copyright (C) 2012 Renesas Solutions Corp. * Copyright (C) 2012 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include "common.h" #define EMEV2_SCU_BASE 0x1e000000 #define EMEV2_SMU_BASE 0xe0110000 #define SMU_GENERAL_REG0 0x7c0 static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle) { arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu))); return 0; } static void __init emev2_smp_prepare_cpus(unsigned int max_cpus) { void __iomem *smu; /* Tell ROM loader about our vector (in headsmp.S) */ smu = ioremap(EMEV2_SMU_BASE, PAGE_SIZE); if (smu) { iowrite32(__pa(shmobile_boot_vector), smu + SMU_GENERAL_REG0); iounmap(smu); } /* setup EMEV2 specific SCU bits */ shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE); shmobile_smp_scu_prepare_cpus(max_cpus); } struct smp_operations emev2_smp_ops __initdata = { .smp_prepare_cpus = emev2_smp_prepare_cpus, .smp_boot_secondary = emev2_boot_secondary, };
gpl-2.0
lacvapps/linux
drivers/input/keyboard/lpc32xx-keys.c
1237
9163
/* * NXP LPC32xx SoC Key Scan Interface * * Authors: * Kevin Wells <kevin.wells@nxp.com> * Roland Stigge <stigge@antcom.de> * * Copyright (C) 2010 NXP Semiconductors * Copyright (C) 2012 Roland Stigge * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * This controller supports square key matrices from 1x1 up to 8x8 */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/input/matrix_keypad.h> #define DRV_NAME "lpc32xx_keys" /* * Key scanner register offsets */ #define LPC32XX_KS_DEB(x) ((x) + 0x00) #define LPC32XX_KS_STATE_COND(x) ((x) + 0x04) #define LPC32XX_KS_IRQ(x) ((x) + 0x08) #define LPC32XX_KS_SCAN_CTL(x) ((x) + 0x0C) #define LPC32XX_KS_FAST_TST(x) ((x) + 0x10) #define LPC32XX_KS_MATRIX_DIM(x) ((x) + 0x14) /* 1..8 */ #define LPC32XX_KS_DATA(x, y) ((x) + 0x40 + ((y) << 2)) #define LPC32XX_KSCAN_DEB_NUM_DEB_PASS(n) ((n) & 0xFF) #define LPC32XX_KSCAN_SCOND_IN_IDLE 0x0 #define LPC32XX_KSCAN_SCOND_IN_SCANONCE 0x1 #define LPC32XX_KSCAN_SCOND_IN_IRQGEN 0x2 #define LPC32XX_KSCAN_SCOND_IN_SCAN_MATRIX 0x3 #define LPC32XX_KSCAN_IRQ_PENDING_CLR 0x1 #define LPC32XX_KSCAN_SCTRL_SCAN_DELAY(n) ((n) & 0xFF) #define LPC32XX_KSCAN_FTST_FORCESCANONCE 0x1 #define LPC32XX_KSCAN_FTST_USE32K_CLK 0x2 #define LPC32XX_KSCAN_MSEL_SELECT(n) ((n) & 0xF) struct lpc32xx_kscan_drv { struct input_dev *input; struct clk *clk; void __iomem 
*kscan_base; unsigned int irq; u32 matrix_sz; /* Size of matrix in XxY, ie. 3 = 3x3 */ u32 deb_clks; /* Debounce clocks (based on 32KHz clock) */ u32 scan_delay; /* Scan delay (based on 32KHz clock) */ unsigned short *keymap; /* Pointer to key map for the scan matrix */ unsigned int row_shift; u8 lastkeystates[8]; }; static void lpc32xx_mod_states(struct lpc32xx_kscan_drv *kscandat, int col) { struct input_dev *input = kscandat->input; unsigned row, changed, scancode, keycode; u8 key; key = readl(LPC32XX_KS_DATA(kscandat->kscan_base, col)); changed = key ^ kscandat->lastkeystates[col]; kscandat->lastkeystates[col] = key; for (row = 0; changed; row++, changed >>= 1) { if (changed & 1) { /* Key state changed, signal an event */ scancode = MATRIX_SCAN_CODE(row, col, kscandat->row_shift); keycode = kscandat->keymap[scancode]; input_event(input, EV_MSC, MSC_SCAN, scancode); input_report_key(input, keycode, key & (1 << row)); } } } static irqreturn_t lpc32xx_kscan_irq(int irq, void *dev_id) { struct lpc32xx_kscan_drv *kscandat = dev_id; int i; for (i = 0; i < kscandat->matrix_sz; i++) lpc32xx_mod_states(kscandat, i); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); input_sync(kscandat->input); return IRQ_HANDLED; } static int lpc32xx_kscan_open(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); int error; error = clk_prepare_enable(kscandat->clk); if (error) return error; writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); return 0; } static void lpc32xx_kscan_close(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); } static int lpc32xx_parse_dt(struct device *dev, struct lpc32xx_kscan_drv *kscandat) { struct device_node *np = dev->of_node; u32 rows = 0, columns = 0; int err; err = matrix_keypad_parse_of_params(dev, &rows, &columns); if (err) return err; if (rows != columns) { dev_err(dev, "rows and columns must be 
equal!\n"); return -EINVAL; } kscandat->matrix_sz = rows; kscandat->row_shift = get_count_order(columns); of_property_read_u32(np, "nxp,debounce-delay-ms", &kscandat->deb_clks); of_property_read_u32(np, "nxp,scan-delay-ms", &kscandat->scan_delay); if (!kscandat->deb_clks || !kscandat->scan_delay) { dev_err(dev, "debounce or scan delay not specified\n"); return -EINVAL; } return 0; } static int lpc32xx_kscan_probe(struct platform_device *pdev) { struct lpc32xx_kscan_drv *kscandat; struct input_dev *input; struct resource *res; size_t keymap_size; int error; int irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get platform I/O memory\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0 || irq >= NR_IRQS) { dev_err(&pdev->dev, "failed to get platform irq\n"); return -EINVAL; } kscandat = devm_kzalloc(&pdev->dev, sizeof(*kscandat), GFP_KERNEL); if (!kscandat) return -ENOMEM; error = lpc32xx_parse_dt(&pdev->dev, kscandat); if (error) { dev_err(&pdev->dev, "failed to parse device tree\n"); return error; } keymap_size = sizeof(kscandat->keymap[0]) * (kscandat->matrix_sz << kscandat->row_shift); kscandat->keymap = devm_kzalloc(&pdev->dev, keymap_size, GFP_KERNEL); if (!kscandat->keymap) return -ENOMEM; kscandat->input = input = devm_input_allocate_device(&pdev->dev); if (!input) { dev_err(&pdev->dev, "failed to allocate input device\n"); return -ENOMEM; } /* Setup key input */ input->name = pdev->name; input->phys = "lpc32xx/input0"; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; input->open = lpc32xx_kscan_open; input->close = lpc32xx_kscan_close; input->dev.parent = &pdev->dev; input_set_capability(input, EV_MSC, MSC_SCAN); error = matrix_keypad_build_keymap(NULL, NULL, kscandat->matrix_sz, kscandat->matrix_sz, kscandat->keymap, kscandat->input); if (error) { dev_err(&pdev->dev, "failed to build keymap\n"); return error; } input_set_drvdata(kscandat->input, kscandat); 
kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(kscandat->kscan_base)) return PTR_ERR(kscandat->kscan_base); /* Get the key scanner clock */ kscandat->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(kscandat->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); return PTR_ERR(kscandat->clk); } /* Configure the key scanner */ error = clk_prepare_enable(kscandat->clk); if (error) return error; writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base)); writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base)); writel(LPC32XX_KSCAN_FTST_USE32K_CLK, LPC32XX_KS_FAST_TST(kscandat->kscan_base)); writel(kscandat->matrix_sz, LPC32XX_KS_MATRIX_DIM(kscandat->kscan_base)); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); error = devm_request_irq(&pdev->dev, irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat); if (error) { dev_err(&pdev->dev, "failed to request irq\n"); return error; } error = input_register_device(kscandat->input); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); return error; } platform_set_drvdata(pdev, kscandat); return 0; } #ifdef CONFIG_PM_SLEEP static int lpc32xx_kscan_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; mutex_lock(&input->mutex); if (input->users) { /* Clear IRQ and disable clock */ writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); } mutex_unlock(&input->mutex); return 0; } static int lpc32xx_kscan_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; int retval = 0; mutex_lock(&input->mutex); if (input->users) { /* Enable clock and clear IRQ */ retval = clk_prepare_enable(kscandat->clk); if (retval == 0) writel(1, 
LPC32XX_KS_IRQ(kscandat->kscan_base)); } mutex_unlock(&input->mutex); return retval; } #endif static SIMPLE_DEV_PM_OPS(lpc32xx_kscan_pm_ops, lpc32xx_kscan_suspend, lpc32xx_kscan_resume); static const struct of_device_id lpc32xx_kscan_match[] = { { .compatible = "nxp,lpc3220-key" }, {}, }; MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match); static struct platform_driver lpc32xx_kscan_driver = { .probe = lpc32xx_kscan_probe, .driver = { .name = DRV_NAME, .pm = &lpc32xx_kscan_pm_ops, .of_match_table = lpc32xx_kscan_match, } }; module_platform_driver(lpc32xx_kscan_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>"); MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); MODULE_DESCRIPTION("Key scanner driver for LPC32XX devices");
gpl-2.0
NXTnet/android_kernel_samsung_msm8916-caf
arch/x86/mm/init.c
1749
16014
#include <linux/gfp.h> #include <linux/initrd.h> #include <linux/ioport.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/bootmem.h> /* for max_low_pfn */ #include <asm/cacheflush.h> #include <asm/e820.h> #include <asm/init.h> #include <asm/page.h> #include <asm/page_types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> #include <asm/dma.h> /* for MAX_DMA_PFN */ #include <asm/microcode.h> #include "mm_internal.h" static unsigned long __initdata pgt_buf_start; static unsigned long __initdata pgt_buf_end; static unsigned long __initdata pgt_buf_top; static unsigned long min_pfn_mapped; static bool __initdata can_use_brk_pgt = true; /* * Pages returned are already directly mapped. * * Changing that is likely to break Xen, see commit: * * 279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve * * for detailed information. */ __ref void *alloc_low_pages(unsigned int num) { unsigned long pfn; int i; if (after_bootmem) { unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { unsigned long ret; if (min_pfn_mapped >= max_pfn_mapped) panic("alloc_low_page: ran out of memory"); ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT, PAGE_SIZE * num , PAGE_SIZE); if (!ret) panic("alloc_low_page: can not alloc memory"); memblock_reserve(ret, PAGE_SIZE * num); pfn = ret >> PAGE_SHIFT; } else { pfn = pgt_buf_end; pgt_buf_end += num; printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { void *adr; adr = __va((pfn + i) << PAGE_SHIFT); clear_page(adr); } return __va(pfn << PAGE_SHIFT); } /* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ #define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) 
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); void __init early_alloc_pgt_buf(void) { unsigned long tables = INIT_PGT_BUF_SIZE; phys_addr_t base; base = __pa(extend_brk(tables, PAGE_SIZE)); pgt_buf_start = base >> PAGE_SHIFT; pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); } int after_bootmem; int direct_gbpages #ifdef CONFIG_DIRECT_GBPAGES = 1 #endif ; static void __init init_gbpages(void) { #ifdef CONFIG_X86_64 if (direct_gbpages && cpu_has_gbpages) printk(KERN_INFO "Using GB pages for direct mapping\n"); else direct_gbpages = 0; #endif } struct map_range { unsigned long start; unsigned long end; unsigned page_size_mask; }; static int page_size_mask; static void __init probe_page_size_mask(void) { init_gbpages(); #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK) /* * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ if (direct_gbpages) page_size_mask |= 1 << PG_LEVEL_1G; if (cpu_has_pse) page_size_mask |= 1 << PG_LEVEL_2M; #endif /* Enable PSE if available */ if (cpu_has_pse) set_in_cr4(X86_CR4_PSE); /* Enable PGE if available */ if (cpu_has_pge) { set_in_cr4(X86_CR4_PGE); __supported_pte_mask |= _PAGE_GLOBAL; } } #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ #define NR_RANGE_MR 5 #endif static int __meminit save_mr(struct map_range *mr, int nr_range, unsigned long start_pfn, unsigned long end_pfn, unsigned long page_size_mask) { if (start_pfn < end_pfn) { if (nr_range >= NR_RANGE_MR) panic("run out of range for init_memory_mapping\n"); mr[nr_range].start = start_pfn<<PAGE_SHIFT; mr[nr_range].end = end_pfn<<PAGE_SHIFT; mr[nr_range].page_size_mask = page_size_mask; nr_range++; } return nr_range; } /* * adjust the page_size_mask for small range to go with * big page size instead small one if nearby are ram too. 
*/ static void __init_refok adjust_range_page_size_mask(struct map_range *mr, int nr_range) { int i; for (i = 0; i < nr_range; i++) { if ((page_size_mask & (1<<PG_LEVEL_2M)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { unsigned long start = round_down(mr[i].start, PMD_SIZE); unsigned long end = round_up(mr[i].end, PMD_SIZE); #ifdef CONFIG_X86_32 if ((end >> PAGE_SHIFT) > max_low_pfn) continue; #endif if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_2M; } if ((page_size_mask & (1<<PG_LEVEL_1G)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { unsigned long start = round_down(mr[i].start, PUD_SIZE); unsigned long end = round_up(mr[i].end, PUD_SIZE); if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_1G; } } } static int __meminit split_mem_range(struct map_range *mr, int nr_range, unsigned long start, unsigned long end) { unsigned long start_pfn, end_pfn, limit_pfn; unsigned long pfn; int i; limit_pfn = PFN_DOWN(end); /* head if not big page alignment ? */ pfn = start_pfn = PFN_DOWN(start); #ifdef CONFIG_X86_32 /* * Don't use a large page for the first 2/4MB of memory * because there are often fixed size MTRRs in there * and overlapping MTRRs into large pages can cause * slowdowns. 
*/ if (pfn == 0) end_pfn = PFN_DOWN(PMD_SIZE); else end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #endif if (end_pfn > limit_pfn) end_pfn = limit_pfn; if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); pfn = end_pfn; } /* big page (2M) range */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #ifdef CONFIG_X86_32 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #endif if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #ifdef CONFIG_X86_64 /* big page (1G) range */ start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); pfn = end_pfn; } /* tail is not big page (1G) alignment */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #endif /* tail is not big page (2M) alignment */ start_pfn = pfn; end_pfn = limit_pfn; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); if (!after_bootmem) adjust_range_page_size_mask(mr, nr_range); /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; if (mr[i].end != mr[i+1].start || mr[i].page_size_mask != mr[i+1].page_size_mask) continue; /* move it */ old_start = mr[i].start; memmove(&mr[i], &mr[i+1], (nr_range - 1 - i) * sizeof(struct map_range)); mr[i--].start = old_start; nr_range--; } for (i = 0; i < nr_range; i++) printk(KERN_DEBUG " [mem 
%#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); return nr_range; } struct range pfn_mapped[E820_X_MAX]; int nr_pfn_mapped; static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) { nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX, nr_pfn_mapped, start_pfn, end_pfn); nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX); max_pfn_mapped = max(max_pfn_mapped, end_pfn); if (start_pfn < (1UL<<(32-PAGE_SHIFT))) max_low_pfn_mapped = max(max_low_pfn_mapped, min(end_pfn, 1UL<<(32-PAGE_SHIFT))); } bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) { int i; for (i = 0; i < nr_pfn_mapped; i++) if ((start_pfn >= pfn_mapped[i].start) && (end_pfn <= pfn_mapped[i].end)) return true; return false; } /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. */ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end) { struct map_range mr[NR_RANGE_MR]; unsigned long ret = 0; int nr_range, i; pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n", start, end - 1); memset(mr, 0, sizeof(mr)); nr_range = split_mem_range(mr, 0, start, end); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, mr[i].page_size_mask); add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); return ret >> PAGE_SHIFT; } /* * We need to iterate through the E820 memory map and create direct mappings * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply * create direct mappings for all pfns from [0 to max_low_pfn) and * [4GB to max_pfn) because of possible memory holes in high addresses * that cannot be marked as UC by fixed/variable range MTRRs. 
* Depending on the alignment of E820 ranges, this may possibly result * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. * * init_mem_mapping() calls init_range_memory_mapping() with big range. * That range would have hole in the middle or ends, and only ram parts * will be mapped in init_range_memory_mapping(). */ static unsigned long __init init_range_memory_mapping( unsigned long r_start, unsigned long r_end) { unsigned long start_pfn, end_pfn; unsigned long mapped_ram_size = 0; int i; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end); if (start >= end) continue; /* * if it is overlapping with brk pgt, we need to * alloc pgt buf from memblock instead. */ can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= min(end, (u64)pgt_buf_top<<PAGE_SHIFT); init_memory_mapping(start, end); mapped_ram_size += end - start; can_use_brk_pgt = true; } return mapped_ram_size; } /* (PUD_SHIFT-PMD_SHIFT)/2 */ #define STEP_SIZE_SHIFT 5 void __init init_mem_mapping(void) { unsigned long end, real_end, start, last_start; unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; unsigned long new_mapped_ram_size; probe_page_size_mask(); #ifdef CONFIG_X86_64 end = max_pfn << PAGE_SHIFT; #else end = max_low_pfn << PAGE_SHIFT; #endif /* the ISA range is always mapped regardless of memory holes */ init_memory_mapping(0, ISA_END_ADDRESS); /* xen has big range in reserved near end of ram, skip it at first.*/ addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE); real_end = addr + PMD_SIZE; /* step_size need to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = start = real_end; /* * We start from the top (end of memory) and go to the bottom. 
* The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (last_start > ISA_END_ADDRESS) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); if (start < ISA_END_ADDRESS) start = ISA_END_ADDRESS; } else start = ISA_END_ADDRESS; new_mapped_ram_size = init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; /* only increase step_size after big range get mapped */ if (new_mapped_ram_size > mapped_ram_size) step_size <<= STEP_SIZE_SHIFT; mapped_ram_size += new_mapped_ram_size; } if (real_end < end) init_range_memory_mapping(real_end, end); #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { /* can we preseve max_low_pfn ?*/ max_low_pfn = max_pfn; } #else early_ioremap_page_table_range_init(); #endif load_cr3(swapper_pg_dir); __flush_tlb_all(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * * On x86, access has to be given to the first megabyte of ram because that area * contains bios code and data regions used by X and dosemu and similar apps. * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. 
*/ int devmem_is_allowed(unsigned long pagenr) { if (pagenr < 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) return 1; return 0; } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr; unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ begin_aligned = PAGE_ALIGN(begin); end_aligned = end & PAGE_MASK; if (WARN_ON(begin_aligned != begin || end_aligned != end)) { begin = begin_aligned; end = end_aligned; } if (begin >= end) return; addr = begin; /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ #ifdef CONFIG_DEBUG_PAGEALLOC printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n", begin, end - 1); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); #else /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that * writeable and non-executable first. */ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (; addr < end; addr += PAGE_SIZE) { memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); free_reserved_page(virt_to_page(addr)); } #endif } void free_initmem(void) { free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { #ifdef CONFIG_MICROCODE_EARLY /* * Remember, initrd memory may contain microcode or other useful things. * Before we lose initrd mem, we need to find a place to hold them * now that normal virtual memory is enabled. 
*/ save_microcode_in_initrd(); #endif /* * end could be not aligned, and We can not align that, * decompresser could be confused by aligned initrd_end * We already reserve the end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() * - relocate_initrd() * So here We can do PAGE_ALIGN() safely to get partial page to be freed */ free_init_pages("initrd memory", start, PAGE_ALIGN(end)); } #endif void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; #endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif free_area_init_nodes(max_zone_pfns); }
gpl-2.0
metacloud/linux
arch/powerpc/platforms/86xx/gef_ppc9a.c
2773
5709
/* * GE PPC9A board support * * Author: Martyn Welch <martyn.welch@ge.com> * * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines) * Copyright 2006 Freescale Semiconductor Inc. * * NEC fixup adapted from arch/mips/pci/fixup-lm2e.c */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/nvram.h> #include <sysdev/fsl_pci.h> #include <sysdev/fsl_soc.h> #include <sysdev/ge/ge_pic.h> #include "mpc86xx.h" #undef DEBUG #ifdef DEBUG #define DBG (fmt...) do { printk(KERN_ERR "PPC9A: " fmt); } while (0) #else #define DBG (fmt...) 
do { } while (0) #endif void __iomem *ppc9a_regs; static void __init gef_ppc9a_init_irq(void) { struct device_node *cascade_node = NULL; mpc86xx_init_irq(); /* * There is a simple interrupt handler in the main FPGA, this needs * to be cascaded into the MPIC */ cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic-1.00"); if (!cascade_node) { printk(KERN_WARNING "PPC9A: No FPGA PIC\n"); return; } gef_pic_init(cascade_node); of_node_put(cascade_node); } static void __init gef_ppc9a_setup_arch(void) { struct device_node *regs; printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n"); #ifdef CONFIG_SMP mpc86xx_smp_init(); #endif fsl_pci_assign_primary(); /* Remap basic board registers */ regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs"); if (regs) { ppc9a_regs = of_iomap(regs, 0); if (ppc9a_regs == NULL) printk(KERN_WARNING "Unable to map board registers\n"); of_node_put(regs); } #if defined(CONFIG_MMIO_NVRAM) mmio_nvram_init(); #endif } /* Return the PCB revision */ static unsigned int gef_ppc9a_get_pcb_rev(void) { unsigned int reg; reg = ioread32be(ppc9a_regs); return (reg >> 16) & 0xff; } /* Return the board (software) revision */ static unsigned int gef_ppc9a_get_board_rev(void) { unsigned int reg; reg = ioread32be(ppc9a_regs); return (reg >> 8) & 0xff; } /* Return the FPGA revision */ static unsigned int gef_ppc9a_get_fpga_rev(void) { unsigned int reg; reg = ioread32be(ppc9a_regs); return reg & 0xf; } /* Return VME Geographical Address */ static unsigned int gef_ppc9a_get_vme_geo_addr(void) { unsigned int reg; reg = ioread32be(ppc9a_regs + 0x4); return reg & 0x1f; } /* Return VME System Controller Status */ static unsigned int gef_ppc9a_get_vme_is_syscon(void) { unsigned int reg; reg = ioread32be(ppc9a_regs + 0x4); return (reg >> 9) & 0x1; } static void gef_ppc9a_show_cpuinfo(struct seq_file *m) { uint svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); seq_printf(m, "Revision\t: %u%c\n", 
gef_ppc9a_get_pcb_rev(), ('A' + gef_ppc9a_get_board_rev())); seq_printf(m, "FPGA Revision\t: %u\n", gef_ppc9a_get_fpga_rev()); seq_printf(m, "SVR\t\t: 0x%x\n", svid); seq_printf(m, "VME geo. addr\t: %u\n", gef_ppc9a_get_vme_geo_addr()); seq_printf(m, "VME syscon\t: %s\n", gef_ppc9a_get_vme_is_syscon() ? "yes" : "no"); } static void gef_ppc9a_nec_fixup(struct pci_dev *pdev) { unsigned int val; /* Do not do the fixup on other platforms! */ if (!machine_is(gef_ppc9a)) return; printk(KERN_INFO "Running NEC uPD720101 Fixup\n"); /* Ensure ports 1, 2, 3, 4 & 5 are enabled */ pci_read_config_dword(pdev, 0xe0, &val); pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x5); /* System clock is 48-MHz Oscillator and EHCI Enabled. */ pci_write_config_dword(pdev, 0xe4, 1 << 5); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, gef_ppc9a_nec_fixup); /* * Called very early, device-tree isn't unflattened * * This function is called to determine whether the BSP is compatible with the * supplied device-tree, which is assumed to be the correct one for the actual * board. It is expected thati, in the future, a kernel may support multiple * boards. 
*/ static int __init gef_ppc9a_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "gef,ppc9a")) return 1; return 0; } static long __init mpc86xx_time_init(void) { unsigned int temp; /* Set the time base to zero */ mtspr(SPRN_TBWL, 0); mtspr(SPRN_TBWU, 0); temp = mfspr(SPRN_HID0); temp |= HID0_TBEN; mtspr(SPRN_HID0, temp); asm volatile("isync"); return 0; } static __initdata struct of_device_id of_bus_ids[] = { { .compatible = "simple-bus", }, { .compatible = "gianfar", }, { .compatible = "fsl,mpc8641-pcie", }, {}, }; static int __init declare_of_platform_devices(void) { printk(KERN_DEBUG "Probe platform devices\n"); of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_arch_initcall(gef_ppc9a, declare_of_platform_devices); define_machine(gef_ppc9a) { .name = "GE PPC9A", .probe = gef_ppc9a_probe, .setup_arch = gef_ppc9a_setup_arch, .init_IRQ = gef_ppc9a_init_irq, .show_cpuinfo = gef_ppc9a_show_cpuinfo, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .time_init = mpc86xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
gpl-2.0
gh2o/rk3066-linux
drivers/isdn/hisax/avma1_cs.c
3029
4249
/* * PCMCIA client driver for AVM A1 / Fritz!PCMCIA * * Author Carsten Paeth * Copyright 1998-2001 by Carsten Paeth <calle@calle.in-berlin.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/io.h> #include <asm/system.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA cards"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int isdnprot = 2; module_param(isdnprot, int, 0); /*====================================================================*/ static int avma1cs_config(struct pcmcia_device *link) __devinit ; static void avma1cs_release(struct pcmcia_device *link); static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); /* General socket configuration */ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; p_dev->config_index = 1; p_dev->config_regs = PRESENT_OPTION; return avma1cs_config(p_dev); } /* avma1cs_attach */ static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); avma1cs_release(link); kfree(link->priv); } /* avma1cs_detach */ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->end = 16; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 5; return pcmcia_request_io(p_dev); } static int __devinit avma1cs_config(struct pcmcia_device *link) { int i = -1; char 
devname[128]; IsdnCard_t icard; int busy = 0; dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); devname[0] = 0; if (link->prod_id[1]) strlcpy(devname, link->prod_id[1], sizeof(devname)); if (pcmcia_loop_config(link, avma1cs_configcheck, NULL)) return -ENODEV; do { /* * allocate an interrupt line */ if (!link->irq) { /* undo */ pcmcia_disable_device(link); break; } /* * configure the PCMCIA socket */ i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; } } while (0); /* If any step failed, release any partially configured state */ if (i != 0) { avma1cs_release(link); return -ENODEV; } icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = isdnprot; icard.typ = ISDN_CTYPE_A1_PCMCIA; i = hisax_init_pcmcia(link, &busy, &icard); if (i < 0) { printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 " "PCMCIA %d at i/o %#x\n", i, (unsigned int) link->resource[0]->start); avma1cs_release(link); return -ENODEV; } link->priv = (void *) (unsigned long) i; return 0; } /* avma1cs_config */ static void avma1cs_release(struct pcmcia_device *link) { unsigned long minor = (unsigned long) link->priv; dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); /* now unregister function with hisax */ HiSax_closecard(minor); pcmcia_disable_device(link); } /* avma1cs_release */ static const struct pcmcia_device_id avma1cs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids); static struct pcmcia_driver avma1cs_driver = { .owner = THIS_MODULE, .name = "avma1_cs", .probe = avma1cs_probe, .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; static int __init init_avma1_cs(void) { return pcmcia_register_driver(&avma1cs_driver); } static void __exit exit_avma1_cs(void) { pcmcia_unregister_driver(&avma1cs_driver); } module_init(init_avma1_cs); module_exit(exit_avma1_cs);
gpl-2.0
Luquidtester/DirtyKernel-3x-ION
drivers/hwmon/lm70.c
3029
6042
/* * lm70.c * * The LM70 is a temperature sensor chip from National Semiconductor (NS). * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com> * * The LM70 communicates with a host processor via an SPI/Microwire Bus * interface. The complete datasheet is available at National's website * here: * http://www.national.com/pf/LM/LM70.html * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/hwmon.h> #include <linux/mutex.h> #include <linux/mod_devicetable.h> #include <linux/spi/spi.h> #include <linux/slab.h> #define DRVNAME "lm70" #define LM70_CHIP_LM70 0 /* original NS LM70 */ #define LM70_CHIP_TMP121 1 /* TI TMP121/TMP123 */ struct lm70 { struct device *hwmon_dev; struct mutex lock; unsigned int chip; }; /* sysfs hook function */ static ssize_t lm70_sense_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_device *spi = to_spi_device(dev); int status, val = 0; u8 rxbuf[2]; s16 raw=0; struct lm70 *p_lm70 = spi_get_drvdata(spi); if (mutex_lock_interruptible(&p_lm70->lock)) return -ERESTARTSYS; /* * spi_read() requires a DMA-safe buffer; so we use * spi_write_then_read(), transmitting 0 bytes. 
*/ status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2); if (status < 0) { pr_warn("spi_write_then_read failed with status %d\n", status); goto out; } raw = (rxbuf[0] << 8) + rxbuf[1]; dev_dbg(dev, "rxbuf[0] : 0x%02x rxbuf[1] : 0x%02x raw=0x%04x\n", rxbuf[0], rxbuf[1], raw); /* * LM70: * The "raw" temperature read into rxbuf[] is a 16-bit signed 2's * complement value. Only the MSB 11 bits (1 sign + 10 temperature * bits) are meaningful; the LSB 5 bits are to be discarded. * See the datasheet. * * Further, each bit represents 0.25 degrees Celsius; so, multiply * by 0.25. Also multiply by 1000 to represent in millidegrees * Celsius. * So it's equivalent to multiplying by 0.25 * 1000 = 250. * * TMP121/TMP123: * 13 bits of 2's complement data, discard LSB 3 bits, * resolution 0.0625 degrees celsius. */ switch (p_lm70->chip) { case LM70_CHIP_LM70: val = ((int)raw / 32) * 250; break; case LM70_CHIP_TMP121: val = ((int)raw / 8) * 625 / 10; break; } status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */ out: mutex_unlock(&p_lm70->lock); return status; } static DEVICE_ATTR(temp1_input, S_IRUGO, lm70_sense_temp, NULL); static ssize_t lm70_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct lm70 *p_lm70 = dev_get_drvdata(dev); int ret; switch (p_lm70->chip) { case LM70_CHIP_LM70: ret = sprintf(buf, "lm70\n"); break; case LM70_CHIP_TMP121: ret = sprintf(buf, "tmp121\n"); break; default: ret = -EINVAL; } return ret; } static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL); /*----------------------------------------------------------------------*/ static int __devinit lm70_probe(struct spi_device *spi) { int chip = spi_get_device_id(spi)->driver_data; struct lm70 *p_lm70; int status; /* signaling is SPI_MODE_0 for both LM70 and TMP121 */ if (spi->mode & (SPI_CPOL | SPI_CPHA)) return -EINVAL; /* 3-wire link (shared SI/SO) for LM70 */ if (chip == LM70_CHIP_LM70 && !(spi->mode & SPI_3WIRE)) return -EINVAL; /* NOTE: we assume 8-bit 
words, and convert to 16 bits manually */ p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL); if (!p_lm70) return -ENOMEM; mutex_init(&p_lm70->lock); p_lm70->chip = chip; /* sysfs hook */ p_lm70->hwmon_dev = hwmon_device_register(&spi->dev); if (IS_ERR(p_lm70->hwmon_dev)) { dev_dbg(&spi->dev, "hwmon_device_register failed.\n"); status = PTR_ERR(p_lm70->hwmon_dev); goto out_dev_reg_failed; } spi_set_drvdata(spi, p_lm70); if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input)) || (status = device_create_file(&spi->dev, &dev_attr_name))) { dev_dbg(&spi->dev, "device_create_file failure.\n"); goto out_dev_create_file_failed; } return 0; out_dev_create_file_failed: device_remove_file(&spi->dev, &dev_attr_temp1_input); hwmon_device_unregister(p_lm70->hwmon_dev); out_dev_reg_failed: spi_set_drvdata(spi, NULL); kfree(p_lm70); return status; } static int __devexit lm70_remove(struct spi_device *spi) { struct lm70 *p_lm70 = spi_get_drvdata(spi); device_remove_file(&spi->dev, &dev_attr_temp1_input); device_remove_file(&spi->dev, &dev_attr_name); hwmon_device_unregister(p_lm70->hwmon_dev); spi_set_drvdata(spi, NULL); kfree(p_lm70); return 0; } static const struct spi_device_id lm70_ids[] = { { "lm70", LM70_CHIP_LM70 }, { "tmp121", LM70_CHIP_TMP121 }, { }, }; MODULE_DEVICE_TABLE(spi, lm70_ids); static struct spi_driver lm70_driver = { .driver = { .name = "lm70", .owner = THIS_MODULE, }, .id_table = lm70_ids, .probe = lm70_probe, .remove = __devexit_p(lm70_remove), }; static int __init init_lm70(void) { return spi_register_driver(&lm70_driver); } static void __exit cleanup_lm70(void) { spi_unregister_driver(&lm70_driver); } module_init(init_lm70); module_exit(cleanup_lm70); MODULE_AUTHOR("Kaiwan N Billimoria"); MODULE_DESCRIPTION("NS LM70 / TI TMP121/TMP123 Linux driver"); MODULE_LICENSE("GPL");
gpl-2.0
pio-masaki/kernel_at300se
drivers/isdn/hisax/avma1_cs.c
3029
4249
/* * PCMCIA client driver for AVM A1 / Fritz!PCMCIA * * Author Carsten Paeth * Copyright 1998-2001 by Carsten Paeth <calle@calle.in-berlin.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/io.h> #include <asm/system.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA cards"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int isdnprot = 2; module_param(isdnprot, int, 0); /*====================================================================*/ static int avma1cs_config(struct pcmcia_device *link) __devinit ; static void avma1cs_release(struct pcmcia_device *link); static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); /* General socket configuration */ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; p_dev->config_index = 1; p_dev->config_regs = PRESENT_OPTION; return avma1cs_config(p_dev); } /* avma1cs_attach */ static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); avma1cs_release(link); kfree(link->priv); } /* avma1cs_detach */ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->end = 16; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 5; return pcmcia_request_io(p_dev); } static int __devinit avma1cs_config(struct pcmcia_device *link) { int i = -1; char 
devname[128]; IsdnCard_t icard; int busy = 0; dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); devname[0] = 0; if (link->prod_id[1]) strlcpy(devname, link->prod_id[1], sizeof(devname)); if (pcmcia_loop_config(link, avma1cs_configcheck, NULL)) return -ENODEV; do { /* * allocate an interrupt line */ if (!link->irq) { /* undo */ pcmcia_disable_device(link); break; } /* * configure the PCMCIA socket */ i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; } } while (0); /* If any step failed, release any partially configured state */ if (i != 0) { avma1cs_release(link); return -ENODEV; } icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = isdnprot; icard.typ = ISDN_CTYPE_A1_PCMCIA; i = hisax_init_pcmcia(link, &busy, &icard); if (i < 0) { printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 " "PCMCIA %d at i/o %#x\n", i, (unsigned int) link->resource[0]->start); avma1cs_release(link); return -ENODEV; } link->priv = (void *) (unsigned long) i; return 0; } /* avma1cs_config */ static void avma1cs_release(struct pcmcia_device *link) { unsigned long minor = (unsigned long) link->priv; dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); /* now unregister function with hisax */ HiSax_closecard(minor); pcmcia_disable_device(link); } /* avma1cs_release */ static const struct pcmcia_device_id avma1cs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids); static struct pcmcia_driver avma1cs_driver = { .owner = THIS_MODULE, .name = "avma1_cs", .probe = avma1cs_probe, .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; static int __init init_avma1_cs(void) { return pcmcia_register_driver(&avma1cs_driver); } static void __exit exit_avma1_cs(void) { pcmcia_unregister_driver(&avma1cs_driver); } module_init(init_avma1_cs); module_exit(exit_avma1_cs);
gpl-2.0
andip71/boeffla-kernel-omnirom-s3
sound/pci/maestro3.c
3541
87295
/* * Driver for ESS Maestro3/Allegro (ES1988) soundcards. * Copyright (c) 2000 by Zach Brown <zab@zabbo.net> * Takashi Iwai <tiwai@suse.de> * * Most of the hardware init stuffs are based on maestro3 driver for * OSS/Free by Zach Brown. Many thanks to Zach! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * ChangeLog: * Aug. 27, 2001 * - Fixed deadlock on capture * - Added Canyon3D-2 support by Rob Riggs <rob@pangalactic.org> * */ #define CARD_NAME "ESS Maestro3/Allegro/Canyon3D-2" #define DRIVER_NAME "Maestro3" #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/input.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/mpu401.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <asm/byteorder.h> MODULE_AUTHOR("Zach Brown <zab@zabbo.net>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ESS Maestro3 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,Maestro3 PCI}," "{ESS,ES1988}," "{ESS,Allegro PCI}," "{ESS,Allegro-1 PCI}," "{ESS,Canyon3D-2/LE PCI}}"); MODULE_FIRMWARE("ess/maestro3_assp_kernel.fw"); 
MODULE_FIRMWARE("ess/maestro3_assp_minisrc.fw"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* all enabled */ static int external_amp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; static int amp_gpio[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable this soundcard."); module_param_array(external_amp, bool, NULL, 0444); MODULE_PARM_DESC(external_amp, "Enable external amp for " CARD_NAME " soundcard."); module_param_array(amp_gpio, int, NULL, 0444); MODULE_PARM_DESC(amp_gpio, "GPIO pin number for external amp. (default = -1)"); #define MAX_PLAYBACKS 2 #define MAX_CAPTURES 1 #define NR_DSPS (MAX_PLAYBACKS + MAX_CAPTURES) /* * maestro3 registers */ /* Allegro PCI configuration registers */ #define PCI_LEGACY_AUDIO_CTRL 0x40 #define SOUND_BLASTER_ENABLE 0x00000001 #define FM_SYNTHESIS_ENABLE 0x00000002 #define GAME_PORT_ENABLE 0x00000004 #define MPU401_IO_ENABLE 0x00000008 #define MPU401_IRQ_ENABLE 0x00000010 #define ALIAS_10BIT_IO 0x00000020 #define SB_DMA_MASK 0x000000C0 #define SB_DMA_0 0x00000040 #define SB_DMA_1 0x00000040 #define SB_DMA_R 0x00000080 #define SB_DMA_3 0x000000C0 #define SB_IRQ_MASK 0x00000700 #define SB_IRQ_5 0x00000000 #define SB_IRQ_7 0x00000100 #define SB_IRQ_9 0x00000200 #define SB_IRQ_10 0x00000300 #define MIDI_IRQ_MASK 0x00003800 #define SERIAL_IRQ_ENABLE 0x00004000 #define DISABLE_LEGACY 0x00008000 #define PCI_ALLEGRO_CONFIG 0x50 #define SB_ADDR_240 0x00000004 #define MPU_ADDR_MASK 0x00000018 #define MPU_ADDR_330 0x00000000 #define MPU_ADDR_300 0x00000008 #define MPU_ADDR_320 0x00000010 
#define MPU_ADDR_340 0x00000018 #define USE_PCI_TIMING 0x00000040 #define POSTED_WRITE_ENABLE 0x00000080 #define DMA_POLICY_MASK 0x00000700 #define DMA_DDMA 0x00000000 #define DMA_TDMA 0x00000100 #define DMA_PCPCI 0x00000200 #define DMA_WBDMA16 0x00000400 #define DMA_WBDMA4 0x00000500 #define DMA_WBDMA2 0x00000600 #define DMA_WBDMA1 0x00000700 #define DMA_SAFE_GUARD 0x00000800 #define HI_PERF_GP_ENABLE 0x00001000 #define PIC_SNOOP_MODE_0 0x00002000 #define PIC_SNOOP_MODE_1 0x00004000 #define SOUNDBLASTER_IRQ_MASK 0x00008000 #define RING_IN_ENABLE 0x00010000 #define SPDIF_TEST_MODE 0x00020000 #define CLK_MULT_MODE_SELECT_2 0x00040000 #define EEPROM_WRITE_ENABLE 0x00080000 #define CODEC_DIR_IN 0x00100000 #define HV_BUTTON_FROM_GD 0x00200000 #define REDUCED_DEBOUNCE 0x00400000 #define HV_CTRL_ENABLE 0x00800000 #define SPDIF_ENABLE 0x01000000 #define CLK_DIV_SELECT 0x06000000 #define CLK_DIV_BY_48 0x00000000 #define CLK_DIV_BY_49 0x02000000 #define CLK_DIV_BY_50 0x04000000 #define CLK_DIV_RESERVED 0x06000000 #define PM_CTRL_ENABLE 0x08000000 #define CLK_MULT_MODE_SELECT 0x30000000 #define CLK_MULT_MODE_SHIFT 28 #define CLK_MULT_MODE_0 0x00000000 #define CLK_MULT_MODE_1 0x10000000 #define CLK_MULT_MODE_2 0x20000000 #define CLK_MULT_MODE_3 0x30000000 #define INT_CLK_SELECT 0x40000000 #define INT_CLK_MULT_RESET 0x80000000 /* M3 */ #define INT_CLK_SRC_NOT_PCI 0x00100000 #define INT_CLK_MULT_ENABLE 0x80000000 #define PCI_ACPI_CONTROL 0x54 #define PCI_ACPI_D0 0x00000000 #define PCI_ACPI_D1 0xB4F70000 #define PCI_ACPI_D2 0xB4F7B4F7 #define PCI_USER_CONFIG 0x58 #define EXT_PCI_MASTER_ENABLE 0x00000001 #define SPDIF_OUT_SELECT 0x00000002 #define TEST_PIN_DIR_CTRL 0x00000004 #define AC97_CODEC_TEST 0x00000020 #define TRI_STATE_BUFFER 0x00000080 #define IN_CLK_12MHZ_SELECT 0x00000100 #define MULTI_FUNC_DISABLE 0x00000200 #define EXT_MASTER_PAIR_SEL 0x00000400 #define PCI_MASTER_SUPPORT 0x00000800 #define STOP_CLOCK_ENABLE 0x00001000 #define EAPD_DRIVE_ENABLE 0x00002000 #define 
REQ_TRI_STATE_ENABLE 0x00004000 #define REQ_LOW_ENABLE 0x00008000 #define MIDI_1_ENABLE 0x00010000 #define MIDI_2_ENABLE 0x00020000 #define SB_AUDIO_SYNC 0x00040000 #define HV_CTRL_TEST 0x00100000 #define SOUNDBLASTER_TEST 0x00400000 #define PCI_USER_CONFIG_C 0x5C #define PCI_DDMA_CTRL 0x60 #define DDMA_ENABLE 0x00000001 /* Allegro registers */ #define HOST_INT_CTRL 0x18 #define SB_INT_ENABLE 0x0001 #define MPU401_INT_ENABLE 0x0002 #define ASSP_INT_ENABLE 0x0010 #define RING_INT_ENABLE 0x0020 #define HV_INT_ENABLE 0x0040 #define CLKRUN_GEN_ENABLE 0x0100 #define HV_CTRL_TO_PME 0x0400 #define SOFTWARE_RESET_ENABLE 0x8000 /* * should be using the above defines, probably. */ #define REGB_ENABLE_RESET 0x01 #define REGB_STOP_CLOCK 0x10 #define HOST_INT_STATUS 0x1A #define SB_INT_PENDING 0x01 #define MPU401_INT_PENDING 0x02 #define ASSP_INT_PENDING 0x10 #define RING_INT_PENDING 0x20 #define HV_INT_PENDING 0x40 #define HARDWARE_VOL_CTRL 0x1B #define SHADOW_MIX_REG_VOICE 0x1C #define HW_VOL_COUNTER_VOICE 0x1D #define SHADOW_MIX_REG_MASTER 0x1E #define HW_VOL_COUNTER_MASTER 0x1F #define CODEC_COMMAND 0x30 #define CODEC_READ_B 0x80 #define CODEC_STATUS 0x30 #define CODEC_BUSY_B 0x01 #define CODEC_DATA 0x32 #define RING_BUS_CTRL_A 0x36 #define RAC_PME_ENABLE 0x0100 #define RAC_SDFS_ENABLE 0x0200 #define LAC_PME_ENABLE 0x0400 #define LAC_SDFS_ENABLE 0x0800 #define SERIAL_AC_LINK_ENABLE 0x1000 #define IO_SRAM_ENABLE 0x2000 #define IIS_INPUT_ENABLE 0x8000 #define RING_BUS_CTRL_B 0x38 #define SECOND_CODEC_ID_MASK 0x0003 #define SPDIF_FUNC_ENABLE 0x0010 #define SECOND_AC_ENABLE 0x0020 #define SB_MODULE_INTF_ENABLE 0x0040 #define SSPE_ENABLE 0x0040 #define M3I_DOCK_ENABLE 0x0080 #define SDO_OUT_DEST_CTRL 0x3A #define COMMAND_ADDR_OUT 0x0003 #define PCM_LR_OUT_LOCAL 0x0000 #define PCM_LR_OUT_REMOTE 0x0004 #define PCM_LR_OUT_MUTE 0x0008 #define PCM_LR_OUT_BOTH 0x000C #define LINE1_DAC_OUT_LOCAL 0x0000 #define LINE1_DAC_OUT_REMOTE 0x0010 #define LINE1_DAC_OUT_MUTE 0x0020 #define 
LINE1_DAC_OUT_BOTH 0x0030 #define PCM_CLS_OUT_LOCAL 0x0000 #define PCM_CLS_OUT_REMOTE 0x0040 #define PCM_CLS_OUT_MUTE 0x0080 #define PCM_CLS_OUT_BOTH 0x00C0 #define PCM_RLF_OUT_LOCAL 0x0000 #define PCM_RLF_OUT_REMOTE 0x0100 #define PCM_RLF_OUT_MUTE 0x0200 #define PCM_RLF_OUT_BOTH 0x0300 #define LINE2_DAC_OUT_LOCAL 0x0000 #define LINE2_DAC_OUT_REMOTE 0x0400 #define LINE2_DAC_OUT_MUTE 0x0800 #define LINE2_DAC_OUT_BOTH 0x0C00 #define HANDSET_OUT_LOCAL 0x0000 #define HANDSET_OUT_REMOTE 0x1000 #define HANDSET_OUT_MUTE 0x2000 #define HANDSET_OUT_BOTH 0x3000 #define IO_CTRL_OUT_LOCAL 0x0000 #define IO_CTRL_OUT_REMOTE 0x4000 #define IO_CTRL_OUT_MUTE 0x8000 #define IO_CTRL_OUT_BOTH 0xC000 #define SDO_IN_DEST_CTRL 0x3C #define STATUS_ADDR_IN 0x0003 #define PCM_LR_IN_LOCAL 0x0000 #define PCM_LR_IN_REMOTE 0x0004 #define PCM_LR_RESERVED 0x0008 #define PCM_LR_IN_BOTH 0x000C #define LINE1_ADC_IN_LOCAL 0x0000 #define LINE1_ADC_IN_REMOTE 0x0010 #define LINE1_ADC_IN_MUTE 0x0020 #define MIC_ADC_IN_LOCAL 0x0000 #define MIC_ADC_IN_REMOTE 0x0040 #define MIC_ADC_IN_MUTE 0x0080 #define LINE2_DAC_IN_LOCAL 0x0000 #define LINE2_DAC_IN_REMOTE 0x0400 #define LINE2_DAC_IN_MUTE 0x0800 #define HANDSET_IN_LOCAL 0x0000 #define HANDSET_IN_REMOTE 0x1000 #define HANDSET_IN_MUTE 0x2000 #define IO_STATUS_IN_LOCAL 0x0000 #define IO_STATUS_IN_REMOTE 0x4000 #define SPDIF_IN_CTRL 0x3E #define SPDIF_IN_ENABLE 0x0001 #define GPIO_DATA 0x60 #define GPIO_DATA_MASK 0x0FFF #define GPIO_HV_STATUS 0x3000 #define GPIO_PME_STATUS 0x4000 #define GPIO_MASK 0x64 #define GPIO_DIRECTION 0x68 #define GPO_PRIMARY_AC97 0x0001 #define GPI_LINEOUT_SENSE 0x0004 #define GPO_SECONDARY_AC97 0x0008 #define GPI_VOL_DOWN 0x0010 #define GPI_VOL_UP 0x0020 #define GPI_IIS_CLK 0x0040 #define GPI_IIS_LRCLK 0x0080 #define GPI_IIS_DATA 0x0100 #define GPI_DOCKING_STATUS 0x0100 #define GPI_HEADPHONE_SENSE 0x0200 #define GPO_EXT_AMP_SHUTDOWN 0x1000 #define GPO_EXT_AMP_M3 1 /* default m3 amp */ #define GPO_EXT_AMP_ALLEGRO 8 /* default allegro 
amp */ /* M3 */ #define GPO_M3_EXT_AMP_SHUTDN 0x0002 #define ASSP_INDEX_PORT 0x80 #define ASSP_MEMORY_PORT 0x82 #define ASSP_DATA_PORT 0x84 #define MPU401_DATA_PORT 0x98 #define MPU401_STATUS_PORT 0x99 #define CLK_MULT_DATA_PORT 0x9C #define ASSP_CONTROL_A 0xA2 #define ASSP_0_WS_ENABLE 0x01 #define ASSP_CTRL_A_RESERVED1 0x02 #define ASSP_CTRL_A_RESERVED2 0x04 #define ASSP_CLK_49MHZ_SELECT 0x08 #define FAST_PLU_ENABLE 0x10 #define ASSP_CTRL_A_RESERVED3 0x20 #define DSP_CLK_36MHZ_SELECT 0x40 #define ASSP_CONTROL_B 0xA4 #define RESET_ASSP 0x00 #define RUN_ASSP 0x01 #define ENABLE_ASSP_CLOCK 0x00 #define STOP_ASSP_CLOCK 0x10 #define RESET_TOGGLE 0x40 #define ASSP_CONTROL_C 0xA6 #define ASSP_HOST_INT_ENABLE 0x01 #define FM_ADDR_REMAP_DISABLE 0x02 #define HOST_WRITE_PORT_ENABLE 0x08 #define ASSP_HOST_INT_STATUS 0xAC #define DSP2HOST_REQ_PIORECORD 0x01 #define DSP2HOST_REQ_I2SRATE 0x02 #define DSP2HOST_REQ_TIMER 0x04 /* AC97 registers */ /* XXX fix this crap up */ /*#define AC97_RESET 0x00*/ #define AC97_VOL_MUTE_B 0x8000 #define AC97_VOL_M 0x1F #define AC97_LEFT_VOL_S 8 #define AC97_MASTER_VOL 0x02 #define AC97_LINE_LEVEL_VOL 0x04 #define AC97_MASTER_MONO_VOL 0x06 #define AC97_PC_BEEP_VOL 0x0A #define AC97_PC_BEEP_VOL_M 0x0F #define AC97_SROUND_MASTER_VOL 0x38 #define AC97_PC_BEEP_VOL_S 1 /*#define AC97_PHONE_VOL 0x0C #define AC97_MIC_VOL 0x0E*/ #define AC97_MIC_20DB_ENABLE 0x40 /*#define AC97_LINEIN_VOL 0x10 #define AC97_CD_VOL 0x12 #define AC97_VIDEO_VOL 0x14 #define AC97_AUX_VOL 0x16*/ #define AC97_PCM_OUT_VOL 0x18 /*#define AC97_RECORD_SELECT 0x1A*/ #define AC97_RECORD_MIC 0x00 #define AC97_RECORD_CD 0x01 #define AC97_RECORD_VIDEO 0x02 #define AC97_RECORD_AUX 0x03 #define AC97_RECORD_MONO_MUX 0x02 #define AC97_RECORD_DIGITAL 0x03 #define AC97_RECORD_LINE 0x04 #define AC97_RECORD_STEREO 0x05 #define AC97_RECORD_MONO 0x06 #define AC97_RECORD_PHONE 0x07 /*#define AC97_RECORD_GAIN 0x1C*/ #define AC97_RECORD_VOL_M 0x0F /*#define AC97_GENERAL_PURPOSE 0x20*/ #define 
AC97_POWER_DOWN_CTRL 0x26 #define AC97_ADC_READY 0x0001 #define AC97_DAC_READY 0x0002 #define AC97_ANALOG_READY 0x0004 #define AC97_VREF_ON 0x0008 #define AC97_PR0 0x0100 #define AC97_PR1 0x0200 #define AC97_PR2 0x0400 #define AC97_PR3 0x0800 #define AC97_PR4 0x1000 #define AC97_RESERVED1 0x28 #define AC97_VENDOR_TEST 0x5A #define AC97_CLOCK_DELAY 0x5C #define AC97_LINEOUT_MUX_SEL 0x0001 #define AC97_MONO_MUX_SEL 0x0002 #define AC97_CLOCK_DELAY_SEL 0x1F #define AC97_DAC_CDS_SHIFT 6 #define AC97_ADC_CDS_SHIFT 11 #define AC97_MULTI_CHANNEL_SEL 0x74 /*#define AC97_VENDOR_ID1 0x7C #define AC97_VENDOR_ID2 0x7E*/ /* * ASSP control regs */ #define DSP_PORT_TIMER_COUNT 0x06 #define DSP_PORT_MEMORY_INDEX 0x80 #define DSP_PORT_MEMORY_TYPE 0x82 #define MEMTYPE_INTERNAL_CODE 0x0002 #define MEMTYPE_INTERNAL_DATA 0x0003 #define MEMTYPE_MASK 0x0003 #define DSP_PORT_MEMORY_DATA 0x84 #define DSP_PORT_CONTROL_REG_A 0xA2 #define DSP_PORT_CONTROL_REG_B 0xA4 #define DSP_PORT_CONTROL_REG_C 0xA6 #define REV_A_CODE_MEMORY_BEGIN 0x0000 #define REV_A_CODE_MEMORY_END 0x0FFF #define REV_A_CODE_MEMORY_UNIT_LENGTH 0x0040 #define REV_A_CODE_MEMORY_LENGTH (REV_A_CODE_MEMORY_END - REV_A_CODE_MEMORY_BEGIN + 1) #define REV_B_CODE_MEMORY_BEGIN 0x0000 #define REV_B_CODE_MEMORY_END 0x0BFF #define REV_B_CODE_MEMORY_UNIT_LENGTH 0x0040 #define REV_B_CODE_MEMORY_LENGTH (REV_B_CODE_MEMORY_END - REV_B_CODE_MEMORY_BEGIN + 1) #define REV_A_DATA_MEMORY_BEGIN 0x1000 #define REV_A_DATA_MEMORY_END 0x2FFF #define REV_A_DATA_MEMORY_UNIT_LENGTH 0x0080 #define REV_A_DATA_MEMORY_LENGTH (REV_A_DATA_MEMORY_END - REV_A_DATA_MEMORY_BEGIN + 1) #define REV_B_DATA_MEMORY_BEGIN 0x1000 #define REV_B_DATA_MEMORY_END 0x2BFF #define REV_B_DATA_MEMORY_UNIT_LENGTH 0x0080 #define REV_B_DATA_MEMORY_LENGTH (REV_B_DATA_MEMORY_END - REV_B_DATA_MEMORY_BEGIN + 1) #define NUM_UNITS_KERNEL_CODE 16 #define NUM_UNITS_KERNEL_DATA 2 #define NUM_UNITS_KERNEL_CODE_WITH_HSP 16 #define NUM_UNITS_KERNEL_DATA_WITH_HSP 5 /* * Kernel data layout */ 
#define DP_SHIFT_COUNT 7 #define KDATA_BASE_ADDR 0x1000 #define KDATA_BASE_ADDR2 0x1080 #define KDATA_TASK0 (KDATA_BASE_ADDR + 0x0000) #define KDATA_TASK1 (KDATA_BASE_ADDR + 0x0001) #define KDATA_TASK2 (KDATA_BASE_ADDR + 0x0002) #define KDATA_TASK3 (KDATA_BASE_ADDR + 0x0003) #define KDATA_TASK4 (KDATA_BASE_ADDR + 0x0004) #define KDATA_TASK5 (KDATA_BASE_ADDR + 0x0005) #define KDATA_TASK6 (KDATA_BASE_ADDR + 0x0006) #define KDATA_TASK7 (KDATA_BASE_ADDR + 0x0007) #define KDATA_TASK_ENDMARK (KDATA_BASE_ADDR + 0x0008) #define KDATA_CURRENT_TASK (KDATA_BASE_ADDR + 0x0009) #define KDATA_TASK_SWITCH (KDATA_BASE_ADDR + 0x000A) #define KDATA_INSTANCE0_POS3D (KDATA_BASE_ADDR + 0x000B) #define KDATA_INSTANCE1_POS3D (KDATA_BASE_ADDR + 0x000C) #define KDATA_INSTANCE2_POS3D (KDATA_BASE_ADDR + 0x000D) #define KDATA_INSTANCE3_POS3D (KDATA_BASE_ADDR + 0x000E) #define KDATA_INSTANCE4_POS3D (KDATA_BASE_ADDR + 0x000F) #define KDATA_INSTANCE5_POS3D (KDATA_BASE_ADDR + 0x0010) #define KDATA_INSTANCE6_POS3D (KDATA_BASE_ADDR + 0x0011) #define KDATA_INSTANCE7_POS3D (KDATA_BASE_ADDR + 0x0012) #define KDATA_INSTANCE8_POS3D (KDATA_BASE_ADDR + 0x0013) #define KDATA_INSTANCE_POS3D_ENDMARK (KDATA_BASE_ADDR + 0x0014) #define KDATA_INSTANCE0_SPKVIRT (KDATA_BASE_ADDR + 0x0015) #define KDATA_INSTANCE_SPKVIRT_ENDMARK (KDATA_BASE_ADDR + 0x0016) #define KDATA_INSTANCE0_SPDIF (KDATA_BASE_ADDR + 0x0017) #define KDATA_INSTANCE_SPDIF_ENDMARK (KDATA_BASE_ADDR + 0x0018) #define KDATA_INSTANCE0_MODEM (KDATA_BASE_ADDR + 0x0019) #define KDATA_INSTANCE_MODEM_ENDMARK (KDATA_BASE_ADDR + 0x001A) #define KDATA_INSTANCE0_SRC (KDATA_BASE_ADDR + 0x001B) #define KDATA_INSTANCE1_SRC (KDATA_BASE_ADDR + 0x001C) #define KDATA_INSTANCE_SRC_ENDMARK (KDATA_BASE_ADDR + 0x001D) #define KDATA_INSTANCE0_MINISRC (KDATA_BASE_ADDR + 0x001E) #define KDATA_INSTANCE1_MINISRC (KDATA_BASE_ADDR + 0x001F) #define KDATA_INSTANCE2_MINISRC (KDATA_BASE_ADDR + 0x0020) #define KDATA_INSTANCE3_MINISRC (KDATA_BASE_ADDR + 0x0021) #define 
KDATA_INSTANCE_MINISRC_ENDMARK (KDATA_BASE_ADDR + 0x0022) #define KDATA_INSTANCE0_CPYTHRU (KDATA_BASE_ADDR + 0x0023) #define KDATA_INSTANCE1_CPYTHRU (KDATA_BASE_ADDR + 0x0024) #define KDATA_INSTANCE_CPYTHRU_ENDMARK (KDATA_BASE_ADDR + 0x0025) #define KDATA_CURRENT_DMA (KDATA_BASE_ADDR + 0x0026) #define KDATA_DMA_SWITCH (KDATA_BASE_ADDR + 0x0027) #define KDATA_DMA_ACTIVE (KDATA_BASE_ADDR + 0x0028) #define KDATA_DMA_XFER0 (KDATA_BASE_ADDR + 0x0029) #define KDATA_DMA_XFER1 (KDATA_BASE_ADDR + 0x002A) #define KDATA_DMA_XFER2 (KDATA_BASE_ADDR + 0x002B) #define KDATA_DMA_XFER3 (KDATA_BASE_ADDR + 0x002C) #define KDATA_DMA_XFER4 (KDATA_BASE_ADDR + 0x002D) #define KDATA_DMA_XFER5 (KDATA_BASE_ADDR + 0x002E) #define KDATA_DMA_XFER6 (KDATA_BASE_ADDR + 0x002F) #define KDATA_DMA_XFER7 (KDATA_BASE_ADDR + 0x0030) #define KDATA_DMA_XFER8 (KDATA_BASE_ADDR + 0x0031) #define KDATA_DMA_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0032) #define KDATA_I2S_SAMPLE_COUNT (KDATA_BASE_ADDR + 0x0033) #define KDATA_I2S_INT_METER (KDATA_BASE_ADDR + 0x0034) #define KDATA_I2S_ACTIVE (KDATA_BASE_ADDR + 0x0035) #define KDATA_TIMER_COUNT_RELOAD (KDATA_BASE_ADDR + 0x0036) #define KDATA_TIMER_COUNT_CURRENT (KDATA_BASE_ADDR + 0x0037) #define KDATA_HALT_SYNCH_CLIENT (KDATA_BASE_ADDR + 0x0038) #define KDATA_HALT_SYNCH_DMA (KDATA_BASE_ADDR + 0x0039) #define KDATA_HALT_ACKNOWLEDGE (KDATA_BASE_ADDR + 0x003A) #define KDATA_ADC1_XFER0 (KDATA_BASE_ADDR + 0x003B) #define KDATA_ADC1_XFER_ENDMARK (KDATA_BASE_ADDR + 0x003C) #define KDATA_ADC1_LEFT_VOLUME (KDATA_BASE_ADDR + 0x003D) #define KDATA_ADC1_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x003E) #define KDATA_ADC1_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x003F) #define KDATA_ADC1_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0040) #define KDATA_ADC2_XFER0 (KDATA_BASE_ADDR + 0x0041) #define KDATA_ADC2_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0042) #define KDATA_ADC2_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0043) #define KDATA_ADC2_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x0044) #define KDATA_ADC2_LEFT_SUR_VOL (KDATA_BASE_ADDR 
+ 0x0045) #define KDATA_ADC2_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0046) #define KDATA_CD_XFER0 (KDATA_BASE_ADDR + 0x0047) #define KDATA_CD_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0048) #define KDATA_CD_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0049) #define KDATA_CD_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x004A) #define KDATA_CD_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x004B) #define KDATA_CD_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x004C) #define KDATA_MIC_XFER0 (KDATA_BASE_ADDR + 0x004D) #define KDATA_MIC_XFER_ENDMARK (KDATA_BASE_ADDR + 0x004E) #define KDATA_MIC_VOLUME (KDATA_BASE_ADDR + 0x004F) #define KDATA_MIC_SUR_VOL (KDATA_BASE_ADDR + 0x0050) #define KDATA_I2S_XFER0 (KDATA_BASE_ADDR + 0x0051) #define KDATA_I2S_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0052) #define KDATA_CHI_XFER0 (KDATA_BASE_ADDR + 0x0053) #define KDATA_CHI_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0054) #define KDATA_SPDIF_XFER (KDATA_BASE_ADDR + 0x0055) #define KDATA_SPDIF_CURRENT_FRAME (KDATA_BASE_ADDR + 0x0056) #define KDATA_SPDIF_FRAME0 (KDATA_BASE_ADDR + 0x0057) #define KDATA_SPDIF_FRAME1 (KDATA_BASE_ADDR + 0x0058) #define KDATA_SPDIF_FRAME2 (KDATA_BASE_ADDR + 0x0059) #define KDATA_SPDIF_REQUEST (KDATA_BASE_ADDR + 0x005A) #define KDATA_SPDIF_TEMP (KDATA_BASE_ADDR + 0x005B) #define KDATA_SPDIFIN_XFER0 (KDATA_BASE_ADDR + 0x005C) #define KDATA_SPDIFIN_XFER_ENDMARK (KDATA_BASE_ADDR + 0x005D) #define KDATA_SPDIFIN_INT_METER (KDATA_BASE_ADDR + 0x005E) #define KDATA_DSP_RESET_COUNT (KDATA_BASE_ADDR + 0x005F) #define KDATA_DEBUG_OUTPUT (KDATA_BASE_ADDR + 0x0060) #define KDATA_KERNEL_ISR_LIST (KDATA_BASE_ADDR + 0x0061) #define KDATA_KERNEL_ISR_CBSR1 (KDATA_BASE_ADDR + 0x0062) #define KDATA_KERNEL_ISR_CBER1 (KDATA_BASE_ADDR + 0x0063) #define KDATA_KERNEL_ISR_CBCR (KDATA_BASE_ADDR + 0x0064) #define KDATA_KERNEL_ISR_AR0 (KDATA_BASE_ADDR + 0x0065) #define KDATA_KERNEL_ISR_AR1 (KDATA_BASE_ADDR + 0x0066) #define KDATA_KERNEL_ISR_AR2 (KDATA_BASE_ADDR + 0x0067) #define KDATA_KERNEL_ISR_AR3 (KDATA_BASE_ADDR + 0x0068) #define KDATA_KERNEL_ISR_AR4 
(KDATA_BASE_ADDR + 0x0069) #define KDATA_KERNEL_ISR_AR5 (KDATA_BASE_ADDR + 0x006A) #define KDATA_KERNEL_ISR_BRCR (KDATA_BASE_ADDR + 0x006B) #define KDATA_KERNEL_ISR_PASR (KDATA_BASE_ADDR + 0x006C) #define KDATA_KERNEL_ISR_PAER (KDATA_BASE_ADDR + 0x006D) #define KDATA_CLIENT_SCRATCH0 (KDATA_BASE_ADDR + 0x006E) #define KDATA_CLIENT_SCRATCH1 (KDATA_BASE_ADDR + 0x006F) #define KDATA_KERNEL_SCRATCH (KDATA_BASE_ADDR + 0x0070) #define KDATA_KERNEL_ISR_SCRATCH (KDATA_BASE_ADDR + 0x0071) #define KDATA_OUEUE_LEFT (KDATA_BASE_ADDR + 0x0072) #define KDATA_QUEUE_RIGHT (KDATA_BASE_ADDR + 0x0073) #define KDATA_ADC1_REQUEST (KDATA_BASE_ADDR + 0x0074) #define KDATA_ADC2_REQUEST (KDATA_BASE_ADDR + 0x0075) #define KDATA_CD_REQUEST (KDATA_BASE_ADDR + 0x0076) #define KDATA_MIC_REQUEST (KDATA_BASE_ADDR + 0x0077) #define KDATA_ADC1_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0078) #define KDATA_ADC2_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0079) #define KDATA_CD_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007A) #define KDATA_MIC_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007B) #define KDATA_MIC_SYNC_COUNTER (KDATA_BASE_ADDR + 0x007C) /* * second 'segment' (?) reserved for mixer * buffers.. 
*/ #define KDATA_MIXER_WORD0 (KDATA_BASE_ADDR2 + 0x0000) #define KDATA_MIXER_WORD1 (KDATA_BASE_ADDR2 + 0x0001) #define KDATA_MIXER_WORD2 (KDATA_BASE_ADDR2 + 0x0002) #define KDATA_MIXER_WORD3 (KDATA_BASE_ADDR2 + 0x0003) #define KDATA_MIXER_WORD4 (KDATA_BASE_ADDR2 + 0x0004) #define KDATA_MIXER_WORD5 (KDATA_BASE_ADDR2 + 0x0005) #define KDATA_MIXER_WORD6 (KDATA_BASE_ADDR2 + 0x0006) #define KDATA_MIXER_WORD7 (KDATA_BASE_ADDR2 + 0x0007) #define KDATA_MIXER_WORD8 (KDATA_BASE_ADDR2 + 0x0008) #define KDATA_MIXER_WORD9 (KDATA_BASE_ADDR2 + 0x0009) #define KDATA_MIXER_WORDA (KDATA_BASE_ADDR2 + 0x000A) #define KDATA_MIXER_WORDB (KDATA_BASE_ADDR2 + 0x000B) #define KDATA_MIXER_WORDC (KDATA_BASE_ADDR2 + 0x000C) #define KDATA_MIXER_WORDD (KDATA_BASE_ADDR2 + 0x000D) #define KDATA_MIXER_WORDE (KDATA_BASE_ADDR2 + 0x000E) #define KDATA_MIXER_WORDF (KDATA_BASE_ADDR2 + 0x000F) #define KDATA_MIXER_XFER0 (KDATA_BASE_ADDR2 + 0x0010) #define KDATA_MIXER_XFER1 (KDATA_BASE_ADDR2 + 0x0011) #define KDATA_MIXER_XFER2 (KDATA_BASE_ADDR2 + 0x0012) #define KDATA_MIXER_XFER3 (KDATA_BASE_ADDR2 + 0x0013) #define KDATA_MIXER_XFER4 (KDATA_BASE_ADDR2 + 0x0014) #define KDATA_MIXER_XFER5 (KDATA_BASE_ADDR2 + 0x0015) #define KDATA_MIXER_XFER6 (KDATA_BASE_ADDR2 + 0x0016) #define KDATA_MIXER_XFER7 (KDATA_BASE_ADDR2 + 0x0017) #define KDATA_MIXER_XFER8 (KDATA_BASE_ADDR2 + 0x0018) #define KDATA_MIXER_XFER9 (KDATA_BASE_ADDR2 + 0x0019) #define KDATA_MIXER_XFER_ENDMARK (KDATA_BASE_ADDR2 + 0x001A) #define KDATA_MIXER_TASK_NUMBER (KDATA_BASE_ADDR2 + 0x001B) #define KDATA_CURRENT_MIXER (KDATA_BASE_ADDR2 + 0x001C) #define KDATA_MIXER_ACTIVE (KDATA_BASE_ADDR2 + 0x001D) #define KDATA_MIXER_BANK_STATUS (KDATA_BASE_ADDR2 + 0x001E) #define KDATA_DAC_LEFT_VOLUME (KDATA_BASE_ADDR2 + 0x001F) #define KDATA_DAC_RIGHT_VOLUME (KDATA_BASE_ADDR2 + 0x0020) #define MAX_INSTANCE_MINISRC (KDATA_INSTANCE_MINISRC_ENDMARK - KDATA_INSTANCE0_MINISRC) #define MAX_VIRTUAL_DMA_CHANNELS (KDATA_DMA_XFER_ENDMARK - KDATA_DMA_XFER0) #define 
MAX_VIRTUAL_MIXER_CHANNELS (KDATA_MIXER_XFER_ENDMARK - KDATA_MIXER_XFER0) #define MAX_VIRTUAL_ADC1_CHANNELS (KDATA_ADC1_XFER_ENDMARK - KDATA_ADC1_XFER0) /* * client data area offsets */ #define CDATA_INSTANCE_READY 0x00 #define CDATA_HOST_SRC_ADDRL 0x01 #define CDATA_HOST_SRC_ADDRH 0x02 #define CDATA_HOST_SRC_END_PLUS_1L 0x03 #define CDATA_HOST_SRC_END_PLUS_1H 0x04 #define CDATA_HOST_SRC_CURRENTL 0x05 #define CDATA_HOST_SRC_CURRENTH 0x06 #define CDATA_IN_BUF_CONNECT 0x07 #define CDATA_OUT_BUF_CONNECT 0x08 #define CDATA_IN_BUF_BEGIN 0x09 #define CDATA_IN_BUF_END_PLUS_1 0x0A #define CDATA_IN_BUF_HEAD 0x0B #define CDATA_IN_BUF_TAIL 0x0C #define CDATA_OUT_BUF_BEGIN 0x0D #define CDATA_OUT_BUF_END_PLUS_1 0x0E #define CDATA_OUT_BUF_HEAD 0x0F #define CDATA_OUT_BUF_TAIL 0x10 #define CDATA_DMA_CONTROL 0x11 #define CDATA_RESERVED 0x12 #define CDATA_FREQUENCY 0x13 #define CDATA_LEFT_VOLUME 0x14 #define CDATA_RIGHT_VOLUME 0x15 #define CDATA_LEFT_SUR_VOL 0x16 #define CDATA_RIGHT_SUR_VOL 0x17 #define CDATA_HEADER_LEN 0x18 #define SRC3_DIRECTION_OFFSET CDATA_HEADER_LEN #define SRC3_MODE_OFFSET (CDATA_HEADER_LEN + 1) #define SRC3_WORD_LENGTH_OFFSET (CDATA_HEADER_LEN + 2) #define SRC3_PARAMETER_OFFSET (CDATA_HEADER_LEN + 3) #define SRC3_COEFF_ADDR_OFFSET (CDATA_HEADER_LEN + 8) #define SRC3_FILTAP_ADDR_OFFSET (CDATA_HEADER_LEN + 10) #define SRC3_TEMP_INBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 16) #define SRC3_TEMP_OUTBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 17) #define MINISRC_IN_BUFFER_SIZE ( 0x50 * 2 ) #define MINISRC_OUT_BUFFER_SIZE ( 0x50 * 2 * 2) #define MINISRC_TMP_BUFFER_SIZE ( 112 + ( MINISRC_BIQUAD_STAGE * 3 + 4 ) * 2 * 2 ) #define MINISRC_BIQUAD_STAGE 2 #define MINISRC_COEF_LOC 0x175 #define DMACONTROL_BLOCK_MASK 0x000F #define DMAC_BLOCK0_SELECTOR 0x0000 #define DMAC_BLOCK1_SELECTOR 0x0001 #define DMAC_BLOCK2_SELECTOR 0x0002 #define DMAC_BLOCK3_SELECTOR 0x0003 #define DMAC_BLOCK4_SELECTOR 0x0004 #define DMAC_BLOCK5_SELECTOR 0x0005 #define DMAC_BLOCK6_SELECTOR 0x0006 #define 
DMAC_BLOCK7_SELECTOR 0x0007 #define DMAC_BLOCK8_SELECTOR 0x0008 #define DMAC_BLOCK9_SELECTOR 0x0009 #define DMAC_BLOCKA_SELECTOR 0x000A #define DMAC_BLOCKB_SELECTOR 0x000B #define DMAC_BLOCKC_SELECTOR 0x000C #define DMAC_BLOCKD_SELECTOR 0x000D #define DMAC_BLOCKE_SELECTOR 0x000E #define DMAC_BLOCKF_SELECTOR 0x000F #define DMACONTROL_PAGE_MASK 0x00F0 #define DMAC_PAGE0_SELECTOR 0x0030 #define DMAC_PAGE1_SELECTOR 0x0020 #define DMAC_PAGE2_SELECTOR 0x0010 #define DMAC_PAGE3_SELECTOR 0x0000 #define DMACONTROL_AUTOREPEAT 0x1000 #define DMACONTROL_STOPPED 0x2000 #define DMACONTROL_DIRECTION 0x0100 /* * an arbitrary volume we set the internal * volume settings to so that the ac97 volume * range is a little less insane. 0x7fff is * max. */ #define ARB_VOLUME ( 0x6800 ) /* */ struct m3_list { int curlen; int mem_addr; int max; }; struct m3_dma { int number; struct snd_pcm_substream *substream; struct assp_instance { unsigned short code, data; } inst; int running; int opened; unsigned long buffer_addr; int dma_size; int period_size; unsigned int hwptr; int count; int index[3]; struct m3_list *index_list[3]; int in_lists; struct list_head list; }; struct snd_m3 { struct snd_card *card; unsigned long iobase; int irq; unsigned int allegro_flag : 1; struct snd_ac97 *ac97; struct snd_pcm *pcm; struct pci_dev *pci; int dacs_active; int timer_users; struct m3_list msrc_list; struct m3_list mixer_list; struct m3_list adc1_list; struct m3_list dma_list; /* for storing reset state..*/ u8 reset_state; int external_amp; int amp_gpio; /* gpio pin # for external amp, -1 = default */ unsigned int hv_config; /* hardware-volume config bits */ unsigned irda_workaround :1; /* avoid to touch 0x10 on GPIO_DIRECTION (e.g. for IrDA on Dell Inspirons) */ unsigned is_omnibook :1; /* Do HP OmniBook GPIO magic? 
*/
	/* midi */
	struct snd_rawmidi *rmidi;

	/* pcm streams */
	int num_substreams;
	struct m3_dma *substreams;

	/* protects ASSP/DSP register access and per-stream DMA state */
	spinlock_t reg_lock;

#ifdef CONFIG_SND_MAESTRO3_INPUT
	struct input_dev *input_dev;	/* hw volume buttons reported as input events */
	char phys[64];			/* physical device path */
#else
	/* hw volume handled by poking the AC'97 master controls directly */
	spinlock_t ac97_lock;
	struct snd_kcontrol *master_switch;
	struct snd_kcontrol *master_volume;
	struct tasklet_struct hwvol_tq;
#endif

	/* nonzero during suspend/resume; spurious HV irqs are ignored then */
	unsigned int in_suspend;

#ifdef CONFIG_PM
	u16 *suspend_mem;	/* saved DSP code/data image */
#endif

	const struct firmware *assp_kernel_image;
	const struct firmware *assp_minisrc_image;
};

/*
 * pci ids
 */
static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = {
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO_1, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2LE, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_1, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_HW, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_2, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{0,},
};

MODULE_DEVICE_TABLE(pci, snd_m3_ids);

/* boards needing the external amplifier GPO; quirk value is the GPO number */
static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
	SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
	SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
	SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
	SND_PCI_QUIRK(0x1509, 0x1740, "LEGEND ZhaoYang 3100CF", 0x03),
	{ } /* END */
};

/* boards whose IrDA port shares a GPIO pin with the codec reset line */
static struct snd_pci_quirk m3_irda_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK(0x1028, 0x00b0, "Dell Inspiron 4000", 1),
	SND_PCI_QUIRK(0x1028, 0x00a4, "Dell Inspiron 8000", 1),
	SND_PCI_QUIRK(0x1028, 0x00e6, "Dell Inspiron 8100", 1),
	{ } /* END */
};

/* hardware volume quirks */
static struct snd_pci_quirk m3_hv_quirk_list[] __devinitdata = {
	/* Allegro chips */
	SND_PCI_QUIRK(0x0E11, 0x002E, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0x0094, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0xB112, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0xB114, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x0012, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x0018, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001C, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001D, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001E, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x107B, 0x3350, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x8338, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833C, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833D, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833E, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833F, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x1018, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x1019, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x101A, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F03, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F04, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F05, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB400, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB795, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB797, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xC700, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x1033, 0x80F1, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x103C, 0x001A, NULL, /* HP OmniBook 6100 */
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x107B, 0x340A, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x107B, 0x3450, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x109F, 0x3134, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x109F, 0x3161, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3280, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3281, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC002, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC003, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1509, 0x1740, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1610, 0x0010, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1042, 0x1042, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x107B, 0x9500, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x14FF, 0x0F06, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x1558, 0x8586, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x161F, 0x2011, NULL, HV_CTRL_ENABLE),
	/* Maestro3 chips */
	SND_PCI_QUIRK(0x103C, 0x000E, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x0010, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x0011, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x001B, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x104D, 0x80A6, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x104D, 0x80AA, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x107B, 0x5300, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x110A, 0x1998, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x1015, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x101C, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x1802, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x1599, 0x0715, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x5643, 0x5643, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x144D, 0x3260, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3261, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC000, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC001, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	{ } /* END */
};

/* HP Omnibook quirks */
static struct snd_pci_quirk m3_omnibook_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK_ID(0x103c, 0x0010), /* HP OmniBook 6000 */
	SND_PCI_QUIRK_ID(0x103c, 0x0011), /* HP OmniBook 500 */
	{ } /* END */
};

/*
 * lowlevel functions
 *
 * Thin wrappers for port I/O relative to the chip's I/O base.
 */
static inline void snd_m3_outw(struct snd_m3 *chip, u16 value, unsigned long reg)
{
	outw(value, chip->iobase + reg);
}

static inline u16 snd_m3_inw(struct snd_m3 *chip, unsigned long reg)
{
	return inw(chip->iobase + reg);
}

static inline void snd_m3_outb(struct snd_m3 *chip, u8 value, unsigned long reg)
{
	outb(value, chip->iobase + reg);
}

static inline u8 snd_m3_inb(struct snd_m3 *chip, unsigned long reg)
{
	return inb(chip->iobase + reg);
}

/*
 * access 16bit words to the code or data regions of the dsp's memory.
 * index addresses 16bit words.
*/
static u16 snd_m3_assp_read(struct snd_m3 *chip, u16 region, u16 index)
{
	/* select region, then index, then read the data port */
	snd_m3_outw(chip, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
	snd_m3_outw(chip, index, DSP_PORT_MEMORY_INDEX);
	return snd_m3_inw(chip, DSP_PORT_MEMORY_DATA);
}

static void snd_m3_assp_write(struct snd_m3 *chip, u16 region, u16 index,
			      u16 data)
{
	/* same indexed-access protocol as snd_m3_assp_read(), but writing */
	snd_m3_outw(chip, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
	snd_m3_outw(chip, index, DSP_PORT_MEMORY_INDEX);
	snd_m3_outw(chip, data, DSP_PORT_MEMORY_DATA);
}

/* stop the ASSP, saving the (clock-enabled) control state for later restart */
static void snd_m3_assp_halt(struct snd_m3 *chip)
{
	chip->reset_state = snd_m3_inb(chip, DSP_PORT_CONTROL_REG_B) & ~REGB_STOP_CLOCK;
	msleep(10);
	snd_m3_outb(chip, chip->reset_state & ~REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}

/* take the ASSP out of reset using the state saved by snd_m3_assp_halt() */
static void snd_m3_assp_continue(struct snd_m3 *chip)
{
	snd_m3_outb(chip, chip->reset_state | REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}

/*
 * This makes me sad. the maestro3 has lists
 * internally that must be packed.. 0 terminates,
 * apparently, or maybe all unused entries have
 * to be 0, the lists have static lengths set
 * by the binary code images.
*/

/* append @val to a packed DSP list; returns the slot index used */
static int snd_m3_add_list(struct snd_m3 *chip, struct m3_list *list, u16 val)
{
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  list->mem_addr + list->curlen,
			  val);
	return list->curlen++;
}

/* remove slot @index, keeping the list packed by moving the last entry down */
static void snd_m3_remove_list(struct snd_m3 *chip, struct m3_list *list, int index)
{
	u16  val;
	int lastindex = list->curlen - 1;

	if (index != lastindex) {
		val = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				       list->mem_addr + lastindex);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  list->mem_addr + index,
				  val);
	}

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  list->mem_addr + lastindex,
			  0);

	list->curlen--;
}

/* first user arms the DSP timer (240 ticks) and enables CLKRUN generation */
static void snd_m3_inc_timer_users(struct snd_m3 *chip)
{
	chip->timer_users++;
	if (chip->timer_users != 1)
		return;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_TIMER_COUNT_RELOAD,
			  240);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_TIMER_COUNT_CURRENT,
			  240);

	snd_m3_outw(chip,
		    snd_m3_inw(chip, HOST_INT_CTRL) | CLKRUN_GEN_ENABLE,
		    HOST_INT_CTRL);
}

/* last user disarms the DSP timer and disables CLKRUN generation */
static void snd_m3_dec_timer_users(struct snd_m3 *chip)
{
	chip->timer_users--;
	if (chip->timer_users > 0)
		return;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_TIMER_COUNT_RELOAD,
			  0);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_TIMER_COUNT_CURRENT,
			  0);

	snd_m3_outw(chip,
		    snd_m3_inw(chip, HOST_INT_CTRL) & ~CLKRUN_GEN_ENABLE,
		    HOST_INT_CTRL);
}

/*
 * start/stop
 */

/* spinlock held! */
static int snd_m3_pcm_start(struct snd_m3 *chip, struct m3_dma *s,
			    struct snd_pcm_substream *subs)
{
	if (! s || ! subs)
		return -EINVAL;

	snd_m3_inc_timer_users(chip);
	switch (subs->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		chip->dacs_active++;
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + CDATA_INSTANCE_READY, 1);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_MIXER_TASK_NUMBER,
				  chip->dacs_active);
		break;
	case SNDRV_PCM_STREAM_CAPTURE:
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_ADC1_REQUEST, 1);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + CDATA_INSTANCE_READY, 1);
		break;
	}
	return 0;
}

/* spinlock held! */
static int snd_m3_pcm_stop(struct snd_m3 *chip, struct m3_dma *s,
			   struct snd_pcm_substream *subs)
{
	if (! s || ! subs)
		return -EINVAL;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_INSTANCE_READY, 0);
	snd_m3_dec_timer_users(chip);
	switch (subs->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		chip->dacs_active--;
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_MIXER_TASK_NUMBER,
				  chip->dacs_active);
		break;
	case SNDRV_PCM_STREAM_CAPTURE:
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_ADC1_REQUEST, 0);
		break;
	}
	return 0;
}

/* ALSA trigger callback: dispatch start/stop under the register lock */
static int snd_m3_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct m3_dma *s = subs->runtime->private_data;
	int err = -EINVAL;

	if (snd_BUG_ON(!s))
		return -ENXIO;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (s->running)
			err = -EBUSY;
		else {
			s->running = 1;
			err = snd_m3_pcm_start(chip, s, subs);
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (! s->running)
			err = 0; /* should return error? */
		else {
			s->running = 0;
			err = snd_m3_pcm_stop(chip, s, subs);
		}
		break;
	}
	spin_unlock(&chip->reg_lock);
	return err;
}

/*
 * setup
 */

/* program host DMA buffer pointers and DSP in/out buffer layout */
static void snd_m3_pcm_setup1(struct snd_m3 *chip, struct m3_dma *s,
			      struct snd_pcm_substream *subs)
{
	int dsp_in_size, dsp_out_size, dsp_in_buffer, dsp_out_buffer;
	struct snd_pcm_runtime *runtime = subs->runtime;

	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x20 * 2);
		dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x20 * 2);
	} else {
		dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x10 * 2);
		dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x10 * 2);
	}
	dsp_in_buffer = s->inst.data + (MINISRC_TMP_BUFFER_SIZE / 2);
	dsp_out_buffer = dsp_in_buffer + (dsp_in_size / 2) + 1;

	s->dma_size = frames_to_bytes(runtime, runtime->buffer_size);
	s->period_size = frames_to_bytes(runtime, runtime->period_size);
	s->hwptr = 0;
	s->count = 0;

#define LO(x) ((x) & 0xffff)
#define HI(x) LO((x) >> 16)

	/* host dma buffer pointers */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_ADDRL,
			  LO(s->buffer_addr));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_ADDRH,
			  HI(s->buffer_addr));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_END_PLUS_1L,
			  LO(s->buffer_addr + s->dma_size));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_END_PLUS_1H,
			  HI(s->buffer_addr + s->dma_size));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_CURRENTL,
			  LO(s->buffer_addr));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_CURRENTH,
			  HI(s->buffer_addr));
#undef LO
#undef HI

	/* dsp buffers */

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_BEGIN,
			  dsp_in_buffer);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_END_PLUS_1,
			  dsp_in_buffer + (dsp_in_size / 2));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_HEAD,
			  dsp_in_buffer);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_TAIL,
			  dsp_in_buffer);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_BEGIN,
			  dsp_out_buffer);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_END_PLUS_1,
			  dsp_out_buffer + (dsp_out_size / 2));

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_HEAD,
			  dsp_out_buffer);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_TAIL,
			  dsp_out_buffer);
}

/* register the stream in the DSP task lists and set channel/format/rate */
static void snd_m3_pcm_setup2(struct snd_m3 *chip, struct m3_dma *s,
			      struct snd_pcm_runtime *runtime)
{
	u32 freq;

	/*
	 * put us in the lists if we're not already there
	 */
	if (! s->in_lists) {
		s->index[0] = snd_m3_add_list(chip, s->index_list[0],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->index[1] = snd_m3_add_list(chip, s->index_list[1],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->index[2] = snd_m3_add_list(chip, s->index_list[2],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->in_lists = 1;
	}

	/* write to 'mono' word */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 1,
			  runtime->channels == 2 ? 0 : 1);
	/* write to '8bit' word */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 2,
			  snd_pcm_format_width(runtime->format) == 16 ? 0 : 1);

	/* set up dac/adc rate */
	freq = ((runtime->rate << 15) + 24000 ) / 48000;
	if (freq)
		freq--;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_FREQUENCY,
			  freq);
}

/* static per-client minisrc initializers for playback */
static const struct play_vals {
	u16 addr, val;
} pv[] = {
	{CDATA_LEFT_VOLUME, ARB_VOLUME},
	{CDATA_RIGHT_VOLUME, ARB_VOLUME},
	{SRC3_DIRECTION_OFFSET, 0} ,
	/* +1, +2 are stereo/16 bit */
	{SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction? */
	{SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
	{SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
	{SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
	{SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
	{SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
	{SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
	{SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
	{SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* higher byte mark */
	{SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
	{SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
	{SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
	{SRC3_DIRECTION_OFFSET + 16, 8}, /* numin */
	{SRC3_DIRECTION_OFFSET + 17, 50*2}, /* numout */
	{SRC3_DIRECTION_OFFSET + 18, MINISRC_BIQUAD_STAGE - 1}, /* numstage */
	{SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
	{SRC3_DIRECTION_OFFSET + 21, 0} /* booster */
};

/* the mode passed should be already shifted and masked */
static void snd_m3_playback_setup(struct snd_m3 *chip, struct m3_dma *s,
				  struct snd_pcm_substream *subs)
{
	unsigned int i;

	/*
	 * some per client initializers
	 */

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 12,
			  s->inst.data + 40 + 8);

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 19,
			  s->inst.code + MINISRC_COEF_LOC);

	/* enable or disable low pass filter? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 22,
			  subs->runtime->rate > 45000 ? 0xff : 0);

	/* tell it which way dma is going? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_DMA_CONTROL,
			  DMACONTROL_AUTOREPEAT + DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

	/*
	 * set an armload of static initializers
	 */
	for (i = 0; i < ARRAY_SIZE(pv); i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + pv[i].addr, pv[i].val);
}

/*
 *    Native record driver
 */

/* static per-client minisrc initializers for capture */
static const struct rec_vals {
	u16 addr, val;
} rv[] = {
	{CDATA_LEFT_VOLUME, ARB_VOLUME},
	{CDATA_RIGHT_VOLUME, ARB_VOLUME},
	{SRC3_DIRECTION_OFFSET, 1} ,
	/* +1, +2 are stereo/16 bit */
	{SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction? */
	{SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
	{SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
	{SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
	{SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
	{SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
	{SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
	{SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
	{SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* higher byte mark */
	{SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
	{SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
	{SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
	{SRC3_DIRECTION_OFFSET + 16, 50},/* numin */
	{SRC3_DIRECTION_OFFSET + 17, 8}, /* numout */
	{SRC3_DIRECTION_OFFSET + 18, 0}, /* numstage */
	{SRC3_DIRECTION_OFFSET + 19, 0}, /* coef */
	{SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
	{SRC3_DIRECTION_OFFSET + 21, 0}, /* booster */
	{SRC3_DIRECTION_OFFSET + 22, 0xff} /* skip lpf */
};

static void snd_m3_capture_setup(struct snd_m3 *chip, struct m3_dma *s,
				 struct snd_pcm_substream *subs)
{
	unsigned int i;

	/*
	 * some per client initializers
	 */

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 12,
			  s->inst.data + 40 + 8);

	/* tell it which way dma is going? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_DMA_CONTROL,
			  DMACONTROL_DIRECTION + DMACONTROL_AUTOREPEAT +
			  DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

	/*
	 * set an armload of static initializers
	 */
	for (i = 0; i < ARRAY_SIZE(rv); i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + rv[i].addr, rv[i].val);
}

/* allocate the DMA buffer and record its (word-aligned) bus address */
static int snd_m3_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params)
{
	struct m3_dma *s = substream->runtime->private_data;
	int err;

	if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
		return err;
	/* set buffer address */
	s->buffer_addr = substream->runtime->dma_addr;
	if (s->buffer_addr & 0x3) {
		snd_printk(KERN_ERR "oh my, not aligned\n");
		s->buffer_addr = s->buffer_addr & ~0x3;
	}
	return 0;
}

static int snd_m3_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct m3_dma *s;

	if (substream->runtime->private_data == NULL)
		return 0;
	s = substream->runtime->private_data;
	snd_pcm_lib_free_pages(substream);
	s->buffer_addr = 0;
	return 0;
}

/* validate format/rate and program the DSP for the coming stream */
static int
snd_m3_pcm_prepare(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	struct m3_dma *s = runtime->private_data;

	if (snd_BUG_ON(!s))
		return -ENXIO;

	if (runtime->format != SNDRV_PCM_FORMAT_U8 &&
	    runtime->format != SNDRV_PCM_FORMAT_S16_LE)
		return -EINVAL;
	if (runtime->rate > 48000 ||
	    runtime->rate < 8000)
		return -EINVAL;

	spin_lock_irq(&chip->reg_lock);

	snd_m3_pcm_setup1(chip, s, subs);

	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_m3_playback_setup(chip, s, subs);
	else
		snd_m3_capture_setup(chip, s, subs);

	snd_m3_pcm_setup2(chip, s, runtime);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}

/*
 * get current pointer
 */
static unsigned int
snd_m3_get_pointer(struct snd_m3 *chip, struct m3_dma *s,
		   struct snd_pcm_substream *subs)
{
	u16 hi = 0, lo = 0;
	int retry = 10;
	u32 addr;

	/*
	 * try and get a valid answer
	 * (the 32bit position is read as two 16bit halves; retry until
	 * the high half is stable across the low-half read)
	 */
	while (retry--) {
		hi =  snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				       s->inst.data + CDATA_HOST_SRC_CURRENTH);

		lo = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				      s->inst.data + CDATA_HOST_SRC_CURRENTL);

		if (hi == snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
					   s->inst.data + CDATA_HOST_SRC_CURRENTH))
			break;
	}
	addr = lo | ((u32)hi<<16);
	return (unsigned int)(addr - s->buffer_addr);
}

static snd_pcm_uframes_t
snd_m3_pcm_pointer(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	unsigned int ptr;
	struct m3_dma *s = subs->runtime->private_data;

	if (snd_BUG_ON(!s))
		return 0;

	spin_lock(&chip->reg_lock);
	ptr = snd_m3_get_pointer(chip, s, subs);
	spin_unlock(&chip->reg_lock);
	return bytes_to_frames(subs->runtime, ptr);
}


/* update pointer */
/* spinlock held! */
static void snd_m3_update_ptr(struct snd_m3 *chip, struct m3_dma *s)
{
	struct snd_pcm_substream *subs = s->substream;
	unsigned int hwptr;
	int diff;

	if (! s->running)
		return;

	hwptr = snd_m3_get_pointer(chip, s, subs);

	/* try to avoid expensive modulo divisions */
	if (hwptr >= s->dma_size)
		hwptr %= s->dma_size;

	diff = s->dma_size + hwptr - s->hwptr;
	if (diff >= s->dma_size)
		diff %= s->dma_size;

	s->hwptr = hwptr;
	s->count += diff;

	if (s->count >= (signed)s->period_size) {

		if (s->count < 2 * (signed)s->period_size)
			s->count -= (signed)s->period_size;
		else
			s->count %= s->period_size;

		/* drop the lock for the callback; it may re-enter pcm ops */
		spin_unlock(&chip->reg_lock);
		snd_pcm_period_elapsed(subs);
		spin_lock(&chip->reg_lock);
	}
}

/* The m3's hardware volume works by incrementing / decrementing 2 counters
   (without wrap around) in response to volume button presses and then
   generating an interrupt. The pair of counters is stored in bits 1-3 and 5-7
   of a byte wide register. The meaning of bits 0 and 4 is unknown.
*/
static void snd_m3_update_hw_volume(unsigned long private_data)
{
	struct snd_m3 *chip = (struct snd_m3 *) private_data;
	int x, val;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	unsigned long flags;
#endif

	/* Figure out which volume control button was pushed,
	   based on differences from the default register
	   values. */
	x = inb(chip->iobase + SHADOW_MIX_REG_VOICE) & 0xee;

	/* Reset the volume counters to 4. Tests on the allegro integrated
	   into a Compaq N600C laptop, have revealed that:
	   1) Writing any value will result in the 2 counters being reset to
	      4 so writing 0x88 is not strictly necessary
	   2) Writing to any of the 4 involved registers will reset all 4
	      of them (and reading them always returns the same value for
	      all of them)
	   It could be that a maestro deviates from this, so leave the code
	   as is. */
	outb(0x88, chip->iobase + SHADOW_MIX_REG_VOICE);
	outb(0x88, chip->iobase + HW_VOL_COUNTER_VOICE);
	outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
	outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);

	/* Ignore spurious HV interrupts during suspend / resume, this avoids
	   mistaking them for a mute button press. */
	if (chip->in_suspend)
		return;

#ifndef CONFIG_SND_MAESTRO3_INPUT
	if (!chip->master_switch || !chip->master_volume)
		return;

	/* FIXME: we can't call snd_ac97_* functions since here is
	 * in tasklet. */
	spin_lock_irqsave(&chip->ac97_lock, flags);

	val = chip->ac97->regs[AC97_MASTER_VOL];
	switch (x) {
	case 0x88:
		/* The counters have not changed, yet we've received a HV
		   interrupt. According to tests run by various people this
		   happens when pressing the mute button. */
		val ^= 0x8000;
		chip->ac97->regs[AC97_MASTER_VOL] = val;
		outw(val, chip->iobase + CODEC_DATA);
		outb(AC97_MASTER_VOL, chip->iobase + CODEC_COMMAND);
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &chip->master_switch->id);
		break;
	case 0xaa:
		/* counters increased by 1 -> volume up */
		if ((val & 0x7f) > 0)
			val--;
		if ((val & 0x7f00) > 0)
			val -= 0x0100;
		chip->ac97->regs[AC97_MASTER_VOL] = val;
		outw(val, chip->iobase + CODEC_DATA);
		outb(AC97_MASTER_VOL, chip->iobase + CODEC_COMMAND);
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &chip->master_volume->id);
		break;
	case 0x66:
		/* counters decreased by 1 -> volume down */
		if ((val & 0x7f) < 0x1f)
			val++;
		if ((val & 0x7f00) < 0x1f00)
			val += 0x0100;
		chip->ac97->regs[AC97_MASTER_VOL] = val;
		outw(val, chip->iobase + CODEC_DATA);
		outb(AC97_MASTER_VOL, chip->iobase + CODEC_COMMAND);
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &chip->master_volume->id);
		break;
	}
	spin_unlock_irqrestore(&chip->ac97_lock, flags);
#else
	if (!chip->input_dev)
		return;

	val = 0;
	switch (x) {
	case 0x88:
		/* The counters have not changed, yet we've received a HV
		   interrupt. According to tests run by various people this
		   happens when pressing the mute button. */
		val = KEY_MUTE;
		break;
	case 0xaa:
		/* counters increased by 1 -> volume up */
		val = KEY_VOLUMEUP;
		break;
	case 0x66:
		/* counters decreased by 1 -> volume down */
		val = KEY_VOLUMEDOWN;
		break;
	}

	if (val) {
		/* report a press + release pair for the decoded key */
		input_report_key(chip->input_dev, val, 1);
		input_sync(chip->input_dev);
		input_report_key(chip->input_dev, val, 0);
		input_sync(chip->input_dev);
	}
#endif
}

static irqreturn_t snd_m3_interrupt(int irq, void *dev_id)
{
	struct snd_m3 *chip = dev_id;
	u8 status;
	int i;

	status = inb(chip->iobase + HOST_INT_STATUS);

	/* 0xff reads as "no device" / not our interrupt */
	if (status == 0xff)
		return IRQ_NONE;

	if (status & HV_INT_PENDING)
#ifdef CONFIG_SND_MAESTRO3_INPUT
		snd_m3_update_hw_volume((unsigned long)chip);
#else
		tasklet_schedule(&chip->hwvol_tq);
#endif

	/*
	 * ack an assp int if its running
	 * and has an int pending
	 */
	if (status & ASSP_INT_PENDING) {
		u8 ctl = inb(chip->iobase + ASSP_CONTROL_B);
		if (!(ctl & STOP_ASSP_CLOCK)) {
			ctl = inb(chip->iobase + ASSP_HOST_INT_STATUS);
			if (ctl & DSP2HOST_REQ_TIMER) {
				outb(DSP2HOST_REQ_TIMER,
				     chip->iobase + ASSP_HOST_INT_STATUS);
				/* update adc/dac info if it was a timer int */
				spin_lock(&chip->reg_lock);
				for (i = 0; i < chip->num_substreams; i++) {
					struct m3_dma *s = &chip->substreams[i];
					if (s->running)
						snd_m3_update_ptr(chip, s);
				}
				spin_unlock(&chip->reg_lock);
			}
		}
	}

#if 0 /* TODO: not supported yet */
	if ((status & MPU401_INT_PENDING) && chip->rmidi)
		snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data, regs);
#endif

	/* ack ints */
	outb(status, chip->iobase + HOST_INT_STATUS);

	return IRQ_HANDLED;
}


/*
 */

static struct snd_pcm_hardware snd_m3_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 /*SNDRV_PCM_INFO_PAUSE |*/
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(512*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(512*1024),
	.periods_min =		1,
	.periods_max =		1024,
};

static struct snd_pcm_hardware snd_m3_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 /*SNDRV_PCM_INFO_PAUSE |*/
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(512*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(512*1024),
	.periods_min =		1,
	.periods_max =		1024,
};


/*
 */

/* claim a free m3_dma slot and attach it to the substream */
static int
snd_m3_substream_open(struct snd_m3 *chip, struct snd_pcm_substream *subs)
{
	int i;
	struct m3_dma *s;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < chip->num_substreams; i++) {
		s = &chip->substreams[i];
		if (! s->opened)
			goto __found;
	}
	spin_unlock_irq(&chip->reg_lock);
	return -ENOMEM;
__found:
	s->opened = 1;
	s->running = 0;
	spin_unlock_irq(&chip->reg_lock);

	subs->runtime->private_data = s;
	s->substream = subs;

	/* set list owners */
	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s->index_list[0] = &chip->mixer_list;
	} else
		s->index_list[0] = &chip->adc1_list;
	s->index_list[1] = &chip->msrc_list;
	s->index_list[2] = &chip->dma_list;

	return 0;
}

/* stop the stream if needed, drop it from the DSP lists, free the slot */
static void
snd_m3_substream_close(struct snd_m3 *chip, struct snd_pcm_substream *subs)
{
	struct m3_dma *s = subs->runtime->private_data;

	if (s == NULL)
		return; /* not opened properly */

	spin_lock_irq(&chip->reg_lock);
	if (s->substream && s->running)
		snd_m3_pcm_stop(chip, s, s->substream); /* does this happen? */
	if (s->in_lists) {
		snd_m3_remove_list(chip, s->index_list[0], s->index[0]);
		snd_m3_remove_list(chip, s->index_list[1], s->index[1]);
		snd_m3_remove_list(chip, s->index_list[2], s->index[2]);
		s->in_lists = 0;
	}
	s->running = 0;
	s->opened = 0;
	spin_unlock_irq(&chip->reg_lock);
}

static int
snd_m3_playback_open(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	int err;

	if ((err = snd_m3_substream_open(chip, subs)) < 0)
		return err;

	runtime->hw = snd_m3_playback;

	return 0;
}

static int
snd_m3_playback_close(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);

	snd_m3_substream_close(chip, subs);
	return 0;
}

static int
snd_m3_capture_open(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	int err;

	if ((err = snd_m3_substream_open(chip, subs)) < 0)
		return err;

	runtime->hw = snd_m3_capture;

	return 0;
}

static int
snd_m3_capture_close(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);

	snd_m3_substream_close(chip, subs);
	return 0;
}

/*
 * create pcm instance
 */

static struct snd_pcm_ops snd_m3_playback_ops = {
	.open =		snd_m3_playback_open,
	.close =	snd_m3_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_m3_pcm_hw_params,
	.hw_free =	snd_m3_pcm_hw_free,
	.prepare =	snd_m3_pcm_prepare,
	.trigger =	snd_m3_pcm_trigger,
	.pointer =	snd_m3_pcm_pointer,
};

static struct snd_pcm_ops snd_m3_capture_ops = {
	.open =		snd_m3_capture_open,
	.close =	snd_m3_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_m3_pcm_hw_params,
	.hw_free =	snd_m3_pcm_hw_free,
	.prepare =	snd_m3_pcm_prepare,
	.trigger =	snd_m3_pcm_trigger,
	.pointer =	snd_m3_pcm_pointer,
};

static int __devinit
snd_m3_pcm(struct snd_m3 * chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(chip->card, chip->card->driver, device,
			  MAX_PLAYBACKS, MAX_CAPTURES, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_m3_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
			&snd_m3_capture_ops);

	pcm->private_data = chip;
	pcm->info_flags = 0;
	strcpy(pcm->name, chip->card->driver);
	chip->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 64*1024);

	return 0;
}


/*
 * ac97 interface
 */

/*
 * Wait for the ac97 serial bus to be free.
 * return nonzero if the bus is still busy.
 */
static int snd_m3_ac97_wait(struct snd_m3 *chip)
{
	int i = 10000;

	do {
		if (! (snd_m3_inb(chip, 0x30) & 1))
			return 0;
		cpu_relax();
	} while (i-- > 0);

	snd_printk(KERN_ERR "ac97 serial bus busy\n");
	return 1;
}

static unsigned short
snd_m3_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	struct snd_m3 *chip = ac97->private_data;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	unsigned long flags;
#endif
	unsigned short data = 0xffff;

	if (snd_m3_ac97_wait(chip))
		goto fail;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	spin_lock_irqsave(&chip->ac97_lock, flags);
#endif
	/* bit 7 set = read command */
	snd_m3_outb(chip, 0x80 | (reg & 0x7f), CODEC_COMMAND);
	if (snd_m3_ac97_wait(chip))
		goto fail_unlock;
	data = snd_m3_inw(chip, CODEC_DATA);
fail_unlock:
#ifndef CONFIG_SND_MAESTRO3_INPUT
	spin_unlock_irqrestore(&chip->ac97_lock, flags);
#endif
fail:
	return data;
}

static void
snd_m3_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val)
{
	struct snd_m3 *chip = ac97->private_data;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	unsigned long flags;
#endif

	if (snd_m3_ac97_wait(chip))
		return;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	spin_lock_irqsave(&chip->ac97_lock, flags);
#endif
	/* data must be latched before the (write) command is issued */
	snd_m3_outw(chip, val, CODEC_DATA);
	snd_m3_outb(chip, reg & 0x7f, CODEC_COMMAND);
#ifndef CONFIG_SND_MAESTRO3_INPUT
	spin_unlock_irqrestore(&chip->ac97_lock, flags);
#endif
}

/* route command/status to the primary (0) or secondary (1) codec */
static void snd_m3_remote_codec_config(int io, int isremote)
{
	isremote = isremote ? 1 : 0;

	outw((inw(io + RING_BUS_CTRL_B) & ~SECOND_CODEC_ID_MASK) | isremote,
	     io + RING_BUS_CTRL_B);
	outw((inw(io + SDO_OUT_DEST_CTRL) & ~COMMAND_ADDR_OUT) | isremote,
	     io + SDO_OUT_DEST_CTRL);
	outw((inw(io + SDO_IN_DEST_CTRL) & ~STATUS_ADDR_IN) | isremote,
	     io + SDO_IN_DEST_CTRL);
}

/*
 * hack, returns non zero on err
 */
static int snd_m3_try_read_vendor(struct snd_m3 *chip)
{
	u16 ret;

	if (snd_m3_ac97_wait(chip))
		return 1;

	snd_m3_outb(chip, 0x80 | (AC97_VENDOR_ID1 & 0x7f), 0x30);

	if (snd_m3_ac97_wait(chip))
		return 1;

	ret = snd_m3_inw(chip, 0x32);

	/* all-zero / all-one means the codec did not answer */
	return (ret == 0) || (ret == 0xffff);
}

static void snd_m3_ac97_reset(struct snd_m3 *chip)
{
	u16 dir;
	int delay1 = 0, delay2 = 0, i;
	int io = chip->iobase;

	if (chip->allegro_flag) {
		/*
		 * the onboard codec on the allegro seems
		 * to want to wait a very long time before
		 * coming back to life
		 */
		delay1 = 50;
		delay2 = 800;
	} else {
		/* maestro3 */
		delay1 = 20;
		delay2 = 500;
	}

	for (i = 0; i < 5; i++) {
		dir = inw(io + GPIO_DIRECTION);
		if (!chip->irda_workaround)
			dir |= 0x10; /* assuming pci bus master? */

		snd_m3_remote_codec_config(io, 0);

		/* pull AC'97 reset low via the GPO, wait, then release it */
		outw(IO_SRAM_ENABLE, io + RING_BUS_CTRL_A);
		udelay(20);

		outw(dir & ~GPO_PRIMARY_AC97 , io + GPIO_DIRECTION);
		outw(~GPO_PRIMARY_AC97 , io + GPIO_MASK);
		outw(0, io + GPIO_DATA);
		outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
		schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
		outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
		udelay(5);
		/* ok, bring back the ac-link */
		outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
		outw(~0, io + GPIO_MASK);
		schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));

		if (! snd_m3_try_read_vendor(chip))
			break;

		delay1 += 10;
		delay2 += 100;

		snd_printd("maestro3: retrying codec reset with delays of %d and %d ms\n",
			   delay1, delay2);
	}

#if 0
	/* more gung-ho reset that doesn't
	 * seem to work anywhere :)
	 */
	tmp = inw(io + RING_BUS_CTRL_A);
	outw(RAC_SDFS_ENABLE|LAC_SDFS_ENABLE, io + RING_BUS_CTRL_A);
	msleep(20);
	outw(tmp, io + RING_BUS_CTRL_A);
	msleep(50);
#endif
}

static int __devinit snd_m3_mixer(struct snd_m3 *chip)
{
	struct snd_ac97_bus *pbus;
	struct snd_ac97_template ac97;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	struct snd_ctl_elem_id elem_id;
#endif
	int err;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_m3_ac97_write,
		.read = snd_m3_ac97_read,
	};

	if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
		return err;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
		return err;

	/* seems ac97 PCM needs initialization.. hack hack.. */
	snd_ac97_write(chip->ac97, AC97_PCM, 0x8000 | (15 << 8) | 15);
	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
	snd_ac97_write(chip->ac97, AC97_PCM, 0);

#ifndef CONFIG_SND_MAESTRO3_INPUT
	/* look up the master controls so the hw volume tasklet can poke them */
	memset(&elem_id, 0, sizeof(elem_id));
	elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strcpy(elem_id.name, "Master Playback Switch");
	chip->master_switch = snd_ctl_find_id(chip->card, &elem_id);
	memset(&elem_id, 0, sizeof(elem_id));
	elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strcpy(elem_id.name, "Master Playback Volume");
	chip->master_volume = snd_ctl_find_id(chip->card, &elem_id);
#endif

	return 0;
}


/*
 * initialize ASSP
 */

#define MINISRC_LPF_LEN 10
static const u16 minisrc_lpf[MINISRC_LPF_LEN] = {
	0X0743, 0X1104, 0X0A4C, 0XF88D, 0X242C,
	0X1023, 0X1AA9, 0X0B60, 0XEFDD, 0X186F
};

static void snd_m3_assp_init(struct snd_m3 *chip)
{
	unsigned int i;
	const u16 *data;

	/* zero kernel data */
	for (i = 0; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_BASE_ADDR + i, 0);

	/* zero mixer data? */
	for (i = 0; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_BASE_ADDR2 + i, 0);

	/* init dma pointer */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_CURRENT_DMA,
			  KDATA_DMA_XFER0);

	/* write kernel into code memory.. (firmware is little-endian u16) */
	data = (const u16 *)chip->assp_kernel_image->data;
	for (i = 0 ; i * 2 < chip->assp_kernel_image->size; i++) {
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
				  REV_B_CODE_MEMORY_BEGIN + i,
				  le16_to_cpu(data[i]));
	}

	/*
	 * We only have this one client and we know that 0x400
	 * is free in our kernel's mem map, so lets just
	 * drop it there.  It seems that the minisrc doesn't
	 * need vectors, so we won't bother with them..
	 */
	data = (const u16 *)chip->assp_minisrc_image->data;
	for (i = 0; i * 2 < chip->assp_minisrc_image->size; i++) {
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
				  0x400 + i, le16_to_cpu(data[i]));
	}

	/*
	 * write the coefficients for the low pass filter?
	 */
	for (i = 0; i < MINISRC_LPF_LEN ; i++) {
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
				  0x400 + MINISRC_COEF_LOC + i,
				  minisrc_lpf[i]);
	}

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
			  0x400 + MINISRC_COEF_LOC + MINISRC_LPF_LEN,
			  0x8000);

	/*
	 * the minisrc is the only thing on
	 * our task list..
	 */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_TASK0,
			  0x400);

	/*
	 * init the mixer number..
	 */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_MIXER_TASK_NUMBER,0);

	/*
	 * EXTREME KERNEL MASTER VOLUME
	 */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_DAC_LEFT_VOLUME, ARB_VOLUME);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  KDATA_DAC_RIGHT_VOLUME, ARB_VOLUME);

	chip->mixer_list.curlen = 0;
	chip->mixer_list.mem_addr = KDATA_MIXER_XFER0;
	chip->mixer_list.max = MAX_VIRTUAL_MIXER_CHANNELS;
	chip->adc1_list.curlen = 0;
	chip->adc1_list.mem_addr = KDATA_ADC1_XFER0;
	chip->adc1_list.max = MAX_VIRTUAL_ADC1_CHANNELS;
	chip->dma_list.curlen = 0;
	chip->dma_list.mem_addr = KDATA_DMA_XFER0;
	chip->dma_list.max = MAX_VIRTUAL_DMA_CHANNELS;
	chip->msrc_list.curlen = 0;
	chip->msrc_list.mem_addr = KDATA_INSTANCE0_MINISRC;
	chip->msrc_list.max = MAX_INSTANCE_MINISRC;
}


/* carve out and zero a per-stream data area in DSP memory */
static int __devinit snd_m3_assp_client_init(struct snd_m3 *chip, struct m3_dma *s, int index)
{
	int data_bytes = 2 * ( MINISRC_TMP_BUFFER_SIZE / 2 +
			       MINISRC_IN_BUFFER_SIZE / 2 +
			       1 + MINISRC_OUT_BUFFER_SIZE / 2 + 1 );
	int address, i;

	/*
	 * the revb memory map has 0x1100 through 0x1c00
	 * free.
	 */

	/*
	 * align instance address to 256 bytes so that its
	 * shifted list address is aligned.
	 * list address = (mem address >> 1) >> 7;
	 */
	data_bytes = ALIGN(data_bytes, 256);
	address = 0x1100 + ((data_bytes/2) * index);

	if ((address + (data_bytes/2)) >= 0x1c00) {
		snd_printk(KERN_ERR
			   "no memory for %d bytes at ind %d (addr 0x%x)\n",
			   data_bytes, index, address);
		return -ENOMEM;
	}

	s->number = index;
	s->inst.code = 0x400;
	s->inst.data = address;

	for (i = data_bytes / 2; i > 0; address++, i--) {
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  address, 0);
	}

	return 0;
}


/*
 * this works for the reference board, have to find
 * out about others
 *
 * this needs more magic for 4 speaker, but..
 */
static void
snd_m3_amp_enable(struct snd_m3 *chip, int enable)
{
	int io = chip->iobase;
	u16 gpo, polarity;

	if (! chip->external_amp)
		return;

	polarity = enable ?
0 : 1; polarity = polarity << chip->amp_gpio; gpo = 1 << chip->amp_gpio; outw(~gpo, io + GPIO_MASK); outw(inw(io + GPIO_DIRECTION) | gpo, io + GPIO_DIRECTION); outw((GPO_SECONDARY_AC97 | GPO_PRIMARY_AC97 | polarity), io + GPIO_DATA); outw(0xffff, io + GPIO_MASK); } static void snd_m3_hv_init(struct snd_m3 *chip) { unsigned long io = chip->iobase; u16 val = GPI_VOL_DOWN | GPI_VOL_UP; if (!chip->is_omnibook) return; /* * Volume buttons on some HP OmniBook laptops * require some GPIO magic to work correctly. */ outw(0xffff, io + GPIO_MASK); outw(0x0000, io + GPIO_DATA); outw(~val, io + GPIO_MASK); outw(inw(io + GPIO_DIRECTION) & ~val, io + GPIO_DIRECTION); outw(val, io + GPIO_MASK); outw(0xffff, io + GPIO_MASK); } static int snd_m3_chip_init(struct snd_m3 *chip) { struct pci_dev *pcidev = chip->pci; unsigned long io = chip->iobase; u32 n; u16 w; u8 t; /* makes as much sense as 'n', no? */ pci_read_config_word(pcidev, PCI_LEGACY_AUDIO_CTRL, &w); w &= ~(SOUND_BLASTER_ENABLE|FM_SYNTHESIS_ENABLE| MPU401_IO_ENABLE|MPU401_IRQ_ENABLE|ALIAS_10BIT_IO| DISABLE_LEGACY); pci_write_config_word(pcidev, PCI_LEGACY_AUDIO_CTRL, w); pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n &= ~(HV_CTRL_ENABLE | REDUCED_DEBOUNCE | HV_BUTTON_FROM_GD); n |= chip->hv_config; /* For some reason we must always use reduced debounce. 
*/ n |= REDUCED_DEBOUNCE; n |= PM_CTRL_ENABLE | CLK_DIV_BY_49 | USE_PCI_TIMING; pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); outb(RESET_ASSP, chip->iobase + ASSP_CONTROL_B); pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n &= ~INT_CLK_SELECT; if (!chip->allegro_flag) { n &= ~INT_CLK_MULT_ENABLE; n |= INT_CLK_SRC_NOT_PCI; } n &= ~( CLK_MULT_MODE_SELECT | CLK_MULT_MODE_SELECT_2 ); pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); if (chip->allegro_flag) { pci_read_config_dword(pcidev, PCI_USER_CONFIG, &n); n |= IN_CLK_12MHZ_SELECT; pci_write_config_dword(pcidev, PCI_USER_CONFIG, n); } t = inb(chip->iobase + ASSP_CONTROL_A); t &= ~( DSP_CLK_36MHZ_SELECT | ASSP_CLK_49MHZ_SELECT); t |= ASSP_CLK_49MHZ_SELECT; t |= ASSP_0_WS_ENABLE; outb(t, chip->iobase + ASSP_CONTROL_A); snd_m3_assp_init(chip); /* download DSP code before starting ASSP below */ outb(RUN_ASSP, chip->iobase + ASSP_CONTROL_B); outb(0x00, io + HARDWARE_VOL_CTRL); outb(0x88, io + SHADOW_MIX_REG_VOICE); outb(0x88, io + HW_VOL_COUNTER_VOICE); outb(0x88, io + SHADOW_MIX_REG_MASTER); outb(0x88, io + HW_VOL_COUNTER_MASTER); return 0; } static void snd_m3_enable_ints(struct snd_m3 *chip) { unsigned long io = chip->iobase; unsigned short val; /* TODO: MPU401 not supported yet */ val = ASSP_INT_ENABLE /*| MPU401_INT_ENABLE*/; if (chip->hv_config & HV_CTRL_ENABLE) val |= HV_INT_ENABLE; outb(val, chip->iobase + HOST_INT_STATUS); outw(val, io + HOST_INT_CTRL); outb(inb(io + ASSP_CONTROL_C) | ASSP_HOST_INT_ENABLE, io + ASSP_CONTROL_C); } /* */ static int snd_m3_free(struct snd_m3 *chip) { struct m3_dma *s; int i; #ifdef CONFIG_SND_MAESTRO3_INPUT if (chip->input_dev) input_unregister_device(chip->input_dev); #endif if (chip->substreams) { spin_lock_irq(&chip->reg_lock); for (i = 0; i < chip->num_substreams; i++) { s = &chip->substreams[i]; /* check surviving pcms; this should not happen though.. 
*/ if (s->substream && s->running) snd_m3_pcm_stop(chip, s, s->substream); } spin_unlock_irq(&chip->reg_lock); kfree(chip->substreams); } if (chip->iobase) { outw(0, chip->iobase + HOST_INT_CTRL); /* disable ints */ } #ifdef CONFIG_PM vfree(chip->suspend_mem); #endif if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->iobase) pci_release_regions(chip->pci); release_firmware(chip->assp_kernel_image); release_firmware(chip->assp_minisrc_image); pci_disable_device(chip->pci); kfree(chip); return 0; } /* * APM support */ #ifdef CONFIG_PM static int m3_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_m3 *chip = card->private_data; int i, dsp_index; if (chip->suspend_mem == NULL) return 0; chip->in_suspend = 1; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_ac97_suspend(chip->ac97); msleep(10); /* give the assp a chance to idle.. */ snd_m3_assp_halt(chip); /* save dsp image */ dsp_index = 0; for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++) chip->suspend_mem[dsp_index++] = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_CODE, i); for (i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++) chip->suspend_mem[dsp_index++] = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA, i); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int m3_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_m3 *chip = card->private_data; int i, dsp_index; if (chip->suspend_mem == NULL) return 0; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "maestor3: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); /* first lets just bring everything back. 
.*/ snd_m3_outw(chip, 0, 0x54); snd_m3_outw(chip, 0, 0x56); snd_m3_chip_init(chip); snd_m3_assp_halt(chip); snd_m3_ac97_reset(chip); /* restore dsp image */ dsp_index = 0; for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++) snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, i, chip->suspend_mem[dsp_index++]); for (i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++) snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, i, chip->suspend_mem[dsp_index++]); /* tell the dma engine to restart itself */ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_DMA_ACTIVE, 0); /* restore ac97 registers */ snd_ac97_resume(chip->ac97); snd_m3_assp_continue(chip); snd_m3_enable_ints(chip); snd_m3_amp_enable(chip, 1); snd_m3_hv_init(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); chip->in_suspend = 0; return 0; } #endif /* CONFIG_PM */ #ifdef CONFIG_SND_MAESTRO3_INPUT static int __devinit snd_m3_input_register(struct snd_m3 *chip) { struct input_dev *input_dev; int err; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; snprintf(chip->phys, sizeof(chip->phys), "pci-%s/input0", pci_name(chip->pci)); input_dev->name = chip->card->driver; input_dev->phys = chip->phys; input_dev->id.bustype = BUS_PCI; input_dev->id.vendor = chip->pci->vendor; input_dev->id.product = chip->pci->device; input_dev->dev.parent = &chip->pci->dev; __set_bit(EV_KEY, input_dev->evbit); __set_bit(KEY_MUTE, input_dev->keybit); __set_bit(KEY_VOLUMEDOWN, input_dev->keybit); __set_bit(KEY_VOLUMEUP, input_dev->keybit); err = input_register_device(input_dev); if (err) { input_free_device(input_dev); return err; } chip->input_dev = input_dev; return 0; } #endif /* CONFIG_INPUT */ /* */ static int snd_m3_dev_free(struct snd_device *device) { struct snd_m3 *chip = device->device_data; return snd_m3_free(chip); } static int __devinit snd_m3_create(struct snd_card *card, struct pci_dev *pci, int enable_amp, int amp_gpio, struct snd_m3 **chip_ret) { struct snd_m3 *chip; int i, err; 
const struct snd_pci_quirk *quirk; static struct snd_device_ops ops = { .dev_free = snd_m3_dev_free, }; *chip_ret = NULL; if (pci_enable_device(pci)) return -EIO; /* check, if we can restrict PCI DMA transfers to 28 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) { snd_printk(KERN_ERR "architecture does not support 28bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); #ifndef CONFIG_SND_MAESTRO3_INPUT spin_lock_init(&chip->ac97_lock); #endif switch (pci->device) { case PCI_DEVICE_ID_ESS_ALLEGRO: case PCI_DEVICE_ID_ESS_ALLEGRO_1: case PCI_DEVICE_ID_ESS_CANYON3D_2LE: case PCI_DEVICE_ID_ESS_CANYON3D_2: chip->allegro_flag = 1; break; } chip->card = card; chip->pci = pci; chip->irq = -1; chip->external_amp = enable_amp; if (amp_gpio >= 0 && amp_gpio <= 0x0f) chip->amp_gpio = amp_gpio; else { quirk = snd_pci_quirk_lookup(pci, m3_amp_quirk_list); if (quirk) { snd_printdd(KERN_INFO "maestro3: set amp-gpio " "for '%s'\n", quirk->name); chip->amp_gpio = quirk->value; } else if (chip->allegro_flag) chip->amp_gpio = GPO_EXT_AMP_ALLEGRO; else /* presumably this is for all 'maestro3's.. 
*/ chip->amp_gpio = GPO_EXT_AMP_M3; } quirk = snd_pci_quirk_lookup(pci, m3_irda_quirk_list); if (quirk) { snd_printdd(KERN_INFO "maestro3: enabled irda workaround " "for '%s'\n", quirk->name); chip->irda_workaround = 1; } quirk = snd_pci_quirk_lookup(pci, m3_hv_quirk_list); if (quirk) chip->hv_config = quirk->value; if (snd_pci_quirk_lookup(pci, m3_omnibook_quirk_list)) chip->is_omnibook = 1; chip->num_substreams = NR_DSPS; chip->substreams = kcalloc(chip->num_substreams, sizeof(struct m3_dma), GFP_KERNEL); if (chip->substreams == NULL) { kfree(chip); pci_disable_device(pci); return -ENOMEM; } err = request_firmware(&chip->assp_kernel_image, "ess/maestro3_assp_kernel.fw", &pci->dev); if (err < 0) { snd_m3_free(chip); return err; } err = request_firmware(&chip->assp_minisrc_image, "ess/maestro3_assp_minisrc.fw", &pci->dev); if (err < 0) { snd_m3_free(chip); return err; } if ((err = pci_request_regions(pci, card->driver)) < 0) { snd_m3_free(chip); return err; } chip->iobase = pci_resource_start(pci, 0); /* just to be sure */ pci_set_master(pci); snd_m3_chip_init(chip); snd_m3_assp_halt(chip); snd_m3_ac97_reset(chip); snd_m3_amp_enable(chip, 1); snd_m3_hv_init(chip); #ifndef CONFIG_SND_MAESTRO3_INPUT tasklet_init(&chip->hwvol_tq, snd_m3_update_hw_volume, (unsigned long)chip); #endif if (request_irq(pci->irq, snd_m3_interrupt, IRQF_SHARED, card->driver, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_m3_free(chip); return -ENOMEM; } chip->irq = pci->irq; #ifdef CONFIG_PM chip->suspend_mem = vmalloc(sizeof(u16) * (REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH)); if (chip->suspend_mem == NULL) snd_printk(KERN_WARNING "can't allocate apm buffer\n"); #endif if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_m3_free(chip); return err; } if ((err = snd_m3_mixer(chip)) < 0) return err; for (i = 0; i < chip->num_substreams; i++) { struct m3_dma *s = &chip->substreams[i]; if ((err = snd_m3_assp_client_init(chip, s, i)) < 0) 
return err; } if ((err = snd_m3_pcm(chip, 0)) < 0) return err; #ifdef CONFIG_SND_MAESTRO3_INPUT if (chip->hv_config & HV_CTRL_ENABLE) { err = snd_m3_input_register(chip); if (err) snd_printk(KERN_WARNING "Input device registration " "failed with error %i", err); } #endif snd_m3_enable_ints(chip); snd_m3_assp_continue(chip); snd_card_set_dev(card, &pci->dev); *chip_ret = chip; return 0; } /* */ static int __devinit snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_m3 *chip; int err; /* don't pick up modems */ if (((pci->class >> 8) & 0xffff) != PCI_CLASS_MULTIMEDIA_AUDIO) return -ENODEV; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci->device) { case PCI_DEVICE_ID_ESS_ALLEGRO: case PCI_DEVICE_ID_ESS_ALLEGRO_1: strcpy(card->driver, "Allegro"); break; case PCI_DEVICE_ID_ESS_CANYON3D_2LE: case PCI_DEVICE_ID_ESS_CANYON3D_2: strcpy(card->driver, "Canyon3D-2"); break; default: strcpy(card->driver, "Maestro3"); break; } if ((err = snd_m3_create(card, pci, external_amp[dev], amp_gpio[dev], &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; sprintf(card->shortname, "ESS %s PCI", card->driver); sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, chip->iobase, chip->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } #if 0 /* TODO: not supported yet */ /* TODO enable MIDI IRQ and I/O */ err = snd_mpu401_uart_new(chip->card, 0, MPU401_HW_MPU401, chip->iobase + MPU401_DATA_PORT, MPU401_INFO_INTEGRATED, chip->irq, 0, &chip->rmidi); if (err < 0) printk(KERN_WARNING "maestro3: no MIDI support.\n"); #endif pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_m3_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver 
driver = { .name = "Maestro3", .id_table = snd_m3_ids, .probe = snd_m3_probe, .remove = __devexit_p(snd_m3_remove), #ifdef CONFIG_PM .suspend = m3_suspend, .resume = m3_resume, #endif }; static int __init alsa_card_m3_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_m3_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_m3_init) module_exit(alsa_card_m3_exit)
gpl-2.0
farchanrifai/kernel_cancro
drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
4821
105005
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 ******************************************************************************/
#include "rtl_core.h"
#include "rtl_dm.h"
#include "r8192E_hw.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_cmdpkt.h"

/*---------------------------Define Local Constant---------------------------*/
/*
 * Per-IOT-peer EDCA parameter presets, indexed by the HT_IOT_PEER_* id of
 * the associated AP's vendor.  The 0x..4322 / 0x..a44f words appear to be
 * raw AC_BE EDCA register values (AIFS/CW/TXOP packed) -- NOTE(review):
 * exact field layout not visible here, confirm against r8192E_hw.h.
 */
static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
	0x5e4322, 0x5e4322, 0x5ea44f, 0x5e4322,
	0x604322, 0xa44f, 0x5e4322, 0x5e4332
};

/* Downlink EDCA presets used when operating in legacy 11b/g mode. */
static u32 edca_setting_DL_GMode[HT_IOT_PEER_MAX] = {
	0x5e4322, 0x5e4322, 0x5e4322, 0x5e4322,
	0x604322, 0xa44f, 0x5e4322, 0x5e4322
};

/* Uplink-dominant EDCA presets, same indexing as edca_setting_DL. */
static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
	0x5e4322, 0xa44f, 0x5ea44f, 0x5e4322,
	0x604322, 0x5e4322, 0x5e4322, 0x5e4332
};

/* Default uplink / downlink EDCA words used by the EDCA-turbo mechanism. */
#define RTK_UL_EDCA 0xa44f
#define RTK_DL_EDCA 0x5e4322
/*---------------------------Define Local Constant---------------------------*/

/*------------------------Define global variable-----------------------------*/
/* Dynamic-initial-gain (DIG) state shared across the DM routines below. */
struct dig_t dm_digtable;
/* Shadow copy of BB/RF register pages; zero-initialized at load. */
u8 dm_shadow[16][256] = { {0} };
/* RX antenna/path selection bookkeeping for the RX-path-selection DM. */
struct drx_path_sel DM_RxPathSelTable;
/*------------------------Define global variable-----------------------------*/

/*------------------------Define local variable------------------------------*/
/*------------------------Define local variable------------------------------*/

/*---------------------Define local function prototype-----------------------*/
static void dm_check_rate_adaptive(struct net_device *dev);
static void dm_init_bandwidth_autoswitch(struct net_device *dev);
static void dm_bandwidth_autoswitch(struct net_device *dev);
static void dm_check_txpower_tracking(struct net_device *dev);
static void dm_bb_initialgain_restore(struct net_device *dev);
static void dm_bb_initialgain_backup(struct net_device *dev);
static void dm_dig_init(struct net_device *dev);
static void dm_ctrl_initgain_byrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
static void dm_initial_gain(struct net_device *dev);
static void dm_pd_th(struct net_device *dev);
static void dm_cs_ratio(struct net_device *dev);
static void dm_init_ctstoself(struct net_device *dev);
static void dm_Init_WA_Broadcom_IOT(struct net_device *dev);
static void dm_check_edca_turbo(struct net_device *dev);
static void dm_check_pbc_gpio(struct net_device *dev);
static void dm_check_rx_path_selection(struct net_device *dev);
static void dm_init_rxpath_selection(struct net_device *dev);
static void dm_rxpath_sel_byrssi(struct net_device *dev);
static void dm_init_fsync(struct net_device *dev);
static void dm_deInit_fsync(struct net_device *dev);
static void dm_check_txrateandretrycount(struct net_device *dev);
static void dm_check_ac_dc_power(struct net_device *dev);
/*---------------------Define local function prototype-----------------------*/

static void dm_init_dynamic_txpower(struct net_device *dev);
static void dm_dynamic_txpower(struct net_device *dev);
static void dm_send_rssi_tofw(struct net_device *dev);
static void dm_ctstoself(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/
/*
 * init_hal_dm - initialize every dynamic-mechanism (DM) sub-module.
 * Called once at interface bring-up; seeds the smoothed-RSSI accumulator
 * to -1 ("no sample yet") and arms the RF-control GPIO polling work.
 */
void init_hal_dm(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->DM_Type = DM_Type_ByDriver;
	/* -1 marks "no smoothed PWDB/RSSI sample collected yet". */
	priv->undecorated_smoothed_pwdb = -1;

	dm_init_dynamic_txpower(dev);
	init_rate_adaptive(dev);
	dm_dig_init(dev);
	dm_init_edca_turbo(dev);
	dm_init_bandwidth_autoswitch(dev);
	dm_init_fsync(dev);
	dm_init_rxpath_selection(dev);
	dm_init_ctstoself(dev);
	/* Broadcom-AP interoperability workaround only exists on 8192SE. */
	if (IS_HARDWARE_TYPE_8192SE(dev))
		dm_Init_WA_Broadcom_IOT(dev);

	INIT_DELAYED_WORK_RSL(&priv->gpio_change_rf_wq,
			      (void *)dm_CheckRfCtrlGPIO, dev);
}

/* Tear down DM state; currently only the fsync timer needs cleanup. */
void deinit_hal_dm(struct net_device *dev)
{
	dm_deInit_fsync(dev);
}

/*
 * hal_dm_watchdog - periodic DM tick.  Runs each dynamic mechanism in a
 * fixed order; skipped entirely while the adapter is still initializing.
 */
void hal_dm_watchdog(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->being_init_adapter)
		return;

	dm_check_ac_dc_power(dev);
	dm_check_pbc_gpio(dev);
	dm_check_txrateandretrycount(dev);
	dm_check_edca_turbo(dev);
	dm_check_rate_adaptive(dev);
	dm_dynamic_txpower(dev);
	dm_check_txpower_tracking(dev);
	dm_ctrl_initgain_byrssi(dev);
	dm_bandwidth_autoswitch(dev);
	dm_check_rx_path_selection(dev);
	dm_check_fsync(dev);
	dm_send_rssi_tofw(dev);
	dm_ctstoself(dev);
}

/*
 * dm_check_ac_dc_power - invoke a userspace helper script that adapts
 * behavior to AC vs battery power.  Skipped during silent reset and when
 * not associated.  NOTE(review): runs a hard-coded script path on every
 * watchdog tick via call_usermodehelper(UMH_WAIT_PROC) -- verify the
 * script exists/no-ops cheaply on target systems.
 */
static void dm_check_ac_dc_power(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	static char *ac_dc_check_script_path = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
	char *argv[] = {ac_dc_check_script_path, DRV_NAME, NULL};
	static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};

	if (priv->ResetProgress == RESET_TYPE_SILENT) {
		RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
			 "GPIOChangeRFWorkItemCallBack(): Silent Reseting!!!!!!!\n");
		return;
	}

	if (priv->rtllib->state != RTLLIB_LINKED)
		return;
	call_usermodehelper(ac_dc_check_script_path, argv, envp, UMH_WAIT_PROC);

	return;
};

/*
 * init_rate_adaptive - seed the rate-adaptation thresholds and RATR
 * (rate-bitmap) presets.  The low2high thresholds sit 5 dB above the
 * high2low ones to provide hysteresis between RSSI states.  The ping
 * fallback (lowest-rate RATR when RSSI is very poor) is enabled only for
 * the Netcore customer build.
 */
void init_rate_adaptive(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rate_adaptive *pra = (struct rate_adaptive *)&priv->rate_adaptive;

	pra->ratr_state = DM_RATR_STA_MAX;
	pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
	pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
	pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;

	pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
	pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
	pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;

	if (priv->CustomerID == RT_CID_819x_Netcore)
		pra->ping_rssi_enable = 1;
	else
		pra->ping_rssi_enable = 0;
	pra->ping_rssi_thresh_for_ra = 15;

	/* RATR bitmaps differ by antenna configuration (2T4R vs 1T2R). */
	if (priv->rf_type == RF_2T4R) {
		pra->upper_rssi_threshold_ratr = 0x8f0f0000;
		pra->middle_rssi_threshold_ratr = 0x8f0ff000;
		pra->low_rssi_threshold_ratr = 0x8f0ff001;
		pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
		pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
		pra->ping_rssi_ratr = 0x0000000d;
	} else if (priv->rf_type == RF_1T2R) {
		pra->upper_rssi_threshold_ratr = 0x000fc000;
		pra->middle_rssi_threshold_ratr = 0x000ff000;
		pra->low_rssi_threshold_ratr = 0x000ff001;
		pra->low_rssi_threshold_ratr_40M = 0x000ff005;
		pra->low_rssi_threshold_ratr_20M = 0x000ff001;
		pra->ping_rssi_ratr = 0x0000000d;
	}
}

/*
 * dm_check_rate_adaptive - pick a RATR preset from the smoothed RSSI and
 * write it to the RATR0 register when it changes.  BIT31 of each preset
 * carries the short-GI enable flag; it is refreshed from the current HT
 * state each tick.  Thresholds depend on the previous ratr_state so the
 * high/low transitions have hysteresis.  Only meaningful in 11n modes
 * while associated; otherwise the state machine is reset to STA_MAX.
 */
static void dm_check_rate_adaptive(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
	struct rate_adaptive *pra = (struct rate_adaptive *)&priv->rate_adaptive;
	u32 currentRATR, targetRATR = 0;
	u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
	bool bshort_gi_enabled = false;
	static u8 ping_rssi_state;	/* latches the "ping fallback" mode */

	if (IS_NIC_DOWN(priv)) {
		RT_TRACE(COMP_RATE,
			 "<---- dm_check_rate_adaptive(): driver is going to unload\n");
		return;
	}

	if (pra->rate_adaptive_disabled)
		return;

	if (!(priv->rtllib->mode == WIRELESS_MODE_N_24G ||
	      priv->rtllib->mode == WIRELESS_MODE_N_5G))
		return;

	if (priv->rtllib->state == RTLLIB_LINKED) {
		/* Short GI usable if enabled for the bandwidth in use. */
		bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz &&
				    pHTInfo->bCurShortGI40MHz) ||
				    (!pHTInfo->bCurTxBW40MHz &&
				     pHTInfo->bCurShortGI20MHz);

		/* Refresh the short-GI flag (BIT31) in every preset. */
		pra->upper_rssi_threshold_ratr =
			(pra->upper_rssi_threshold_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);

		pra->middle_rssi_threshold_ratr =
			(pra->middle_rssi_threshold_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);

		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_40M & (~BIT31)) |
				((bshort_gi_enabled) ? BIT31 : 0);
		} else {
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_20M & (~BIT31)) |
				((bshort_gi_enabled) ? BIT31 : 0);
		}

		pra->ping_rssi_ratr =
			(pra->ping_rssi_ratr & (~BIT31)) |
			((bshort_gi_enabled) ? BIT31 : 0);

		/*
		 * Choose comparison thresholds based on the previous state
		 * so that upward and downward transitions use different
		 * cut-offs (hysteresis).
		 */
		if (pra->ratr_state == DM_RATR_STA_HIGH) {
			HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
			LowRSSIThreshForRA =
				(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
				(pra->low_rssi_thresh_for_ra40M) :
				(pra->low_rssi_thresh_for_ra20M);
		} else if (pra->ratr_state == DM_RATR_STA_LOW) {
			HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
			LowRSSIThreshForRA =
				(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
				(pra->low2high_rssi_thresh_for_ra40M) :
				(pra->low2high_rssi_thresh_for_ra20M);
		} else {
			HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
			LowRSSIThreshForRA =
				(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
				(pra->low_rssi_thresh_for_ra40M) :
				(pra->low_rssi_thresh_for_ra20M);
		}

		if (priv->undecorated_smoothed_pwdb >=
		    (long)HighRSSIThreshForRA) {
			pra->ratr_state = DM_RATR_STA_HIGH;
			targetRATR = pra->upper_rssi_threshold_ratr;
		} else if (priv->undecorated_smoothed_pwdb >=
			   (long)LowRSSIThreshForRA) {
			pra->ratr_state = DM_RATR_STA_MIDDLE;
			targetRATR = pra->middle_rssi_threshold_ratr;
		} else {
			pra->ratr_state = DM_RATR_STA_LOW;
			targetRATR = pra->low_rssi_threshold_ratr;
		}

		/*
		 * Ping fallback: below thresh, force the lowest-rate RATR;
		 * the +5 band keeps it latched until RSSI clearly recovers.
		 */
		if (pra->ping_rssi_enable) {
			if (priv->undecorated_smoothed_pwdb <
			    (long)(pra->ping_rssi_thresh_for_ra+5)) {
				if ((priv->undecorated_smoothed_pwdb <
				     (long)pra->ping_rssi_thresh_for_ra) ||
				    ping_rssi_state) {
					pra->ratr_state = DM_RATR_STA_LOW;
					targetRATR = pra->ping_rssi_ratr;
					ping_rssi_state = 1;
				}
			} else {
				ping_rssi_state = 0;
			}
		}

		/* Half-N-mode APs: mask out the upper-rate bits. */
		if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
			targetRATR &= 0xf00fffff;

		/* Only touch hardware when the RATR actually changed. */
		currentRATR = read_nic_dword(dev, RATR0);
		if (targetRATR != currentRATR) {
			u32 ratr_value;

			ratr_value = targetRATR;
			RT_TRACE(COMP_RATE,
				 "currentRATR = %x, targetRATR = %x\n",
				 currentRATR, targetRATR);
			/* 1T2R cannot transmit 2-spatial-stream OFDM rates. */
			if (priv->rf_type == RF_1T2R)
				ratr_value &= ~(RATE_ALL_OFDM_2SS);
			write_nic_dword(dev, RATR0, ratr_value);
			write_nic_byte(dev, UFWP, 1);

			pra->last_ratr = targetRATR;
		}

	} else {
		pra->ratr_state = DM_RATR_STA_MAX;
	}
}

/* Reset the 20<->40 MHz auto-switch thresholds and disable the feature. */
static void dm_init_bandwidth_autoswitch(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
	priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
	priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
	priv->rtllib->bandwidth_auto_switch.bautoswitch_enable = false;
}

/*
 * dm_bandwidth_autoswitch - force TX down to 20 MHz when the smoothed
 * RSSI drops below the 40->20 threshold, and release it once it climbs
 * back above the 20->40 threshold (two thresholds give hysteresis).
 * No-op when already in 20 MHz mode or the feature is disabled.
 */
static void dm_bandwidth_autoswitch(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||
	    !priv->rtllib->bandwidth_auto_switch.bautoswitch_enable) {
		return;
	} else {
		if (priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz == false) {
			if (priv->undecorated_smoothed_pwdb <=
			    priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
				priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = true;
		} else {
			if (priv->undecorated_smoothed_pwdb >=
			    priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
				priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
		}
	}
}

/*
 * OFDM gain-swing table used by TX power tracking; entries step down by
 * roughly 0.5 dB.  NOTE(review): 0x26c0005b and 0x24400051 break the
 * otherwise monotonically-decreasing sequence (0x16c0005b / 0x14400051
 * would fit the pattern) -- looks like a long-standing typo carried from
 * the vendor driver; confirm against the datasheet before changing.
 */
static u32 OFDMSwingTable[OFDM_Table_Length] = {
	0x7f8001fe, 0x71c001c7, 0x65400195, 0x5a400169,
	0x50800142, 0x47c0011f, 0x40000100, 0x390000e4,
	0x32c000cb, 0x2d4000b5, 0x288000a2, 0x24000090,
	0x20000080, 0x1c800072, 0x19800066, 0x26c0005b,
	0x24400051, 0x12000048, 0x10000040
};

/* CCK TX filter coefficient sets for channels 1-13, one row per gain step. */
static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
};

/* Channel 14 uses a shortened CCK filter (trailing taps forced to zero). */
static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}
};

/* Firmware mailbox offsets used by the TSSI power-tracking handshake --
 * presumably NIC shared-memory addresses; verify against firmware docs. */
#define Pw_Track_Flag 0x11d
#define Tssi_Mea_Value 0x13c
#define Tssi_Report_Value1	0x134
#define Tssi_Report_Value2	0x13e
#define FW_Busy_Flag		0x13f

/*
 * TSSI-based TX power tracking.  Sends a TX-power-tracking command to
 * the firmware (up to 31 attempts), polls the Pw_Track_Flag mailbox for
 * the TSSI measurement, averages the five reported values, and steps
 * the A/C-path BB gain indices up or down until the measured TSSI is
 * within E_FOR_TX_POWER_TRACK of the calibrated 13 dBm reference.  The
 * CCK attenuation is re-derived from the new OFDM index on every step.
 * All early-return paths clear Pw_Track_Flag and FW_Busy_Flag first.
 */
static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool bHighpowerstate, viviflag = false;
	struct dcmd_txcmd tx_cmd;
	u8 powerlevelOFDM24G;
	int i = 0, j = 0, k = 0;
	u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
	u32 Value;
	u8 Pwr_Flag;
	u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
	u32 delta = 0;

	RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
	/* Reset the firmware handshake flags before starting. */
	write_nic_byte(dev, Pw_Track_Flag, 0);
	write_nic_byte(dev, FW_Busy_Flag, 0);
	/* Dynamic TX power is suspended while tracking runs. */
	priv->rtllib->bdynamic_txpower_enable = false;
	bHighpowerstate = priv->bDynamicTxHighPower;

	powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
	RF_Type = priv->rf_type;
	/* Command payload: RF type in the high byte, power level low. */
	Value = (RF_Type<<8) | powerlevelOFDM24G;

	RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n",
		 powerlevelOFDM24G);

	for (j = 0; j <= 30; j++) {
		/* Ask firmware for a TSSI measurement. */
		tx_cmd.Op	= TXCMD_SET_TX_PWR_TRACKING;
		tx_cmd.Length	= 4;
		tx_cmd.Value	= Value;
		cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
				       DESC_PACKET_TYPE_INIT,
				       sizeof(struct dcmd_txcmd));
		mdelay(1);
		/* Poll for completion; bail out on reset or RF-off. */
		for (i = 0; i <= 30; i++) {
			Pwr_Flag = read_nic_byte(dev, Pw_Track_Flag);

			if (Pwr_Flag == 0) {
				mdelay(1);

				if (priv->bResetInProgress) {
					RT_TRACE(COMP_POWER_TRACKING,
						 "we are in slient reset progress, so return\n");
					write_nic_byte(dev, Pw_Track_Flag, 0);
					write_nic_byte(dev, FW_Busy_Flag, 0);
					return;
				}
				if ((priv->rtllib->eRFPowerState != eRfOn)) {
					RT_TRACE(COMP_POWER_TRACKING,
						 "we are in power save, so return\n");
					write_nic_byte(dev, Pw_Track_Flag, 0);
					write_nic_byte(dev, FW_Busy_Flag, 0);
					return;
				}

				continue;
			}

			Avg_TSSI_Meas = read_nic_word(dev, Tssi_Mea_Value);

			if (Avg_TSSI_Meas == 0) {
				write_nic_byte(dev, Pw_Track_Flag, 0);
				write_nic_byte(dev, FW_Busy_Flag, 0);
				return;
			}

			/* Collect the five per-packet TSSI reports; values
			 * <= 20 are treated as invalid and discard the set.
			 */
			for (k = 0; k < 5; k++) {
				if (k != 4)
					tmp_report[k] = read_nic_byte(dev,
							 Tssi_Report_Value1+k);
				else
					tmp_report[k] = read_nic_byte(dev,
							 Tssi_Report_Value2);

				RT_TRACE(COMP_POWER_TRACKING,
					 "TSSI_report_value = %d\n",
					 tmp_report[k]);

				if (tmp_report[k] <= 20) {
					viviflag = true;
					break;
				}
			}

			if (viviflag == true) {
				write_nic_byte(dev, Pw_Track_Flag, 0);
				viviflag = false;
				RT_TRACE(COMP_POWER_TRACKING,
					 "we filted this data\n");
				for (k = 0; k < 5; k++)
					tmp_report[k] = 0;
				break;
			}

			/* Average of the five reports, scaled by 100.
			 * NOTE(review): accumulator is u16; 5*255*100 can
			 * exceed 65535 — confirm the reports stay small
			 * enough in practice.
			 */
			for (k = 0; k < 5; k++)
				Avg_TSSI_Meas_from_driver += tmp_report[k];

			Avg_TSSI_Meas_from_driver =
				Avg_TSSI_Meas_from_driver*100/5;
			RT_TRACE(COMP_POWER_TRACKING,
				 "Avg_TSSI_Meas_from_driver = %d\n",
				 Avg_TSSI_Meas_from_driver);
			TSSI_13dBm = priv->TSSI_13dBm;
			RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n",
				 TSSI_13dBm);

			if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
				delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
			else
				delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;

			if (delta <= E_FOR_TX_POWER_TRACK) {
				/* Converged: re-enable dynamic TX power. */
				priv->rtllib->bdynamic_txpower_enable = true;
				write_nic_byte(dev, Pw_Track_Flag, 0);
				write_nic_byte(dev, FW_Busy_Flag, 0);
				RT_TRACE(COMP_POWER_TRACKING,
					 "tx power track is done\n");
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->rfa_txpowertrackingindex = %d\n",
					 priv->rfa_txpowertrackingindex);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->rfa_txpowertrackingindex_real = %d\n",
					 priv->rfa_txpowertrackingindex_real);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->CCKPresentAttentuation_difference = %d\n",
					 priv->CCKPresentAttentuation_difference);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->CCKPresentAttentuation = %d\n",
					 priv->CCKPresentAttentuation);
				return;
			} else {
				if (Avg_TSSI_Meas_from_driver <
				    TSSI_13dBm - E_FOR_TX_POWER_TRACK) {
					/* Measured power too low: step the
					 * gain index down (A path, and C
					 * path on 2T4R), clamping the real
					 * index at 4.
					 */
					if (RF_Type == RF_2T4R) {
						if ((priv->rfa_txpowertrackingindex > 0) &&
						    (priv->rfc_txpowertrackingindex > 0)) {
							priv->rfa_txpowertrackingindex--;
							if (priv->rfa_txpowertrackingindex_real > 4) {
								priv->rfa_txpowertrackingindex_real--;
								rtl8192_setBBreg(dev,
								   rOFDM0_XATxIQImbalance,
								   bMaskDWord,
								   priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
							}

							priv->rfc_txpowertrackingindex--;
							if (priv->rfc_txpowertrackingindex_real > 4) {
								priv->rfc_txpowertrackingindex_real--;
								rtl8192_setBBreg(dev,
								   rOFDM0_XCTxIQImbalance,
								   bMaskDWord,
								   priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
							}
						} else {
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[4].txbbgain_value);
							rtl8192_setBBreg(dev,
							   rOFDM0_XCTxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[4].txbbgain_value);
						}
					} else {
						if (priv->rfa_txpowertrackingindex > 0) {
							priv->rfa_txpowertrackingindex--;
							if (priv->rfa_txpowertrackingindex_real > 4) {
								priv->rfa_txpowertrackingindex_real--;
								rtl8192_setBBreg(dev,
								   rOFDM0_XATxIQImbalance,
								   bMaskDWord,
								   priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
							}
						} else
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[4].txbbgain_value);
					}
				} else {
					/* Measured power too high: step the
					 * gain index up, clamping at the end
					 * of the gain table.
					 */
					if (RF_Type == RF_2T4R) {
						if ((priv->rfa_txpowertrackingindex < TxBBGainTableLength - 1) &&
						    (priv->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) {
							priv->rfa_txpowertrackingindex++;
							priv->rfa_txpowertrackingindex_real++;
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table
							   [priv->rfa_txpowertrackingindex_real].txbbgain_value);
							priv->rfc_txpowertrackingindex++;
							priv->rfc_txpowertrackingindex_real++;
							rtl8192_setBBreg(dev,
							   rOFDM0_XCTxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
						} else {
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
							rtl8192_setBBreg(dev,
							   rOFDM0_XCTxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
						}
					} else {
						if (priv->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
							priv->rfa_txpowertrackingindex++;
							priv->rfa_txpowertrackingindex_real++;
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
						} else
							rtl8192_setBBreg(dev,
							   rOFDM0_XATxIQImbalance,
							   bMaskDWord,
							   priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
					}
				}
				/* Re-derive the CCK attenuation from the new
				 * OFDM index, clamp it into table range, and
				 * push it to hardware via
				 * dm_cck_txpower_adjust() (channel-14 needs
				 * its own coefficient table).
				 */
				if (RF_Type == RF_2T4R) {
					priv->CCKPresentAttentuation_difference
						= priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
				} else {
					priv->CCKPresentAttentuation_difference
						= priv->rfa_txpowertrackingindex_real - priv->rfa_txpowertracking_default;
				}

				if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
					priv->CCKPresentAttentuation =
						priv->CCKPresentAttentuation_20Mdefault +
						priv->CCKPresentAttentuation_difference;
				else
					priv->CCKPresentAttentuation =
						priv->CCKPresentAttentuation_40Mdefault +
						priv->CCKPresentAttentuation_difference;

				if (priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1))
					priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1;
				if (priv->CCKPresentAttentuation < 0)
					priv->CCKPresentAttentuation = 0;

				if (priv->CCKPresentAttentuation > -1 &&
				    priv->CCKPresentAttentuation < CCKTxBBGainTableLength) {
					if (priv->rtllib->current_network.channel == 14 &&
					    !priv->bcck_in_ch14) {
						priv->bcck_in_ch14 = true;
						dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
					} else if (priv->rtllib->current_network.channel != 14 &&
						   priv->bcck_in_ch14) {
						priv->bcck_in_ch14 = false;
						dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
					} else
						dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
				}
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->rfa_txpowertrackingindex = %d\n",
					 priv->rfa_txpowertrackingindex);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->rfa_txpowertrackingindex_real = %d\n",
					 priv->rfa_txpowertrackingindex_real);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->CCKPresentAttentuation_difference = %d\n",
					 priv->CCKPresentAttentuation_difference);
				RT_TRACE(COMP_POWER_TRACKING,
					 "priv->CCKPresentAttentuation = %d\n",
					 priv->CCKPresentAttentuation);

				/* Attenuation drifted out of usable range:
				 * stop tracking for this run.
				 */
				if (priv->CCKPresentAttentuation_difference <= -12 ||
				    priv->CCKPresentAttentuation_difference >= 24) {
					priv->rtllib->bdynamic_txpower_enable = true;
					write_nic_byte(dev, Pw_Track_Flag, 0);
					write_nic_byte(dev, FW_Busy_Flag, 0);
					RT_TRACE(COMP_POWER_TRACKING,
						 "tx power track--->limited\n");
					return;
				}
			}
			write_nic_byte(dev, Pw_Track_Flag, 0);
			Avg_TSSI_Meas_from_driver = 0;
			for (k = 0; k < 5; k++)
				tmp_report[k] = 0;
			break;
		}
		write_nic_byte(dev, FW_Busy_Flag, 0);
	}
	priv->rtllib->bdynamic_txpower_enable = true;
	write_nic_byte(dev, Pw_Track_Flag, 0);
}

/*
 * Thermal-meter based TX power tracking.  On first invocation it only
 * learns the current OFDM/CCK swing indices from the BB registers; on
 * later invocations it reads the RF thermal meter (path A, reg 0x12)
 * and offsets the swing-table indices by the difference from the fixed
 * ThermalMeterVal reference.
 */
static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
{
#define ThermalMeterVal	9
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 tmpRegA, TempCCk;
	u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
	int i = 0, CCKSwingNeedUpdate = 0;

	if (!priv->btxpower_trackingInit) {
		/* First pass: recover the current OFDM swing index by
		 * matching the live register value against the table.
		 */
		tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance,
					     bMaskDWord);
		for (i = 0; i < OFDM_Table_Length; i++) {
			if (tmpRegA == OFDMSwingTable[i]) {
				priv->OFDM_index[0] = (u8)i;
				RT_TRACE(COMP_POWER_TRACKING,
					 "Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
					 rOFDM0_XATxIQImbalance, tmpRegA,
					 priv->OFDM_index[0]);
			}
		}

		/* Likewise recover the CCK swing index from TxFilter1. */
		TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
		for (i = 0; i < CCK_Table_length; i++) {
			if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
				priv->CCK_index = (u8) i;
				RT_TRACE(COMP_POWER_TRACKING,
					 "Initial reg0x%x"
					 " = 0x%x, CCK_index = 0x%x\n",
					 rCCK0_TxFilter1, TempCCk,
					 priv->CCK_index);
				break;
			}
		}
		priv->btxpower_trackingInit = true;
		return;
	}

	tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078);
	RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
	/* Reject out-of-range readings; cap the valid reading at 12. */
	if (tmpRegA < 3 || tmpRegA > 13)
		return;
	if (tmpRegA >= 12)
		tmpRegA = 12;
	RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
	priv->ThermalMeter[0] = ThermalMeterVal;
	priv->ThermalMeter[1] = ThermalMeterVal;

	/* Offset the swing indices by (reference - reading), clamped to
	 * each table's bounds; 40 MHz CCK runs 6 steps below 20 MHz.
	 */
	if (priv->ThermalMeter[0] >= (u8)tmpRegA) {
		tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0] -
			       (u8)tmpRegA);
		tmpCCK40Mindex = tmpCCK20Mindex - 6;
		if (tmpOFDMindex >= OFDM_Table_Length)
			tmpOFDMindex = OFDM_Table_Length-1;
		if (tmpCCK20Mindex >= CCK_Table_length)
			tmpCCK20Mindex = CCK_Table_length-1;
		if (tmpCCK40Mindex >= CCK_Table_length)
			tmpCCK40Mindex = CCK_Table_length-1;
	} else {
		tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
		if (tmpval >= 6)
			tmpOFDMindex =
tmpCCK20Mindex = 0; else tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval; tmpCCK40Mindex = 0; } if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) tmpCCKindex = tmpCCK40Mindex; else tmpCCKindex = tmpCCK20Mindex; priv->Record_CCK_20Mindex = tmpCCK20Mindex; priv->Record_CCK_40Mindex = tmpCCK40Mindex; RT_TRACE(COMP_POWER_TRACKING, "Record_CCK_20Mindex / Record_CCK_40" "Mindex = %d / %d.\n", priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex); if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; CCKSwingNeedUpdate = 1; } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; CCKSwingNeedUpdate = 1; } if (priv->CCK_index != tmpCCKindex) { priv->CCK_index = tmpCCKindex; CCKSwingNeedUpdate = 1; } if (CCKSwingNeedUpdate) dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); if (priv->OFDM_index[0] != tmpOFDMindex) { priv->OFDM_index[0] = tmpOFDMindex; rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index[0]]); RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n", priv->OFDM_index[0], OFDMSwingTable[priv->OFDM_index[0]]); } priv->txpower_count = 0; } void dm_txpower_trackingcallback(void *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, txpower_tracking_wq); struct net_device *dev = priv->rtllib->dev; if (priv->IC_Cut >= IC_VersionCut_D) dm_TXPowerTrackingCallback_TSSI(dev); else dm_TXPowerTrackingCallback_ThermalMeter(dev); } static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->txbbgain_table[0].txbb_iq_amplifygain = 12; priv->txbbgain_table[0].txbbgain_value = 0x7f8001fe; priv->txbbgain_table[1].txbb_iq_amplifygain = 11; priv->txbbgain_table[1].txbbgain_value = 0x788001e2; priv->txbbgain_table[2].txbb_iq_amplifygain = 10; priv->txbbgain_table[2].txbbgain_value = 0x71c001c7; priv->txbbgain_table[3].txbb_iq_amplifygain = 9; 
priv->txbbgain_table[3].txbbgain_value = 0x6b8001ae; priv->txbbgain_table[4].txbb_iq_amplifygain = 8; priv->txbbgain_table[4].txbbgain_value = 0x65400195; priv->txbbgain_table[5].txbb_iq_amplifygain = 7; priv->txbbgain_table[5].txbbgain_value = 0x5fc0017f; priv->txbbgain_table[6].txbb_iq_amplifygain = 6; priv->txbbgain_table[6].txbbgain_value = 0x5a400169; priv->txbbgain_table[7].txbb_iq_amplifygain = 5; priv->txbbgain_table[7].txbbgain_value = 0x55400155; priv->txbbgain_table[8].txbb_iq_amplifygain = 4; priv->txbbgain_table[8].txbbgain_value = 0x50800142; priv->txbbgain_table[9].txbb_iq_amplifygain = 3; priv->txbbgain_table[9].txbbgain_value = 0x4c000130; priv->txbbgain_table[10].txbb_iq_amplifygain = 2; priv->txbbgain_table[10].txbbgain_value = 0x47c0011f; priv->txbbgain_table[11].txbb_iq_amplifygain = 1; priv->txbbgain_table[11].txbbgain_value = 0x43c0010f; priv->txbbgain_table[12].txbb_iq_amplifygain = 0; priv->txbbgain_table[12].txbbgain_value = 0x40000100; priv->txbbgain_table[13].txbb_iq_amplifygain = -1; priv->txbbgain_table[13].txbbgain_value = 0x3c8000f2; priv->txbbgain_table[14].txbb_iq_amplifygain = -2; priv->txbbgain_table[14].txbbgain_value = 0x390000e4; priv->txbbgain_table[15].txbb_iq_amplifygain = -3; priv->txbbgain_table[15].txbbgain_value = 0x35c000d7; priv->txbbgain_table[16].txbb_iq_amplifygain = -4; priv->txbbgain_table[16].txbbgain_value = 0x32c000cb; priv->txbbgain_table[17].txbb_iq_amplifygain = -5; priv->txbbgain_table[17].txbbgain_value = 0x300000c0; priv->txbbgain_table[18].txbb_iq_amplifygain = -6; priv->txbbgain_table[18].txbbgain_value = 0x2d4000b5; priv->txbbgain_table[19].txbb_iq_amplifygain = -7; priv->txbbgain_table[19].txbbgain_value = 0x2ac000ab; priv->txbbgain_table[20].txbb_iq_amplifygain = -8; priv->txbbgain_table[20].txbbgain_value = 0x288000a2; priv->txbbgain_table[21].txbb_iq_amplifygain = -9; priv->txbbgain_table[21].txbbgain_value = 0x26000098; priv->txbbgain_table[22].txbb_iq_amplifygain = -10; 
priv->txbbgain_table[22].txbbgain_value = 0x24000090; priv->txbbgain_table[23].txbb_iq_amplifygain = -11; priv->txbbgain_table[23].txbbgain_value = 0x22000088; priv->txbbgain_table[24].txbb_iq_amplifygain = -12; priv->txbbgain_table[24].txbbgain_value = 0x20000080; priv->txbbgain_table[25].txbb_iq_amplifygain = -13; priv->txbbgain_table[25].txbbgain_value = 0x1a00006c; priv->txbbgain_table[26].txbb_iq_amplifygain = -14; priv->txbbgain_table[26].txbbgain_value = 0x1c800072; priv->txbbgain_table[27].txbb_iq_amplifygain = -15; priv->txbbgain_table[27].txbbgain_value = 0x18000060; priv->txbbgain_table[28].txbb_iq_amplifygain = -16; priv->txbbgain_table[28].txbbgain_value = 0x19800066; priv->txbbgain_table[29].txbb_iq_amplifygain = -17; priv->txbbgain_table[29].txbbgain_value = 0x15800056; priv->txbbgain_table[30].txbb_iq_amplifygain = -18; priv->txbbgain_table[30].txbbgain_value = 0x26c0005b; priv->txbbgain_table[31].txbb_iq_amplifygain = -19; priv->txbbgain_table[31].txbbgain_value = 0x14400051; priv->txbbgain_table[32].txbb_iq_amplifygain = -20; priv->txbbgain_table[32].txbbgain_value = 0x24400051; priv->txbbgain_table[33].txbb_iq_amplifygain = -21; priv->txbbgain_table[33].txbbgain_value = 0x1300004c; priv->txbbgain_table[34].txbb_iq_amplifygain = -22; priv->txbbgain_table[34].txbbgain_value = 0x12000048; priv->txbbgain_table[35].txbb_iq_amplifygain = -23; priv->txbbgain_table[35].txbbgain_value = 0x11000044; priv->txbbgain_table[36].txbb_iq_amplifygain = -24; priv->txbbgain_table[36].txbbgain_value = 0x10000040; priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36; priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35; priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e; priv->cck_txbbgain_table[0].ccktxbb_valuearray[3] = 0x25; priv->cck_txbbgain_table[0].ccktxbb_valuearray[4] = 0x1c; priv->cck_txbbgain_table[0].ccktxbb_valuearray[5] = 0x12; priv->cck_txbbgain_table[0].ccktxbb_valuearray[6] = 0x09; priv->cck_txbbgain_table[0].ccktxbb_valuearray[7] = 
0x04; priv->cck_txbbgain_table[1].ccktxbb_valuearray[0] = 0x33; priv->cck_txbbgain_table[1].ccktxbb_valuearray[1] = 0x32; priv->cck_txbbgain_table[1].ccktxbb_valuearray[2] = 0x2b; priv->cck_txbbgain_table[1].ccktxbb_valuearray[3] = 0x23; priv->cck_txbbgain_table[1].ccktxbb_valuearray[4] = 0x1a; priv->cck_txbbgain_table[1].ccktxbb_valuearray[5] = 0x11; priv->cck_txbbgain_table[1].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[1].ccktxbb_valuearray[7] = 0x04; priv->cck_txbbgain_table[2].ccktxbb_valuearray[0] = 0x30; priv->cck_txbbgain_table[2].ccktxbb_valuearray[1] = 0x2f; priv->cck_txbbgain_table[2].ccktxbb_valuearray[2] = 0x29; priv->cck_txbbgain_table[2].ccktxbb_valuearray[3] = 0x21; priv->cck_txbbgain_table[2].ccktxbb_valuearray[4] = 0x19; priv->cck_txbbgain_table[2].ccktxbb_valuearray[5] = 0x10; priv->cck_txbbgain_table[2].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[2].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[3].ccktxbb_valuearray[0] = 0x2d; priv->cck_txbbgain_table[3].ccktxbb_valuearray[1] = 0x2d; priv->cck_txbbgain_table[3].ccktxbb_valuearray[2] = 0x27; priv->cck_txbbgain_table[3].ccktxbb_valuearray[3] = 0x1f; priv->cck_txbbgain_table[3].ccktxbb_valuearray[4] = 0x18; priv->cck_txbbgain_table[3].ccktxbb_valuearray[5] = 0x0f; priv->cck_txbbgain_table[3].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[3].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[4].ccktxbb_valuearray[0] = 0x2b; priv->cck_txbbgain_table[4].ccktxbb_valuearray[1] = 0x2a; priv->cck_txbbgain_table[4].ccktxbb_valuearray[2] = 0x25; priv->cck_txbbgain_table[4].ccktxbb_valuearray[3] = 0x1e; priv->cck_txbbgain_table[4].ccktxbb_valuearray[4] = 0x16; priv->cck_txbbgain_table[4].ccktxbb_valuearray[5] = 0x0e; priv->cck_txbbgain_table[4].ccktxbb_valuearray[6] = 0x07; priv->cck_txbbgain_table[4].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[5].ccktxbb_valuearray[0] = 0x28; priv->cck_txbbgain_table[5].ccktxbb_valuearray[1] = 0x28; 
priv->cck_txbbgain_table[5].ccktxbb_valuearray[2] = 0x22; priv->cck_txbbgain_table[5].ccktxbb_valuearray[3] = 0x1c; priv->cck_txbbgain_table[5].ccktxbb_valuearray[4] = 0x15; priv->cck_txbbgain_table[5].ccktxbb_valuearray[5] = 0x0d; priv->cck_txbbgain_table[5].ccktxbb_valuearray[6] = 0x07; priv->cck_txbbgain_table[5].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[6].ccktxbb_valuearray[0] = 0x26; priv->cck_txbbgain_table[6].ccktxbb_valuearray[1] = 0x25; priv->cck_txbbgain_table[6].ccktxbb_valuearray[2] = 0x21; priv->cck_txbbgain_table[6].ccktxbb_valuearray[3] = 0x1b; priv->cck_txbbgain_table[6].ccktxbb_valuearray[4] = 0x14; priv->cck_txbbgain_table[6].ccktxbb_valuearray[5] = 0x0d; priv->cck_txbbgain_table[6].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[6].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[7].ccktxbb_valuearray[0] = 0x24; priv->cck_txbbgain_table[7].ccktxbb_valuearray[1] = 0x23; priv->cck_txbbgain_table[7].ccktxbb_valuearray[2] = 0x1f; priv->cck_txbbgain_table[7].ccktxbb_valuearray[3] = 0x19; priv->cck_txbbgain_table[7].ccktxbb_valuearray[4] = 0x13; priv->cck_txbbgain_table[7].ccktxbb_valuearray[5] = 0x0c; priv->cck_txbbgain_table[7].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[7].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[8].ccktxbb_valuearray[0] = 0x22; priv->cck_txbbgain_table[8].ccktxbb_valuearray[1] = 0x21; priv->cck_txbbgain_table[8].ccktxbb_valuearray[2] = 0x1d; priv->cck_txbbgain_table[8].ccktxbb_valuearray[3] = 0x18; priv->cck_txbbgain_table[8].ccktxbb_valuearray[4] = 0x11; priv->cck_txbbgain_table[8].ccktxbb_valuearray[5] = 0x0b; priv->cck_txbbgain_table[8].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[8].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[9].ccktxbb_valuearray[0] = 0x20; priv->cck_txbbgain_table[9].ccktxbb_valuearray[1] = 0x20; priv->cck_txbbgain_table[9].ccktxbb_valuearray[2] = 0x1b; priv->cck_txbbgain_table[9].ccktxbb_valuearray[3] = 0x16; 
priv->cck_txbbgain_table[9].ccktxbb_valuearray[4] = 0x11; priv->cck_txbbgain_table[9].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[9].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[9].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[10].ccktxbb_valuearray[0] = 0x1f; priv->cck_txbbgain_table[10].ccktxbb_valuearray[1] = 0x1e; priv->cck_txbbgain_table[10].ccktxbb_valuearray[2] = 0x1a; priv->cck_txbbgain_table[10].ccktxbb_valuearray[3] = 0x15; priv->cck_txbbgain_table[10].ccktxbb_valuearray[4] = 0x10; priv->cck_txbbgain_table[10].ccktxbb_valuearray[5] = 0x0a; priv->cck_txbbgain_table[10].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[10].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[11].ccktxbb_valuearray[0] = 0x1d; priv->cck_txbbgain_table[11].ccktxbb_valuearray[1] = 0x1c; priv->cck_txbbgain_table[11].ccktxbb_valuearray[2] = 0x18; priv->cck_txbbgain_table[11].ccktxbb_valuearray[3] = 0x14; priv->cck_txbbgain_table[11].ccktxbb_valuearray[4] = 0x0f; priv->cck_txbbgain_table[11].ccktxbb_valuearray[5] = 0x0a; priv->cck_txbbgain_table[11].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[11].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[12].ccktxbb_valuearray[0] = 0x1b; priv->cck_txbbgain_table[12].ccktxbb_valuearray[1] = 0x1a; priv->cck_txbbgain_table[12].ccktxbb_valuearray[2] = 0x17; priv->cck_txbbgain_table[12].ccktxbb_valuearray[3] = 0x13; priv->cck_txbbgain_table[12].ccktxbb_valuearray[4] = 0x0e; priv->cck_txbbgain_table[12].ccktxbb_valuearray[5] = 0x09; priv->cck_txbbgain_table[12].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[12].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[13].ccktxbb_valuearray[0] = 0x1a; priv->cck_txbbgain_table[13].ccktxbb_valuearray[1] = 0x19; priv->cck_txbbgain_table[13].ccktxbb_valuearray[2] = 0x16; priv->cck_txbbgain_table[13].ccktxbb_valuearray[3] = 0x12; priv->cck_txbbgain_table[13].ccktxbb_valuearray[4] = 0x0d; priv->cck_txbbgain_table[13].ccktxbb_valuearray[5] = 
0x09; priv->cck_txbbgain_table[13].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[13].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[14].ccktxbb_valuearray[0] = 0x18; priv->cck_txbbgain_table[14].ccktxbb_valuearray[1] = 0x17; priv->cck_txbbgain_table[14].ccktxbb_valuearray[2] = 0x15; priv->cck_txbbgain_table[14].ccktxbb_valuearray[3] = 0x11; priv->cck_txbbgain_table[14].ccktxbb_valuearray[4] = 0x0c; priv->cck_txbbgain_table[14].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[14].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[14].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[15].ccktxbb_valuearray[0] = 0x17; priv->cck_txbbgain_table[15].ccktxbb_valuearray[1] = 0x16; priv->cck_txbbgain_table[15].ccktxbb_valuearray[2] = 0x13; priv->cck_txbbgain_table[15].ccktxbb_valuearray[3] = 0x10; priv->cck_txbbgain_table[15].ccktxbb_valuearray[4] = 0x0c; priv->cck_txbbgain_table[15].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[15].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[15].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[16].ccktxbb_valuearray[0] = 0x16; priv->cck_txbbgain_table[16].ccktxbb_valuearray[1] = 0x15; priv->cck_txbbgain_table[16].ccktxbb_valuearray[2] = 0x12; priv->cck_txbbgain_table[16].ccktxbb_valuearray[3] = 0x0f; priv->cck_txbbgain_table[16].ccktxbb_valuearray[4] = 0x0b; priv->cck_txbbgain_table[16].ccktxbb_valuearray[5] = 0x07; priv->cck_txbbgain_table[16].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[16].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[17].ccktxbb_valuearray[0] = 0x14; priv->cck_txbbgain_table[17].ccktxbb_valuearray[1] = 0x14; priv->cck_txbbgain_table[17].ccktxbb_valuearray[2] = 0x11; priv->cck_txbbgain_table[17].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_table[17].ccktxbb_valuearray[4] = 0x0b; priv->cck_txbbgain_table[17].ccktxbb_valuearray[5] = 0x07; priv->cck_txbbgain_table[17].ccktxbb_valuearray[6] = 0x03; 
priv->cck_txbbgain_table[17].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[18].ccktxbb_valuearray[0] = 0x13; priv->cck_txbbgain_table[18].ccktxbb_valuearray[1] = 0x13; priv->cck_txbbgain_table[18].ccktxbb_valuearray[2] = 0x10; priv->cck_txbbgain_table[18].ccktxbb_valuearray[3] = 0x0d; priv->cck_txbbgain_table[18].ccktxbb_valuearray[4] = 0x0a; priv->cck_txbbgain_table[18].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[18].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[18].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[19].ccktxbb_valuearray[0] = 0x12; priv->cck_txbbgain_table[19].ccktxbb_valuearray[1] = 0x12; priv->cck_txbbgain_table[19].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_table[19].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_table[19].ccktxbb_valuearray[4] = 0x09; priv->cck_txbbgain_table[19].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[19].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[19].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[20].ccktxbb_valuearray[0] = 0x11; priv->cck_txbbgain_table[20].ccktxbb_valuearray[1] = 0x11; priv->cck_txbbgain_table[20].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_table[20].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_table[20].ccktxbb_valuearray[4] = 0x09; priv->cck_txbbgain_table[20].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[20].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[20].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[21].ccktxbb_valuearray[0] = 0x10; priv->cck_txbbgain_table[21].ccktxbb_valuearray[1] = 0x10; priv->cck_txbbgain_table[21].ccktxbb_valuearray[2] = 0x0e; priv->cck_txbbgain_table[21].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_table[21].ccktxbb_valuearray[4] = 0x08; priv->cck_txbbgain_table[21].ccktxbb_valuearray[5] = 0x05; priv->cck_txbbgain_table[21].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[21].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[22].ccktxbb_valuearray[0] = 
0x0f; priv->cck_txbbgain_table[22].ccktxbb_valuearray[1] = 0x0f; priv->cck_txbbgain_table[22].ccktxbb_valuearray[2] = 0x0d; priv->cck_txbbgain_table[22].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_table[22].ccktxbb_valuearray[4] = 0x08; priv->cck_txbbgain_table[22].ccktxbb_valuearray[5] = 0x05; priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[3] = 0x1b; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[0] = 0x33; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[1] = 0x32; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[2] = 0x2b; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[3] = 0x19; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[0] = 0x30; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[1] = 0x2f; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[2] = 0x29; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[3] = 0x18; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[0] = 0x2d; 
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[1] = 0x2d; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[2] = 0x27; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[3] = 0x17; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[0] = 0x2b; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[1] = 0x2a; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[2] = 0x25; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[3] = 0x15; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[0] = 0x28; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[1] = 0x28; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[2] = 0x22; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[3] = 0x14; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[0] = 0x26; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[1] = 0x25; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[2] = 0x21; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[3] = 0x13; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[7] = 0x00; 
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[0] = 0x24; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[1] = 0x23; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[2] = 0x1f; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[3] = 0x12; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[0] = 0x22; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[1] = 0x21; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[2] = 0x1d; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[3] = 0x11; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[0] = 0x20; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[1] = 0x20; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[2] = 0x1b; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[3] = 0x10; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[0] = 0x1f; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[1] = 0x1e; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[2] = 0x1a; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[3] = 0x0f; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[6] = 0x00; 
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[0] = 0x1d; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[1] = 0x1c; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[2] = 0x18; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[0] = 0x1b; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[1] = 0x1a; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[2] = 0x17; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[0] = 0x1a; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[1] = 0x19; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[2] = 0x16; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[3] = 0x0d; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[0] = 0x18; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[1] = 0x17; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[2] = 0x15; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[5] = 0x00; 
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[0] = 0x17; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[1] = 0x16; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[2] = 0x13; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[0] = 0x16; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[1] = 0x15; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[2] = 0x12; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[0] = 0x14; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[1] = 0x14; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[2] = 0x11; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[3] = 0x0a; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[0] = 0x13; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[1] = 0x13; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[2] = 0x10; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[3] = 0x0a; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[4] = 0x00; 
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[0] = 0x12; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[1] = 0x12; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[3] = 0x09; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[0] = 0x11; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[1] = 0x11; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[3] = 0x09; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[0] = 0x10; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[1] = 0x10; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[2] = 0x0e; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[3] = 0x08; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[0] = 0x0f; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[1] = 0x0f; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[2] = 0x0d; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[3] = 0x08; 
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[7] = 0x00; priv->btxpower_tracking = true; priv->txpower_count = 0; priv->btxpower_trackingInit = false; } static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->rtllib->FwRWRF) priv->btxpower_tracking = true; else priv->btxpower_tracking = false; priv->txpower_count = 0; priv->btxpower_trackingInit = false; RT_TRACE(COMP_POWER_TRACKING, "pMgntInfo->bTXPowerTracking = %d\n", priv->btxpower_tracking); } void dm_initialize_txpower_tracking(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->IC_Cut >= IC_VersionCut_D) dm_InitializeTXPowerTracking_TSSI(dev); else dm_InitializeTXPowerTracking_ThermalMeter(dev); } static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); static u32 tx_power_track_counter; RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__); if (read_nic_byte(dev, 0x11e) == 1) return; if (!priv->btxpower_tracking) return; tx_power_track_counter++; if (tx_power_track_counter >= 180) { queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0); tx_power_track_counter = 0; } } static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); static u8 TM_Trigger; u8 TxPowerCheckCnt = 0; if (IS_HARDWARE_TYPE_8192SE(dev)) TxPowerCheckCnt = 5; else TxPowerCheckCnt = 2; if (!priv->btxpower_tracking) { return; } else { if (priv->txpower_count <= TxPowerCheckCnt) { priv->txpower_count++; return; } } if (!TM_Trigger) { { rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 
0x4d); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f); } TM_Trigger = 1; return; } else { printk(KERN_INFO "===============>Schedule TxPowerTrackingWorkItem\n"); queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0); TM_Trigger = 0; } } static void dm_check_txpower_tracking(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->IC_Cut >= IC_VersionCut_D) dm_CheckTXPowerTracking_TSSI(dev); else dm_CheckTXPowerTracking_ThermalMeter(dev); } static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14) { u32 TempVal; struct r8192_priv *priv = rtllib_priv(dev); TempVal = 0; if (!bInCH14) { TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ; rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal); TempVal = 0; TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24)); rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal); TempVal = 0; TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ; rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal); } else { TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] + (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ; rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal); TempVal = 0; TempVal = 
(u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] + (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) + (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24)); rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal); TempVal = 0; TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] + (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ; rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal); } } static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14) { u32 TempVal; struct r8192_priv *priv = rtllib_priv(dev); TempVal = 0; if (!bInCH14) { TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] + (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ; rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1, TempVal); TempVal = 0; TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] + (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) + (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24); rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2, TempVal); TempVal = 0; TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] + (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ; rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort, TempVal); } else { TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] + (CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ; rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, 
"CCK chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1, TempVal); TempVal = 0; TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] + (CCKSwingTable_Ch14[priv->CCK_index][3]<<8) + (CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+ (CCKSwingTable_Ch14[priv->CCK_index][5]<<24); rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2, TempVal); TempVal = 0; TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] + (CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ; rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal); RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort, TempVal); } } void dm_cck_txpower_adjust(struct net_device *dev, bool binch14) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->IC_Cut >= IC_VersionCut_D) dm_CCKTxPowerAdjust_TSSI(dev, binch14); else dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14); } static void dm_txpower_reset_recovery(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n"); rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n", priv->rfa_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n", priv->CCKPresentAttentuation); dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n", 
priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n", priv->rfc_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain); } void dm_restore_dynamic_mechanism_state(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u32 reg_ratr = priv->rate_adaptive.last_ratr; u32 ratr_value; if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n"); return; } if (priv->rate_adaptive.rate_adaptive_disabled) return; if (!(priv->rtllib->mode == WIRELESS_MODE_N_24G || priv->rtllib->mode == WIRELESS_MODE_N_5G)) return; ratr_value = reg_ratr; if (priv->rf_type == RF_1T2R) ratr_value &= ~(RATE_ALL_OFDM_2SS); write_nic_dword(dev, RATR0, ratr_value); write_nic_byte(dev, UFWP, 1); if (priv->btxpower_trackingInit && priv->btxpower_tracking) dm_txpower_reset_recovery(dev); dm_bb_initialgain_restore(dev); } static void dm_bb_initialgain_restore(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u32 bit_mask = 0x7f; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI) return; rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1); rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1); rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1); rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask, (u32)priv->initgain_backup.xdagccore1); bit_mask = bMaskByte2; rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca); RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n", priv->initgain_backup.xbagccore1); 
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n", priv->initgain_backup.xcagccore1); RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n", priv->initgain_backup.cca); rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); } void dm_backup_dynamic_mechanism_state(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->bswitch_fsync = false; priv->bfsync_processing = false; dm_bb_initialgain_backup(dev); } static void dm_bb_initialgain_backup(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u32 bit_mask = bMaskByte0; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI) return; rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask); priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask); priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask); priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask); bit_mask = bMaskByte2; priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask); RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n", priv->initgain_backup.xbagccore1); RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n", priv->initgain_backup.xcagccore1); RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n", priv->initgain_backup.cca); } void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type, u32 dm_value) { if (dm_type == DIG_TYPE_THRESH_HIGH) { dm_digtable.rssi_high_thresh = dm_value; } else if (dm_type == DIG_TYPE_THRESH_LOW) { dm_digtable.rssi_low_thresh = dm_value; } else if (dm_type == 
DIG_TYPE_THRESH_HIGHPWR_HIGH) { dm_digtable.rssi_high_power_highthresh = dm_value; } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) { dm_digtable.rssi_high_power_highthresh = dm_value; } else if (dm_type == DIG_TYPE_ENABLE) { dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = true; } else if (dm_type == DIG_TYPE_DISABLE) { dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = false; } else if (dm_type == DIG_TYPE_DBG_MODE) { if (dm_value >= DM_DBG_MAX) dm_value = DM_DBG_OFF; dm_digtable.dbg_mode = (u8)dm_value; } else if (dm_type == DIG_TYPE_RSSI) { if (dm_value > 100) dm_value = 30; dm_digtable.rssi_val = (long)dm_value; } else if (dm_type == DIG_TYPE_ALGORITHM) { if (dm_value >= DIG_ALGO_MAX) dm_value = DIG_ALGO_BY_FALSE_ALARM; if (dm_digtable.dig_algorithm != (u8)dm_value) dm_digtable.dig_algorithm_switch = 1; dm_digtable.dig_algorithm = (u8)dm_value; } else if (dm_type == DIG_TYPE_BACKOFF) { if (dm_value > 30) dm_value = 30; dm_digtable.backoff_val = (u8)dm_value; } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) { if (dm_value == 0) dm_value = 0x1; dm_digtable.rx_gain_range_min = (u8)dm_value; } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) { if (dm_value > 0x50) dm_value = 0x50; dm_digtable.rx_gain_range_max = (u8)dm_value; } } static void dm_dig_init(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); dm_digtable.dig_enable_flag = true; dm_digtable.Backoff_Enable_Flag = true; dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI; dm_digtable.Dig_TwoPort_Algorithm = DIG_TWO_PORT_ALGO_RSSI; dm_digtable.Dig_Ext_Port_Stage = DIG_EXT_PORT_STAGE_MAX; dm_digtable.dbg_mode = DM_DBG_OFF; dm_digtable.dig_algorithm_switch = 0; dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX; dm_digtable.CurSTAConnectState = dm_digtable.PreSTAConnectState = DIG_STA_DISCONNECT; dm_digtable.CurAPConnectState = dm_digtable.PreAPConnectState = DIG_AP_DISCONNECT; dm_digtable.initialgain_lowerbound_state = false; 
/* --- tail of dm_dig_init(): threshold and range defaults --- */
	dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
	dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
	dm_digtable.FALowThresh = DM_FALSEALARM_THRESH_LOW;
	dm_digtable.FAHighThresh = DM_FALSEALARM_THRESH_HIGH;
	dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
	dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
	dm_digtable.rssi_val = 50;
	dm_digtable.backoff_val = DM_DIG_BACKOFF;
	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
	/* Netcore boards use a different RX-gain floor than the default. */
	if (priv->CustomerID == RT_CID_819x_Netcore)
		dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
	else
		dm_digtable.rx_gain_range_min = DM_DIG_MIN;
	dm_digtable.BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
	dm_digtable.BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
}

/* Dispatch to the DIG (dynamic initial gain) algorithm currently selected
 * in dm_digtable; no-op when DIG is disabled or the algorithm is unknown.
 */
static void dm_ctrl_initgain_byrssi(struct net_device *dev)
{
	if (dm_digtable.dig_enable_flag == false)
		return;

	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
		dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
	else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		dm_ctrl_initgain_byrssi_by_driverrssi(dev);
	else
		return;
}

/*-----------------------------------------------------------------------------
 * Function:	dm_CtrlInitGainBeforeConnectByRssiAndFalseAlarm()
 *
 * Overview:	Driver monitors RSSI and false alarms to change the initial
 *		gain.  Only changes initial gain while a link is in progress.
 *
 * Input:	IN	PADAPTER	pAdapter
 *
 * Output:	NONE
 *
 * Return:	NONE
 *
 * Revised History:
 *	When		Who	Remark
 *	03/04/2009	hpfan	Create Version 0.
* *---------------------------------------------------------------------------*/ static void dm_ctrl_initgain_byrssi_by_driverrssi( struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u8 i; static u8 fw_dig; if (dm_digtable.dig_enable_flag == false) return; if (dm_digtable.dig_algorithm_switch) fw_dig = 0; if (fw_dig <= 3) { for (i = 0; i < 3; i++) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); fw_dig++; dm_digtable.dig_state = DM_STA_DIG_OFF; } if (priv->rtllib->state == RTLLIB_LINKED) dm_digtable.CurSTAConnectState = DIG_STA_CONNECT; else dm_digtable.CurSTAConnectState = DIG_STA_DISCONNECT; if (dm_digtable.dbg_mode == DM_DBG_OFF) dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb; dm_initial_gain(dev); dm_pd_th(dev); dm_cs_ratio(dev); if (dm_digtable.dig_algorithm_switch) dm_digtable.dig_algorithm_switch = 0; dm_digtable.PreSTAConnectState = dm_digtable.CurSTAConnectState; } static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm( struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); static u32 reset_cnt; u8 i; if (dm_digtable.dig_enable_flag == false) return; if (dm_digtable.dig_algorithm_switch) { dm_digtable.dig_state = DM_STA_DIG_MAX; for (i = 0; i < 3; i++) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); dm_digtable.dig_algorithm_switch = 0; } if (priv->rtllib->state != RTLLIB_LINKED) return; if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) && (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh)) return; if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh)) { if (dm_digtable.dig_state == DM_STA_DIG_OFF && (priv->reset_count == reset_cnt)) return; else reset_cnt = priv->reset_count; dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX; dm_digtable.dig_state = DM_STA_DIG_OFF; rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17); write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17); write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17); write_nic_byte(dev, 
rOFDM0_XDAGCCore1, 0x17); if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x42); write_nic_byte(dev, 0xa0a, 0x08); return; } if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)) { u8 reset_flag = 0; if (dm_digtable.dig_state == DM_STA_DIG_ON && (priv->reset_count == reset_cnt)) { dm_ctrl_initgain_byrssi_highpwr(dev); return; } else { if (priv->reset_count != reset_cnt) reset_flag = 1; reset_cnt = priv->reset_count; } dm_digtable.dig_state = DM_STA_DIG_ON; if (reset_flag == 1) { write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c); write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c); write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c); write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c); } else { write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20); write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20); write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20); write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20); } if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x44); write_nic_byte(dev, 0xa0a, 0xcd); rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); } dm_ctrl_initgain_byrssi_highpwr(dev); } static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); static u32 reset_cnt_highpwr; if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) && (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh)) return; if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh) { if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON && (priv->reset_count == reset_cnt_highpwr)) return; else dm_digtable.dig_highpwr_state = DM_STA_DIG_ON; if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x43); } else { if (dm_digtable.dig_highpwr_state == 
DM_STA_DIG_OFF && (priv->reset_count == reset_cnt_highpwr)) return; else dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF; if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh && priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) { if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x44); } } reset_cnt_highpwr = priv->reset_count; } static void dm_initial_gain(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u8 initial_gain = 0; static u8 initialized, force_write; static u32 reset_cnt; if (dm_digtable.dig_algorithm_switch) { initialized = 0; reset_cnt = 0; } if (rtllib_act_scanning(priv->rtllib, true) == true) { force_write = 1; return; } if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) { if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) { if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max) dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max; else if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min; else dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val; } else { if (dm_digtable.cur_ig_value == 0) dm_digtable.cur_ig_value = priv->DefaultInitialGain[0]; else dm_digtable.cur_ig_value = dm_digtable.pre_ig_value; } } else { dm_digtable.cur_ig_value = priv->DefaultInitialGain[0]; dm_digtable.pre_ig_value = 0; } if (priv->reset_count != reset_cnt) { force_write = 1; reset_cnt = priv->reset_count; } if (dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1)) force_write = 1; if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value) || !initialized || force_write) { initial_gain = (u8)dm_digtable.cur_ig_value; write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain); write_nic_byte(dev, 
rOFDM0_XCAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain); dm_digtable.pre_ig_value = dm_digtable.cur_ig_value; initialized = 1; force_write = 0; } } static void dm_pd_th(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); static u8 initialized, force_write; static u32 reset_cnt; if (dm_digtable.dig_algorithm_switch) { initialized = 0; reset_cnt = 0; } if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) { if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) { if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh) dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER; else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)) dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER; else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) && (dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh)) dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER; else dm_digtable.curpd_thstate = dm_digtable.prepd_thstate; } else { dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER; } } else { dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER; } if (priv->reset_count != reset_cnt) { force_write = 1; reset_cnt = priv->reset_count; } if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) || (initialized <= 3) || force_write) { if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) { if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x42); } else if (dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER) { if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x44); } else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) { if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10); else write_nic_byte(dev, rOFDM0_RxDetector1, 0x43); } dm_digtable.prepd_thstate = 
/* --- tail of dm_pd_th(): latch the new power-detection state --- */
			dm_digtable.curpd_thstate;
		/* First few passes always rewrite; then only on change. */
		if (initialized <= 3)
			initialized++;
		force_write = 0;
	}
}

/*
 * Pick the CCK carrier-sense ratio (register 0xa0a) from the smoothed RSSI:
 * low RSSI (or disconnected) -> lower ratio (0x08), high RSSI -> higher
 * ratio (0xcd).  Only writes the register when the state changes, after a
 * chip reset, or on the first pass.
 */
static void dm_cs_ratio(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	/* Persist across calls; reset when the DIG algorithm switches. */
	static u8 initialized, force_write;
	static u32 reset_cnt;

	if (dm_digtable.dig_algorithm_switch) {
		initialized = 0;
		reset_cnt = 0;
	}

	if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) {
		if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) {
			if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
			else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh))
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
			else	/* between thresholds: keep previous state */
				dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
		} else {
			dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
		}
	} else {
		/* Connect state just changed: fall back to the low ratio. */
		dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
	}

	/* A chip reset wipes the register; force a rewrite. */
	if (priv->reset_count != reset_cnt) {
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
	    !initialized || force_write) {
		if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
			write_nic_byte(dev, 0xa0a, 0x08);
		else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
			write_nic_byte(dev, 0xa0a, 0xcd);
		dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
		initialized = 1;
		force_write = 0;
	}
}

/* Reset the EDCA-turbo bookkeeping to "turbo off". */
void dm_init_edca_turbo(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->bcurrent_turbo_EDCA = false;
	priv->rtllib->bis_any_nonbepkts = false;
	priv->bis_cur_rdlstate = false;
}

/*
 * Periodic EDCA-turbo check: when only best-effort traffic is flowing,
 * bias the BE EDCA parameters for the dominant direction (uplink vs
 * downlink) based on the per-period unicast byte counters.
 */
static void dm_check_edca_turbo(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
	/* Byte counters from the previous period, kept across calls. */
	static unsigned long lastTxOkCnt;
	static unsigned long lastRxOkCnt;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
		goto dm_CheckEdcaTurbo_EXIT;
	if (priv->rtllib->state != RTLLIB_LINKED)
		goto dm_CheckEdcaTurbo_EXIT;
	if
(priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO) goto dm_CheckEdcaTurbo_EXIT; { u8 *peername[11] = { "unknown", "realtek_90", "realtek_92se", "broadcom", "ralink", "atheros", "cisco", "marvell", "92u_softap", "self_softap" }; static int wb_tmp; if (wb_tmp == 0) { printk(KERN_INFO "%s():iot peer is %s, bssid:" " %pM\n", __func__, peername[pHTInfo->IOTPeer], priv->rtllib->current_network.bssid); wb_tmp = 1; } } if (!priv->rtllib->bis_any_nonbepkts) { curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt; curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt; if (pHTInfo->IOTAction & HT_IOT_ACT_EDCA_BIAS_ON_RX) { if (curTxOkCnt > 4*curRxOkCnt) { if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]); priv->bis_cur_rdlstate = false; } } else { if (!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { if (priv->rtllib->mode == WIRELESS_MODE_G) write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL_GMode[pHTInfo->IOTPeer]); else write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]); priv->bis_cur_rdlstate = true; } } priv->bcurrent_turbo_EDCA = true; } else { if (curRxOkCnt > 4*curTxOkCnt) { if (!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { if (priv->rtllib->mode == WIRELESS_MODE_G) write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL_GMode[pHTInfo->IOTPeer]); else write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]); priv->bis_cur_rdlstate = true; } } else { if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]); priv->bis_cur_rdlstate = false; } } priv->bcurrent_turbo_EDCA = true; } } else { if (priv->bcurrent_turbo_EDCA) { u8 tmp = AC0_BE; priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&tmp)); priv->bcurrent_turbo_EDCA = false; } } dm_CheckEdcaTurbo_EXIT: priv->rtllib->bis_any_nonbepkts = false; lastTxOkCnt = priv->stats.txbytesunicast; lastRxOkCnt = 
/* --- tail of dm_check_edca_turbo(): remember this period's byte counts --- */
		priv->stats.rxbytesunicast;
}

/* Enable CTS-to-self protection with the default activation threshold. */
static void dm_init_ctstoself(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);

	priv->rtllib->bCTSToSelfEnable = true;
	priv->rtllib->CTSToSelfTH = CTSToSelfTHVal;
}

/*
 * Broadcom interoperability: force CTS-to-self while uplink traffic
 * dominates (TX-heavy periods), and drop the forcing once downlink
 * dominates, using the per-period unicast byte counters.
 */
static void dm_ctstoself(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);
	struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
	/* Byte counters from the previous period, kept across calls. */
	static unsigned long lastTxOkCnt;
	static unsigned long lastRxOkCnt;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	if (priv->rtllib->bCTSToSelfEnable != true) {
		pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
		return;
	}
	/* Only applied against Broadcom peers. */
	if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
		curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
		curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
		if (curRxOkCnt > 4*curTxOkCnt)
			pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
		else
			pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;

		lastTxOkCnt = priv->stats.txbytesunicast;
		lastRxOkCnt = priv->stats.rxbytesunicast;
	}
}

/* Reset the Broadcom-IOT workaround state (disabled, default threshold). */
static void dm_Init_WA_Broadcom_IOT(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);
	struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;

	pHTInfo->bWAIotBroadcom = false;
	pHTInfo->WAIotTH = WAIotTHVal;
}

/* Intentionally empty: PBC (push-button) GPIO polling is not implemented. */
static void dm_check_pbc_gpio(struct net_device *dev)
{
}

/*
 * Delayed-work callback: poll the RF hardware-switch GPIO, flip the
 * software radio state to match, and notify userspace by running
 * /etc/acpi/events/RadioPower.sh with RFON/RFOFF.
 */
void dm_CheckRfCtrlGPIO(void *data)
{
	struct r8192_priv *priv = container_of_dwork_rsl(data,
				  struct r8192_priv, gpio_change_rf_wq);
	struct net_device *dev = priv->rtllib->dev;
	u8 tmp1byte;
	enum rt_rf_power_state eRfPowerStateToSet;
	bool bActuallySet = false;
	char *argv[3];
	static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
	static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
			       NULL};

	bActuallySet = false;
	/* Skip polling while the adapter is still initializing. */
	if ((priv->up_first_time == 1) || (priv->being_init_adapter))
		return;

	if (priv->bfirst_after_down) {
		priv->bfirst_after_down = 1;
		return;
	}

	tmp1byte = read_nic_byte(dev, GPI);

	/* GPI BIT1 reflects the hardware radio switch position. */
	eRfPowerStateToSet = (tmp1byte&BIT1) ?
eRfOn : eRfOff; if ((priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn)) { RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n"); printk(KERN_INFO "gpiochangeRF - HW Radio ON\n"); priv->bHwRadioOff = false; bActuallySet = true; } else if ((priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff)) { RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n"); printk(KERN_INFO "gpiochangeRF - HW Radio OFF\n"); priv->bHwRadioOff = true; bActuallySet = true; } if (bActuallySet) { mdelay(1000); priv->bHwRfOffAction = 1; MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW, true); if (priv->bHwRadioOff == true) argv[1] = "RFOFF"; else argv[1] = "RFON"; argv[0] = RadioPowerPath; argv[2] = NULL; call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC); } } void dm_rf_pathcheck_workitemcallback(void *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, rfpath_check_wq); struct net_device *dev = priv->rtllib->dev; u8 rfpath = 0, i; rfpath = read_nic_byte(dev, 0xc04); for (i = 0; i < RF90_PATH_MAX; i++) { if (rfpath & (0x01<<i)) priv->brfpath_rxenable[i] = 1; else priv->brfpath_rxenable[i] = 0; } if (!DM_RxPathSelTable.Enable) return; dm_rxpath_sel_byrssi(dev); } static void dm_init_rxpath_selection(struct net_device *dev) { u8 i; struct r8192_priv *priv = rtllib_priv(dev); DM_RxPathSelTable.Enable = 1; DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low; DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH; if (priv->CustomerID == RT_CID_819x_Netcore) DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; else DM_RxPathSelTable.cck_method = CCK_Rx_Version_1; DM_RxPathSelTable.DbgMode = DM_DBG_OFF; DM_RxPathSelTable.disabledRF = 0; for (i = 0; i < 4; i++) { DM_RxPathSelTable.rf_rssi[i] = 50; DM_RxPathSelTable.cck_pwdb_sta[i] = -64; DM_RxPathSelTable.rf_enable_rssi_th[i] = 100; } } #define PWDB_IN_RANGE ((cur_cck_pwdb < tmp_cck_max_pwdb) && \ (cur_cck_pwdb > tmp_cck_sec_pwdb)) static void dm_rxpath_sel_byrssi(struct net_device *dev) { 
struct r8192_priv *priv = rtllib_priv(dev); u8 i, max_rssi_index = 0, min_rssi_index = 0; u8 sec_rssi_index = 0, rf_num = 0; u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0; u8 cck_default_Rx = 0x2; u8 cck_optional_Rx = 0x3; long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0; u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0; u8 cck_rx_ver2_sec_index = 0; u8 cur_rf_rssi; long cur_cck_pwdb; static u8 disabled_rf_cnt, cck_Rx_Path_initialized; u8 update_cck_rx_path; if (priv->rf_type != RF_2T4R) return; if (!cck_Rx_Path_initialized) { DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf); cck_Rx_Path_initialized = 1; } DM_RxPathSelTable.disabledRF = 0xf; DM_RxPathSelTable.disabledRF &= ~(read_nic_byte(dev, 0xc04)); if (priv->rtllib->mode == WIRELESS_MODE_B) DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; for (i = 0; i < RF90_PATH_MAX; i++) { if (!DM_RxPathSelTable.DbgMode) DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i]; if (priv->brfpath_rxenable[i]) { rf_num++; cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i]; if (rf_num == 1) { max_rssi_index = min_rssi_index = sec_rssi_index = i; tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi; } else if (rf_num == 2) { if (cur_rf_rssi >= tmp_max_rssi) { tmp_max_rssi = cur_rf_rssi; max_rssi_index = i; } else { tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi; sec_rssi_index = min_rssi_index = i; } } else { if (cur_rf_rssi > tmp_max_rssi) { tmp_sec_rssi = tmp_max_rssi; sec_rssi_index = max_rssi_index; tmp_max_rssi = cur_rf_rssi; max_rssi_index = i; } else if (cur_rf_rssi == tmp_max_rssi) { tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } else if ((cur_rf_rssi < tmp_max_rssi) && (cur_rf_rssi > tmp_sec_rssi)) { tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } else if (cur_rf_rssi == tmp_sec_rssi) { if (tmp_sec_rssi == tmp_min_rssi) { tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } } else if ((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi)) { ; } else if 
(cur_rf_rssi == tmp_min_rssi) { if (tmp_sec_rssi == tmp_min_rssi) { tmp_min_rssi = cur_rf_rssi; min_rssi_index = i; } } else if (cur_rf_rssi < tmp_min_rssi) { tmp_min_rssi = cur_rf_rssi; min_rssi_index = i; } } } } rf_num = 0; if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) { for (i = 0; i < RF90_PATH_MAX; i++) { if (priv->brfpath_rxenable[i]) { rf_num++; cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i]; if (rf_num == 1) { cck_rx_ver2_max_index = i; cck_rx_ver2_min_index = i; cck_rx_ver2_sec_index = i; tmp_cck_max_pwdb = cur_cck_pwdb; tmp_cck_min_pwdb = cur_cck_pwdb; tmp_cck_sec_pwdb = cur_cck_pwdb; } else if (rf_num == 2) { if (cur_cck_pwdb >= tmp_cck_max_pwdb) { tmp_cck_max_pwdb = cur_cck_pwdb; cck_rx_ver2_max_index = i; } else { tmp_cck_sec_pwdb = cur_cck_pwdb; tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; cck_rx_ver2_min_index = i; } } else { if (cur_cck_pwdb > tmp_cck_max_pwdb) { tmp_cck_sec_pwdb = tmp_cck_max_pwdb; cck_rx_ver2_sec_index = cck_rx_ver2_max_index; tmp_cck_max_pwdb = cur_cck_pwdb; cck_rx_ver2_max_index = i; } else if (cur_cck_pwdb == tmp_cck_max_pwdb) { tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } else if (PWDB_IN_RANGE) { tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } else if (cur_cck_pwdb == tmp_cck_sec_pwdb) { if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb) { tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } } else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb)) { ; } else if (cur_cck_pwdb == tmp_cck_min_pwdb) { if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb) { tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_min_index = i; } } else if (cur_cck_pwdb < tmp_cck_min_pwdb) { tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_min_index = i; } } } } } update_cck_rx_path = 0; if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) { cck_default_Rx = cck_rx_ver2_max_index; cck_optional_Rx = cck_rx_ver2_sec_index; if (tmp_cck_max_pwdb != -64) update_cck_rx_path = 1; } if (tmp_min_rssi 
< DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) { if ((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH) { DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5; rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); disabled_rf_cnt++; } if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) { cck_default_Rx = max_rssi_index; cck_optional_Rx = sec_rssi_index; if (tmp_max_rssi) update_cck_rx_path = 1; } } if (update_cck_rx_path) { DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2) | (cck_optional_Rx); rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path); } if (DM_RxPathSelTable.disabledRF) { for (i = 0; i < 4; i++) { if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) { if (tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i]) { rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1 << i, 0x1); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1 << i, 0x1); DM_RxPathSelTable.rf_enable_rssi_th[i] = 100; disabled_rf_cnt--; } } } } } static void dm_check_rx_path_selection(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); queue_delayed_work_rsl(priv->priv_wq, &priv->rfpath_check_wq, 0); } static void dm_init_fsync(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->rtllib->fsync_time_interval = 500; priv->rtllib->fsync_rate_bitmap = 0x0f000800; priv->rtllib->fsync_rssi_threshold = 30; priv->rtllib->bfsync_enable = false; priv->rtllib->fsync_multiple_timeinterval = 3; priv->rtllib->fsync_firstdiff_ratethreshold = 100; priv->rtllib->fsync_seconddiff_ratethreshold = 200; priv->rtllib->fsync_state = Default_Fsync; priv->framesyncMonitor = 1; init_timer(&priv->fsync_timer); setup_timer(&priv->fsync_timer, dm_fsync_timer_callback, (unsigned long) dev); } static void dm_deInit_fsync(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); del_timer_sync(&priv->fsync_timer); } void 
dm_fsync_timer_callback(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct r8192_priv *priv = rtllib_priv((struct net_device *)data); u32 rate_index, rate_count = 0, rate_count_diff = 0; bool bSwitchFromCountDiff = false; bool bDoubleTimeInterval = false; if (priv->rtllib->state == RTLLIB_LINKED && priv->rtllib->bfsync_enable && (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) { u32 rate_bitmap; for (rate_index = 0; rate_index <= 27; rate_index++) { rate_bitmap = 1 << rate_index; if (priv->rtllib->fsync_rate_bitmap & rate_bitmap) rate_count += priv->stats.received_rate_histogram[1] [rate_index]; } if (rate_count < priv->rate_record) rate_count_diff = 0xffffffff - rate_count + priv->rate_record; else rate_count_diff = rate_count - priv->rate_record; if (rate_count_diff < priv->rateCountDiffRecord) { u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff; if (DiffNum >= priv->rtllib->fsync_seconddiff_ratethreshold) priv->ContiuneDiffCount++; else priv->ContiuneDiffCount = 0; if (priv->ContiuneDiffCount >= 2) { bSwitchFromCountDiff = true; priv->ContiuneDiffCount = 0; } } else { priv->ContiuneDiffCount = 0; } if (rate_count_diff <= priv->rtllib->fsync_firstdiff_ratethreshold) { bSwitchFromCountDiff = true; priv->ContiuneDiffCount = 0; } priv->rate_record = rate_count; priv->rateCountDiffRecord = rate_count_diff; RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rate" "Countdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync); if (priv->undecorated_smoothed_pwdb > priv->rtllib->fsync_rssi_threshold && bSwitchFromCountDiff) { bDoubleTimeInterval = true; priv->bswitch_fsync = !priv->bswitch_fsync; if (priv->bswitch_fsync) { write_nic_byte(dev, 0xC36, 0x1c); write_nic_byte(dev, 0xC3e, 0x90); } else { write_nic_byte(dev, 0xC36, 0x5c); write_nic_byte(dev, 0xC3e, 0x96); } } else if (priv->undecorated_smoothed_pwdb <= priv->rtllib->fsync_rssi_threshold) { if (priv->bswitch_fsync) { 
priv->bswitch_fsync = false; write_nic_byte(dev, 0xC36, 0x5c); write_nic_byte(dev, 0xC3e, 0x96); } } if (bDoubleTimeInterval) { if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->rtllib->fsync_time_interval * priv->rtllib->fsync_multiple_timeinterval); add_timer(&priv->fsync_timer); } else { if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->rtllib->fsync_time_interval); add_timer(&priv->fsync_timer); } } else { if (priv->bswitch_fsync) { priv->bswitch_fsync = false; write_nic_byte(dev, 0xC36, 0x5c); write_nic_byte(dev, 0xC3e, 0x96); } priv->ContiuneDiffCount = 0; write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); } RT_TRACE(COMP_HALDM, "ContiuneDiffCount %d\n", priv->ContiuneDiffCount); RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d " "bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync); } static void dm_StartHWFsync(struct net_device *dev) { u8 rf_timing = 0x77; struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __func__); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf); priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)(&rf_timing)); write_nic_byte(dev, 0xc3b, 0x41); } static void dm_EndHWFsync(struct net_device *dev) { u8 rf_timing = 0xaa; struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __func__); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *) (&rf_timing)); write_nic_byte(dev, 0xc3b, 0x49); } static void dm_EndSWFsync(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __func__); del_timer_sync(&(priv->fsync_timer)); if (priv->bswitch_fsync) { priv->bswitch_fsync = false; write_nic_byte(dev, 0xC36, 0x5c); write_nic_byte(dev, 0xC3e, 0x96); } priv->ContiuneDiffCount = 0; 
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); } static void dm_StartSWFsync(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u32 rateIndex; u32 rateBitmap; RT_TRACE(COMP_HALDM, "%s\n", __func__); priv->rate_record = 0; priv->ContiuneDiffCount = 0; priv->rateCountDiffRecord = 0; priv->bswitch_fsync = false; if (priv->rtllib->mode == WIRELESS_MODE_N_24G) { priv->rtllib->fsync_firstdiff_ratethreshold = 600; priv->rtllib->fsync_seconddiff_ratethreshold = 0xffff; } else { priv->rtllib->fsync_firstdiff_ratethreshold = 200; priv->rtllib->fsync_seconddiff_ratethreshold = 200; } for (rateIndex = 0; rateIndex <= 27; rateIndex++) { rateBitmap = 1 << rateIndex; if (priv->rtllib->fsync_rate_bitmap & rateBitmap) priv->rate_record += priv->stats.received_rate_histogram[1] [rateIndex]; } if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->rtllib->fsync_time_interval); add_timer(&priv->fsync_timer); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd); } void dm_check_fsync(struct net_device *dev) { #define RegC38_Default 0 #define RegC38_NonFsync_Other_AP 1 #define RegC38_Fsync_AP_BCM 2 struct r8192_priv *priv = rtllib_priv(dev); static u8 reg_c38_State = RegC38_Default; static u32 reset_cnt; RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval " "%d\n", priv->rtllib->fsync_rssi_threshold, priv->rtllib->fsync_time_interval, priv->rtllib->fsync_multiple_timeinterval); RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d Second" "DiffRateThreshold %d\n", priv->rtllib->fsync_rate_bitmap, priv->rtllib->fsync_firstdiff_ratethreshold, priv->rtllib->fsync_seconddiff_ratethreshold); if (priv->rtllib->state == RTLLIB_LINKED && priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) { if (priv->rtllib->bfsync_enable == 0) { switch (priv->rtllib->fsync_state) { case Default_Fsync: dm_StartHWFsync(dev); priv->rtllib->fsync_state = HW_Fsync; break; case SW_Fsync: 
dm_EndSWFsync(dev); dm_StartHWFsync(dev); priv->rtllib->fsync_state = HW_Fsync; break; case HW_Fsync: default: break; } } else { switch (priv->rtllib->fsync_state) { case Default_Fsync: dm_StartSWFsync(dev); priv->rtllib->fsync_state = SW_Fsync; break; case HW_Fsync: dm_EndHWFsync(dev); dm_StartSWFsync(dev); priv->rtllib->fsync_state = SW_Fsync; break; case SW_Fsync: default: break; } } if (priv->framesyncMonitor) { if (reg_c38_State != RegC38_Fsync_AP_BCM) { write_nic_byte(dev, rOFDM0_RxDetector3, 0x95); reg_c38_State = RegC38_Fsync_AP_BCM; } } } else { switch (priv->rtllib->fsync_state) { case HW_Fsync: dm_EndHWFsync(dev); priv->rtllib->fsync_state = Default_Fsync; break; case SW_Fsync: dm_EndSWFsync(dev); priv->rtllib->fsync_state = Default_Fsync; break; case Default_Fsync: default: break; } if (priv->framesyncMonitor) { if (priv->rtllib->state == RTLLIB_LINKED) { if (priv->undecorated_smoothed_pwdb <= RegC38_TH) { if (reg_c38_State != RegC38_NonFsync_Other_AP) { write_nic_byte(dev, rOFDM0_RxDetector3, 0x90); reg_c38_State = RegC38_NonFsync_Other_AP; } } else if (priv->undecorated_smoothed_pwdb >= (RegC38_TH+5)) { if (reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; } } } else { if (reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; } } } } if (priv->framesyncMonitor) { if (priv->reset_count != reset_cnt) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; reset_cnt = priv->reset_count; } } else { if (reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; } } } void dm_shadow_init(struct net_device *dev) { u8 page; u16 offset; for (page = 0; page < 5; page++) for (offset = 0; offset < 256; offset++) dm_shadow[page][offset] = read_nic_byte(dev, offset+page * 256); for (page = 8; page < 11; page++) for (offset = 0; offset < 256; offset++) dm_shadow[page][offset] 
= read_nic_byte(dev, offset+page * 256); for (page = 12; page < 15; page++) for (offset = 0; offset < 256; offset++) dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256); } /*---------------------------Define function prototype------------------------*/ static void dm_init_dynamic_txpower(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->rtllib->bdynamic_txpower_enable = true; priv->bLastDTPFlag_High = false; priv->bLastDTPFlag_Low = false; priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; } static void dm_dynamic_txpower(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); unsigned int txhipower_threshhold = 0; unsigned int txlowpower_threshold = 0; if (priv->rtllib->bdynamic_txpower_enable != true) { priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; return; } if ((priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) && (priv->rtllib->mode == IEEE_G)) { txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH; txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW; } else { txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH; txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW; } RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n", priv->undecorated_smoothed_pwdb); if (priv->rtllib->state == RTLLIB_LINKED) { if (priv->undecorated_smoothed_pwdb >= txhipower_threshhold) { priv->bDynamicTxHighPower = true; priv->bDynamicTxLowPower = false; } else { if (priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true) priv->bDynamicTxHighPower = false; if (priv->undecorated_smoothed_pwdb < 35) priv->bDynamicTxLowPower = true; else if (priv->undecorated_smoothed_pwdb >= 40) priv->bDynamicTxLowPower = false; } } else { priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; } if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) || (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) { RT_TRACE(COMP_TXAGC, 
"SetTxPowerLevel8190() channel = %d\n", priv->rtllib->current_network.channel); rtl8192_phy_setTxPower(dev, priv->rtllib->current_network.channel); } priv->bLastDTPFlag_High = priv->bDynamicTxHighPower; priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower; } static void dm_check_txrateandretrycount(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg); ieee->softmac_stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg); ieee->softmac_stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg); } static void dm_send_rssi_tofw(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb); }
gpl-2.0
bju2000/android_kernel_samsung_slteskt
drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
7637
2238
/* terratec-cinergy-xs.h - Keytable for terratec_cinergy_xs Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Scancode table for the Terratec Cinergy Hybrid T USB XS remote.
 * Devin Heitmueller <dheitmueller@linuxtv.org>
 */
static struct rc_map_table terratec_cinergy_xs[] = {
	/* Navigation / setup row */
	{ 0x41, KEY_HOME},
	{ 0x01, KEY_POWER},
	{ 0x42, KEY_MENU},

	/* Numeric keypad, interleaved with function keys */
	{ 0x02, KEY_1},
	{ 0x03, KEY_2},
	{ 0x04, KEY_3},
	{ 0x43, KEY_SUBTITLE},
	{ 0x05, KEY_4},
	{ 0x06, KEY_5},
	{ 0x07, KEY_6},
	{ 0x44, KEY_TEXT},
	{ 0x08, KEY_7},
	{ 0x09, KEY_8},
	{ 0x0a, KEY_9},
	{ 0x45, KEY_DELETE},
	{ 0x0b, KEY_TUNER},
	{ 0x0c, KEY_0},
	{ 0x0d, KEY_MODE},

	/* Source selection */
	{ 0x46, KEY_TV},
	{ 0x47, KEY_DVD},
	{ 0x49, KEY_VIDEO},
	{ 0x4b, KEY_AUX},

	/* Cursor pad */
	{ 0x10, KEY_UP},
	{ 0x11, KEY_LEFT},
	{ 0x12, KEY_OK},
	{ 0x13, KEY_RIGHT},
	{ 0x14, KEY_DOWN},

	{ 0x0f, KEY_EPG},
	{ 0x16, KEY_INFO},
	{ 0x4d, KEY_BACKSPACE},

	/* Volume / channel */
	{ 0x1c, KEY_VOLUMEUP},
	{ 0x4c, KEY_PLAY},
	{ 0x1b, KEY_CHANNELUP},
	{ 0x1e, KEY_VOLUMEDOWN},
	{ 0x1d, KEY_MUTE},
	{ 0x1f, KEY_CHANNELDOWN},

	/* Colored keys */
	{ 0x17, KEY_RED},
	{ 0x18, KEY_GREEN},
	{ 0x19, KEY_YELLOW},
	{ 0x1a, KEY_BLUE},

	/* Transport controls */
	{ 0x58, KEY_RECORD},
	{ 0x48, KEY_STOP},
	{ 0x40, KEY_PAUSE},
	{ 0x54, KEY_LAST},
	{ 0x4e, KEY_REWIND},
	{ 0x4f, KEY_FASTFORWARD},
	{ 0x5c, KEY_NEXT},
};

static struct rc_map_list terratec_cinergy_xs_map = {
	.map = {
		.scan    = terratec_cinergy_xs,
		.size    = ARRAY_SIZE(terratec_cinergy_xs),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_TERRATEC_CINERGY_XS,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_terratec_cinergy_xs(void)
{
	return rc_map_register(&terratec_cinergy_xs_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_terratec_cinergy_xs(void)
{
	rc_map_unregister(&terratec_cinergy_xs_map);
}

module_init(init_rc_map_terratec_cinergy_xs)
module_exit(exit_rc_map_terratec_cinergy_xs)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
LeJay/android_kernel_samsung_jactiveltexx
drivers/media/rc/keymaps/rc-pctv-sedna.c
7637
2136
/* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Mapping for the 28 key remote control as seen at
 * http://www.sednacomputer.com/photo/cardbus-tv.jpg
 * Pavel Mihaylov <bin@bash.info>
 * Also for the remote bundled with Kozumi KTV-01C card
 */
static struct rc_map_table pctv_sedna[] = {
	/* Numeric keypad */
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x0a, KEY_AGAIN },		/* Recall */
	{ 0x0b, KEY_CHANNELUP },
	{ 0x0c, KEY_VOLUMEUP },
	{ 0x0d, KEY_MODE },		/* Stereo */
	{ 0x0e, KEY_STOP },
	{ 0x0f, KEY_PREVIOUSSONG },
	{ 0x10, KEY_ZOOM },
	{ 0x11, KEY_VIDEO },		/* Source */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_MUTE },
	{ 0x15, KEY_CHANNELDOWN },
	{ 0x18, KEY_VOLUMEDOWN },
	{ 0x19, KEY_CAMERA },		/* Snapshot */
	{ 0x1a, KEY_NEXTSONG },
	{ 0x1b, KEY_TIME },		/* Time Shift */
	{ 0x1c, KEY_RADIO },		/* FM Radio */
	{ 0x1d, KEY_RECORD },
	{ 0x1e, KEY_PAUSE },

	/* additional codes for Kozumi's remote */
	{ 0x14, KEY_INFO },		/* OSD */
	{ 0x16, KEY_OK },		/* OK */
	{ 0x17, KEY_DIGITS },		/* Plus */
	{ 0x1f, KEY_PLAY },		/* Play */
};

static struct rc_map_list pctv_sedna_map = {
	.map = {
		.scan    = pctv_sedna,
		.size    = ARRAY_SIZE(pctv_sedna),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PCTV_SEDNA,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_pctv_sedna(void)
{
	return rc_map_register(&pctv_sedna_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_pctv_sedna(void)
{
	rc_map_unregister(&pctv_sedna_map);
}

module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
lenovo-a3-dev/kernel_lenovo_a3
drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
7637
2058
/* encore-enltv-fm53.h - Keytable for encore_enltv_fm53 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Encore ENLTV-FM v5.3
 * Mauro Carvalho Chehab <mchehab@infradead.org>
 */
static struct rc_map_table encore_enltv_fm53[] = {
	{ 0x10, KEY_POWER2},
	{ 0x06, KEY_MUTE},

	/* Numeric keypad */
	{ 0x09, KEY_1},
	{ 0x1d, KEY_2},
	{ 0x1f, KEY_3},
	{ 0x19, KEY_4},
	{ 0x1b, KEY_5},
	{ 0x11, KEY_6},
	{ 0x17, KEY_7},
	{ 0x12, KEY_8},
	{ 0x16, KEY_9},
	{ 0x48, KEY_0},

	{ 0x04, KEY_LIST},		/* -/-- */
	{ 0x40, KEY_LAST},		/* recall */

	{ 0x02, KEY_MODE},		/* TV/AV */
	{ 0x05, KEY_CAMERA},		/* SNAPSHOT */

	/* Cursor pad */
	{ 0x4c, KEY_CHANNELUP},		/* UP */
	{ 0x00, KEY_CHANNELDOWN},	/* DOWN */
	{ 0x0d, KEY_VOLUMEUP},		/* RIGHT */
	{ 0x15, KEY_VOLUMEDOWN},	/* LEFT */
	{ 0x49, KEY_ENTER},		/* OK */

	{ 0x54, KEY_RECORD},
	{ 0x4d, KEY_PLAY},		/* pause */

	{ 0x1e, KEY_MENU},		/* video setting */
	{ 0x0e, KEY_RIGHT},		/* <- */
	{ 0x1a, KEY_LEFT},		/* -> */

	{ 0x0a, KEY_CLEAR},		/* video default */
	{ 0x0c, KEY_ZOOM},		/* hide panel */
	{ 0x47, KEY_SLEEP},		/* shutdown */
};

static struct rc_map_list encore_enltv_fm53_map = {
	.map = {
		.scan    = encore_enltv_fm53,
		.size    = ARRAY_SIZE(encore_enltv_fm53),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_ENCORE_ENLTV_FM53,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_encore_enltv_fm53(void)
{
	return rc_map_register(&encore_enltv_fm53_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_encore_enltv_fm53(void)
{
	rc_map_unregister(&encore_enltv_fm53_map);
}

module_init(init_rc_map_encore_enltv_fm53)
module_exit(exit_rc_map_encore_enltv_fm53)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
imoseyon/leanKernel-note3
drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
7637
2078
/* dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* DigitalNow DNTV Live DVB-T Remote */
static struct rc_map_table dntv_live_dvb_t[] = {
	{ 0x00, KEY_ESC },		/* 'go up a level?' */

	/* Keys 0 to 9 */
	{ 0x0a, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x0b, KEY_TUNER },		/* tv/fm */
	{ 0x0c, KEY_SEARCH },		/* scan */
	{ 0x0d, KEY_STOP },
	{ 0x0e, KEY_PAUSE },
	{ 0x0f, KEY_VIDEO },		/* source */

	{ 0x10, KEY_MUTE },
	{ 0x11, KEY_REWIND },		/* backward << */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_CAMERA },		/* snap */
	{ 0x14, KEY_AUDIO },		/* stereo */
	{ 0x15, KEY_CLEAR },		/* reset */
	{ 0x16, KEY_PLAY },
	{ 0x17, KEY_ENTER },
	{ 0x18, KEY_ZOOM },		/* full screen */
	{ 0x19, KEY_FASTFORWARD },	/* forward >> */
	{ 0x1a, KEY_CHANNELUP },
	{ 0x1b, KEY_VOLUMEUP },
	{ 0x1c, KEY_INFO },		/* preview */
	{ 0x1d, KEY_RECORD },		/* record */
	{ 0x1e, KEY_CHANNELDOWN },
	{ 0x1f, KEY_VOLUMEDOWN },
};

static struct rc_map_list dntv_live_dvb_t_map = {
	.map = {
		.scan    = dntv_live_dvb_t,
		.size    = ARRAY_SIZE(dntv_live_dvb_t),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_DNTV_LIVE_DVB_T,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_dntv_live_dvb_t(void)
{
	return rc_map_register(&dntv_live_dvb_t_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_dntv_live_dvb_t(void)
{
	rc_map_unregister(&dntv_live_dvb_t_map);
}

module_init(init_rc_map_dntv_live_dvb_t)
module_exit(exit_rc_map_dntv_live_dvb_t)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
javilonas/Hammerhead
drivers/media/rc/keymaps/rc-dm1105-nec.c
7637
2020
/* dm1105-nec.h - Keytable for dm1105_nec Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * DVBWorld remotes
 * Igor M. Liplianin <liplianin@me.by>
 */
static struct rc_map_table dm1105_nec[] = {
	{ 0x0a, KEY_POWER2},		/* power */
	{ 0x0c, KEY_MUTE},		/* mute */

	/* Numeric keypad */
	{ 0x11, KEY_1},
	{ 0x12, KEY_2},
	{ 0x13, KEY_3},
	{ 0x14, KEY_4},
	{ 0x15, KEY_5},
	{ 0x16, KEY_6},
	{ 0x17, KEY_7},
	{ 0x18, KEY_8},
	{ 0x19, KEY_9},
	{ 0x10, KEY_0},

	/* Channel / volume rockers */
	{ 0x1c, KEY_CHANNELUP},		/* ch+ */
	{ 0x0f, KEY_CHANNELDOWN},	/* ch- */
	{ 0x1a, KEY_VOLUMEUP},		/* vol+ */
	{ 0x0e, KEY_VOLUMEDOWN},	/* vol- */

	{ 0x04, KEY_RECORD},		/* rec */
	{ 0x09, KEY_CHANNEL},		/* fav */
	{ 0x08, KEY_BACKSPACE},		/* rewind */
	{ 0x07, KEY_FASTFORWARD},	/* fast */
	{ 0x0b, KEY_PAUSE},		/* pause */
	{ 0x02, KEY_ESC},		/* cancel */
	{ 0x03, KEY_TAB},		/* tab */
	{ 0x00, KEY_UP},		/* up */
	{ 0x1f, KEY_ENTER},		/* ok */
	{ 0x01, KEY_DOWN},		/* down */
	{ 0x05, KEY_RECORD},		/* cap */
	{ 0x06, KEY_STOP},		/* stop */
	{ 0x40, KEY_ZOOM},		/* full */
	{ 0x1e, KEY_TV},		/* tvmode */
	{ 0x1b, KEY_B},			/* recall */
};

static struct rc_map_list dm1105_nec_map = {
	.map = {
		.scan    = dm1105_nec,
		.size    = ARRAY_SIZE(dm1105_nec),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_DM1105_NEC,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_dm1105_nec(void)
{
	return rc_map_register(&dm1105_nec_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_dm1105_nec(void)
{
	rc_map_unregister(&dm1105_nec_map);
}

module_init(init_rc_map_dm1105_nec)
module_exit(exit_rc_map_dm1105_nec)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Bogdacutu/STLinux-Kernel
drivers/media/rc/keymaps/rc-avermedia.c
7637
2240
/* avermedia.h - Keytable for avermedia Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Alex Hermann <gaaf@gmx.net> */
static struct rc_map_table avermedia[] = {
	/* Numeric keypad */
	{ 0x28, KEY_1 },
	{ 0x18, KEY_2 },
	{ 0x38, KEY_3 },
	{ 0x24, KEY_4 },
	{ 0x14, KEY_5 },
	{ 0x34, KEY_6 },
	{ 0x2c, KEY_7 },
	{ 0x1c, KEY_8 },
	{ 0x3c, KEY_9 },
	{ 0x22, KEY_0 },

	/* Source / mode keys */
	{ 0x20, KEY_TV },		/* TV/FM */
	{ 0x10, KEY_CD },		/* CD */
	{ 0x30, KEY_TEXT },		/* TELETEXT */
	{ 0x00, KEY_POWER },		/* POWER */

	{ 0x08, KEY_VIDEO },		/* VIDEO */
	{ 0x04, KEY_AUDIO },		/* AUDIO */
	{ 0x0c, KEY_ZOOM },		/* FULL SCREEN */

	{ 0x12, KEY_SUBTITLE },		/* DISPLAY */
	{ 0x32, KEY_REWIND },		/* LOOP */
	{ 0x02, KEY_PRINT },		/* PREVIEW */

	{ 0x2a, KEY_SEARCH },		/* AUTOSCAN */
	{ 0x1a, KEY_SLEEP },		/* FREEZE */
	{ 0x3a, KEY_CAMERA },		/* SNAPSHOT */
	{ 0x0a, KEY_MUTE },		/* MUTE */

	/* Transport controls */
	{ 0x26, KEY_RECORD },		/* RECORD */
	{ 0x16, KEY_PAUSE },		/* PAUSE */
	{ 0x36, KEY_STOP },		/* STOP */
	{ 0x06, KEY_PLAY },		/* PLAY */

	/* Colored keys */
	{ 0x2e, KEY_RED },		/* RED */
	{ 0x21, KEY_GREEN },		/* GREEN */
	{ 0x0e, KEY_YELLOW },		/* YELLOW */
	{ 0x01, KEY_BLUE },		/* BLUE */

	/* Volume / channel rockers */
	{ 0x1e, KEY_VOLUMEDOWN },	/* VOLUME- */
	{ 0x3e, KEY_VOLUMEUP },		/* VOLUME+ */
	{ 0x11, KEY_CHANNELDOWN },	/* CHANNEL/PAGE- */
	{ 0x31, KEY_CHANNELUP }		/* CHANNEL/PAGE+ */
};

static struct rc_map_list avermedia_map = {
	.map = {
		.scan    = avermedia,
		.size    = ARRAY_SIZE(avermedia),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_AVERMEDIA,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_avermedia(void)
{
	return rc_map_register(&avermedia_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_avermedia(void)
{
	rc_map_unregister(&avermedia_map);
}

module_init(init_rc_map_avermedia)
module_exit(exit_rc_map_avermedia)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
MoKee/android_kernel_zte_msm8994
drivers/media/rc/keymaps/rc-iodata-bctv7e.c
7637
2124
/* iodata-bctv7e.h - Keytable for iodata_bctv7e Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* IO-DATA BCTV7E Remote */
static struct rc_map_table iodata_bctv7e[] = {
	{ 0x40, KEY_TV },
	{ 0x20, KEY_RADIO },		/* FM */
	{ 0x60, KEY_EPG },
	{ 0x00, KEY_POWER },

	/* Keys 0 to 9 */
	{ 0x44, KEY_0 },		/* 10 */
	{ 0x50, KEY_1 },
	{ 0x30, KEY_2 },
	{ 0x70, KEY_3 },
	{ 0x48, KEY_4 },
	{ 0x28, KEY_5 },
	{ 0x68, KEY_6 },
	{ 0x58, KEY_7 },
	{ 0x38, KEY_8 },
	{ 0x78, KEY_9 },

	{ 0x10, KEY_L },		/* Live */
	{ 0x08, KEY_TIME },		/* Time Shift */

	{ 0x18, KEY_PLAYPAUSE },	/* Play */

	{ 0x24, KEY_ENTER },		/* 11 */
	{ 0x64, KEY_ESC },		/* 12 */

	{ 0x04, KEY_M },		/* Multi */

	{ 0x54, KEY_VIDEO },
	{ 0x34, KEY_CHANNELUP },
	{ 0x74, KEY_VOLUMEUP },
	{ 0x14, KEY_MUTE },

	{ 0x4c, KEY_VCR },		/* SVIDEO */
	{ 0x2c, KEY_CHANNELDOWN },
	{ 0x6c, KEY_VOLUMEDOWN },
	{ 0x0c, KEY_ZOOM },

	{ 0x5c, KEY_PAUSE },
	{ 0x3c, KEY_RED },		/* || (red) */
	{ 0x7c, KEY_RECORD },		/* recording */
	{ 0x1c, KEY_STOP },

	{ 0x41, KEY_REWIND },		/* backward << */
	{ 0x21, KEY_PLAY },
	{ 0x61, KEY_FASTFORWARD },	/* forward >> */
	{ 0x01, KEY_NEXT },		/* skip >| */
};

static struct rc_map_list iodata_bctv7e_map = {
	.map = {
		.scan    = iodata_bctv7e,
		.size    = ARRAY_SIZE(iodata_bctv7e),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_IODATA_BCTV7E,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_iodata_bctv7e(void)
{
	return rc_map_register(&iodata_bctv7e_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_iodata_bctv7e(void)
{
	rc_map_unregister(&iodata_bctv7e_map);
}

module_init(init_rc_map_iodata_bctv7e)
module_exit(exit_rc_map_iodata_bctv7e)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
GalaxyTab4/android_kernel_samsung_degaswifi
drivers/staging/usbip/userspace/src/utils.c
8405
2205
/*
 * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
 *               2005-2007 Takahiro Hirofuchi
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <sysfs/libsysfs.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>

#include "usbip_common.h"
#include "utils.h"

/*
 * modify_match_busid - add a bus ID to, or remove it from, the usbip-host
 * driver's match_busid list via its sysfs attribute.
 *
 * @busid: USB bus ID string (must fit in SYSFS_BUS_ID_SIZE - 1 chars)
 * @add:   non-zero to add the bus ID ("add <busid>"), zero to delete it
 *         ("del <busid>")
 *
 * Returns 0 on success, -1 on any error (bus ID too long, sysfs not
 * mounted, attribute not found, or write failure).
 */
int modify_match_busid(char *busid, int add)
{
	char bus_type[] = "usb";
	char attr_name[] = "match_busid";
	char buff[SYSFS_BUS_ID_SIZE + 4];
	char sysfs_mntpath[SYSFS_PATH_MAX];
	char match_busid_attr_path[SYSFS_PATH_MAX];
	struct sysfs_attribute *match_busid_attr;
	int rc, ret = 0;

	if (strnlen(busid, SYSFS_BUS_ID_SIZE) > SYSFS_BUS_ID_SIZE - 1) {
		dbg("busid is too long");
		return -1;
	}

	rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
	if (rc < 0) {
		err("sysfs must be mounted: %s", strerror(errno));
		return -1;
	}

	/* /sys/bus/usb/drivers/usbip-host/match_busid */
	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
		 "%s/%s/%s/%s/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, bus_type,
		 SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);

	match_busid_attr = sysfs_open_attribute(match_busid_attr_path);
	if (!match_busid_attr) {
		dbg("problem getting match_busid attribute: %s",
		    strerror(errno));
		return -1;
	}

	/*
	 * Zero the whole buffer before formatting it: the write below
	 * passes sizeof(buff) bytes to the kernel, so without this the
	 * bytes past the NUL terminator written by snprintf() would be
	 * uninitialized stack data.
	 */
	memset(buff, 0, sizeof(buff));
	if (add)
		snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
	else
		snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);

	dbg("write \"%s\" to %s", buff, match_busid_attr->path);

	rc = sysfs_write_attribute(match_busid_attr, buff, sizeof(buff));
	if (rc < 0) {
		dbg("failed to write match_busid: %s", strerror(errno));
		ret = -1;
	}

	sysfs_close_attribute(match_busid_attr);

	return ret;
}
gpl-2.0
davidmueller13/lt03lte_tw_kernel_5.1.1
drivers/pci/hotplug/cpqphp_ctrl.c
9173
78004
/* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/kthread.h> #include "cpqphp.h" static u32 configure_new_device(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static int configure_new_function(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static void interrupt_event_handler(struct controller *ctrl); static struct task_struct *cpqhp_event_thread; static unsigned long pushbutton_pending; /* = 0 */ /* delay is in jiffies to wait for */ static void long_delay(int delay) { /* * XXX(hch): if someone is bored please convert all callers * to call msleep_interruptible directly. They really want * to specify timeouts in natural units and spend a lot of * effort converting them to jiffies.. 
*/ msleep_interruptible(jiffies_to_msecs(delay)); } /* FIXME: The following line needs to be somewhere else... */ #define WRONG_BUS_FREQUENCY 0x07 static u8 handle_switch_change(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* Switch Change */ dbg("cpqsbd: Switch interrupt received.\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x1L << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); /* this is the structure that tells the worker thread * what to do */ taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { /* * Switch opened */ func->switch_save = 0; taskInfo->event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ func->switch_save = 0x10; taskInfo->event_type = INT_SWITCH_CLOSE; } } } return rc; } /** * cpqhp_find_slot - find the struct slot of given device * @ctrl: scan lots of this controller * @device: the device id to find */ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device) { struct slot *slot = ctrl->slot; while (slot && (slot->device != device)) slot = slot->next; return slot; } static u8 handle_presence_change(u16 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u8 temp_byte; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; struct slot *p_slot; if (!change) return 0; /* * Presence Change */ dbg("cpqsbd: Presence/Notify input change.\n"); dbg(" Changed bits are 0x%4.4x\n", change ); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x0101 << hp_slot)) { /* * this one changed. 
*/ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; p_slot = cpqhp_find_slot(ctrl, hp_slot + (readb(ctrl->hpc_reg + SLOT_MASK) >> 4)); if (!p_slot) return 0; /* If the switch closed, must be a button * If not in button mode, nevermind */ if (func->switch_save && (ctrl->push_button == 1)) { temp_word = ctrl->ctrl_int_comp >> 16; temp_byte = (temp_word >> hp_slot) & 0x01; temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; if (temp_byte != func->presence_save) { /* * button Pressed (doesn't do anything) */ dbg("hp_slot %d button pressed\n", hp_slot); taskInfo->event_type = INT_BUTTON_PRESS; } else { /* * button Released - TAKE ACTION!!!! */ dbg("hp_slot %d button released\n", hp_slot); taskInfo->event_type = INT_BUTTON_RELEASE; /* Cancel if we are still blinking */ if ((p_slot->state == BLINKINGON_STATE) || (p_slot->state == BLINKINGOFF_STATE)) { taskInfo->event_type = INT_BUTTON_CANCEL; dbg("hp_slot %d button cancel\n", hp_slot); } else if ((p_slot->state == POWERON_STATE) || (p_slot->state == POWEROFF_STATE)) { /* info(msg_button_ignore, p_slot->number); */ taskInfo->event_type = INT_BUTTON_IGNORE; dbg("hp_slot %d button ignore\n", hp_slot); } } } else { /* Switch is open, assume a presence change * Save the presence state */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) || (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) { /* Present */ taskInfo->event_type = INT_PRESENCE_ON; } else { /* Not Present */ taskInfo->event_type = INT_PRESENCE_OFF; } } } } return rc; } static u8 handle_power_fault(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* * power fault */ 
info("power fault interrupt\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x01 << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { /* * power fault Cleared */ func->status = 0x00; taskInfo->event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ taskInfo->event_type = INT_POWER_FAULT; if (ctrl->rev < 4) { amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); set_SOGO (ctrl); /* this is a fatal condition, we want * to crash the machine to protect from * data corruption. simulated_NMI * shouldn't ever return */ /* FIXME simulated_NMI(hp_slot, ctrl); */ /* The following code causes a software * crash just in case simulated_NMI did * return */ /*FIXME panic(msg_power_fault); */ } else { /* set power fault status for this board */ func->status = 0xFF; info("power fault bit %x set\n", hp_slot); } } } } return rc; } /** * sort_by_size - sort nodes on the list by their length, smallest first. 
* @head: list to sort */ static int sort_by_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length > (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length > current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * sort_by_max_size - sort nodes on the list by their length, largest first. * @head: list to sort */ static int sort_by_max_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length < (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length < current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * do_pre_bridge_resource_split - find node of resources that are unused * 
@head: new list head * @orig_head: original list head * @alignment: max node size (?) */ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head, struct pci_resource **orig_head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; struct pci_resource *split_node; u32 rc; u32 temp_dword; dbg("do_pre_bridge_resource_split\n"); if (!(*head) || !(*orig_head)) return NULL; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; if ((*head)->base != (*orig_head)->base) return NULL; if ((*head)->length == (*orig_head)->length) return NULL; /* If we got here, there the bridge requires some of the resource, but * we may be able to split some off of the front */ node = *head; if (node->length & (alignment -1)) { /* this one isn't an aligned length, so we'll make a new entry * and split it up. */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = (node->length | (alignment-1)) + 1 - alignment; split_node->base = node->base; split_node->length = temp_dword; node->length -= temp_dword; node->base += split_node->length; /* Put it in the list */ *head = split_node; split_node->next = node; } if (node->length < alignment) return NULL; /* Now unlink it */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; return node; } /** * do_bridge_resource_split - find one node of resources that aren't in use * @head: list head * @alignment: max node size (?) 
*/ static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; u32 rc; u32 temp_dword; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; node = *head; while (node->next) { prevnode = node; node = node->next; kfree(prevnode); } if (node->length < alignment) goto error; if (node->base & (alignment - 1)) { /* Short circuit if adjusted size is too small */ temp_dword = (node->base | (alignment-1)) + 1; if ((node->length - (temp_dword - node->base)) < alignment) goto error; node->length -= (temp_dword - node->base); node->base = temp_dword; } if (node->length & (alignment - 1)) /* There's stuff in use after this node */ goto error; return node; error: kfree(node); return NULL; } /** * get_io_resource - find first node of given size not in ISA aliasing window. * @head: list to search * @size: size of node to find, must be a power of two. * * Description: This function sorts the resource list by size and then returns * returns the first node of "size" length that is not in the ISA aliasing * window. If it finds a node larger than "size" it will split it up. 
*/ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (!(*head)) return NULL; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { if (node->length < size) continue; if (node->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end */ /* For IO make sure it's not in the ISA aliasing space */ if (node->base & 0x300L) continue; /* If we got here, then it is the right size * Now take it out of the list and break */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * get_max_resource - get largest node which has at least the given size. 
* @head: the list to search the node in * @size: the minimum size of the node to find * * Description: Gets the largest node that is at least "size" big from the * list pointed to by head. It aligns the node on top and bottom * to "size" alignment before returning it. */ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size) { struct pci_resource *max; struct pci_resource *temp; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_max_size(head)) return NULL; for (max = *head; max; max = max->next) { /* If not big enough we could probably just bail, * instead we'll continue to the next. */ if (max->length < size) continue; if (max->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (max->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((max->length - (temp_dword - max->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = max->base; split_node->length = temp_dword - max->base; max->base = temp_dword; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } if ((max->base + max->length) & (size - 1)) { /* this one isn't end aligned properly at the top * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = ((max->base + max->length) & ~(size - 1)); split_node->base = temp_dword; split_node->length = max->length + max->base - split_node->base; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } /* Make sure it didn't shrink too much when we aligned it */ if (max->length < size) continue; /* Now take it out of the list */ temp = *head; if (temp == max) { *head = max->next; } else { while (temp && temp->next != max) { temp = temp->next; } temp->next = 
max->next; } max->next = NULL; break; } return max; } /** * get_resource - find resource of given size and split up larger ones. * @head: the list to search for resources * @size: the size limit to use * * Description: This function sorts the resource list by size and then * returns the first node of "size" length. If it finds a node * larger than "size" it will split it up. * * size must be a power of two. */ static struct pci_resource *get_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { dbg("%s: req_size =%x node=%p, base=%x, length=%x\n", __func__, size, node, node->base, node->length); if (node->length < size) continue; if (node->base & (size - 1)) { dbg("%s: not aligned\n", __func__); /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { dbg("%s: too big\n", __func__); /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end 
*/ dbg("%s: got one!!!\n", __func__); /* If we got here, then it is the right size * Now take it out of the list */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up * @head: the list to sort and clean up * * Description: Sorts all of the nodes in the list in ascending order by * their base addresses. Also does garbage collection by * combining adjacent nodes. * * Returns %0 if success. */ int cpqhp_resource_sort_and_combine(struct pci_resource **head) { struct pci_resource *node1; struct pci_resource *node2; int out_of_order = 1; dbg("%s: head = %p, *head = %p\n", __func__, head, *head); if (!(*head)) return 1; dbg("*head->next = %p\n",(*head)->next); if (!(*head)->next) return 0; /* only one item on the list, already sorted! */ dbg("*head->base = 0x%x\n",(*head)->base); dbg("*head->next->base = 0x%x\n",(*head)->next->base); while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->base > (*head)->next->base)) { node1 = *head; (*head) = (*head)->next; node1->next = (*head)->next; (*head)->next = node1; out_of_order++; } node1 = (*head); while (node1->next && node1->next->next) { if (node1->next->base > node1->next->next->base) { out_of_order++; node2 = node1->next; node1->next = node1->next->next; node1 = node1->next; node2->next = node1->next; node1->next = node2; } else node1 = node1->next; } } /* End of out_of_order loop */ node1 = *head; while (node1 && node1->next) { if ((node1->base + node1->length) == node1->next->base) { /* Combine */ dbg("8..\n"); node1->length += node1->next->length; node2 = node1->next; node1->next = node1->next->next; kfree(node2); } else node1 = node1->next; } return 0; } irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data) { struct controller *ctrl = 
data; u8 schedule_flag = 0; u8 reset; u16 misc; u32 Diff; u32 temp_dword; misc = readw(ctrl->hpc_reg + MISC); /* * Check to see if it was our interrupt */ if (!(misc & 0x000C)) { return IRQ_NONE; } if (misc & 0x0004) { /* * Serial Output interrupt Pending */ /* Clear the interrupt */ misc |= 0x0004; writew(misc, ctrl->hpc_reg + MISC); /* Read to clear posted writes */ misc = readw(ctrl->hpc_reg + MISC); dbg ("%s - waking up\n", __func__); wake_up_interruptible(&ctrl->queue); } if (misc & 0x0008) { /* General-interrupt-input interrupt Pending */ Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp; ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); /* Clear the interrupt */ writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR); /* Read it back to clear any posted writes */ temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (!Diff) /* Clear all interrupts */ writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR); schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl); schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl); schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl); } reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); if (reset & 0x40) { /* Bus reset has completed */ reset &= 0xCF; writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE); reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); wake_up_interruptible(&ctrl->queue); } if (schedule_flag) { wake_up_process(cpqhp_event_thread); dbg("Waking even thread"); } return IRQ_HANDLED; } /** * cpqhp_slot_create - Creates a node and adds it to the proper bus. * @busnumber: bus where new node is to be located * * Returns pointer to the new node or %NULL if unsuccessful. 
*/ struct pci_func *cpqhp_slot_create(u8 busnumber) { struct pci_func *new_slot; struct pci_func *next; new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); if (new_slot == NULL) return new_slot; new_slot->next = NULL; new_slot->configured = 1; if (cpqhp_slot_list[busnumber] == NULL) { cpqhp_slot_list[busnumber] = new_slot; } else { next = cpqhp_slot_list[busnumber]; while (next->next != NULL) next = next->next; next->next = new_slot; } return new_slot; } /** * slot_remove - Removes a node from the linked list of slots. * @old_slot: slot to remove * * Returns %0 if successful, !0 otherwise. */ static int slot_remove(struct pci_func * old_slot) { struct pci_func *next; if (old_slot == NULL) return 1; next = cpqhp_slot_list[old_slot->bus]; if (next == NULL) return 1; if (next == old_slot) { cpqhp_slot_list[old_slot->bus] = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } while ((next->next != old_slot) && (next->next != NULL)) next = next->next; if (next->next == old_slot) { next->next = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } else return 2; } /** * bridge_slot_remove - Removes a node from the linked list of slots. * @bridge: bridge to remove * * Returns %0 if successful, !0 otherwise. 
*/ static int bridge_slot_remove(struct pci_func *bridge) { u8 subordinateBus, secondaryBus; u8 tempBus; struct pci_func *next; secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF; subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF; for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { next = cpqhp_slot_list[tempBus]; while (!slot_remove(next)) next = cpqhp_slot_list[tempBus]; } next = cpqhp_slot_list[bridge->bus]; if (next == NULL) return 1; if (next == bridge) { cpqhp_slot_list[bridge->bus] = bridge->next; goto out; } while ((next->next != bridge) && (next->next != NULL)) next = next->next; if (next->next != bridge) return 2; next->next = bridge->next; out: kfree(bridge); return 0; } /** * cpqhp_slot_find - Looks for a node by bus, and device, multiple functions accessed * @bus: bus to find * @device: device to find * @index: is %0 for first function found, %1 for the second... * * Returns pointer to the node if successful, %NULL otherwise. */ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index) { int found = -1; struct pci_func *func; func = cpqhp_slot_list[bus]; if ((func == NULL) || ((func->device == device) && (index == 0))) return func; if (func->device == device) found++; while (func->next != NULL) { func = func->next; if (func->device == device) found++; if (found == index) return func; } return NULL; } /* DJZ: I don't think is_bridge will work as is. * FIXME */ static int is_bridge(struct pci_func * func) { /* Check the header type */ if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01) return 1; else return 0; } /** * set_controller_speed - set the frequency and/or mode of a specific controller segment. * @ctrl: controller to change frequency/mode for. * @adapter_speed: the speed of the adapter we want to match. * @hp_slot: the slot number where the adapter is installed. * * Returns %0 if we successfully change frequency and/or mode to match the * adapter speed. 
*/ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) { struct slot *slot; struct pci_bus *bus = ctrl->pci_bus; u8 reg; u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); u16 reg16; u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); if (bus->cur_bus_speed == adapter_speed) return 0; /* We don't allow freq/mode changes if we find another adapter running * in another slot on this controller */ for(slot = ctrl->slot; slot; slot = slot->next) { if (slot->device == (hp_slot + ctrl->slot_device_offset)) continue; if (!slot->hotplug_slot || !slot->hotplug_slot->info) continue; if (slot->hotplug_slot->info->adapter_status == 0) continue; /* If another adapter is running on the same segment but at a * lower speed/mode, we allow the new adapter to function at * this rate if supported */ if (bus->cur_bus_speed < adapter_speed) return 0; return 1; } /* If the controller doesn't support freq/mode changes and the * controller is running at a higher mode, we bail */ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) return 1; /* But we allow the adapter to run at a lower rate if possible */ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) return 0; /* We try to set the max speed supported by both the adapter and * controller */ if (bus->max_bus_speed < adapter_speed) { if (bus->cur_bus_speed == bus->max_bus_speed) return 0; adapter_speed = bus->max_bus_speed; } writel(0x0L, ctrl->hpc_reg + LED_CONTROL); writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); if (adapter_speed != PCI_SPEED_133MHz_PCIX) reg = 0xF5; else reg = 0xF4; pci_write_config_byte(ctrl->pci_dev, 0x41, reg); reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); reg16 &= ~0x000F; switch(adapter_speed) { case(PCI_SPEED_133MHz_PCIX): reg = 0x75; reg16 |= 0xB; break; case(PCI_SPEED_100MHz_PCIX): reg = 0x74; reg16 |= 0xA; break; case(PCI_SPEED_66MHz_PCIX): reg = 0x73; reg16 |= 0x9; break; case(PCI_SPEED_66MHz): 
reg = 0x73; reg16 |= 0x1; break; default: /* 33MHz PCI 2.2 */ reg = 0x71; break; } reg16 |= 0xB << 12; writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); mdelay(5); /* Reenable interrupts */ writel(0, ctrl->hpc_reg + INT_MASK); pci_write_config_byte(ctrl->pci_dev, 0x41, reg); /* Restart state machine */ reg = ~0xF; pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); pci_write_config_byte(ctrl->pci_dev, 0x43, reg); /* Only if mode change...*/ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); mdelay(1100); /* Restore LED/Slot state */ writel(leds, ctrl->hpc_reg + LED_CONTROL); writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); bus->cur_bus_speed = adapter_speed; slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); info("Successfully changed frequency/mode for adapter in slot %d\n", slot->number); return 0; } /* the following routines constitute the bulk of the * hotplug controller logic */ /** * board_replaced - Called after a board has been replaced in the system. * @func: PCI device/function information * @ctrl: hotplug controller * * This is only used if we don't have resources for hot add. * Turns power on for the board. * Checks to see if board is the same. * If board is same, reconfigures it. * If board isn't same, turns it back off. */ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) { struct pci_bus *bus = ctrl->pci_bus; u8 hp_slot; u8 temp_byte; u8 adapter_speed; u32 rc = 0; hp_slot = func->device - ctrl->slot_device_offset; /* * The switch is open. 
*/ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) rc = INTERLOCK_OPEN; /* * The board is already on */ else if (is_slot_enabled (ctrl, hp_slot)) rc = CARD_FUNCTIONING; else { mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); /* Change bits in slot power register to force another shift out * NOTE: this is to work around the timer bug */ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); writeb(0x00, ctrl->hpc_reg + SLOT_POWER); writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); adapter_speed = get_adapter_speed(ctrl, hp_slot); if (bus->cur_bus_speed != adapter_speed) if (set_controller_speed(ctrl, adapter_speed, hp_slot)) rc = WRONG_BUS_FREQUENCY; /* turn off board without attaching to the bus */ disable_slot_power (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; mutex_lock(&ctrl->crit_sect); slot_enable (ctrl, hp_slot); green_LED_blink (ctrl, hp_slot); amber_LED_off (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); /* Wait for ~1 second because of hot plug spec */ long_delay(1*HZ); /* Check for a power fault */ if (func->status == 0xFF) { /* power fault occurred, but it was benign */ rc = POWER_FAILURE; func->status = 0; } else rc = cpqhp_valid_replace(ctrl, func); if (!rc) { /* It must be the same board */ rc = cpqhp_configure_board(ctrl, func); /* If configuration fails, turn it off * Get slot won't work for devices behind * bridges, but in this case it will always be * called for the "base" bus/dev/func of an * adapter. 
 */
		/* Fault path: blink amber, kill green, power the slot down,
		 * then latch the LED/power state out to the hardware. */
		mutex_lock(&ctrl->crit_sect);
		amber_LED_on (ctrl, hp_slot);
		green_LED_off (ctrl, hp_slot);
		slot_disable (ctrl, hp_slot);
		set_SOGO(ctrl);
		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);
		mutex_unlock(&ctrl->crit_sect);
		if (rc)
			return rc;
		else
			return 1;
	} else {
		/* Something is wrong
		 * Get slot won't work for devices behind bridges, but
		 * in this case it will always be called for the "base"
		 * bus/dev/func of an adapter.
		 */
		mutex_lock(&ctrl->crit_sect);
		amber_LED_on (ctrl, hp_slot);
		green_LED_off (ctrl, hp_slot);
		slot_disable (ctrl, hp_slot);
		set_SOGO(ctrl);
		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);
		mutex_unlock(&ctrl->crit_sect);
	}
	}
	return rc;
}

/**
 * board_added - Called after a board has been added to the system.
 * @func: PCI device/function info
 * @ctrl: hotplug controller
 *
 * Turns power on for the board.
 * Configures board.
 *
 * Returns 0 on success, or a driver error code (WRONG_BUS_FREQUENCY,
 * POWER_FAILURE, NO_ADAPTER_PRESENT, or the result of configuration).
 */
static u32 board_added(struct pci_func *func, struct controller *ctrl)
{
	u8 hp_slot;
	u8 temp_byte;
	u8 adapter_speed;
	int index;
	u32 temp_register = 0xFFFFFFFF;	/* all-Fs means "no adapter" below */
	u32 rc = 0;
	struct pci_func *new_slot = NULL;
	struct pci_bus *bus = ctrl->pci_bus;
	struct slot *p_slot;
	struct resource_lists res_lists;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n",
	    __func__, func->device, ctrl->slot_device_offset, hp_slot);

	mutex_lock(&ctrl->crit_sect);

	/* turn on board without attaching to the bus */
	enable_slot_power(ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	/* Change bits in slot power register to force another shift out
	 * NOTE: this is to work around the timer bug */
	temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
	writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	/* Probe the card's speed while it is powered but detached; adjust
	 * the controller's bus speed if they disagree. */
	adapter_speed = get_adapter_speed(ctrl, hp_slot);
	if (bus->cur_bus_speed != adapter_speed)
		if (set_controller_speed(ctrl, adapter_speed, hp_slot))
			rc = WRONG_BUS_FREQUENCY;

	/* turn off board without attaching to the bus */
	disable_slot_power (ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (rc)
		return rc;

	p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);

	/* turn on board and blink green LED */
	dbg("%s: before down\n", __func__);
	mutex_lock(&ctrl->crit_sect);
	dbg("%s: after down\n", __func__);

	dbg("%s: before slot_enable\n", __func__);
	slot_enable (ctrl, hp_slot);

	dbg("%s: before green_LED_blink\n", __func__);
	green_LED_blink (ctrl, hp_slot);

	dbg("%s: before amber_LED_blink\n", __func__);
	amber_LED_off (ctrl, hp_slot);

	dbg("%s: before set_SOGO\n", __func__);
	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	dbg("%s: before wait_for_ctrl_irq\n", __func__);
	wait_for_ctrl_irq (ctrl);
	dbg("%s: after wait_for_ctrl_irq\n", __func__);

	dbg("%s: before up\n", __func__);
	mutex_unlock(&ctrl->crit_sect);
	dbg("%s: after up\n", __func__);

	/* Wait for ~1 second because of hot plug spec */
	dbg("%s: before long_delay\n", __func__);
	long_delay(1*HZ);
	dbg("%s: after long_delay\n", __func__);

	dbg("%s: func status = %x\n", __func__, func->status);

	/* Check for a power fault */
	if (func->status == 0xFF) {
		/* power fault occurred, but it was benign */
		temp_register = 0xFFFFFFFF;
		dbg("%s: temp register set to %x by power fault\n", __func__, temp_register);
		rc = POWER_FAILURE;
		func->status = 0;
	} else {
		/* Get vendor/device ID u32 */
		ctrl->pci_bus->number = func->bus;
		rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register);
		dbg("%s: pci_read_config_dword returns %d\n", __func__, rc);
		dbg("%s: temp_register is %x\n", __func__, temp_register);

		if (rc != 0) {
			/* Something's wrong here */
			temp_register = 0xFFFFFFFF;
			dbg("%s: temp register set to %x by error\n", __func__, temp_register);
		}
		/* Preset return code.  It will be changed later if things go okay. */
		rc = NO_ADAPTER_PRESENT;
	}

	/* All F's is an empty slot or an invalid board */
	if (temp_register != 0xFFFFFFFF) {
		/* Hand the controller's free-resource lists to the configure
		 * pass and take back whatever it did not consume. */
		res_lists.io_head = ctrl->io_head;
		res_lists.mem_head = ctrl->mem_head;
		res_lists.p_mem_head = ctrl->p_mem_head;
		res_lists.bus_head = ctrl->bus_head;
		res_lists.irqs = NULL;

		rc = configure_new_device(ctrl, func, 0, &res_lists);

		dbg("%s: back from configure_new_device\n", __func__);
		ctrl->io_head = res_lists.io_head;
		ctrl->mem_head = res_lists.mem_head;
		ctrl->p_mem_head = res_lists.p_mem_head;
		ctrl->bus_head = res_lists.bus_head;

		cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->io_head));
		cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

		if (rc) {
			/* Configuration failed: amber on, green off, power down. */
			mutex_lock(&ctrl->crit_sect);

			amber_LED_on (ctrl, hp_slot);
			green_LED_off (ctrl, hp_slot);
			slot_disable (ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq (ctrl);

			mutex_unlock(&ctrl->crit_sect);
			return rc;
		} else {
			cpqhp_save_slot_config(ctrl, func);
		}

		func->status = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0x01;

		/* next, we will instantiate the linux pci_dev structures (with
		 * appropriate driver notification, if already present) */
		dbg("%s: configure linux pci_dev structure\n", __func__);
		index = 0;
		do {
			new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
			if (new_slot && !new_slot->pci_dev)
				cpqhp_configure_device(ctrl, new_slot);
		} while (new_slot);

		/* Success: solid green LED. */
		mutex_lock(&ctrl->crit_sect);

		green_LED_on (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);
	} else {
		/* Empty slot or unreadable board: fault indication + power off. */
		mutex_lock(&ctrl->crit_sect);

		amber_LED_on (ctrl, hp_slot);
		green_LED_off (ctrl, hp_slot);
		slot_disable (ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);

		mutex_unlock(&ctrl->crit_sect);

		return rc;
	}
	return 0;
}

/**
 * remove_board - Turns off slot and LEDs
 * @func: PCI device/function info
 * @replace_flag:
whether replacing or adding a new device
 * @ctrl: target controller
 */
static u32 remove_board(struct pci_func * func, u32 replace_flag, struct controller * ctrl)
{
	int index;
	u8 skip = 0;
	u8 device;
	u8 hp_slot;
	u8 temp_byte;
	/* NOTE(review): rc may stay uninitialized when add_support is set and
	 * resources were already saved; its value is never returned, only the
	 * save-helper calls assign it. Verify this is intentional. */
	u32 rc;
	struct resource_lists res_lists;
	struct pci_func *temp_func;

	if (cpqhp_unconfigure_device(func))
		return 1;

	device = func->device;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("In %s, hp_slot = %d\n", __func__, hp_slot);

	/* When we get here, it is safe to change base address registers.
	 * We will attempt to save the base address register lengths */
	if (replace_flag || !ctrl->add_support)
		rc = cpqhp_save_base_addr_length(ctrl, func);
	else if (!func->bus_head && !func->mem_head &&
		 !func->p_mem_head && !func->io_head) {
		/* Here we check to see if we've saved any of the board's
		 * resources already.  If so, we'll skip the attempt to
		 * determine what's being used. */
		index = 0;
		temp_func = cpqhp_slot_find(func->bus, func->device, index++);
		while (temp_func) {
			if (temp_func->bus_head || temp_func->mem_head
			    || temp_func->p_mem_head || temp_func->io_head) {
				skip = 1;
				break;
			}
			temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++);
		}

		if (!skip)
			rc = cpqhp_save_used_resources(ctrl, func);
	}
	/* Change status to shutdown */
	if (func->is_a_board)
		func->status = 0x01;
	func->configured = 0;

	/* Drive the slot dark and powered off, and mask its SERR line. */
	mutex_lock(&ctrl->crit_sect);

	green_LED_off (ctrl, hp_slot);
	slot_disable (ctrl, hp_slot);

	set_SOGO(ctrl);

	/* turn off SERR for slot */
	temp_byte = readb(ctrl->hpc_reg + SLOT_SERR);
	temp_byte &= ~(0x01 << hp_slot);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq (ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (!replace_flag && ctrl->add_support) {
		/* Give every function's resources back to the controller pool,
		 * then drop the pci_func entries for this physical device. */
		while (func) {
			res_lists.io_head = ctrl->io_head;
			res_lists.mem_head = ctrl->mem_head;
			res_lists.p_mem_head = ctrl->p_mem_head;
			res_lists.bus_head = ctrl->bus_head;

			cpqhp_return_board_resources(func, &res_lists);

			ctrl->io_head = res_lists.io_head;
			ctrl->mem_head = res_lists.mem_head;
			ctrl->p_mem_head = res_lists.p_mem_head;
			ctrl->bus_head = res_lists.bus_head;

			cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->io_head));
			cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

			if (is_bridge(func)) {
				bridge_slot_remove(func);
			} else
				slot_remove(func);

			func = cpqhp_slot_find(ctrl->bus, device, 0);
		}

		/* Setup slot structure with entry for empty slot */
		func = cpqhp_slot_create(ctrl->bus);

		if (func == NULL)
			return 1;

		func->bus = ctrl->bus;
		func->device = device;
		func->function = 0;
		func->configured = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0;
		func->p_task_event = NULL;
	}

	return 0;
}

/* Timer callback: record which slot's pushbutton fired and wake the
 * event thread to do the actual (blocking) work. */
static void pushbutton_helper_thread(unsigned long data)
{
	pushbutton_pending = data;

	wake_up_process(cpqhp_event_thread);
}

/* this is the main worker thread */
static int event_thread(void* data)
{
	struct controller *ctrl;

	while (1) {
		dbg("!!!!event_thread sleeping\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		if (kthread_should_stop())
			break;
		/* Do stuff here */
		if (pushbutton_pending)
			cpqhp_pushbutton_thread(pushbutton_pending);
		else
			for (ctrl = cpqhp_ctrl_list; ctrl; ctrl=ctrl->next)
				interrupt_event_handler(ctrl);
	}
	dbg("event_thread signals exit\n");
	return 0;
}

/* Start the single driver-wide event worker thread. Returns 0 or a
 * negative errno from kthread_run(). */
int cpqhp_event_start_thread(void)
{
	cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event");
	if (IS_ERR(cpqhp_event_thread)) {
		err ("Can't start up our event thread\n");
		return PTR_ERR(cpqhp_event_thread);
	}

	return 0;
}

/* Stop the event worker thread (blocks until it exits). */
void cpqhp_event_stop_thread(void)
{
	kthread_stop(cpqhp_event_thread);
}

/* Push this slot's current hardware status (power/attention/latch/
 * presence) out to the PCI hotplug core. Returns 0 or -ENOMEM, or the
 * result of pci_hp_change_slot_info(). */
static int update_slot_info(struct controller *ctrl, struct slot *slot)
{
	struct hotplug_slot_info *info;
	int result;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->power_status = get_slot_enabled(ctrl, slot);
	info->attention_status = cpq_get_attention_status(ctrl, slot);
	info->latch_status = cpq_get_latch_status(ctrl, slot);
	info->adapter_status = get_presence_status(ctrl, slot);
	result = pci_hp_change_slot_info(slot->hotplug_slot, info);
	kfree (info);
	return result;
}

/* Drain the controller's 10-entry event queue, reacting to button
 * press/cancel/release and power-fault events. Repeats until a full
 * pass over the queue finds no pending entries. */
static void interrupt_event_handler(struct controller *ctrl)
{
	int loop = 0;
	int change = 1;
	struct pci_func *func;
	u8 hp_slot;
	struct slot *p_slot;

	while (change) {
		change = 0;

		for (loop = 0; loop < 10; loop++) {
			/* dbg("loop %d\n", loop); */
			if (ctrl->event_queue[loop].event_type != 0) {
				hp_slot = ctrl->event_queue[loop].hp_slot;

				func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0);
				if (!func)
					return;

				p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
				if (!p_slot)
					return;

				dbg("hp_slot %d, func %p, p_slot %p\n",
				    hp_slot, func, p_slot);

				if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
					dbg("button pressed\n");
				} else if (ctrl->event_queue[loop].event_type ==
					   INT_BUTTON_CANCEL) {
					dbg("button cancel\n");
					del_timer(&p_slot->task_event);

					mutex_lock(&ctrl->crit_sect);

					if (p_slot->state == BLINKINGOFF_STATE) {
						/* slot is on */
						dbg("turn on green LED\n");
						green_LED_on (ctrl, hp_slot);
					} else if (p_slot->state == BLINKINGON_STATE) {
						/* slot is off */
						dbg("turn off green LED\n");
						green_LED_off (ctrl, hp_slot);
					}

					info(msg_button_cancel, p_slot->number);

					p_slot->state = STATIC_STATE;

					amber_LED_off (ctrl, hp_slot);

					set_SOGO(ctrl);

					/* Wait for SOBS to be unset */
					wait_for_ctrl_irq (ctrl);

					mutex_unlock(&ctrl->crit_sect);
				}
				/*** button Released (No action on press...)
func NULL in %s\n", __func__);
			return ;
		}

		if (cpqhp_process_SS(ctrl, func) != 0) {
			/* Removal failed: indicate a fault (amber + green). */
			amber_LED_on(ctrl, hp_slot);
			green_LED_on(ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq(ctrl);
		}

		p_slot->state = STATIC_STATE;
	} else {
		p_slot->state = POWERON_STATE;
		/* slot is off */

		func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
		dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl);
		if (!func) {
			dbg("Error! func NULL in %s\n", __func__);
			return ;
		}

		if (ctrl != NULL) {
			if (cpqhp_process_SI(ctrl, func) != 0) {
				/* Insertion failed: amber fault LED, green off. */
				amber_LED_on(ctrl, hp_slot);
				green_LED_off(ctrl, hp_slot);

				set_SOGO(ctrl);

				/* Wait for SOBS to be unset */
				wait_for_ctrl_irq (ctrl);
			}
		}

		p_slot->state = STATIC_STATE;
	}

	return;
}

/* Process a Slot-Insertion request: verify the interlock is closed,
 * (re)create the pci_func bookkeeping for the slot, and bring the board
 * up via board_replaced()/board_added(). Returns 0 on success, nonzero
 * on failure. */
int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
{
	u8 device, hp_slot;
	u16 temp_word;
	u32 tempdword;
	int rc;
	struct slot* p_slot;
	int physical_slot = 0;

	tempdword = 0;

	device = func->device;
	hp_slot = device - ctrl->slot_device_offset;
	p_slot = cpqhp_find_slot(ctrl, device);
	if (p_slot)
		physical_slot = p_slot->number;

	/* Check to see if the interlock is closed */
	tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);

	if (tempdword & (0x01 << hp_slot)) {
		return 1;
	}

	if (func->is_a_board) {
		rc = board_replaced(func, ctrl);
	} else {
		/* add board */
		slot_remove(func);

		func = cpqhp_slot_create(ctrl->bus);
		if (func == NULL)
			return 1;

		func->bus = ctrl->bus;
		func->device = device;
		func->function = 0;
		func->configured = 0;
		func->is_a_board = 1;

		/* We have to save the presence info for these slots */
		temp_word = ctrl->ctrl_int_comp >> 16;
		func->presence_save = (temp_word >> hp_slot) & 0x01;
		func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

		if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
			func->switch_save = 0;
		} else {
			func->switch_save = 0x10;
		}

		rc = board_added(func, ctrl);
		if (rc) {
			/* Adding failed: tear down the entry (and any bridge
			 * children) and rebuild an empty-slot placeholder. */
			if (is_bridge(func)) {
				bridge_slot_remove(func);
			} else
				slot_remove(func);

			/* Setup slot structure with entry for empty slot */
			func = cpqhp_slot_create(ctrl->bus);

			if (func == NULL)
				return 1;

			func->bus = ctrl->bus;
			func->device = device;
			func->function = 0;
			func->configured = 0;
			func->is_a_board = 0;

			/* We have to save the presence info for these slots */
			temp_word = ctrl->ctrl_int_comp >> 16;
			func->presence_save = (temp_word >> hp_slot) & 0x01;
			func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

			if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
				func->switch_save = 0;
			} else {
				func->switch_save = 0x10;
			}
		}
	}

	if (rc) {
		dbg("%s: rc = %d\n", __func__, rc);
	}

	if (p_slot)
		update_slot_info(ctrl, p_slot);

	return rc;
}

/* Process a Slot-Shutdown request: refuse to remove display adapters or
 * bridges with VGA routing enabled, otherwise power the board down via
 * remove_board(). Returns 0 on success, nonzero or a driver error code
 * on failure. */
int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
{
	u8 device, class_code, header_type, BCR;
	u8 index = 0;
	u8 replace_flag;
	u32 rc = 0;
	unsigned int devfn;
	struct slot* p_slot;
	struct pci_bus *pci_bus = ctrl->pci_bus;
	int physical_slot=0;

	device = func->device;
	func = cpqhp_slot_find(ctrl->bus, device, index++);
	p_slot = cpqhp_find_slot(ctrl, device);
	if (p_slot) {
		physical_slot = p_slot->number;
	}

	/* Make sure there are no video controllers here */
	while (func && !rc) {
		pci_bus->number = func->bus;
		devfn = PCI_DEVFN(func->device, func->function);

		/* Check the Class Code */
		rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
		if (rc)
			return rc;

		if (class_code == PCI_BASE_CLASS_DISPLAY) {
			/* Display/Video adapter (not supported) */
			rc = REMOVE_NOT_SUPPORTED;
		} else {
			/* See if it's a bridge */
			rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
			if (rc)
				return rc;

			/* If it's a bridge, check the VGA Enable bit */
			if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
				rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR);
				if (rc)
					return rc;

				/* If the VGA Enable bit is set, remove isn't
				 * supported */
				if (BCR & PCI_BRIDGE_CTL_VGA)
					rc = REMOVE_NOT_SUPPORTED;
			}
		}

		func = cpqhp_slot_find(ctrl->bus, device, index++);
	}

	func = cpqhp_slot_find(ctrl->bus, device, 0);
	if ((func != NULL) && !rc) {
		/* FIXME: Replace flag should be passed into process_SS */
		replace_flag = !(ctrl->add_support);
		rc = remove_board(func, replace_flag, ctrl);
	} else if (!rc) {
		rc = 1;
	}

	if (p_slot)
		update_slot_info(ctrl, p_slot);

	return rc;
}

/**
 * switch_leds - switch the leds, go from one site to the other.
 * @ctrl: controller to use
 * @num_of_slots: number of slots to use
 * @work_LED: LED control value
 * @direction: 1 to start from the left side, 0 to start right.
 */
static void switch_leds(struct controller *ctrl, const int num_of_slots,
			u32 *work_LED, const int direction)
{
	int loop;

	for (loop = 0; loop < num_of_slots; loop++) {
		if (direction)
			*work_LED = *work_LED >> 1;
		else
			*work_LED = *work_LED << 1;
		writel(*work_LED, ctrl->hpc_reg + LED_CONTROL);

		set_SOGO(ctrl);

		/* Wait for SOGO interrupt */
		wait_for_ctrl_irq(ctrl);

		/* Get ready for next iteration */
		long_delay((2*HZ)/10);
	}
}

/**
 * cpqhp_hardware_test - runs hardware tests
 * @ctrl: target controller
 * @test_num: the number written to the "test" file in sysfs.
 *
 * For hot plug ctrl folks to play with.
 */
int cpqhp_hardware_test(struct controller *ctrl, int test_num)
{
	u32 save_LED;
	u32 work_LED;
	int loop;
	int num_of_slots;

	num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;

	switch (test_num) {
	case 1:
		/* Do stuff here! */

		/* Do that funky LED thing */
		/* so we can restore them later */
		save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
		work_LED = 0x01010101;
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);

		work_LED = 0x01010000;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);
		work_LED = 0x00000101;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);

		work_LED = 0x01010000;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		for (loop = 0; loop < num_of_slots; loop++) {
			set_SOGO(ctrl);

			/* Wait for SOGO interrupt */
			wait_for_ctrl_irq (ctrl);

			/* Get ready for next iteration */
			long_delay((3*HZ)/10);
			work_LED = work_LED >> 16;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);

			set_SOGO(ctrl);

			/* Wait for SOGO interrupt */
			wait_for_ctrl_irq (ctrl);

			/* Get ready for next iteration */
			long_delay((3*HZ)/10);
			work_LED = work_LED << 16;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
			work_LED = work_LED << 1;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		}

		/* put it back the way it was */
		writel(save_LED, ctrl->hpc_reg + LED_CONTROL);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq (ctrl);
		break;
	case 2:
		/* Do other stuff here! */
		break;
	case 3:
		/* and more... */
		break;
	}
	return 0;
}

/**
 * configure_new_device - Configures the PCI header information of one board.
 * @ctrl: pointer to controller structure
 * @func: pointer to function structure
 * @behind_bridge: 1 if this is a recursive call, 0 if not
 * @resources: pointer to set of resource lists
 *
 * Returns 0 if success.
 */
static u32 configure_new_device(struct controller * ctrl, struct pci_func * func,
				u8 behind_bridge, struct resource_lists * resources)
{
	u8 temp_byte, function, max_functions, stop_it;
	int rc;
	u32 ID;
	struct pci_func *new_slot;
	int index;

	new_slot = func;

	dbg("%s\n", __func__);
	/* Check for Multi-function device */
	ctrl->pci_bus->number = func->bus;
	/* offset 0x0E is the PCI Header Type register; bit 7 = multi-function */
	rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte);
	if (rc) {
		dbg("%s: rc = %d\n", __func__, rc);
		return rc;
	}

	if (temp_byte & 0x80)	/* Multi-function device */
		max_functions = 8;
	else
		max_functions = 1;

	function = 0;

	do {
		rc = configure_new_function(ctrl, new_slot, behind_bridge, resources);

		if (rc) {
			dbg("configure_new_function failed %d\n",rc);
			/* Failure: give back every resource this device's
			 * functions had claimed. */
			index = 0;

			while (new_slot) {
				new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++);

				if (new_slot)
					cpqhp_return_board_resources(new_slot, resources);
			}

			return rc;
		}

		function++;

		stop_it = 0;

		/* The following loop skips to the next present function
		 * and creates a board structure */

		while ((function < max_functions) && (!stop_it)) {
			pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);

			if (ID == 0xFFFFFFFF) {
				function++;
			} else {
				/* Setup slot structure. */
				new_slot = cpqhp_slot_create(func->bus);

				if (new_slot == NULL)
					return 1;

				new_slot->bus = func->bus;
				new_slot->device = func->device;
				new_slot->function = function;
				new_slot->is_a_board = 1;
				new_slot->status = 0;

				stop_it++;
			}
		}

	} while (function < max_functions);
	dbg("returning from configure_new_device\n");

	return 0;
}

/*
 * Configuration logic that involves the hotplug data structures and
 * their bookkeeping
 */

/**
 * configure_new_function - Configures the PCI header information of one device
 * @ctrl: pointer to controller structure
 * @func: pointer to function structure
 * @behind_bridge: 1 if this is a recursive call, 0 if not
 * @resources: pointer to set of resource lists
 *
 * Calls itself recursively for bridged devices.
 * Returns 0 if success.
 */
static int configure_new_function(struct controller *ctrl, struct pci_func *func,
				  u8 behind_bridge,
				  struct resource_lists *resources)
{
	int cloop;
	u8 IRQ = 0;
	u8 temp_byte;
	u8 device;
	u8 class_code;
	u16 command;
	u16 temp_word;
	u32 temp_dword;
	u32 rc;
	u32 temp_register;
	u32 base;
	u32 ID;
	unsigned int devfn;
	struct pci_resource *mem_node;
	struct pci_resource *p_mem_node;
	struct pci_resource *io_node;
	struct pci_resource *bus_node;
	struct pci_resource *hold_mem_node;
	struct pci_resource *hold_p_mem_node;
	struct pci_resource *hold_IO_node;
	struct pci_resource *hold_bus_node;
	struct irq_mapping irqs;
	struct pci_func *new_slot;
	struct pci_bus *pci_bus;
	struct resource_lists temp_resources;

	pci_bus = ctrl->pci_bus;
	pci_bus->number = func->bus;
	devfn = PCI_DEVFN(func->device, func->function);

	/* Check for Bridge */
	rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte);
	if (rc)
		return rc;

	if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
		/* set Primary bus */
		dbg("set Primary bus = %d\n", func->bus);
		rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
		if (rc)
			return rc;

		/* find range of busses to use */
		dbg("find ranges of buses to use\n");
		bus_node =
get_max_resource(&(resources->bus_head), 1); /* If we don't have any busses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; /* set Secondary bus */ temp_byte = bus_node->base; dbg("set Secondary bus = %d\n", bus_node->base); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte); if (rc) return rc; /* set subordinate bus */ temp_byte = bus_node->base + bus_node->length - 1; dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (rc) return rc; /* set subordinate Latency Timer and base Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte); if (rc) return rc; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); if (rc) return rc; /* set Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); if (rc) return rc; /* Setup the IO, memory, and prefetchable windows */ io_node = get_max_resource(&(resources->io_head), 0x1000); if (!io_node) return -ENOMEM; mem_node = get_max_resource(&(resources->mem_head), 0x100000); if (!mem_node) return -ENOMEM; p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000); if (!p_mem_node) return -ENOMEM; dbg("Setup the IO, memory, and prefetchable windows\n"); dbg("io_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", io_node->base, io_node->length, io_node->next); dbg("mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base, mem_node->length, mem_node->next); dbg("p_mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base, p_mem_node->length, p_mem_node->next); /* set up the IRQ info */ if (!resources->irqs) { irqs.barber_pole = 0; irqs.interrupt[0] = 0; irqs.interrupt[1] = 0; irqs.interrupt[2] = 0; irqs.interrupt[3] = 0; irqs.valid_INT = 0; } else { irqs.barber_pole = resources->irqs->barber_pole; irqs.interrupt[0] = 
resources->irqs->interrupt[0]; irqs.interrupt[1] = resources->irqs->interrupt[1]; irqs.interrupt[2] = resources->irqs->interrupt[2]; irqs.interrupt[3] = resources->irqs->interrupt[3]; irqs.valid_INT = resources->irqs->valid_INT; } /* set up resource lists that are now aligned on top and bottom * for anything behind the bridge. */ temp_resources.bus_head = bus_node; temp_resources.io_head = io_node; temp_resources.mem_head = mem_node; temp_resources.p_mem_head = p_mem_node; temp_resources.irqs = &irqs; /* Make copies of the nodes we are going to pass down so that * if there is a problem,we can just use these to free resources */ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL); if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) { kfree(hold_bus_node); kfree(hold_IO_node); kfree(hold_mem_node); kfree(hold_p_mem_node); return 1; } memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource)); bus_node->base += 1; bus_node->length -= 1; bus_node->next = NULL; /* If we have IO resources copy them and fill in the bridge's * IO range registers */ if (io_node) { memcpy(hold_IO_node, io_node, sizeof(struct pci_resource)); io_node->next = NULL; /* set IO base and Limit registers */ temp_byte = io_node->base >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte); temp_byte = (io_node->base + io_node->length - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); } else { kfree(hold_IO_node); hold_IO_node = NULL; } /* If we have memory resources copy them and fill in the * bridge's memory range registers. Otherwise, fill in the * range registers with values that disable them. 
*/ if (mem_node) { memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource)); mem_node->next = NULL; /* set Mem base and Limit registers */ temp_word = mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = (mem_node->base + mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); } else { temp_word = 0xFFFF; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = 0x0000; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); kfree(hold_mem_node); hold_mem_node = NULL; } memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource)); p_mem_node->next = NULL; /* set Pre Mem base and Limit registers */ temp_word = p_mem_node->base >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); /* Adjust this to compensate for extra adjustment in first loop */ irqs.barber_pole--; rc = 0; /* Here we actually find the devices and configure them */ for (device = 0; (device <= 0x1F) && !rc; device++) { irqs.barber_pole = (irqs.barber_pole + 1) & 0x03; ID = 0xFFFFFFFF; pci_bus->number = hold_bus_node->base; pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; if (ID != 0xFFFFFFFF) { /* device present */ /* Setup slot structure. */ new_slot = cpqhp_slot_create(hold_bus_node->base); if (new_slot == NULL) { rc = -ENOMEM; continue; } new_slot->bus = hold_bus_node->base; new_slot->device = device; new_slot->function = 0; new_slot->is_a_board = 1; new_slot->status = 0; rc = configure_new_device(ctrl, new_slot, 1, &temp_resources); dbg("configure_new_device rc=0x%x\n",rc); } /* End of IF (device in slot?) 
*/ } /* End of FOR loop */ if (rc) goto free_and_out; /* save the interrupt routing information */ if (resources->irqs) { resources->irqs->interrupt[0] = irqs.interrupt[0]; resources->irqs->interrupt[1] = irqs.interrupt[1]; resources->irqs->interrupt[2] = irqs.interrupt[2]; resources->irqs->interrupt[3] = irqs.interrupt[3]; resources->irqs->valid_INT = irqs.valid_INT; } else if (!behind_bridge) { /* We need to hook up the interrupts here */ for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } } /* end of for loop */ } /* Return unused bus resources * First use the temporary node to store information for * the board */ if (hold_bus_node && bus_node && temp_resources.bus_head) { hold_bus_node->length = bus_node->base - hold_bus_node->base; hold_bus_node->next = func->bus_head; func->bus_head = hold_bus_node; temp_byte = temp_resources.bus_head->base - 1; /* set subordinate bus */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (temp_resources.bus_head->length == 0) { kfree(temp_resources.bus_head); temp_resources.bus_head = NULL; } else { return_resource(&(resources->bus_head), temp_resources.bus_head); } } /* If we have IO space available and there is some left, * return the unused portion */ if (hold_IO_node && temp_resources.io_head) { io_node = do_pre_bridge_resource_split(&(temp_resources.io_head), &hold_IO_node, 0x1000); /* Check if we were able to split something off */ if (io_node) { hold_IO_node->base = io_node->base + io_node->length; temp_byte = (hold_IO_node->base) >> 8; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_BASE, temp_byte); return_resource(&(resources->io_head), io_node); } io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000); /* Check if we were able to split something off */ if (io_node) { /* First use the temporary node to store * information for the 
board */ hold_IO_node->length = io_node->base - hold_IO_node->base; /* If we used any, add it to the board's list */ if (hold_IO_node->length) { hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; temp_byte = (io_node->base - 1) >> 8; rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte); return_resource(&(resources->io_head), io_node); } else { /* it doesn't need any IO */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_LIMIT, temp_word); return_resource(&(resources->io_head), io_node); kfree(hold_IO_node); } } else { /* it used most of the range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } } else if (hold_IO_node) { /* it used the whole range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } /* If we have memory space available and there is some left, * return the unused portion */ if (hold_mem_node && temp_resources.mem_head) { mem_node = do_pre_bridge_resource_split(&(temp_resources. 
mem_head), &hold_mem_node, 0x100000); /* Check if we were able to split something off */ if (mem_node) { hold_mem_node->base = mem_node->base + mem_node->length; temp_word = (hold_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word); return_resource(&(resources->mem_head), mem_node); } mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000); /* Check if we were able to split something off */ if (mem_node) { /* First use the temporary node to store * information for the board */ hold_mem_node->length = mem_node->base - hold_mem_node->base; if (hold_mem_node->length) { hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; /* configure end address */ temp_word = (mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); /* Return unused resources to the pool */ return_resource(&(resources->mem_head), mem_node); } else { /* it doesn't need any Mem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); return_resource(&(resources->mem_head), mem_node); kfree(hold_mem_node); } } else { /* it used most of the range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } } else if (hold_mem_node) { /* it used the whole range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } /* If we have prefetchable memory space available and there * is some left at the end, return the unused portion */ if (hold_p_mem_node && temp_resources.p_mem_head) { p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head), &hold_p_mem_node, 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { hold_p_mem_node->base = p_mem_node->base + p_mem_node->length; temp_word = (hold_p_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } p_mem_node = 
do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { /* First use the temporary node to store * information for the board */ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base; /* If we used any, add it to the board's list */ if (hold_p_mem_node->length) { hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; temp_word = (p_mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } else { /* it doesn't need any PMem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); kfree(hold_p_mem_node); } } else { /* it used the most of the range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } } else if (hold_p_mem_node) { /* it used the whole range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } /* We should be configuring an IRQ and the bridge's base address * registers if it needs them. 
Although we have never seen such * a device */ /* enable card */ command = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, command); /* set Bridge Control Register */ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY | * PCI_BRIDGE_CTL_SERR | * PCI_BRIDGE_CTL_NO_ISA */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_BRIDGE_CONTROL, command); } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Standard device */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display (video) adapter (not supported) */ return DEVICE_TYPE_NOT_SUPPORTED; } /* Figure out IO and memory needs */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop); rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); rc = pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp_register); dbg("CND: base = 0x%x\n", temp_register); if (temp_register) { /* If this register is implemented */ if ((temp_register & 0x03L) == 0x01) { /* Map IO */ /* set base = amount of IO space */ base = temp_register & 0xFFFFFFFC; base = ~base + 1; dbg("CND: length = 0x%x\n", base); io_node = get_io_resource(&(resources->io_head), base); dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n", io_node->base, io_node->length, io_node->next); dbg("func (%p) io_head (%p)\n", func, func->io_head); /* allocate the resource to the board */ if (io_node) { base = io_node->base; io_node->next = func->io_head; func->io_head = io_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x08) { /* Map prefetchable memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); p_mem_node = get_resource(&(resources->p_mem_head), base); /* allocate the 
resource to the board */ if (p_mem_node) { base = p_mem_node->base; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x00) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x04) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x06) { /* Those bits are reserved, we can't handle this */ return 1; } else { /* Requesting space below 1M */ return NOT_ENOUGH_RESOURCES; } rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); /* Check for 64-bit base */ if ((temp_register & 0x07L) == 0x04) { cloop += 4; /* Upper 32 bits of address always zero * on today's systems */ /* FIXME this is probably not true on * Alpha and ia64??? 
*/ base = 0; rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); } } } /* End of base register loop */ if (cpqhp_legacy_mode) { /* Figure out which interrupt pin this function uses */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_INTERRUPT_PIN, &temp_byte); /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's * alread mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { /* We have to share with something already set up */ IRQ = resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03]; } else { /* Program IRQ based on card type */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_STORAGE) IRQ = cpqhp_disk_irq; else IRQ = cpqhp_nic_irq; } /* IRQ Line */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ); } if (!behind_bridge) { rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { /* TBD - this code may also belong in the other clause * of this If statement */ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ; resources->irqs->valid_INT |= 0x01 << (temp_byte + resources->irqs->barber_pole - 1) & 0x03; } /* Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); /* Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); /* disable ROM base Address */ temp_dword = 0x00L; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_ROM_ADDRESS, temp_dword); /* enable card */ temp_word = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, temp_word); } else { /* 
End of Not-A-Bridge else */ /* It's some strange type of PCI adapter (Cardbus?) */ return DEVICE_TYPE_NOT_SUPPORTED; } func->configured = 1; return 0; free_and_out: cpqhp_destroy_resource_list (&temp_resources); return_resource(&(resources-> bus_head), hold_bus_node); return_resource(&(resources-> io_head), hold_IO_node); return_resource(&(resources-> mem_head), hold_mem_node); return_resource(&(resources-> p_mem_head), hold_p_mem_node); return rc; }
gpl-2.0
AshishNamdev/linux
drivers/scsi/scsi_common.c
214
8876
/* * SCSI functions used by both the initiator and the target code. */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <asm/unaligned.h> #include <scsi/scsi_common.h> /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. * You may not alter any existing entry (although adding new ones is * encouraged once assigned by ANSI/INCITS T10 */ static const char *const scsi_device_types[] = { "Direct-Access ", "Sequential-Access", "Printer ", "Processor ", "WORM ", "CD-ROM ", "Scanner ", "Optical Device ", "Medium Changer ", "Communications ", "ASC IT8 ", "ASC IT8 ", "RAID ", "Enclosure ", "Direct-Access-RBC", "Optical card ", "Bridge controller", "Object storage ", "Automation/Drive ", "Security Manager ", "Direct-Access-ZBC", }; /** * scsi_device_type - Return 17 char string indicating device type. * @type: type number to look up */ const char *scsi_device_type(unsigned type) { if (type == 0x1e) return "Well-known LUN "; if (type == 0x1f) return "No Device "; if (type >= ARRAY_SIZE(scsi_device_types)) return "Unknown "; return scsi_device_types[type]; } EXPORT_SYMBOL(scsi_device_type); /** * scsilun_to_int - convert a scsi_lun to an int * @scsilun: struct scsi_lun to be converted. * * Description: * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered * integer, and return the result. The caller must check for * truncation before using this function. * * Notes: * For a description of the LUN format, post SCSI-3 see the SCSI * Architecture Model, for SCSI-3 see the SCSI Controller Commands. * * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function * returns the integer: 0x0b03d204 * * This encoding will return a standard integer LUN for LUNs smaller * than 256, which typically use a single level LUN structure with * addressing method 0. 
*/ u64 scsilun_to_int(struct scsi_lun *scsilun) { int i; u64 lun; lun = 0; for (i = 0; i < sizeof(lun); i += 2) lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); return lun; } EXPORT_SYMBOL(scsilun_to_int); /** * int_to_scsilun - reverts an int into a scsi_lun * @lun: integer to be reverted * @scsilun: struct scsi_lun to be set. * * Description: * Reverts the functionality of the scsilun_to_int, which packed * an 8-byte lun value into an int. This routine unpacks the int * back into the lun value. * * Notes: * Given an integer : 0x0b03d204, this function returns a * struct scsi_lun of: d2 04 0b 03 00 00 00 00 * */ void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) { int i; memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); for (i = 0; i < sizeof(lun); i += 2) { scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; scsilun->scsi_lun[i+1] = lun & 0xFF; lun = lun >> 16; } } EXPORT_SYMBOL(int_to_scsilun); /** * scsi_normalize_sense - normalize main elements from either fixed or * descriptor sense data format into a common format. * * @sense_buffer: byte array containing sense data returned by device * @sb_len: number of valid bytes in sense_buffer * @sshdr: pointer to instance of structure that common * elements are written to. * * Notes: * The "main elements" from sense data are: response_code, sense_key, * asc, ascq and additional_length (only for descriptor format). * * Typically this function can be called after a device has * responded to a SCSI command with the CHECK_CONDITION status. 
* * Return value: * true if valid sense data information found, else false; */ bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, struct scsi_sense_hdr *sshdr) { if (!sense_buffer || !sb_len) return false; memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); sshdr->response_code = (sense_buffer[0] & 0x7f); if (!scsi_sense_valid(sshdr)) return false; if (sshdr->response_code >= 0x72) { /* * descriptor format */ if (sb_len > 1) sshdr->sense_key = (sense_buffer[1] & 0xf); if (sb_len > 2) sshdr->asc = sense_buffer[2]; if (sb_len > 3) sshdr->ascq = sense_buffer[3]; if (sb_len > 7) sshdr->additional_length = sense_buffer[7]; } else { /* * fixed format */ if (sb_len > 2) sshdr->sense_key = (sense_buffer[2] & 0xf); if (sb_len > 7) { sb_len = (sb_len < (sense_buffer[7] + 8)) ? sb_len : (sense_buffer[7] + 8); if (sb_len > 12) sshdr->asc = sense_buffer[12]; if (sb_len > 13) sshdr->ascq = sense_buffer[13]; } } return true; } EXPORT_SYMBOL(scsi_normalize_sense); /** * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format. * @sense_buffer: byte array of descriptor format sense data * @sb_len: number of valid bytes in sense_buffer * @desc_type: value of descriptor type to find * (e.g. 0 -> information) * * Notes: * only valid when sense data is in descriptor format * * Return value: * pointer to start of (first) descriptor if found else NULL */ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, int desc_type) { int add_sen_len, add_len, desc_len, k; const u8 * descp; if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7]))) return NULL; if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73)) return NULL; add_sen_len = (add_sen_len < (sb_len - 8)) ? add_sen_len : (sb_len - 8); descp = &sense_buffer[8]; for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) { descp += desc_len; add_len = (k < (add_sen_len - 1)) ? 
descp[1]: -1; desc_len = add_len + 2; if (descp[0] == desc_type) return descp; if (add_len < 0) // short descriptor ?? break; } return NULL; } EXPORT_SYMBOL(scsi_sense_desc_find); /** * scsi_build_sense_buffer - build sense data in a buffer * @desc: Sense format (non zero == descriptor format, * 0 == fixed format) * @buf: Where to build sense data * @key: Sense key * @asc: Additional sense code * @ascq: Additional sense code qualifier * **/ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) { if (desc) { buf[0] = 0x72; /* descriptor, current */ buf[1] = key; buf[2] = asc; buf[3] = ascq; buf[7] = 0; } else { buf[0] = 0x70; /* fixed, current */ buf[2] = key; buf[7] = 0xa; buf[12] = asc; buf[13] = ascq; } } EXPORT_SYMBOL(scsi_build_sense_buffer); /** * scsi_set_sense_information - set the information field in a * formatted sense data buffer * @buf: Where to build sense data * @buf_len: buffer length * @info: 64-bit information value to be set * * Return value: * 0 on success or EINVAL for invalid sense buffer length **/ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) { if ((buf[0] & 0x7f) == 0x72) { u8 *ucp, len; len = buf[7]; ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0); if (!ucp) { buf[7] = len + 0xc; ucp = buf + 8 + len; } if (buf_len < len + 0xc) /* Not enough room for info */ return -EINVAL; ucp[0] = 0; ucp[1] = 0xa; ucp[2] = 0x80; /* Valid bit */ ucp[3] = 0; put_unaligned_be64(info, &ucp[4]); } else if ((buf[0] & 0x7f) == 0x70) { /* * Only set the 'VALID' bit if we can represent the value * correctly; otherwise just fill out the lower bytes and * clear the 'VALID' flag. 
*/ if (info <= 0xffffffffUL) buf[0] |= 0x80; else buf[0] &= 0x7f; put_unaligned_be32((u32)info, &buf[3]); } return 0; } EXPORT_SYMBOL(scsi_set_sense_information); /** * scsi_set_sense_field_pointer - set the field pointer sense key * specific information in a formatted sense data buffer * @buf: Where to build sense data * @buf_len: buffer length * @fp: field pointer to be set * @bp: bit pointer to be set * @cd: command/data bit * * Return value: * 0 on success or EINVAL for invalid sense buffer length */ int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd) { u8 *ucp, len; if ((buf[0] & 0x7f) == 0x72) { len = buf[7]; ucp = (char *)scsi_sense_desc_find(buf, len + 8, 2); if (!ucp) { buf[7] = len + 8; ucp = buf + 8 + len; } if (buf_len < len + 8) /* Not enough room for info */ return -EINVAL; ucp[0] = 2; ucp[1] = 6; ucp[4] = 0x80; /* Valid bit */ if (cd) ucp[4] |= 0x40; if (bp < 0x8) ucp[4] |= 0x8 | bp; put_unaligned_be16(fp, &ucp[5]); } else if ((buf[0] & 0x7f) == 0x70) { len = buf[7]; if (len < 18) buf[7] = 18; buf[15] = 0x80; if (cd) buf[15] |= 0x40; if (bp < 0x8) buf[15] |= 0x8 | bp; put_unaligned_be16(fp, &buf[16]); } return 0; } EXPORT_SYMBOL(scsi_set_sense_field_pointer);
gpl-2.0
Huexxx/diana
arch/s390/kvm/kvm-s390.c
726
20336
/* * s390host.c -- hosting zSeries kernel virtual machines * * Copyright IBM Corp. 2008,2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> * Heiko Carstens <heiko.carstens@de.ibm.com> * Christian Ehrhardt <ehrhardt@de.ibm.com> */ #include <linux/compiler.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/timer.h> #include <asm/asm-offsets.h> #include <asm/lowcore.h> #include <asm/pgtable.h> #include <asm/nmi.h> #include <asm/system.h> #include "kvm-s390.h" #include "gaccess.h" #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU struct kvm_stats_debugfs_item debugfs_entries[] = { { "userspace_handled", VCPU_STAT(exit_userspace) }, { "exit_null", VCPU_STAT(exit_null) }, { "exit_validity", VCPU_STAT(exit_validity) }, { "exit_stop_request", VCPU_STAT(exit_stop_request) }, { "exit_external_request", VCPU_STAT(exit_external_request) }, { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, { "exit_instruction", VCPU_STAT(exit_instruction) }, { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, { "instruction_lctl", VCPU_STAT(instruction_lctl) }, { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, { "deliver_restart_signal", 
VCPU_STAT(deliver_restart_signal) }, { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, { "exit_wait_state", VCPU_STAT(exit_wait_state) }, { "instruction_stidp", VCPU_STAT(instruction_stidp) }, { "instruction_spx", VCPU_STAT(instruction_spx) }, { "instruction_stpx", VCPU_STAT(instruction_stpx) }, { "instruction_stap", VCPU_STAT(instruction_stap) }, { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, { "instruction_stsch", VCPU_STAT(instruction_stsch) }, { "instruction_chsc", VCPU_STAT(instruction_chsc) }, { "instruction_stsi", VCPU_STAT(instruction_stsi) }, { "instruction_stfl", VCPU_STAT(instruction_stfl) }, { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, { "diagnose_44", VCPU_STAT(diagnose_44) }, { NULL } }; static unsigned long long *facilities; /* Section: not file related */ int kvm_arch_hardware_enable(void *garbage) { /* every s390 is virtualization enabled ;-) */ return 0; } void kvm_arch_hardware_disable(void *garbage) { } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_hardware_unsetup(void) { } void kvm_arch_check_processor_compat(void *rtn) { } int kvm_arch_init(void *opaque) { return 0; } void kvm_arch_exit(void) { } /* Section: device related */ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { if (ioctl == KVM_S390_ENABLE_SIE) return s390_enable_sie(); return -EINVAL; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_S390_PSW: r = 1; break; default: r = 0; } return r; } /* Section: vm related */ /* * Get (and clear) the dirty memory log for a memory slot. 
*/ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r; switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) break; r = kvm_s390_inject_vm(kvm, &s390int); break; } default: r = -ENOTTY; } return r; } struct kvm *kvm_arch_create_vm(void) { struct kvm *kvm; int rc; char debug_name[16]; rc = s390_enable_sie(); if (rc) goto out_nokvm; rc = -ENOMEM; kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); if (!kvm) goto out_nokvm; kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); if (!kvm->arch.sca) goto out_nosca; sprintf(debug_name, "kvm-%u", current->pid); kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); if (!kvm->arch.dbf) goto out_nodbf; spin_lock_init(&kvm->arch.float_int.lock); INIT_LIST_HEAD(&kvm->arch.float_int.list); debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); return kvm; out_nodbf: free_page((unsigned long)(kvm->arch.sca)); out_nosca: kfree(kvm); out_nokvm: return ERR_PTR(rc); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 3, "%s", "free cpu"); if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block) vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; smp_mb(); free_page((unsigned long)(vcpu->arch.sie_block)); kvm_vcpu_uninit(vcpu); kfree(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_destroy(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { } void kvm_arch_destroy_vm(struct kvm *kvm) { 
kvm_free_vcpus(kvm); kvm_free_physmem(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); cleanup_srcu_struct(&kvm->srcu); kfree(kvm); } /* Section: vcpu related */ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { return 0; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { /* Nothing todo */ } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { save_fp_regs(&vcpu->arch.host_fpregs); save_access_regs(vcpu->arch.host_acrs); vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); restore_access_regs(vcpu->arch.guest_acrs); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { save_fp_regs(&vcpu->arch.guest_fpregs); save_access_regs(vcpu->arch.guest_acrs); restore_fp_regs(&vcpu->arch.host_fpregs); restore_access_regs(vcpu->arch.host_acrs); } static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) { /* this equals initial cpu reset in pop, but we don't switch to ESA */ vcpu->arch.sie_block->gpsw.mask = 0UL; vcpu->arch.sie_block->gpsw.addr = 0UL; vcpu->arch.sie_block->prefix = 0UL; vcpu->arch.sie_block->ihcpu = 0xffff; vcpu->arch.sie_block->cputm = 0UL; vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); vcpu->arch.sie_block->gcr[0] = 0xE0UL; vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; vcpu->arch.guest_fpregs.fpc = 0; asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); vcpu->arch.sie_block->gbea = 1; } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests); vcpu->arch.sie_block->ecb = 2; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) facilities; hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, (unsigned long) vcpu); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; get_cpu_id(&vcpu->arch.cpu_id); 
vcpu->arch.cpu_id.version = 0xff; return 0; } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); int rc = -ENOMEM; if (!vcpu) goto out_nomem; vcpu->arch.sie_block = (struct kvm_s390_sie_block *) get_zeroed_page(GFP_KERNEL); if (!vcpu->arch.sie_block) goto out_free_cpu; vcpu->arch.sie_block->icpua = id; BUG_ON(!kvm->arch.sca); if (!kvm->arch.sca->cpu[id].sda) kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; spin_lock_init(&vcpu->arch.local_int.lock); INIT_LIST_HEAD(&vcpu->arch.local_int.list); vcpu->arch.local_int.float_int = &kvm->arch.float_int; spin_lock(&kvm->arch.float_int.lock); kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int; init_waitqueue_head(&vcpu->arch.local_int.wq); vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; spin_unlock(&kvm->arch.float_int.lock); rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) goto out_free_sie_block; VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, vcpu->arch.sie_block); return vcpu; out_free_sie_block: free_page((unsigned long)(vcpu->arch.sie_block)); out_free_cpu: kfree(vcpu); out_nomem: return ERR_PTR(rc); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { /* kvm common code refers to this, but never calls it */ BUG(); return 0; } static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) { vcpu_load(vcpu); kvm_s390_vcpu_initial_reset(vcpu); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu_load(vcpu); memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs)); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu_load(vcpu); memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs)); vcpu_put(vcpu); return 0; } int 
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { vcpu_load(vcpu); memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs)); memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { vcpu_load(vcpu); memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs)); memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { vcpu_load(vcpu); memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); vcpu->arch.guest_fpregs.fpc = fpu->fpc; vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { vcpu_load(vcpu); memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); fpu->fpc = vcpu->arch.guest_fpregs.fpc; vcpu_put(vcpu); return 0; } static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) { int rc = 0; vcpu_load(vcpu); if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) rc = -EBUSY; else { vcpu->run->psw_mask = psw.mask; vcpu->run->psw_addr = psw.addr; } vcpu_put(vcpu); return rc; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; /* not implemented yet */ } static void __vcpu_run(struct kvm_vcpu *vcpu) { memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); if (need_resched()) schedule(); if 
(test_thread_flag(TIF_MCCK_PENDING)) s390_handle_mcck(); kvm_s390_deliver_pending_interrupts(vcpu); vcpu->arch.sie_block->icptcode = 0; local_irq_disable(); kvm_guest_enter(); local_irq_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); local_irq_disable(); kvm_guest_exit(); local_irq_enable(); memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16); } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int rc; sigset_t sigsaved; vcpu_load(vcpu); rerun_vcpu: if (vcpu->requests) if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) kvm_s390_vcpu_set_mem(vcpu); /* verify, that memory has been registered */ if (!vcpu->arch.sie_block->gmslm) { vcpu_put(vcpu); VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu"); return -EINVAL; } if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); switch (kvm_run->exit_reason) { case KVM_EXIT_S390_SIEIC: case KVM_EXIT_UNKNOWN: case KVM_EXIT_INTR: case KVM_EXIT_S390_RESET: break; default: BUG(); } vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; might_fault(); do { __vcpu_run(vcpu); rc = kvm_handle_sie_intercept(vcpu); } while (!signal_pending(current) && !rc); if (rc == SIE_INTERCEPT_RERUNVCPU) goto rerun_vcpu; if (signal_pending(current) && !rc) { kvm_run->exit_reason = KVM_EXIT_INTR; rc = -EINTR; } if (rc == -EOPNOTSUPP) { /* intercept cannot be handled in-kernel, prepare kvm-run */ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; kvm_run->s390_sieic.icptcode = 
vcpu->arch.sie_block->icptcode; kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; rc = 0; } if (rc == -EREMOTE) { /* intercept was handled, but userspace support is needed * kvm_run has been prepared by the handler */ rc = 0; } kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); vcpu_put(vcpu); vcpu->stat.exit_userspace++; return rc; } static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, unsigned long n, int prefix) { if (prefix) return copy_to_guest(vcpu, guestdest, from, n); else return copy_to_guest_absolute(vcpu, guestdest, from, n); } /* * store status at address * we use have two special cases: * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit * KVM_S390_STORE_STATUS_PREFIXED: -> prefix */ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) { const unsigned char archmode = 1; int prefix; if (addr == KVM_S390_STORE_STATUS_NOADDR) { if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1)) return -EFAULT; addr = SAVE_AREA_BASE; prefix = 0; } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) { if (copy_to_guest(vcpu, 163ul, &archmode, 1)) return -EFAULT; addr = SAVE_AREA_BASE; prefix = 1; } else prefix = 0; if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), vcpu->arch.guest_gprs, 128, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), &vcpu->arch.sie_block->gpsw, 16, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), &vcpu->arch.sie_block->prefix, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_ctrl_reg), &vcpu->arch.guest_fpregs.fpc, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr 
+ offsetof(struct save_area, tod_reg), &vcpu->arch.sie_block->todpr, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), &vcpu->arch.sie_block->cputm, 8, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), &vcpu->arch.sie_block->ckc, 8, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), &vcpu->arch.guest_acrs, 64, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, ctrl_regs), &vcpu->arch.sie_block->gcr, 128, prefix)) return -EFAULT; return 0; } static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) { int rc; vcpu_load(vcpu); rc = __kvm_s390_vcpu_store_status(vcpu, addr); vcpu_put(vcpu); return rc; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; if (copy_from_user(&s390int, argp, sizeof(s390int))) return -EFAULT; return kvm_s390_inject_vcpu(vcpu, &s390int); } case KVM_S390_STORE_STATUS: return kvm_s390_vcpu_store_status(vcpu, arg); case KVM_S390_SET_INITIAL_PSW: { psw_t psw; if (copy_from_user(&psw, argp, sizeof(psw))) return -EFAULT; return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); } case KVM_S390_INITIAL_RESET: return kvm_arch_vcpu_ioctl_initial_reset(vcpu); default: ; } return -EINVAL; } /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a page boundary in userland and which has to end at a page boundary. The memory in userland is ok to be fragmented into various different vmas. 
It is okay to mmap() and munmap() stuff in this slot after doing this call at any time */ if (mem->slot) return -EINVAL; if (mem->guest_phys_addr) return -EINVAL; if (mem->userspace_addr & (PAGE_SIZE - 1)) return -EINVAL; if (mem->memory_size & (PAGE_SIZE - 1)) return -EINVAL; if (!user_alloc) return -EINVAL; return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { int i; struct kvm_vcpu *vcpu; /* request update of sie control block for all available vcpus */ kvm_for_each_vcpu(i, vcpu, kvm) { if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) continue; kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); } } void kvm_arch_flush_shadow(struct kvm *kvm) { } gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) { return gfn; } static int __init kvm_s390_init(void) { int ret; ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); if (ret) return ret; /* * guests can ask for up to 255+1 double words, we need a full page * to hold the maximum amount of facilites. On the other hand, we * only set facilities that are known to work in KVM. */ facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); if (!facilities) { kvm_exit(); return -ENOMEM; } stfle(facilities, 1); facilities[0] &= 0xff00fff3f0700000ULL; return 0; } static void __exit kvm_s390_exit(void) { free_page((unsigned long) facilities); kvm_exit(); } module_init(kvm_s390_init); module_exit(kvm_s390_exit);
gpl-2.0
GuneetAtwal/kernel_n9005
arch/x86/mm/init_64.c
1494
24318
/* * linux/arch/x86_64/mm/init.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz> * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de> */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/nmi.h> #include <linux/gfp.h> #include <asm/processor.h> #include <asm/bios_ebda.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <asm/smp.h> #include <asm/sections.h> #include <asm/kdebug.h> #include <asm/numa.h> #include <asm/cacheflush.h> #include <asm/init.h> #include <asm/uv/uv.h> #include <asm/setup.h> static int __init parse_direct_gbpages_off(char *arg) { direct_gbpages = 0; return 0; } early_param("nogbpages", parse_direct_gbpages_off); static int __init parse_direct_gbpages_on(char *arg) { direct_gbpages = 1; return 0; } early_param("gbpages", parse_direct_gbpages_on); /* * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the * physical space so we can cache the place of the first one and move * around without checking the pgd every time. */ pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; EXPORT_SYMBOL_GPL(__supported_pte_mask); int force_personality32; /* * noexec32=on|off * Control non executable heap for 32bit processes. 
* To control the stack too use noexec=off * * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) * off PROT_READ implies PROT_EXEC */ static int __init nonx32_setup(char *str) { if (!strcmp(str, "on")) force_personality32 &= ~READ_IMPLIES_EXEC; else if (!strcmp(str, "off")) force_personality32 |= READ_IMPLIES_EXEC; return 1; } __setup("noexec32=", nonx32_setup); /* * When memory was added/removed make sure all the processes MM have * suitable PGD entries in the local PGD level page. */ void sync_global_pgds(unsigned long start, unsigned long end) { unsigned long address; for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); struct page *page; if (pgd_none(*pgd_ref)) continue; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; pgd = (pgd_t *)page_address(page) + pgd_index(address); /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); spin_unlock(pgt_lock); } spin_unlock(&pgd_lock); } } /* * NOTE: This function is marked __ref because it calls __init function * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. */ static __ref void *spp_getpage(void) { void *ptr; if (after_bootmem) ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); else ptr = alloc_bootmem_pages(PAGE_SIZE); if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) { panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : ""); } pr_debug("spp_getpage %p\n", ptr); return ptr; } static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) { if (pgd_none(*pgd)) { pud_t *pud = (pud_t *)spp_getpage(); pgd_populate(&init_mm, pgd, pud); if (pud != pud_offset(pgd, 0)) printk(KERN_ERR "PAGETABLE BUG #00! 
%p <-> %p\n", pud, pud_offset(pgd, 0)); } return pud_offset(pgd, vaddr); } static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) { if (pud_none(*pud)) { pmd_t *pmd = (pmd_t *) spp_getpage(); pud_populate(&init_mm, pud, pmd); if (pmd != pmd_offset(pud, 0)) printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0)); } return pmd_offset(pud, vaddr); } static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) { if (pmd_none(*pmd)) { pte_t *pte = (pte_t *) spp_getpage(); pmd_populate_kernel(&init_mm, pmd, pte); if (pte != pte_offset_kernel(pmd, 0)) printk(KERN_ERR "PAGETABLE BUG #02!\n"); } return pte_offset_kernel(pmd, vaddr); } void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) { pud_t *pud; pmd_t *pmd; pte_t *pte; pud = pud_page + pud_index(vaddr); pmd = fill_pmd(pud, vaddr); pte = fill_pte(pmd, vaddr); set_pte(pte, new_pte); /* * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ __flush_tlb_one(vaddr); } void set_pte_vaddr(unsigned long vaddr, pte_t pteval) { pgd_t *pgd; pud_t *pud_page; pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); pgd = pgd_offset_k(vaddr); if (pgd_none(*pgd)) { printk(KERN_ERR "PGD FIXMAP MISSING, it should be setup in head.S!\n"); return; } pud_page = (pud_t*)pgd_page_vaddr(*pgd); set_pte_vaddr_pud(pud_page, vaddr, pteval); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { pgd_t *pgd; pud_t *pud; pgd = pgd_offset_k(vaddr); pud = fill_pud(pgd, vaddr); return fill_pmd(pud, vaddr); } pte_t * __init populate_extra_pte(unsigned long vaddr) { pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return fill_pte(pmd, vaddr); } /* * Create large page table mappings for a range of physical addresses. 
*/ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, pgprot_t prot) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { pud = (pud_t *) spp_getpage(); set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | _PAGE_USER)); } pud = pud_offset(pgd, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(phys | pgprot_val(prot))); } } void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) { __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE); } void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) { __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE); } /* * The head.S code sets up the kernel high mapping: * * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) * * phys_addr holds the negative offset to the kernel, which is added * to the compile time generated pmds. This results in invalid pmds up * to the point where we hit the physaddr 0 mapping. * * We limit the mappings to the region from _text to _brk_end. _brk_end * is rounded up to the 2MB boundary. 
This catches the invalid pmds as * well, as they are located before _text: */ void __init cleanup_highmap(void) { unsigned long vaddr = __START_KERNEL_map; unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; pmd_t *pmd = level2_kernel_pgt; for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { if (pmd_none(*pmd)) continue; if (vaddr < (unsigned long) _text || vaddr > end) set_pmd(pmd, __pmd(0)); } } static __ref void *alloc_low_page(unsigned long *phys) { unsigned long pfn = pgt_buf_end++; void *adr; if (after_bootmem) { adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); *phys = __pa(adr); return adr; } if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); clear_page(adr); *phys = pfn * PAGE_SIZE; return adr; } static __ref void *map_low_page(void *virt) { void *adr; unsigned long phys, left; if (after_bootmem) return virt; phys = __pa(virt); left = phys & (PAGE_SIZE - 1); adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); adr = (void *)(((unsigned long)adr) | left); return adr; } static __ref void unmap_low_page(void *adr) { if (after_bootmem) return; early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE); } static unsigned long __meminit phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, pgprot_t prot) { unsigned pages = 0; unsigned long last_map_addr = end; int i; pte_t *pte = pte_page + pte_index(addr); for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) { if (addr >= end) { if (!after_bootmem) { for(; i < PTRS_PER_PTE; i++, pte++) set_pte(pte, __pte(0)); } break; } /* * We will re-use the existing mapping. * Xen for example has some special requirements, like mapping * pagetable pages as RO. So assume someone who pre-setup * these mappings are more intelligent. 
*/ if (pte_val(*pte)) { pages++; continue; } if (0) printk(" pte=%p addr=%lx pte=%016lx\n", pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); pages++; set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot)); last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE; } update_page_count(PG_LEVEL_4K, pages); return last_map_addr; } static unsigned long __meminit phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, unsigned long page_size_mask, pgprot_t prot) { unsigned long pages = 0; unsigned long last_map_addr = end; int i = pmd_index(address); for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) { unsigned long pte_phys; pmd_t *pmd = pmd_page + pmd_index(address); pte_t *pte; pgprot_t new_prot = prot; if (address >= end) { if (!after_bootmem) { for (; i < PTRS_PER_PMD; i++, pmd++) set_pmd(pmd, __pmd(0)); } break; } if (pmd_val(*pmd)) { if (!pmd_large(*pmd)) { spin_lock(&init_mm.page_table_lock); pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd)); last_map_addr = phys_pte_init(pte, address, end, prot); unmap_low_page(pte); spin_unlock(&init_mm.page_table_lock); continue; } /* * If we are ok with PG_LEVEL_2M mapping, then we will * use the existing mapping, * * Otherwise, we will split the large page mapping but * use the same existing protection bits except for * large page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. 
*/ if (page_size_mask & (1 << PG_LEVEL_2M)) { pages++; continue; } new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); } if (page_size_mask & (1<<PG_LEVEL_2M)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pmd, pfn_pte(address >> PAGE_SHIFT, __pgprot(pgprot_val(prot) | _PAGE_PSE))); spin_unlock(&init_mm.page_table_lock); last_map_addr = (address & PMD_MASK) + PMD_SIZE; continue; } pte = alloc_low_page(&pte_phys); last_map_addr = phys_pte_init(pte, address, end, new_prot); unmap_low_page(pte); spin_lock(&init_mm.page_table_lock); pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); spin_unlock(&init_mm.page_table_lock); } update_page_count(PG_LEVEL_2M, pages); return last_map_addr; } static unsigned long __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, unsigned long page_size_mask) { unsigned long pages = 0; unsigned long last_map_addr = end; int i = pud_index(addr); for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) { unsigned long pmd_phys; pud_t *pud = pud_page + pud_index(addr); pmd_t *pmd; pgprot_t prot = PAGE_KERNEL; if (addr >= end) break; if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) { set_pud(pud, __pud(0)); continue; } if (pud_val(*pud)) { if (!pud_large(*pud)) { pmd = map_low_page(pmd_offset(pud, 0)); last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, prot); unmap_low_page(pmd); __flush_tlb_all(); continue; } /* * If we are ok with PG_LEVEL_1G mapping, then we will * use the existing mapping. * * Otherwise, we will split the gbpage mapping but use * the same existing protection bits except for large * page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. 
*/ if (page_size_mask & (1 << PG_LEVEL_1G)) { pages++; continue; } prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); } if (page_size_mask & (1<<PG_LEVEL_1G)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pud, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); spin_unlock(&init_mm.page_table_lock); last_map_addr = (addr & PUD_MASK) + PUD_SIZE; continue; } pmd = alloc_low_page(&pmd_phys); last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, prot); unmap_low_page(pmd); spin_lock(&init_mm.page_table_lock); pud_populate(&init_mm, pud, __va(pmd_phys)); spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); return last_map_addr; } unsigned long __meminit kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { bool pgd_changed = false; unsigned long next, last_map_addr = end; unsigned long addr; start = (unsigned long)__va(start); end = (unsigned long)__va(end); addr = start; for (; start < end; start = next) { pgd_t *pgd = pgd_offset_k(start); unsigned long pud_phys; pud_t *pud; next = (start + PGDIR_SIZE) & PGDIR_MASK; if (next > end) next = end; if (pgd_val(*pgd)) { pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd)); last_map_addr = phys_pud_init(pud, __pa(start), __pa(end), page_size_mask); unmap_low_page(pud); continue; } pud = alloc_low_page(&pud_phys); last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), page_size_mask); unmap_low_page(pud); spin_lock(&init_mm.page_table_lock); pgd_populate(&init_mm, pgd, __va(pud_phys)); spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } if (pgd_changed) sync_global_pgds(addr, end); __flush_tlb_all(); return last_map_addr; } #ifndef CONFIG_NUMA void __init initmem_init(void) { memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); } #endif void __init paging_init(void) { sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); /* * clear the default setting with node 0 * note: don't use 
nodes_clear here, that is really clearing when * numa support is not compiled in, and later node_set_state * will not set it back. */ node_clear_state(0, N_NORMAL_MEMORY); zone_sizes_init(); } /* * Memory hotplug specific functions */ #ifdef CONFIG_MEMORY_HOTPLUG /* * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need * updating. */ static void update_end_of_memory_vars(u64 start, u64 size) { unsigned long end_pfn = PFN_UP(start + size); if (end_pfn > max_pfn) { max_pfn = end_pfn; max_low_pfn = end_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; } } /* * Memory is added always to NORMAL zone. This means you will never get * additional DMA/DMA32 memory. */ int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdat = NODE_DATA(nid); struct zone *zone = pgdat->node_zones + ZONE_NORMAL; unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; last_mapped_pfn = init_memory_mapping(start, start + size); if (last_mapped_pfn > max_pfn_mapped) max_pfn_mapped = last_mapped_pfn; ret = __add_pages(nid, zone, start_pfn, nr_pages); WARN_ON_ONCE(ret); /* update max_pfn, max_low_pfn and high_memory */ update_end_of_memory_vars(start, size); return ret; } EXPORT_SYMBOL_GPL(arch_add_memory); #endif /* CONFIG_MEMORY_HOTPLUG */ static struct kcore_list kcore_vsyscall; void __init mem_init(void) { long codesize, reservedpages, datasize, initsize; unsigned long absent_pages; pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ reservedpages = 0; /* this will put all low memory onto the freelists */ #ifdef CONFIG_NUMA totalram_pages = numa_free_all_bootmem(); #else totalram_pages = free_all_bootmem(); #endif absent_pages = absent_pages_in_range(0, max_pfn); reservedpages = max_pfn - totalram_pages - absent_pages; after_bootmem = 1; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; 
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; /* Register memory areas for /proc/kcore */ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", nr_free_pages() << (PAGE_SHIFT-10), max_pfn << (PAGE_SHIFT-10), codesize >> 10, absent_pages << (PAGE_SHIFT-10), reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10); } #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); int kernel_set_to_readonly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, end); /* * Make the kernel identity mapping for text RW. Kernel text * mapping will always be RO. Refer to the comment in * static_protections() in pageattr.c */ set_memory_rw(start, (end - start) >> PAGE_SHIFT); } void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, end); /* * Set the kernel identity mapping for text RO. 
*/ set_memory_ro(start, (end - start) >> PAGE_SHIFT); } void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long rodata_start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table); unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata); unsigned long data_start = (unsigned long) &_sdata; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); set_memory_ro(start, (end - start) >> PAGE_SHIFT); kernel_set_to_readonly = 1; /* * The rodata section (but not the kernel text!) should also be * not-executable. */ set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT); rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); set_memory_rw(start, (end-start) >> PAGE_SHIFT); printk(KERN_INFO "Testing CPA: again\n"); set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif free_init_pages("unused kernel memory", (unsigned long) page_address(virt_to_page(text_end)), (unsigned long) page_address(virt_to_page(rodata_start))); free_init_pages("unused kernel memory", (unsigned long) page_address(virt_to_page(rodata_end)), (unsigned long) page_address(virt_to_page(data_start))); } #endif int kern_addr_valid(unsigned long addr) { unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; if (above != 0 && above != -1UL) return 0; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return 0; pud = pud_offset(pgd, addr); if (pud_none(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) return 0; return pfn_valid(pte_pfn(*pte)); } /* * A pseudo VMA to allow ptrace access for the vsyscall page. 
This only * covers the 64bit vsyscall page now. 32bit has a real VMA now and does * not need special handling anymore: */ static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), .vm_page_prot = PAGE_READONLY_EXEC, .vm_flags = VM_READ | VM_EXEC }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { #ifdef CONFIG_IA32_EMULATION if (!mm || mm->context.ia32_compat) return NULL; #endif return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma = get_gate_vma(mm); if (!vma) return 0; return (addr >= vma->vm_start) && (addr < vma->vm_end); } /* * Use this when you have no reliable mm, typically from interrupt * context. It is less reliable than using a task's mm and may give * false positives. */ int in_gate_area_no_mm(unsigned long addr) { return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); } const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) return "[vdso]"; if (vma == &gate_vma) return "[vsyscall]"; return NULL; } #ifdef CONFIG_X86_UV unsigned long memory_block_size_bytes(void) { if (is_uv_system()) { printk(KERN_INFO "UV: memory block size 2GB\n"); return 2UL * 1024 * 1024 * 1024; } return MIN_MEMORY_BLOCK_SIZE; } #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 
*/ static long __meminitdata addr_start, addr_end; static void __meminitdata *p_start, *p_end; static int __meminitdata node_start; int __meminit vmemmap_populate(struct page *start_page, unsigned long size, int node) { unsigned long addr = (unsigned long)start_page; unsigned long end = (unsigned long)(start_page + size); unsigned long next; pgd_t *pgd; pud_t *pud; pmd_t *pmd; for (; addr < end; addr = next) { void *p = NULL; pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; pud = vmemmap_pud_populate(pgd, addr, node); if (!pud) return -ENOMEM; if (!cpu_has_pse) { next = (addr + PAGE_SIZE) & PAGE_MASK; pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return -ENOMEM; p = vmemmap_pte_populate(pmd, addr, node); if (!p) return -ENOMEM; addr_end = addr + PAGE_SIZE; p_end = p + PAGE_SIZE; } else { next = pmd_addr_end(addr, end); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { pte_t entry; p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (!p) return -ENOMEM; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE); set_pmd(pmd, __pmd(pte_val(entry))); /* check to see if we have contiguous blocks */ if (p_end != p || node_start != node) { if (p_start) printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); addr_start = addr; node_start = node; p_start = p; } addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; } else vmemmap_verify((pte_t *)pmd, node, addr, next); } } sync_global_pgds((unsigned long)start_page, end); return 0; } void __meminit vmemmap_populate_print_last(void) { if (p_start) { printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); p_start = NULL; p_end = NULL; node_start = 0; } } #endif
gpl-2.0
assusdan/cyanogenmod_kernel_hs_puref
drivers/usb/gadget/lpc32xx_udc.c
2262
87349
/* * USB Gadget driver for LPC32xx * * Authors: * Kevin Wells <kevin.wells@nxp.com> * Mike James * Roland Stigge <stigge@antcom.de> * * Copyright (C) 2006 Philips Semiconductors * Copyright (C) 2009 NXP Semiconductors * Copyright (C) 2012 Roland Stigge * * Note: This driver is based on original work done by Mike James for * the LPC3180. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/clk.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/i2c.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/usb/isp1301.h> #include <asm/byteorder.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/system.h> #include <mach/platform.h> #include <mach/irqs.h> #include <mach/board.h> #ifdef CONFIG_USB_GADGET_DEBUG_FILES #include <linux/debugfs.h> #include <linux/seq_file.h> #endif /* * USB device configuration structure */ typedef void 
(*usc_chg_event)(int); struct lpc32xx_usbd_cfg { int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */ usc_chg_event conn_chgb; /* Connection change event (optional) */ usc_chg_event susp_chgb; /* Suspend/resume event (optional) */ usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */ }; /* * controller driver data structures */ /* 16 endpoints (not to be confused with 32 hardware endpoints) */ #define NUM_ENDPOINTS 16 /* * IRQ indices make reading the code a little easier */ #define IRQ_USB_LP 0 #define IRQ_USB_HP 1 #define IRQ_USB_DEVDMA 2 #define IRQ_USB_ATX 3 #define EP_OUT 0 /* RX (from host) */ #define EP_IN 1 /* TX (to host) */ /* Returns the interrupt mask for the selected hardware endpoint */ #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir)) #define EP_INT_TYPE 0 #define EP_ISO_TYPE 1 #define EP_BLK_TYPE 2 #define EP_CTL_TYPE 3 /* EP0 states */ #define WAIT_FOR_SETUP 0 /* Wait for setup packet */ #define DATA_IN 1 /* Expect dev->host transfer */ #define DATA_OUT 2 /* Expect host->dev transfer */ /* DD (DMA Descriptor) structure, requires word alignment, this is already * defined in the LPC32XX USB device header file, but this version is slightly * modified to tag some work data with each DMA descriptor. 
*/ struct lpc32xx_usbd_dd_gad { u32 dd_next_phy; u32 dd_setup; u32 dd_buffer_addr; u32 dd_status; u32 dd_iso_ps_mem_addr; u32 this_dma; u32 iso_status[6]; /* 5 spare */ u32 dd_next_v; }; /* * Logical endpoint structure */ struct lpc32xx_ep { struct usb_ep ep; struct list_head queue; struct lpc32xx_udc *udc; u32 hwep_num_base; /* Physical hardware EP */ u32 hwep_num; /* Maps to hardware endpoint */ u32 maxpacket; u32 lep; bool is_in; bool req_pending; u32 eptype; u32 totalints; bool wedge; }; /* * Common UDC structure */ struct lpc32xx_udc { struct usb_gadget gadget; struct usb_gadget_driver *driver; struct platform_device *pdev; struct device *dev; struct dentry *pde; spinlock_t lock; struct i2c_client *isp1301_i2c_client; /* Board and device specific */ struct lpc32xx_usbd_cfg *board; u32 io_p_start; u32 io_p_size; void __iomem *udp_baseaddr; int udp_irq[4]; struct clk *usb_pll_clk; struct clk *usb_slv_clk; struct clk *usb_otg_clk; /* DMA support */ u32 *udca_v_base; u32 udca_p_base; struct dma_pool *dd_cache; /* Common EP and control data */ u32 enabled_devints; u32 enabled_hwepints; u32 dev_status; u32 realized_eps; /* VBUS detection, pullup, and power flags */ u8 vbus; u8 last_vbus; int pullup; int poweron; /* Work queues related to I2C support */ struct work_struct pullup_job; struct work_struct vbus_job; struct work_struct power_job; /* USB device peripheral - various */ struct lpc32xx_ep ep[NUM_ENDPOINTS]; bool enabled; bool clocked; bool suspended; bool selfpowered; int ep0state; atomic_t enabled_ep_cnt; wait_queue_head_t ep_disable_wait_queue; }; /* * Endpoint request */ struct lpc32xx_request { struct usb_request req; struct list_head queue; struct lpc32xx_usbd_dd_gad *dd_desc_ptr; bool mapped; bool send_zlp; }; static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g) { return container_of(g, struct lpc32xx_udc, gadget); } #define ep_dbg(epp, fmt, arg...) \ dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg) #define ep_err(epp, fmt, arg...) 
\ dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg) #define ep_info(epp, fmt, arg...) \ dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg) #define ep_warn(epp, fmt, arg...) \ dev_warn(epp->udc->dev, "%s:" fmt, __func__, ## arg) #define UDCA_BUFF_SIZE (128) /* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will * be replaced with an inremap()ed pointer * */ #define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64) /* USB_CTRL bit defines */ #define USB_SLAVE_HCLK_EN (1 << 24) #define USB_HOST_NEED_CLK_EN (1 << 21) #define USB_DEV_NEED_CLK_EN (1 << 22) /********************************************************************** * USB device controller register offsets **********************************************************************/ #define USBD_DEVINTST(x) ((x) + 0x200) #define USBD_DEVINTEN(x) ((x) + 0x204) #define USBD_DEVINTCLR(x) ((x) + 0x208) #define USBD_DEVINTSET(x) ((x) + 0x20C) #define USBD_CMDCODE(x) ((x) + 0x210) #define USBD_CMDDATA(x) ((x) + 0x214) #define USBD_RXDATA(x) ((x) + 0x218) #define USBD_TXDATA(x) ((x) + 0x21C) #define USBD_RXPLEN(x) ((x) + 0x220) #define USBD_TXPLEN(x) ((x) + 0x224) #define USBD_CTRL(x) ((x) + 0x228) #define USBD_DEVINTPRI(x) ((x) + 0x22C) #define USBD_EPINTST(x) ((x) + 0x230) #define USBD_EPINTEN(x) ((x) + 0x234) #define USBD_EPINTCLR(x) ((x) + 0x238) #define USBD_EPINTSET(x) ((x) + 0x23C) #define USBD_EPINTPRI(x) ((x) + 0x240) #define USBD_REEP(x) ((x) + 0x244) #define USBD_EPIND(x) ((x) + 0x248) #define USBD_EPMAXPSIZE(x) ((x) + 0x24C) /* DMA support registers only below */ /* Set, clear, or get enabled state of the DMA request status. If * enabled, an IN or OUT token will start a DMA transfer for the EP */ #define USBD_DMARST(x) ((x) + 0x250) #define USBD_DMARCLR(x) ((x) + 0x254) #define USBD_DMARSET(x) ((x) + 0x258) /* DMA UDCA head pointer */ #define USBD_UDCAH(x) ((x) + 0x280) /* EP DMA status, enable, and disable. 
This is used to specifically * enabled or disable DMA for a specific EP */ #define USBD_EPDMAST(x) ((x) + 0x284) #define USBD_EPDMAEN(x) ((x) + 0x288) #define USBD_EPDMADIS(x) ((x) + 0x28C) /* DMA master interrupts enable and pending interrupts */ #define USBD_DMAINTST(x) ((x) + 0x290) #define USBD_DMAINTEN(x) ((x) + 0x294) /* DMA end of transfer interrupt enable, disable, status */ #define USBD_EOTINTST(x) ((x) + 0x2A0) #define USBD_EOTINTCLR(x) ((x) + 0x2A4) #define USBD_EOTINTSET(x) ((x) + 0x2A8) /* New DD request interrupt enable, disable, status */ #define USBD_NDDRTINTST(x) ((x) + 0x2AC) #define USBD_NDDRTINTCLR(x) ((x) + 0x2B0) #define USBD_NDDRTINTSET(x) ((x) + 0x2B4) /* DMA error interrupt enable, disable, status */ #define USBD_SYSERRTINTST(x) ((x) + 0x2B8) #define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC) #define USBD_SYSERRTINTSET(x) ((x) + 0x2C0) /********************************************************************** * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/ * USBD_DEVINTPRI register definitions **********************************************************************/ #define USBD_ERR_INT (1 << 9) #define USBD_EP_RLZED (1 << 8) #define USBD_TXENDPKT (1 << 7) #define USBD_RXENDPKT (1 << 6) #define USBD_CDFULL (1 << 5) #define USBD_CCEMPTY (1 << 4) #define USBD_DEV_STAT (1 << 3) #define USBD_EP_SLOW (1 << 2) #define USBD_EP_FAST (1 << 1) #define USBD_FRAME (1 << 0) /********************************************************************** * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/ * USBD_EPINTPRI register definitions **********************************************************************/ /* End point selection macro (RX) */ #define USBD_RX_EP_SEL(e) (1 << ((e) << 1)) /* End point selection macro (TX) */ #define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1)) /********************************************************************** * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/ * USBD_EPDMAEN/USBD_EPDMADIS/ * 
 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
 * register definitions
 **********************************************************************/
/* Endpoint selection macro */
#define USBD_EP_SEL(e)		(1 << (e))

/**********************************************************************
 * SBD_DMAINTST/USBD_DMAINTEN
 **********************************************************************/
#define USBD_SYS_ERR_INT	(1 << 2)
#define USBD_NEW_DD_INT		(1 << 1)
#define USBD_EOT_INT		(1 << 0)

/**********************************************************************
 * USBD_RXPLEN register definitions
 **********************************************************************/
#define USBD_PKT_RDY		(1 << 11)
#define USBD_DV			(1 << 10)
#define USBD_PK_LEN_MASK	0x3FF

/**********************************************************************
 * USBD_CTRL register definitions
 **********************************************************************/
#define USBD_LOG_ENDPOINT(e)	((e) << 2)
#define USBD_WR_EN		(1 << 1)
#define USBD_RD_EN		(1 << 0)

/**********************************************************************
 * USBD_CMDCODE register definitions
 **********************************************************************/
#define USBD_CMD_CODE(c)	((c) << 16)
#define USBD_CMD_PHASE(p)	((p) << 8)

/**********************************************************************
 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
 **********************************************************************/
#define USBD_DMAEP(e)		(1 << (e))

/* DD (DMA Descriptor) structure, requires word alignment */
struct lpc32xx_usbd_dd {
	u32 *dd_next;
	u32 dd_setup;
	u32 dd_buffer_addr;
	u32 dd_status;
	u32 dd_iso_ps_mem_addr;
};

/* dd_setup bit defines */
#define DD_SETUP_ATLE_DMA_MODE	0x01
#define DD_SETUP_NEXT_DD_VALID	0x04
#define DD_SETUP_ISO_EP		0x10
#define DD_SETUP_PACKETLEN(n)	(((n) & 0x7FF) << 5)
#define DD_SETUP_DMALENBYTES(n)	(((n) & 0xFFFF) << 16)

/* dd_status bit defines */
#define DD_STATUS_DD_RETIRED	0x01
#define DD_STATUS_STS_MASK	0x1E
#define DD_STATUS_STS_NS	0x00 /* Not serviced */
#define DD_STATUS_STS_BS	0x02 /* Being serviced */
#define DD_STATUS_STS_NC	0x04 /* Normal completion */
#define DD_STATUS_STS_DUR	0x06 /* Data underrun (short packet) */
#define DD_STATUS_STS_DOR	0x08 /* Data overrun */
#define DD_STATUS_STS_SE	0x12 /* System error */
#define DD_STATUS_PKT_VAL	0x20 /* Packet valid */
#define DD_STATUS_LSB_EX	0x40 /* LS byte extracted (ATLE) */
#define DD_STATUS_MSB_EX	0x80 /* MS byte extracted (ATLE) */
#define DD_STATUS_MLEN(n)	(((n) >> 8) & 0x3F)
#define DD_STATUS_CURDMACNT(n)	(((n) >> 16) & 0xFFFF)

/*
 *
 * Protocol engine bits below
 *
 */
/* Device Interrupt Bit Definitions */
#define FRAME_INT		0x00000001
#define EP_FAST_INT		0x00000002
#define EP_SLOW_INT		0x00000004
#define DEV_STAT_INT		0x00000008
#define CCEMTY_INT		0x00000010
#define CDFULL_INT		0x00000020
#define RxENDPKT_INT		0x00000040
#define TxENDPKT_INT		0x00000080
#define EP_RLZED_INT		0x00000100
#define ERR_INT			0x00000200

/* Rx & Tx Packet Length Definitions */
#define PKT_LNGTH_MASK		0x000003FF
#define PKT_DV			0x00000400
#define PKT_RDY			0x00000800

/* USB Control Definitions */
#define CTRL_RD_EN		0x00000001
#define CTRL_WR_EN		0x00000002

/* Command Codes */
#define CMD_SET_ADDR		0x00D00500
#define CMD_CFG_DEV		0x00D80500
#define CMD_SET_MODE		0x00F30500
#define CMD_RD_FRAME		0x00F50500
#define DAT_RD_FRAME		0x00F50200
#define CMD_RD_TEST		0x00FD0500
#define DAT_RD_TEST		0x00FD0200
#define CMD_SET_DEV_STAT	0x00FE0500
#define CMD_GET_DEV_STAT	0x00FE0500
#define DAT_GET_DEV_STAT	0x00FE0200
#define CMD_GET_ERR_CODE	0x00FF0500
#define DAT_GET_ERR_CODE	0x00FF0200
#define CMD_RD_ERR_STAT		0x00FB0500
#define DAT_RD_ERR_STAT		0x00FB0200
#define DAT_WR_BYTE(x)		(0x00000100 | ((x) << 16))
#define CMD_SEL_EP(x)		(0x00000500 | ((x) << 16))
#define DAT_SEL_EP(x)		(0x00000200 | ((x) << 16))
#define CMD_SEL_EP_CLRI(x)	(0x00400500 | ((x) << 16))
#define DAT_SEL_EP_CLRI(x)	(0x00400200 | ((x) << 16))
#define CMD_SET_EP_STAT(x)	(0x00400500 | ((x) << 16))
#define CMD_CLR_BUF		0x00F20500
#define DAT_CLR_BUF		0x00F20200
#define CMD_VALID_BUF		0x00FA0500

/* Device Address Register Definitions */
#define DEV_ADDR_MASK		0x7F
#define DEV_EN			0x80

/* Device Configure Register Definitions */
#define CONF_DVICE		0x01

/* Device Mode Register Definitions */
#define AP_CLK			0x01
#define INAK_CI			0x02
#define INAK_CO			0x04
#define INAK_II			0x08
#define INAK_IO			0x10
#define INAK_BI			0x20
#define INAK_BO			0x40

/* Device Status Register Definitions */
#define DEV_CON			0x01
#define DEV_CON_CH		0x02
#define DEV_SUS			0x04
#define DEV_SUS_CH		0x08
#define DEV_RST			0x10

/* Error Code Register Definitions */
#define ERR_EC_MASK		0x0F
#define ERR_EA			0x10

/* Error Status Register Definitions */
#define ERR_PID			0x01
#define ERR_UEPKT		0x02
#define ERR_DCRC		0x04
#define ERR_TIMOUT		0x08
#define ERR_EOP			0x10
#define ERR_B_OVRN		0x20
#define ERR_BTSTF		0x40
#define ERR_TGL			0x80

/* Endpoint Select Register Definitions */
#define EP_SEL_F		0x01
#define EP_SEL_ST		0x02
#define EP_SEL_STP		0x04
#define EP_SEL_PO		0x08
#define EP_SEL_EPN		0x10
#define EP_SEL_B_1_FULL		0x20
#define EP_SEL_B_2_FULL		0x40

/* Endpoint Status Register Definitions */
#define EP_STAT_ST		0x01
#define EP_STAT_DA		0x20
#define EP_STAT_RF_MO		0x40
#define EP_STAT_CND_ST		0x80

/* Clear Buffer Register Definitions */
#define CLR_BUF_PO		0x01

/* DMA Interrupt Bit Definitions */
#define EOT_INT			0x01
#define NDD_REQ_INT		0x02
#define SYS_ERR_INT		0x04

#define DRIVER_VERSION	"1.03"
static const char driver_name[] = "lpc32xx_udc";

/*
 *
 * proc interface support
 *
 */
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
static const char debug_filename[] = "driver/udc";

/* Dump one endpoint's name, type, interrupt count and queued requests
 * into the debugfs seq_file */
static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;

	seq_printf(s, "\n");
	seq_printf(s, "%12s, maxpacket %4d %3s",
			ep->ep.name, ep->ep.maxpacket,
			ep->is_in ? "in" : "out");
	seq_printf(s, " type %4s", epnames[ep->eptype]);
	seq_printf(s, " ints: %12d", ep->totalints);

	if (list_empty(&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else {
		list_for_each_entry(req, &ep->queue, queue) {
			u32 length = req->req.actual;

			seq_printf(s, "\treq %p len %d/%d buf %p\n",
				   &req->req, length,
				   req->req.length, req->req.buf);
		}
	}
}

/* seq_file show callback: overall controller state plus per-EP detail
 * (endpoints are only shown when the controller is enabled and powered) */
static int proc_udc_show(struct seq_file *s, void *unused)
{
	struct lpc32xx_udc *udc = s->private;
	struct lpc32xx_ep *ep;
	unsigned long flags;

	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
		   udc->vbus ? "present" : "off",
		   udc->enabled ? (udc->vbus ? "active" : "enabled") :
		   "disabled",
		   udc->selfpowered ? "self" : "VBUS",
		   udc->suspended ? ", suspended" : "",
		   udc->driver ? udc->driver->driver.name : "(none)");

	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
			proc_ep_show(s, ep);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int proc_udc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_udc_show, PDE_DATA(inode));
}

static const struct file_operations proc_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_udc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void create_debug_file(struct lpc32xx_udc *udc)
{
	udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
}

static void remove_debug_file(struct lpc32xx_udc *udc)
{
	if (udc->pde)
		debugfs_remove(udc->pde);
}

#else
static inline void create_debug_file(struct lpc32xx_udc *udc) {}
static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
#endif

/* Primary initialization sequence for the ISP1301 transceiver */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
	/* LPC32XX only supports DAT_SE0 USB mode */
	/* This sequence is important */

	/* Disable transparent UART mode first */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		MC1_UART_EN);

	/* Set full speed and SE0 mode */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));

	/*
	 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
	 */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));

	/* Driver VBUS_DRV high or low depending on board setup */
	if (udc->board->vbus_drv_pol != 0)
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
	else
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DRV);

	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enable when VBUS
	 * is detected */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1,
		(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));

	/* Discharge VBUS (just in case) */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
	msleep(1);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		OTG1_VBUS_DISCHRG);

	/* Clear and enable VBUS high edge interrupt */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);

	/* Enable usb_need_clk clock after transceiver is initialized */
	writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);

	dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
	dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
	dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
}

/* Enables or disables the USB device pullup via the ISP1301 transceiver */
static void isp1301_pullup_set(struct lpc32xx_udc *udc)
{
	if (udc->pullup)
		/* Enable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
	else
		/* Disable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_DP_PULLUP);
}

/* Workqueue handler: apply a deferred pullup state change (i2c is slow
 * and may sleep, so it cannot be done from atomic context) */
static void pullup_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, pullup_job);

	isp1301_pullup_set(udc);
}

/* Request a pullup state change; @block selects synchronous i2c access
 * versus deferral to the pullup workqueue */
static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
				  int block)
{
	if (en_pullup == udc->pullup)
		return;

	udc->pullup = en_pullup;
	if (block)
		isp1301_pullup_set(udc);
	else
		/* defer slow i2c pull up setting */
		schedule_work(&udc->pullup_job);
}

#ifdef CONFIG_PM
/* Powers up or down the ISP1301 transceiver */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0)
		/* Power up ISP1301 - this ISP1301 will automatically wakeup
		   when VBUS is detected */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
			MC2_GLOBAL_PWR_DN);
	else
		/* Power down ISP1301 */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
}

/* Workqueue handler: apply a deferred transceiver power state change */
static void power_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, power_job);

	isp1301_set_powerstate(udc, udc->poweron);
}
#endif

/*
 *
 * USB protocol engine command/data read/write helper functions
 *
 */
/* Issues a single command to the USB device state machine */
static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 pass = 0;
	int to;

	/* EP may lock on CLRI if this read isn't done */
	u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	(void) tmp;

	while (pass == 0) {
		writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));

		/* Write command code */
		writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
		to = 10000;
		/* Busy-wait for the command register to empty (CCEMPTY);
		 * retries the whole write if the bounded poll times out */
		while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
			 USBD_CCEMPTY) == 0) && (to > 0)) {
			to--;
		}

		if (to > 0)
			pass = 1;

		cpu_relax();
	}
}

/* Issues 2 commands (or command and data) to the USB device state machine */
static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
					   u32 data)
{
	udc_protocol_cmd_w(udc, cmd);
	udc_protocol_cmd_w(udc, data);
}

/* Issues a single command to the USB device state machine and reads
 * response data */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 tmp;
	int to = 1000;

	/* Write a command and read data from the protocol engine */
	writel((USBD_CDFULL | USBD_CCEMPTY),
	       USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Write command code */
	udc_protocol_cmd_w(udc, cmd);

	/* NOTE(review): tmp is read here but never used afterwards -
	 * presumably a deliberate dummy read like the one in
	 * udc_protocol_cmd_w(); confirm before removing */
	tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
	       && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev,
			"Protocol engine didn't receive response (CDFULL)\n");

	return readl(USBD_CMDDATA(udc->udp_baseaddr));
}

/*
 *
 * USB device interrupt mask support functions
 *
 */
/* Enable one or more USB device interrupts */
static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
{
	udc->enabled_devints |= devmask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB device interrupts */
static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
{
	udc->enabled_devints &= ~mask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB device interrupts */
static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
{
	writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/*
 *
 * Endpoint interrupt disable/enable functions
 *
 */
/* Enable one or more USB endpoint interrupts */
static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints |= (1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB endpoint interrupts */
static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints &= ~(1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB endpoint interrupts */
static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
}

/* Enable DMA for the HW channel */
static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
}

/* Disable DMA for the HW channel */
static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
}

/*
 *
 * Endpoint realize/unrealize functions
 *
 */
/* Before an endpoint can be used, it needs to be realized
 * in the USB protocol engine - this realizes the endpoint.
 * The interrupt (FIFO or DMA) is not enabled with this function */
static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
			     u32 maxpacket)
{
	int to = 1000;

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
	writel(hwep, USBD_EPIND(udc->udp_baseaddr));
	udc->realized_eps |= (1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
	writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));

	/* Wait until endpoint is realized in hardware */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
		  USBD_EP_RLZED)) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "EP not correctly realized in hardware\n");

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/* Unrealize an EP */
static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->realized_eps &= ~(1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
}

/*
 *
 * Endpoint support functions
 *
 */
/* Select and clear endpoint interrupt */
static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
	return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
}

/* Disables the endpoint in the USB protocol engine */
static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_DA));
}

/* Stalls the endpoint - endpoint will return STALL */
static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_ST));
}

/* Clear stall or reset endpoint */
static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(0));
}

/* Select an endpoint for endpoint status, clear, validate */
static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
}

/*
 *
 * Endpoint buffer management functions
 *
 */
/* Clear the current endpoint's buffer */
static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_CLR_BUF);
}

/* Validate the current endpoint's buffer */
static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_VALID_BUF);
}

/* Clear the endpoint interrupt, then select-and-clear it in the protocol
 * engine, returning the EP's status byte */
static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Clear EP interrupt */
	uda_clear_hwepint(udc, hwep);
	return udc_selep_clrint(udc, hwep);
}

/*
 *
 * USB EP DMA support
 *
 */
/* Allocate a DMA Descriptor */
static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
{
	dma_addr_t dma;
	struct lpc32xx_usbd_dd_gad *dd;

	dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
			udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
	if (dd)
		dd->this_dma = dma;

	return dd;
}

/* Free a DMA Descriptor */
static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
{
	dma_pool_free(udc->dd_cache, dd, dd->this_dma);
}

/*
 *
 * USB setup and shutdown functions
 *
 */
/* Enables or disables most of the USB system clocks when low power mode is
 * needed.
 Clocks are typically started on a connection event, and disabled
 * when a cable is disconnected */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0) {
		if (udc->clocked)
			return;

		udc->clocked = 1;

		/* 48MHz PLL up */
		clk_enable(udc->usb_pll_clk);

		/* Enable the USB device clock */
		writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
			     USB_CTRL);

		clk_enable(udc->usb_otg_clk);
	} else {
		if (!udc->clocked)
			return;

		udc->clocked = 0;

		/* Never disable the USB_HCLK during normal operation */

		/* 48MHz PLL dpwn */
		clk_disable(udc->usb_pll_clk);

		/* Disable the USB device clock */
		writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
			     USB_CTRL);

		clk_disable(udc->usb_otg_clk);
	}
}

/* Set/reset USB device address */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* Address will be latched at the end of the status phase, or
	   latched immediately if function is called twice */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}

/* Setup up a IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);

	/* Clear ZLP if last packet is not of MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;

	return 0;
}

/* Setup up a OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);
	return 0;
}

/* Quiesce the controller: disconnect, mask all device and endpoint
 * interrupts, unrealize every EP and tear down all DMA state */
static void udc_disable(struct lpc32xx_udc *udc)
{
	u32 i;

	/* Disable device */
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));

	/* Disable all device interrupts (including EP0) */
	uda_disable_devint(udc, 0x3FF);

	/* Disable and reset all endpoint interrupts */
	for (i = 0; i < 32; i++) {
		uda_disable_hwepint(udc, i);
		uda_clear_hwepint(udc, i);
		udc_disable_hwep(udc, i);
		udc_unrealize_hwep(udc, i);
		udc->udca_v_base[i] = 0;

		/* Disable and clear all interrupts and DMA */
		udc_ep_dma_disable(udc, i);
		writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
	}

	/* Disable DMA interrupts */
	writel(0, USBD_DMAINTEN(udc->udp_baseaddr));

	writel(0, USBD_UDCAH(udc->udp_baseaddr));
}

/* Bring the controller to a known connected state with only EP0 (FIFO
 * mode) realized and the core device/DMA interrupts enabled */
static void udc_enable(struct lpc32xx_udc *udc)
{
	u32 i;
	struct lpc32xx_ep *ep = &udc->ep[0];

	/* Start with known state */
	udc_disable(udc);

	/* Enable device */
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));

	/* EP interrupts on high priority, FRAME interrupt on low priority */
	writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
	writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));

	/* Clear any pending device interrupts */
	writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Setup UDCA - not yet used (DMA) */
	writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));

	/* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
	for (i = 0; i <= 1; i++) {
		udc_realize_hwep(udc, i, ep->ep.maxpacket);
		uda_enable_hwepint(udc, i);
		udc_select_hwep(udc, i);
		udc_clrstall_hwep(udc, i);
		udc_clr_buffer_hwep(udc, i);
	}

	/* Device interrupt setup */
	uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
			       USBD_EP_FAST));
	uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
				USBD_EP_FAST));

	/* Set device address to 0 - called twice to force a latch in the USB
	   engine without the need of a setup packet status closure */
	udc_set_address(udc, 0);
	udc_set_address(udc, 0);

	/* Enable master DMA interrupts */
	writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
	       USBD_DMAINTEN(udc->udp_baseaddr));

	udc->dev_status = 0;
}

/*
 *
 * USB device board specific events handled via callbacks
 *
 */
/* Connection change event - notify board function of change */
static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a connection change event (optional) */
	if (udc->board->conn_chgb != NULL)
		udc->board->conn_chgb(conn);
}

/* Suspend/resume event - notify board function of change */
static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a Suspend/resume change event (optional) */
	if (udc->board->susp_chgb != NULL)
		udc->board->susp_chgb(conn);

	if (conn)
		udc->suspended = 0;
	else
		udc->suspended = 1;
}

/* Remote wakeup enable/disable - notify board function of change */
static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
{
	if (udc->board->rmwk_chgb != NULL)
		udc->board->rmwk_chgb(udc->dev_status &
				      (1 << USB_DEVICE_REMOTE_WAKEUP));
}

/* Reads data from FIFO, adjusts for alignment and data size */
static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));

			/* NOTE(review): the cap of 3 (not 4) and the shift by
			 * (n * 8) rather than (i * 8) look suspicious when
			 * compared with udc_stuff_fifo()'s unaligned case -
			 * confirm against the LPC32x0 UM before changing */
			bl = bytes - n;
			if (bl > 3)
				bl = 3;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}

/* Read data from the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. This function should only be called if a
 * packet is known to be ready to read for the endpoint. Note that the
 * endpoint must be selected in the protocol engine prior to this call. */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;

	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));

	/* Wait until packet is ready */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");

	/* Mask out count */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;

	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);

	return tmp;
}

/* Stuffs data into the FIFO, adjusts for alignment and data size */
static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			tmp = 0;
			for (i = 0; i < bl; i++)
				tmp |= data[n + i] << (i * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4) {
			tmp = *p16++ & 0xFFFF;
			tmp |= (*p16++ & 0xFFFF) << 16;
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;
	}
}

/* Write data to the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. Note that the endpoint must be selected
 * in the protocol engine prior to this call. */
static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep,
			   u32 *data, u32 bytes)
{
	u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;

	if ((bytes > 0) && (data == NULL))
		return;

	/* Setup write of endpoint */
	writel(hwwep, USBD_CTRL(udc->udp_baseaddr));

	writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));

	/* Need at least 1 byte to trigger TX */
	if (bytes == 0)
		writel(0, USBD_TXDATA(udc->udp_baseaddr));
	else
		udc_stuff_fifo(udc, (u8 *) data, bytes);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	udc_val_buffer_hwep(udc, hwep);
}

/* USB device reset - resets USB to a default state with just EP0
   enabled */
static void uda_usb_reset(struct lpc32xx_udc *udc)
{
	u32 i = 0;
	/* Re-init device controller and EP0 */
	udc_enable(udc);
	udc->gadget.speed = USB_SPEED_FULL;

	for (i = 1; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];
		ep->req_pending = 0;
	}
}

/* Send a ZLP on EP0 */
static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
{
	udc_write_hwep(udc, EP_IN, NULL, 0);
}

/* Get current frame number */
static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
{
	u16 flo, fhi;

	udc_protocol_cmd_w(udc, CMD_RD_FRAME);
	flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
	fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);

	return (fhi << 8) | flo;
}

/* Set the device as configured - enables all endpoints */
static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
}

/* Set the device as unconfigured - disables all endpoints */
static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
}

/* reinit == restore initial software state */
static void udc_reinit(struct lpc32xx_udc *udc)
{
	u32 i;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];

		/* ep0 is not placed on the gadget's general EP list */
		if (i != 0)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		ep->ep.maxpacket = ep->maxpacket;
		INIT_LIST_HEAD(&ep->queue);
		ep->req_pending = 0;
	}

	udc->ep0state = WAIT_FOR_SETUP;
}

/* Must be called with lock */
static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
{
	struct lpc32xx_udc *udc = ep->udc;

	list_del_init(&req->queue);
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (ep->lep) {
		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);

		/* Free DDs */
		udc_dd_free(udc, req->dd_desc_ptr);
	}

	if (status && status != -ESHUTDOWN)
		ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);

	ep->req_pending = 0;
	/* Drop the UDC lock around the gadget completion callback */
	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}

/* Must be called with lock */
static void nuke(struct lpc32xx_ep *ep, int status)
{
	struct lpc32xx_request *req;

	/* Complete every queued request with the given status */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
		done(ep, req, status);
	}

	if (status == -ESHUTDOWN) {
		uda_disable_hwepint(ep->udc, ep->hwep_num);
		udc_disable_hwep(ep->udc, ep->hwep_num);
	}
}

/* IN endpoint 0 transfer */
static int udc_ep0_in_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tsend, ts = 0;

	if (list_empty(&ep0->queue))
		/* Nothing to send */
		return 0;
	else
		req = list_entry(ep0->queue.next,
				 struct lpc32xx_request, queue);

	tsend = ts = req->req.length - req->req.actual;
	if (ts == 0) {
		/* Send a ZLP */
		udc_ep0_send_zlp(udc);
		done(ep0, req, 0);
		return 1;
	} else if (ts > ep0->ep.maxpacket)
		ts = ep0->ep.maxpacket; /* Just send what we can */

	/* Write data to the EP0 FIFO and start transfer */
	udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);

	/* Increment data pointer */
	req->req.actual += ts;

	if (tsend >= ep0->ep.maxpacket)
		return 0; /* Stay in data transfer state */

	/* Transfer request is complete */
	udc->ep0state = WAIT_FOR_SETUP;
	done(ep0, req, 0);
	return 1;
}

/* OUT endpoint 0 transfer */
static int udc_ep0_out_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tr, bufferspace;

	if (list_empty(&ep0->queue))
		return 0;
	else
		req = list_entry(ep0->queue.next,
				 struct lpc32xx_request, queue);

	if (req) {
		if (req->req.length == 0) {
			/* Just dequeue request */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}

		/* Get data from FIFO */
		bufferspace = req->req.length - req->req.actual;
		if (bufferspace > ep0->ep.maxpacket)
			bufferspace = ep0->ep.maxpacket;

		/* Copy data to buffer */
		prefetchw(req->req.buf + req->req.actual);
		tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
				   bufferspace);
		req->req.actual += bufferspace;

		if (tr < ep0->ep.maxpacket) {
			/* This is the last packet (short packet ends phase) */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}
	}

	return 0;
}

/* Must be called with lock */
static void stop_activity(struct lpc32xx_udc *udc)
{
	struct usb_gadget_driver *driver = udc->driver;
	int i;

	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->suspended = 0;

	/* Shut down every endpoint, flushing queued requests */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];
		nuke(ep, -ESHUTDOWN);
	}
	if (driver) {
		/* Drop lock around the gadget driver's disconnect callback */
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}

	isp1301_pullup_enable(udc, 0, 0);
	udc_disable(udc);
	udc_reinit(udc);
}

/*
 * Activate or kill host pullup
 * Can be called with or without lock
 */
static void pullup(struct lpc32xx_udc *udc, int is_on)
{
	if (!udc->clocked)
		return;

	/* Never drive the pullup while disabled or with no VBUS */
	if (!udc->enabled || !udc->vbus)
		is_on = 0;

	if (is_on != udc->pullup)
		isp1301_pullup_enable(udc, is_on, 0);
}

/* Must be called without lock */
static int lpc32xx_ep_disable(struct usb_ep *_ep)
{
	struct lpc32xx_ep
*ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	unsigned long flags;

	/* EP0 and already-disabled endpoints cannot be disabled here */
	if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
		return -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);

	nuke(ep, -ESHUTDOWN);

	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));

	/* Remove the DD pointer in the UDCA */
	udc->udca_v_base[ep->hwep_num] = 0;

	/* Disable and reset endpoint and interrupt */
	uda_clear_hwepint(udc, ep->hwep_num);
	udc_unrealize_hwep(udc, ep->hwep_num);

	ep->hwep_num = 0;

	spin_unlock_irqrestore(&udc->lock, flags);

	/* Wake anyone waiting in vbus_session/stop for all EPs to go away */
	atomic_dec(&udc->enabled_ep_cnt);
	wake_up(&udc->ep_disable_wait_queue);

	return 0;
}

/* Must be called without lock */
static int lpc32xx_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	u16 maxpacket;
	u32 tmp;
	unsigned long flags;

	/* Verify EP data */
	if ((!_ep) || (!ep) || (!desc) ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT)) {
		dev_dbg(udc->dev, "bad ep or descriptor\n");
		return -EINVAL;
	}
	maxpacket = usb_endpoint_maxp(desc);
	if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
		dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
		return -EINVAL;
	}

	/* Don't touch EP0 */
	if (ep->hwep_num_base == 0) {
		dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
		return -EINVAL;
	}

	/* Is driver ready? */
	if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_dbg(udc->dev, "bogus device state\n");
		return -ESHUTDOWN;
	}

	tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	switch (tmp) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Only the dedicated EP0 may be a control endpoint */
		return -EINVAL;

	case USB_ENDPOINT_XFER_INT:
		if (maxpacket > ep->maxpacket) {
			dev_dbg(udc->dev,
				"Bad INT endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		switch (maxpacket) {
		case 8:
		case 16:
		case 32:
		case 64:
			break;

		default:
			dev_dbg(udc->dev,
				"Bad BULK endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		break;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/* Initialize endpoint to match the selected descriptor */
	ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ep->ep.maxpacket = maxpacket;

	/* Map hardware endpoint from base and direction */
	if (ep->is_in)
		/* IN endpoints are offset 1 from the OUT endpoint */
		ep->hwep_num = ep->hwep_num_base + EP_IN;
	else
		ep->hwep_num = ep->hwep_num_base;

	ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
	       ep->hwep_num, maxpacket, (ep->is_in == 1));

	/* Realize the endpoint, interrupt is enabled later when
	 * buffers are queued, IN EPs will NAK until buffers are ready */
	udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
	udc_clr_buffer_hwep(udc, ep->hwep_num);
	uda_disable_hwepint(udc, ep->hwep_num);
	udc_clrstall_hwep(udc, ep->hwep_num);

	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));

	spin_unlock_irqrestore(&udc->lock, flags);

	atomic_inc(&udc->enabled_ep_cnt);
	return 0;
}

/*
 * Allocate a USB request list
 * Can be called with or without lock
 */
static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t
gfp_flags) { struct lpc32xx_request *req; req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); return &req->req; } /* * De-allocate a USB request list * Can be called with or without lock */ static void lpc32xx_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct lpc32xx_request *req; req = container_of(_req, struct lpc32xx_request, req); BUG_ON(!list_empty(&req->queue)); kfree(req); } /* Must be called without lock */ static int lpc32xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct lpc32xx_request *req; struct lpc32xx_ep *ep; struct lpc32xx_udc *udc; unsigned long flags; int status = 0; req = container_of(_req, struct lpc32xx_request, req); ep = container_of(_ep, struct lpc32xx_ep, ep); if (!_req || !_req->complete || !_req->buf || !list_empty(&req->queue)) return -EINVAL; udc = ep->udc; if (!_ep) { dev_dbg(udc->dev, "invalid ep\n"); return -EINVAL; } if ((!udc) || (!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { dev_dbg(udc->dev, "invalid device\n"); return -EINVAL; } if (ep->lep) { struct lpc32xx_usbd_dd_gad *dd; status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in); if (status) return status; /* For the request, build a list of DDs */ dd = udc_dd_alloc(udc); if (!dd) { /* Error allocating DD */ return -ENOMEM; } req->dd_desc_ptr = dd; /* Setup the DMA descriptor */ dd->dd_next_phy = dd->dd_next_v = 0; dd->dd_buffer_addr = req->req.dma; dd->dd_status = 0; /* Special handling for ISO EPs */ if (ep->eptype == EP_ISO_TYPE) { dd->dd_setup = DD_SETUP_ISO_EP | DD_SETUP_PACKETLEN(0) | DD_SETUP_DMALENBYTES(1); dd->dd_iso_ps_mem_addr = dd->this_dma + 24; if (ep->is_in) dd->iso_status[0] = req->req.length; else dd->iso_status[0] = 0; } else dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) | DD_SETUP_DMALENBYTES(req->req.length); } ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name, _req, _req->length, _req->buf, 
ep->is_in, _req->zero); spin_lock_irqsave(&udc->lock, flags); _req->status = -EINPROGRESS; _req->actual = 0; req->send_zlp = _req->zero; /* Kickstart empty queues */ if (list_empty(&ep->queue)) { list_add_tail(&req->queue, &ep->queue); if (ep->hwep_num_base == 0) { /* Handle expected data direction */ if (ep->is_in) { /* IN packet to host */ udc->ep0state = DATA_IN; status = udc_ep0_in_req(udc); } else { /* OUT packet from host */ udc->ep0state = DATA_OUT; status = udc_ep0_out_req(udc); } } else if (ep->is_in) { /* IN packet to host and kick off transfer */ if (!ep->req_pending) udc_ep_in_req_dma(udc, ep); } else /* OUT packet from host and kick off list */ if (!ep->req_pending) udc_ep_out_req_dma(udc, ep); } else list_add_tail(&req->queue, &ep->queue); spin_unlock_irqrestore(&udc->lock, flags); return (status < 0) ? status : 0; } /* Must be called without lock */ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct lpc32xx_ep *ep; struct lpc32xx_request *req; unsigned long flags; ep = container_of(_ep, struct lpc32xx_ep, ep); if (!_ep || ep->hwep_num_base == 0) return -EINVAL; spin_lock_irqsave(&ep->udc->lock, flags); /* make sure it's actually queued on this endpoint */ list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) break; } if (&req->req != _req) { spin_unlock_irqrestore(&ep->udc->lock, flags); return -EINVAL; } done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->udc->lock, flags); return 0; } /* Must be called without lock */ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); struct lpc32xx_udc *udc = ep->udc; unsigned long flags; if ((!ep) || (ep->hwep_num <= 1)) return -EINVAL; /* Don't halt an IN EP */ if (ep->is_in) return -EAGAIN; spin_lock_irqsave(&udc->lock, flags); if (value == 1) { /* stall */ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), DAT_WR_BYTE(EP_STAT_ST)); } else { /* End stall */ ep->wedge = 0; 
udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), DAT_WR_BYTE(0)); } spin_unlock_irqrestore(&udc->lock, flags); return 0; } /* set the halt feature and ignores clear requests */ static int lpc32xx_ep_set_wedge(struct usb_ep *_ep) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); if (!_ep || !ep->udc) return -EINVAL; ep->wedge = 1; return usb_ep_set_halt(_ep); } static const struct usb_ep_ops lpc32xx_ep_ops = { .enable = lpc32xx_ep_enable, .disable = lpc32xx_ep_disable, .alloc_request = lpc32xx_ep_alloc_request, .free_request = lpc32xx_ep_free_request, .queue = lpc32xx_ep_queue, .dequeue = lpc32xx_ep_dequeue, .set_halt = lpc32xx_ep_set_halt, .set_wedge = lpc32xx_ep_set_wedge, }; /* Send a ZLP on a non-0 IN EP */ void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) { /* Clear EP status */ udc_clearep_getsts(udc, ep->hwep_num); /* Send ZLP via FIFO mechanism */ udc_write_hwep(udc, ep->hwep_num, NULL, 0); } /* * Handle EP completion for ZLP * This function will only be called when a delayed ZLP needs to be sent out * after a DMA transfer has filled both buffers. */ void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) { u32 epstatus; struct lpc32xx_request *req; if (ep->hwep_num <= 0) return; uda_clear_hwepint(udc, ep->hwep_num); /* If this interrupt isn't enabled, return now */ if (!(udc->enabled_hwepints & (1 << ep->hwep_num))) return; /* Get endpoint status */ epstatus = udc_clearep_getsts(udc, ep->hwep_num); /* * This should never happen, but protect against writing to the * buffer when full. 
 */
	if (epstatus & EP_SEL_F)
		return;

	if (ep->is_in) {
		udc_send_in_zlp(udc, ep);
		uda_disable_hwepint(udc, ep->hwep_num);
	} else
		return;

	/* If there isn't a request waiting, something went wrong */
	/*
	 * NOTE(review): list_entry() never returns NULL, so this guard
	 * cannot catch an empty queue — verify callers guarantee a queued
	 * request at this point.
	 */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	if (req) {
		done(ep, req, 0);

		/* Start another request if ready */
		if (!list_empty(&ep->queue)) {
			if (ep->is_in)
				udc_ep_in_req_dma(udc, ep);
			else
				udc_ep_out_req_dma(udc, ep);
		} else
			ep->req_pending = 0;
	}

}

/* DMA end of transfer completion */
static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 status, epstatus;
	struct lpc32xx_request *req;
	struct lpc32xx_usbd_dd_gad *dd;

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep->totalints++;
#endif

	/* NOTE(review): as above, list_entry() cannot yield NULL, so this
	 * "no req" path is effectively unreachable on an empty queue */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	if (!req) {
		ep_err(ep, "DMA interrupt on no req!\n");
		return;
	}
	dd = req->dd_desc_ptr;

	/* DMA descriptor should always be retired for this call */
	if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
		ep_warn(ep, "DMA descriptor did not retire\n");

	/* Disable DMA */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
	writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));

	/* System error? */
	if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
	    (1 << ep->hwep_num)) {
		writel((1 << ep->hwep_num),
		       USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		ep_err(ep, "AHB critical error!\n");
		ep->req_pending = 0;

		/* The error could have occurred on a packet of a multipacket
		 * transfer, so recovering the transfer is not possible. Close
		 * the request with an error */
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* Handle the current DD's status */
	status = dd->dd_status;
	switch (status & DD_STATUS_STS_MASK) {
	case DD_STATUS_STS_NS:
		/* DD not serviced? This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
		       status);

		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_BS:
		/* Interrupt only fires on EOT - This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_NC:
	case DD_STATUS_STS_DUR:
		/* Really just a short packet, not an underrun */
		/* This is a good status and what we expect */
		break;

	default:
		/* Data overrun, system error, or unknown */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* ISO endpoints are handled differently */
	if (ep->eptype == EP_ISO_TYPE) {
		if (ep->is_in)
			req->req.actual = req->req.length;
		else
			req->req.actual = dd->iso_status[0] & 0xFFFF;
	} else
		req->req.actual += DD_STATUS_CURDMACNT(status);

	/* Send a ZLP if necessary. This will be done for non-int
	 * packets which have a size that is a divisor of MAXP */
	if (req->send_zlp) {
		/*
		 * If at least 1 buffer is available, send the ZLP now.
		 * Otherwise, the ZLP send needs to be deferred until a
		 * buffer is available.
		 */
		if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
			udc_clearep_getsts(udc, ep->hwep_num);
			uda_enable_hwepint(udc, ep->hwep_num);
			epstatus = udc_clearep_getsts(udc, ep->hwep_num);

			/* Let the EP interrupt handle the ZLP */
			return;
		} else
			udc_send_in_zlp(udc, ep);
	}

	/* Transfer request is complete */
	done(ep, req, 0);

	/* Start another request if ready */
	udc_clearep_getsts(udc, ep->hwep_num);
	if (!list_empty((&ep->queue))) {
		if (ep->is_in)
			udc_ep_in_req_dma(udc, ep);
		else
			udc_ep_out_req_dma(udc, ep);
	} else
		ep->req_pending = 0;
}

/*
 *
 * Endpoint 0 functions
 *
 */

/* Dispatch a device-level status change (reset/connect/suspend) */
static void udc_handle_dev(struct lpc32xx_udc *udc)
{
	u32 tmp;

	udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
	tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);

	if (tmp & DEV_RST)
		uda_usb_reset(udc);
	else if (tmp & DEV_CON_CH)
		uda_power_event(udc, (tmp & DEV_CON));
	else if (tmp & DEV_SUS_CH) {
		if (tmp & DEV_SUS) {
			if (udc->vbus == 0)
				stop_activity(udc);
			else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
				 udc->driver) {
				/* Power down transceiver */
				udc->poweron = 0;
				schedule_work(&udc->pullup_job);
				uda_resm_susp_event(udc, 1);
			}
		} else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
			   udc->driver && udc->vbus) {
			uda_resm_susp_event(udc, 0);

			/* Power up transceiver */
			udc->poweron = 1;
			schedule_work(&udc->pullup_job);
		}
	}
}

/* Answer a GET_STATUS control request for device/interface/endpoint */
static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
{
	struct lpc32xx_ep *ep;
	u32 ep0buff = 0, tmp;

	switch (reqtype & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		break; /* Not supported */

	case USB_RECIP_DEVICE:
		ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
		if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
			ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
		break;

	case USB_RECIP_ENDPOINT:
		tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
		/*
		 * NOTE(review): ep is computed from udc->ep[tmp] before the
		 * range check below; it is only dereferenced after the check,
		 * but the out-of-range address computation should be
		 * confirmed harmless (or the two statements swapped).
		 */
		ep = &udc->ep[tmp];
		if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
			return -EOPNOTSUPP;

		if (wIndex & USB_DIR_IN) {
			if (!ep->is_in)
				return -EOPNOTSUPP; /* Something's wrong */
		} else if (ep->is_in)
			return -EOPNOTSUPP; /* Not an IN endpoint */
	/* Get status of the endpoint */
		udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
		tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));

		if (tmp & EP_SEL_ST)
			ep0buff = (1 << USB_ENDPOINT_HALT);
		else
			ep0buff = 0;
		break;

	default:
		break;
	}

	/* Return data (2-byte status word) via the EP0 IN FIFO */
	udc_write_hwep(udc, EP_IN, &ep0buff, 2);

	return 0;
}

/* Read and dispatch an 8-byte SETUP packet from the EP0 OUT FIFO */
static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
	struct usb_ctrlrequest ctrlpkt;
	int i, bytes;
	u16 wIndex, wValue, wLength, reqtype, req, tmp;

	/* Nuke previous transfers */
	nuke(ep0, -EPROTO);

	/* Get setup packet */
	bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
	if (bytes != 8) {
		ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
			bytes);
		return;
	}

	/* Native endianness */
	wIndex = le16_to_cpu(ctrlpkt.wIndex);
	wValue = le16_to_cpu(ctrlpkt.wValue);
	wLength = le16_to_cpu(ctrlpkt.wLength);
	reqtype = le16_to_cpu(ctrlpkt.bRequestType);

	/* Set direction of EP0 */
	if (likely(reqtype & USB_DIR_IN))
		ep0->is_in = 1;
	else
		ep0->is_in = 0;

	/* Handle SETUP packet */
	req = le16_to_cpu(ctrlpkt.bRequest);
	switch (req) {
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		switch (reqtype) {
		case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (wValue != USB_DEVICE_REMOTE_WAKEUP)
				goto stall; /* Nothing else handled */

			/* Tell board about event */
			if (req == USB_REQ_CLEAR_FEATURE)
				udc->dev_status &=
					~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				udc->dev_status |=
					(1 << USB_DEVICE_REMOTE_WAKEUP);
			uda_remwkp_cgh(udc);
			goto zlp_send;

		case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
			if ((wValue != USB_ENDPOINT_HALT) ||
			    (tmp >= NUM_ENDPOINTS))
				break;

			/* Find hardware endpoint from logical endpoint */
			ep = &udc->ep[tmp];
			tmp = ep->hwep_num;
			if (tmp == 0)
				break;

			if (req == USB_REQ_SET_FEATURE)
				udc_stall_hwep(udc, tmp);
			else if (!ep->wedge)
				udc_clrstall_hwep(udc, tmp);

			goto zlp_send;

		default:
			break;
		}
		/* NOTE(review): no break here — control falls through into
		 * USB_REQ_SET_ADDRESS; benign (its reqtype check fails for
		 * the cases that reach this point) but confirm intent. */

	case USB_REQ_SET_ADDRESS:
		if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
			udc_set_address(udc, wValue);
			goto zlp_send;
		}
		break;

	case USB_REQ_GET_STATUS:
		udc_get_status(udc, reqtype, wIndex);
		return;

	default:
		break; /* Let GadgetFS handle the descriptor instead */
	}

	if (likely(udc->driver)) {
		/* device-2-host (IN) or no data setup command, process
		 * immediately */
		spin_unlock(&udc->lock);
		i = udc->driver->setup(&udc->gadget, &ctrlpkt);

		spin_lock(&udc->lock);
		if (req == USB_REQ_SET_CONFIGURATION) {
			/* Configuration is set after endpoints are realized */
			if (wValue) {
				/* Set configuration */
				udc_set_device_configured(udc);

				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK |
							INAK_BI | INAK_II));
			} else {
				/* Clear configuration */
				udc_set_device_unconfigured(udc);

				/* Disable NAK interrupts */
				udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
							DAT_WR_BYTE(AP_CLK));
			}
		}

		if (i < 0) {
			/* setup processing failed, force stall */
			dev_dbg(udc->dev,
				"req %02x.%02x protocol STALL; stat %d\n",
				reqtype, req, i);
			udc->ep0state = WAIT_FOR_SETUP;
			goto stall;
		}
	}

	if (!ep0->is_in)
		udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */

	return;

stall:
	udc_stall_hwep(udc, EP_IN);
	return;

zlp_send:
	udc_ep0_send_zlp(udc);
	return;
}

/* IN endpoint 0 transfer */
static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;

	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_IN);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep0->totalints++;
#endif

	/* Stalled? Clear stall and reset buffers */
	if (epstatus & EP_SEL_ST) {
		udc_clrstall_hwep(udc, EP_IN);
		nuke(ep0, -ECONNABORTED);
		udc->ep0state = WAIT_FOR_SETUP;
		return;
	}

	/* Is a buffer available? */
	if (!(epstatus & EP_SEL_F)) {
		/* Handle based on current state */
		if (udc->ep0state == DATA_IN)
			udc_ep0_in_req(udc);
		else {
			/* Unknown state for EP0 oe end of DATA IN phase */
			nuke(ep0, -ECONNABORTED);
			udc->ep0state = WAIT_FOR_SETUP;
		}
	}
}

/* OUT endpoint 0 transfer */
static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;

	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_OUT);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep0->totalints++;
#endif

	/* Stalled? */
	if (epstatus & EP_SEL_ST) {
		udc_clrstall_hwep(udc, EP_OUT);
		nuke(ep0, -ECONNABORTED);
		udc->ep0state = WAIT_FOR_SETUP;
		return;
	}

	/* A NAK may occur if a packet couldn't be received yet */
	if (epstatus & EP_SEL_EPN)
		return;
	/* Setup packet incoming? */
	if (epstatus & EP_SEL_STP) {
		nuke(ep0, 0);
		udc->ep0state = WAIT_FOR_SETUP;
	}

	/* Data available? */
	if (epstatus & EP_SEL_F)
		/* Handle based on current state */
		switch (udc->ep0state) {
		case WAIT_FOR_SETUP:
			udc_handle_ep0_setup(udc);
			break;

		case DATA_OUT:
			udc_ep0_out_req(udc);
			break;

		default:
			/* Unknown state for EP0 */
			nuke(ep0, -ECONNABORTED);
			udc->ep0state = WAIT_FOR_SETUP;
		}
}

/* Must be called without lock */
static int lpc32xx_get_frame(struct usb_gadget *gadget)
{
	int frame;
	unsigned long flags;
	struct lpc32xx_udc *udc = to_udc(gadget);

	if (!udc->clocked)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	frame = (int) udc_get_current_frame(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	return frame;
}

static int lpc32xx_wakeup(struct usb_gadget *gadget)
{
	/* Remote wakeup is not supported by this controller driver */
	return -ENOTSUPP;
}

static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct lpc32xx_udc *udc = to_udc(gadget);

	/* Always self-powered */
	udc->selfpowered = (is_on != 0);

	return 0;
}

/*
 * vbus is here!
turn everything on that's ready
 * Must be called without lock
 */
static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
{
	unsigned long flags;
	struct lpc32xx_udc *udc = to_udc(gadget);

	spin_lock_irqsave(&udc->lock, flags);

	/* Doesn't need lock */
	if (udc->driver) {
		udc_clk_set(udc, 1);
		udc_enable(udc);
		pullup(udc, is_active);
	} else {
		stop_activity(udc);
		pullup(udc, 0);

		spin_unlock_irqrestore(&udc->lock, flags);
		/*
		 * Wait for all the endpoints to disable,
		 * before disabling clocks. Don't wait if
		 * endpoints are not enabled.
		 */
		if (atomic_read(&udc->enabled_ep_cnt))
			wait_event_interruptible(udc->ep_disable_wait_queue,
						 (atomic_read(&udc->enabled_ep_cnt) == 0));

		spin_lock_irqsave(&udc->lock, flags);

		udc_clk_set(udc, 0);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* Can be called with or without lock */
static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
{
	struct lpc32xx_udc *udc = to_udc(gadget);

	/* Doesn't need lock */
	pullup(udc, is_on);

	return 0;
}

static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
static int lpc32xx_stop(struct usb_gadget *, struct usb_gadget_driver *);

static const struct usb_gadget_ops lpc32xx_udc_ops = {
	.get_frame		= lpc32xx_get_frame,
	.wakeup			= lpc32xx_wakeup,
	.set_selfpowered	= lpc32xx_set_selfpowered,
	.vbus_session		= lpc32xx_vbus_session,
	.pullup			= lpc32xx_pullup,
	.udc_start		= lpc32xx_start,
	.udc_stop		= lpc32xx_stop,
};

static void nop_release(struct device *dev)
{
	/* nothing to free */
}

/* Template controller state copied into each probed instance; hwep_num
 * fields left 0 here are resolved at ep_enable time from hwep_num_base
 * plus direction. */
static const struct lpc32xx_udc controller_template = {
	.gadget = {
		.ops	= &lpc32xx_udc_ops,
		.name	= driver_name,
		.dev	= {
			.init_name = "gadget",
			.release = nop_release,
		}
	},
	.ep[0] = {
		.ep = {
			.name	= "ep0",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 0,
		.hwep_num	= 0, /* Can be 0 or 1, has special handling */
		.lep		= 0,
		.eptype		= EP_CTL_TYPE,
	},
	.ep[1] = {
		.ep = {
			.name	= "ep1-int",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 2,
		.hwep_num	= 0, /* 2 or 3, will be set later */
		.lep		= 1,
		.eptype		= EP_INT_TYPE,
	},
	.ep[2] = {
		.ep = {
			.name	= "ep2-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 4,
		.hwep_num	= 0, /* 4 or 5, will be set later */
		.lep		= 2,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[3] = {
		.ep = {
			.name	= "ep3-iso",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 6,
		.hwep_num	= 0, /* 6 or 7, will be set later */
		.lep		= 3,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[4] = {
		.ep = {
			.name	= "ep4-int",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 8,
		.hwep_num	= 0, /* 8 or 9, will be set later */
		.lep		= 4,
		.eptype		= EP_INT_TYPE,
	},
	.ep[5] = {
		.ep = {
			.name	= "ep5-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 10,
		.hwep_num	= 0, /* 10 or 11, will be set later */
		.lep		= 5,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[6] = {
		.ep = {
			.name	= "ep6-iso",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 12,
		.hwep_num	= 0, /* 12 or 13, will be set later */
		.lep		= 6,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[7] = {
		.ep = {
			.name	= "ep7-int",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 14,
		.hwep_num	= 0,
		.lep		= 7,
		.eptype		= EP_INT_TYPE,
	},
	.ep[8] = {
		.ep = {
			.name	= "ep8-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 16,
		.hwep_num	= 0,
		.lep		= 8,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[9] = {
		.ep = {
			.name	= "ep9-iso",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 18,
		.hwep_num	= 0,
		.lep		= 9,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[10] = {
		.ep = {
			.name	= "ep10-int",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 20,
		.hwep_num	= 0,
		.lep		= 10,
		.eptype		= EP_INT_TYPE,
	},
	.ep[11] = {
		.ep = {
			.name	= "ep11-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 22,
		.hwep_num	= 0,
		.lep		= 11,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[12] = {
		.ep = {
			.name	= "ep12-iso",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 24,
		.hwep_num	= 0,
		.lep		= 12,
		.eptype		= EP_ISO_TYPE,
	},
	.ep[13] = {
		.ep = {
			.name	= "ep13-int",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 26,
		.hwep_num	= 0,
		.lep		= 13,
		.eptype		= EP_INT_TYPE,
	},
	.ep[14] = {
		.ep = {
			.name	= "ep14-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 64,
		.hwep_num_base	= 28,
		.hwep_num	= 0,
		.lep		= 14,
		.eptype		= EP_BLK_TYPE,
	},
	.ep[15] = {
		.ep = {
			.name	= "ep15-bulk",
			.ops	= &lpc32xx_ep_ops,
		},
		.maxpacket	= 1023,
		.hwep_num_base	= 30,
		.hwep_num	= 0,
		.lep		= 15,
		.eptype		= EP_BLK_TYPE,
	},
};

/* ISO and status interrupts */
static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
{
	u32 tmp, devstat;
	struct lpc32xx_udc *udc = _udc;

	spin_lock(&udc->lock);

	/* Read the device status register */
	devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
	devstat &= ~USBD_EP_FAST;
	writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
	devstat = devstat & udc->enabled_devints;

	/* Device specific handling needed? */
	if (devstat & USBD_DEV_STAT)
		udc_handle_dev(udc);

	/* Start of frame? (devstat & FRAME_INT):
	 * The frame interrupt isn't really needed for ISO support,
	 * as the driver will queue the necessary packets */

	/* Error? */
	if (devstat & ERR_INT) {
		/* All types of errors, from cable removal during transfer to
		 * misc protocol and bit errors. These are mostly for just info,
		 * as the USB hardware will work around these. If these errors
		 * happen alot, something is wrong.
 */
		udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
		tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
		dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

/* EP interrupts */
static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
{
	u32 tmp;
	struct lpc32xx_udc *udc = _udc;

	spin_lock(&udc->lock);

	/* Read the device status register */
	writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Endpoints */
	tmp = readl(USBD_EPINTST(udc->udp_baseaddr));

	/* Special handling for EP0 */
	if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
		/* Handle EP0 IN */
		if (tmp & (EP_MASK_SEL(0, EP_IN)))
			udc_handle_ep0_in(udc);

		/* Handle EP0 OUT */
		if (tmp & (EP_MASK_SEL(0, EP_OUT)))
			udc_handle_ep0_out(udc);
	}

	/* All other EPs */
	if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
		int i;

		/* Handle other EP interrupts */
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (tmp & (1 << udc->ep[i].hwep_num))
				udc_handle_eps(udc, &udc->ep[i]);
		}
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
{
	struct lpc32xx_udc *udc = _udc;
	int i;
	u32 tmp;

	spin_lock(&udc->lock);

	/* Handle EP DMA EOT interrupts */
	tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
		(readl(USBD_EPDMAST(udc->udp_baseaddr)) &
		 readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
		readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		if (tmp & (1 << udc->ep[i].hwep_num))
			udc_handle_dma_ep(udc, &udc->ep[i]);
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

/*
 *
 * VBUS detection, pullup handler, and Gadget cable state notification
 *
 */
static void vbus_work(struct work_struct *work)
{
	u8 value;
	struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
					       vbus_job);

	if (udc->enabled != 0) {
		/* Discharge VBUS real quick */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);

		/* Give VBUS some time (100mS) to discharge */
		msleep(100);

		/* Disable VBUS discharge resistor */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 |
			ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DISCHRG);

		/* Clear interrupt */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_INTERRUPT_LATCH |
			ISP1301_I2C_REG_CLEAR_ADDR, ~0);

		/* Get the VBUS status from the transceiver */
		value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
						 ISP1301_I2C_INTERRUPT_SOURCE);

		/* VBUS on or off? */
		if (value & INT_SESS_VLD)
			udc->vbus = 1;
		else
			udc->vbus = 0;

		/* VBUS changed? */
		if (udc->last_vbus != udc->vbus) {
			udc->last_vbus = udc->vbus;
			lpc32xx_vbus_session(&udc->gadget, udc->vbus);
		}
	}

	/* Re-enable after completion (paired with disable in the IRQ) */
	enable_irq(udc->udp_irq[IRQ_USB_ATX]);
}

static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
{
	struct lpc32xx_udc *udc = _udc;

	/* Defer handling of VBUS IRQ to work queue */
	disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
	schedule_work(&udc->vbus_job);

	return IRQ_HANDLED;
}

static int lpc32xx_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct lpc32xx_udc *udc = to_udc(gadget);
	int i;

	if (!driver || driver->max_speed < USB_SPEED_FULL ||
	    !driver->setup) {
		dev_err(udc->dev, "bad parameter.\n");
		return -EINVAL;
	}

	if (udc->driver) {
		dev_err(udc->dev, "UDC already has a gadget driver\n");
		return -EBUSY;
	}

	udc->driver = driver;
	udc->gadget.dev.of_node = udc->dev->of_node;
	udc->enabled = 1;
	udc->selfpowered = 1;
	udc->vbus = 0;

	/* Force VBUS process once to check for cable insertion */
	udc->last_vbus = udc->vbus = 0;
	schedule_work(&udc->vbus_job);

	/* Do not re-enable ATX IRQ (3) */
	for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
		enable_irq(udc->udp_irq[i]);

	return 0;
}

static int lpc32xx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int i;
	struct lpc32xx_udc *udc = to_udc(gadget);

	if (!driver || driver != udc->driver)
		return -EINVAL;

	/* Mask all UDC interrupts, including the VBUS transceiver one */
	for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
		disable_irq(udc->udp_irq[i]);

	if (udc->clocked) {
		spin_lock(&udc->lock);
		stop_activity(udc);
		spin_unlock(&udc->lock);

		/*
		 * Wait for all the endpoints to disable,
		 * before disabling clocks. Don't wait if
		 * endpoints are not enabled.
		 */
		if (atomic_read(&udc->enabled_ep_cnt))
			wait_event_interruptible(udc->ep_disable_wait_queue,
				 (atomic_read(&udc->enabled_ep_cnt) == 0));

		spin_lock(&udc->lock);
		udc_clk_set(udc, 0);
		spin_unlock(&udc->lock);
	}

	udc->enabled = 0;
	udc->driver = NULL;

	return 0;
}

/*
 * Platform shutdown hook: drop the D+ pullup so the host sees a
 * disconnect when the board reboots.
 */
static void lpc32xx_udc_shutdown(struct platform_device *dev)
{
	/* Force disconnect on reboot */
	struct lpc32xx_udc *udc = platform_get_drvdata(dev);

	pullup(udc, 0);
}

/*
 * Callbacks to be overridden by options passed via OF (TODO)
 */

/* Connection-state change hook; intentionally a no-op stub. */
static void lpc32xx_usbd_conn_chg(int conn)
{
	/* Do nothing, it might be nice to enable an LED
	 * based on conn state being !0 */
}

/* Suspend-state change hook; intentionally a no-op stub. */
static void lpc32xx_usbd_susp_chg(int susp)
{
	/* Device suspend if susp != 0 */
}

/* Remote-wakeup enable/disable hook; intentionally a no-op stub. */
static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
{
	/* Enable or disable USB remote wakeup */
}

/* Default board data wiring up the no-op callbacks above. */
struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
	.vbus_drv_pol = 0,
	.conn_chgb = &lpc32xx_usbd_conn_chg,
	.susp_chgb = &lpc32xx_usbd_susp_chg,
	.rmwk_chgb = &lpc32xx_rmwkup_chg,
};

/* 32-bit DMA addresses with the low 7 bits clear (128-byte aligned) */
static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;

/*
 * Platform probe: allocate UDC state from the static template, locate
 * the ISP1301 transceiver over I2C (deferring the probe if it is not
 * ready yet), then map resources, set up clocks, DMA memory and IRQs.
 */
static int __init lpc32xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct lpc32xx_udc *udc;
	int retval, i;
	struct resource *res;
	dma_addr_t dma_handle;
	struct device_node *isp1301_node;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	/* Start from the static controller template */
	memcpy(udc, &controller_template, sizeof(*udc));
	for (i = 0; i <= 15; i++)
		udc->ep[i].udc = udc;
	udc->gadget.ep0 = &udc->ep[0].ep;

	/* init software state */
	udc->gadget.dev.parent = dev;
	udc->pdev = pdev;
	udc->dev = &pdev->dev;
	udc->enabled = 0;

	if (pdev->dev.of_node) {
		isp1301_node = of_parse_phandle(pdev->dev.of_node,
						"transceiver", 0);
	} else {
		isp1301_node = NULL;
	}

	udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
	if (!udc->isp1301_i2c_client) {
		retval = -EPROBE_DEFER;
		goto phy_fail;
	}
	dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
		 udc->isp1301_i2c_client->addr);

	pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	udc->board = &lpc32xx_usbddata;

	/*
	 * Resources are mapped as follows:
	 *  IORESOURCE_MEM, base address and size of USB space
	 *  IORESOURCE_IRQ, USB device low priority interrupt number
	 *  IORESOURCE_IRQ, USB device high priority interrupt number
	 *  IORESOURCE_IRQ, USB device interrupt number
	 *  IORESOURCE_IRQ, USB transceiver interrupt number
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		retval = -ENXIO;
		goto resource_fail;
	}

	spin_lock_init(&udc->lock);

	/* Get IRQs */
	for (i = 0; i < 4; i++) {
		udc->udp_irq[i] = platform_get_irq(pdev, i);
		if (udc->udp_irq[i] < 0) {
			dev_err(udc->dev,
				"irq resource %d not available!\n", i);
			retval = udc->udp_irq[i];
			goto irq_fail;
		}
	}

	udc->io_p_start = res->start;
	udc->io_p_size = resource_size(res);
	if (!request_mem_region(udc->io_p_start, udc->io_p_size,
				driver_name)) {
		dev_err(udc->dev, "someone's using UDC memory\n");
		retval = -EBUSY;
		goto request_mem_region_fail;
	}

	udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
	if (!udc->udp_baseaddr) {
		retval = -ENOMEM;
		dev_err(udc->dev, "IO map failure\n");
		goto io_map_fail;
	}

	/* Enable AHB slave USB clock, needed for further USB clock control */
	writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);

	/* Get required clocks */
	udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
	if (IS_ERR(udc->usb_pll_clk)) {
		dev_err(udc->dev, "failed to acquire USB PLL\n");
		retval = PTR_ERR(udc->usb_pll_clk);
		goto pll_get_fail;
	}
	udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
	if (IS_ERR(udc->usb_slv_clk)) {
		dev_err(udc->dev, "failed to acquire USB device clock\n");
		retval = PTR_ERR(udc->usb_slv_clk);
		goto usb_clk_get_fail;
	}
	udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
	if (IS_ERR(udc->usb_otg_clk)) {
		dev_err(udc->dev, "failed to acquire USB otg clock\n");
		retval = PTR_ERR(udc->usb_otg_clk);
		goto usb_otg_clk_get_fail;
	}

	/* Setup PLL clock to 48MHz */
	retval = clk_enable(udc->usb_pll_clk);
	if (retval < 0) {
		dev_err(udc->dev, "failed to start USB PLL\n");
		goto pll_enable_fail;
	}

	/*
	 * NOTE(review): the rate passed is 48000, not 48000000, though
	 * the comment above says 48MHz - presumably the platform clock
	 * driver interprets this value; confirm against the lpc32xx
	 * clock implementation.
	 */
	retval = clk_set_rate(udc->usb_pll_clk, 48000);
	if (retval < 0) {
		dev_err(udc->dev, "failed to set USB clock rate\n");
		goto pll_set_fail;
	}

	writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);

	/* Enable USB device clock */
	retval = clk_enable(udc->usb_slv_clk);
	if (retval < 0) {
		dev_err(udc->dev, "failed to start USB device clock\n");
		goto usb_clk_enable_fail;
	}

	/* Enable USB OTG clock */
	retval = clk_enable(udc->usb_otg_clk);
	if (retval < 0) {
		dev_err(udc->dev, "failed to start USB otg clock\n");
		goto usb_otg_clk_enable_fail;
	}

	/* Setup deferred workqueue data */
	udc->poweron = udc->pullup = 0;
	INIT_WORK(&udc->pullup_job, pullup_work);
	INIT_WORK(&udc->vbus_job, vbus_work);
#ifdef CONFIG_PM
	INIT_WORK(&udc->power_job, power_work);
#endif

	/* All clocks are now on */
	udc->clocked = 1;

	isp1301_udc_configure(udc);

	/* Allocate memory for the UDCA */
	udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
					      &dma_handle,
					      (GFP_KERNEL | GFP_DMA));
	if (!udc->udca_v_base) {
		dev_err(udc->dev, "error getting UDCA region\n");
		retval = -ENOMEM;
		goto i2c_fail;
	}
	udc->udca_p_base = dma_handle;
	dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
		UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);

	/* Setup the DD DMA memory pool */
	udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
					sizeof(struct lpc32xx_usbd_dd_gad),
					sizeof(u32), 0);
	if (!udc->dd_cache) {
		dev_err(udc->dev, "error getting DD DMA region\n");
		retval = -ENOMEM;
		goto dma_alloc_fail;
	}

	/* Clear USB peripheral and initialize gadget endpoints */
	udc_disable(udc);
	udc_reinit(udc);

	/* Request IRQs - low and high priority USB device IRQs are routed to
	 * the same handler, while the DMA interrupt is routed elsewhere */
	retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
			     0, "udc_lp", udc);
	if (retval < 0)
{ dev_err(udc->dev, "LP request irq %d failed\n", udc->udp_irq[IRQ_USB_LP]); goto irq_lp_fail; } retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq, 0, "udc_hp", udc); if (retval < 0) { dev_err(udc->dev, "HP request irq %d failed\n", udc->udp_irq[IRQ_USB_HP]); goto irq_hp_fail; } retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA], lpc32xx_usb_devdma_irq, 0, "udc_dma", udc); if (retval < 0) { dev_err(udc->dev, "DEV request irq %d failed\n", udc->udp_irq[IRQ_USB_DEVDMA]); goto irq_dev_fail; } /* The transceiver interrupt is used for VBUS detection and will kick off the VBUS handler function */ retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq, 0, "udc_otg", udc); if (retval < 0) { dev_err(udc->dev, "VBUS request irq %d failed\n", udc->udp_irq[IRQ_USB_ATX]); goto irq_xcvr_fail; } /* Initialize wait queue */ init_waitqueue_head(&udc->ep_disable_wait_queue); atomic_set(&udc->enabled_ep_cnt, 0); /* Keep all IRQs disabled until GadgetFS starts up */ for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++) disable_irq(udc->udp_irq[i]); retval = usb_add_gadget_udc(dev, &udc->gadget); if (retval < 0) goto add_gadget_fail; dev_set_drvdata(dev, udc); device_init_wakeup(dev, 1); create_debug_file(udc); /* Disable clocks for now */ udc_clk_set(udc, 0); dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION); return 0; add_gadget_fail: free_irq(udc->udp_irq[IRQ_USB_ATX], udc); irq_xcvr_fail: free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc); irq_dev_fail: free_irq(udc->udp_irq[IRQ_USB_HP], udc); irq_hp_fail: free_irq(udc->udp_irq[IRQ_USB_LP], udc); irq_lp_fail: dma_pool_destroy(udc->dd_cache); dma_alloc_fail: dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, udc->udca_v_base, udc->udca_p_base); i2c_fail: clk_disable(udc->usb_otg_clk); usb_otg_clk_enable_fail: clk_disable(udc->usb_slv_clk); usb_clk_enable_fail: pll_set_fail: clk_disable(udc->usb_pll_clk); pll_enable_fail: clk_put(udc->usb_slv_clk); usb_otg_clk_get_fail: clk_put(udc->usb_otg_clk); 
usb_clk_get_fail: clk_put(udc->usb_pll_clk); pll_get_fail: iounmap(udc->udp_baseaddr); io_map_fail: release_mem_region(udc->io_p_start, udc->io_p_size); dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval); request_mem_region_fail: irq_fail: resource_fail: phy_fail: kfree(udc); return retval; } static int lpc32xx_udc_remove(struct platform_device *pdev) { struct lpc32xx_udc *udc = platform_get_drvdata(pdev); usb_del_gadget_udc(&udc->gadget); if (udc->driver) return -EBUSY; udc_clk_set(udc, 1); udc_disable(udc); pullup(udc, 0); free_irq(udc->udp_irq[IRQ_USB_ATX], udc); device_init_wakeup(&pdev->dev, 0); remove_debug_file(udc); dma_pool_destroy(udc->dd_cache); dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, udc->udca_v_base, udc->udca_p_base); free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc); free_irq(udc->udp_irq[IRQ_USB_HP], udc); free_irq(udc->udp_irq[IRQ_USB_LP], udc); clk_disable(udc->usb_otg_clk); clk_put(udc->usb_otg_clk); clk_disable(udc->usb_slv_clk); clk_put(udc->usb_slv_clk); clk_disable(udc->usb_pll_clk); clk_put(udc->usb_pll_clk); iounmap(udc->udp_baseaddr); release_mem_region(udc->io_p_start, udc->io_p_size); kfree(udc); return 0; } #ifdef CONFIG_PM static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg) { struct lpc32xx_udc *udc = platform_get_drvdata(pdev); if (udc->clocked) { /* Power down ISP */ udc->poweron = 0; isp1301_set_powerstate(udc, 0); /* Disable clocking */ udc_clk_set(udc, 0); /* Keep clock flag on, so we know to re-enable clocks on resume */ udc->clocked = 1; /* Kill global USB clock */ clk_disable(udc->usb_slv_clk); } return 0; } static int lpc32xx_udc_resume(struct platform_device *pdev) { struct lpc32xx_udc *udc = platform_get_drvdata(pdev); if (udc->clocked) { /* Enable global USB clock */ clk_enable(udc->usb_slv_clk); /* Enable clocking */ udc_clk_set(udc, 1); /* ISP back to normal power mode */ udc->poweron = 1; isp1301_set_powerstate(udc, 1); } return 0; } #else #define lpc32xx_udc_suspend NULL #define 
lpc32xx_udc_resume	NULL
#endif

#ifdef CONFIG_OF
/* Device-tree match table */
static struct of_device_id lpc32xx_udc_of_match[] = {
	{ .compatible = "nxp,lpc3220-udc", },
	{ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
#endif

/*
 * Platform driver glue.  There is no .probe member here: the probe
 * routine is __init and is passed to module_platform_driver_probe()
 * below instead.
 */
static struct platform_driver lpc32xx_udc_driver = {
	.remove		= lpc32xx_udc_remove,
	.shutdown	= lpc32xx_udc_shutdown,
	.suspend	= lpc32xx_udc_suspend,
	.resume		= lpc32xx_udc_resume,
	.driver		= {
		.name	= (char *) driver_name,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_udc_of_match),
	},
};

module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);

MODULE_DESCRIPTION("LPC32XX udc driver");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lpc32xx_udc");
gpl-2.0
TRKP/android_kernel_samsung_i9300
sound/pci/cmipci.c
2518
104378
/* * Driver for C-Media CMI8338 and 8738 PCI soundcards. * Copyright (c) 2000 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Does not work. Warning may block system in capture mode */ /* #define USE_VAR48KRATE */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/sb.h> #include <sound/asoundef.h> #include <sound/initval.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("C-Media CMI8x38 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738}," "{C-Media,CMI8738B}," "{C-Media,CMI8338A}," "{C-Media,CMI8338B}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */ static long mpu_port[SNDRV_CARDS]; static long 
fm_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; static int soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; #ifdef SUPPORT_JOYSTICK static int joystick_port[SNDRV_CARDS]; #endif module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for C-Media PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for C-Media PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable C-Media PCI soundcard."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port."); module_param_array(soft_ac3, bool, NULL, 0444); MODULE_PARM_DESC(soft_ac3, "Software-conversion of raw SPDIF packets (model 033 only)."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, int, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address."); #endif /* * CM8x38 registers definition */ #define CM_REG_FUNCTRL0 0x00 #define CM_RST_CH1 0x00080000 #define CM_RST_CH0 0x00040000 #define CM_CHEN1 0x00020000 /* ch1: enable */ #define CM_CHEN0 0x00010000 /* ch0: enable */ #define CM_PAUSE1 0x00000008 /* ch1: pause */ #define CM_PAUSE0 0x00000004 /* ch0: pause */ #define CM_CHADC1 0x00000002 /* ch1, 0:playback, 1:record */ #define CM_CHADC0 0x00000001 /* ch0, 0:playback, 1:record */ #define CM_REG_FUNCTRL1 0x04 #define CM_DSFC_MASK 0x0000E000 /* channel 1 (DAC?) sampling frequency */ #define CM_DSFC_SHIFT 13 #define CM_ASFC_MASK 0x00001C00 /* channel 0 (ADC?) sampling frequency */ #define CM_ASFC_SHIFT 10 #define CM_SPDF_1 0x00000200 /* SPDIF IN/OUT at channel B */ #define CM_SPDF_0 0x00000100 /* SPDIF OUT only channel A */ #define CM_SPDFLOOP 0x00000080 /* ext. 
SPDIIF/IN -> OUT loopback */ #define CM_SPDO2DAC 0x00000040 /* SPDIF/OUT can be heard from internal DAC */ #define CM_INTRM 0x00000020 /* master control block (MCB) interrupt enabled */ #define CM_BREQ 0x00000010 /* bus master enabled */ #define CM_VOICE_EN 0x00000008 /* legacy voice (SB16,FM) */ #define CM_UART_EN 0x00000004 /* legacy UART */ #define CM_JYSTK_EN 0x00000002 /* legacy joystick */ #define CM_ZVPORT 0x00000001 /* ZVPORT */ #define CM_REG_CHFORMAT 0x08 #define CM_CHB3D5C 0x80000000 /* 5,6 channels */ #define CM_FMOFFSET2 0x40000000 /* initial FM PCM offset 2 when Fmute=1 */ #define CM_CHB3D 0x20000000 /* 4 channels */ #define CM_CHIP_MASK1 0x1f000000 #define CM_CHIP_037 0x01000000 #define CM_SETLAT48 0x00800000 /* set latency timer 48h */ #define CM_EDGEIRQ 0x00400000 /* emulated edge trigger legacy IRQ */ #define CM_SPD24SEL39 0x00200000 /* 24-bit spdif: model 039 */ #define CM_AC3EN1 0x00100000 /* enable AC3: model 037 */ #define CM_SPDIF_SELECT1 0x00080000 /* for model <= 037 ? */ #define CM_SPD24SEL 0x00020000 /* 24bit spdif: model 037 */ /* #define CM_SPDIF_INVERSE 0x00010000 */ /* ??? */ #define CM_ADCBITLEN_MASK 0x0000C000 #define CM_ADCBITLEN_16 0x00000000 #define CM_ADCBITLEN_15 0x00004000 #define CM_ADCBITLEN_14 0x00008000 #define CM_ADCBITLEN_13 0x0000C000 #define CM_ADCDACLEN_MASK 0x00003000 /* model 037 */ #define CM_ADCDACLEN_060 0x00000000 #define CM_ADCDACLEN_066 0x00001000 #define CM_ADCDACLEN_130 0x00002000 #define CM_ADCDACLEN_280 0x00003000 #define CM_ADCDLEN_MASK 0x00003000 /* model 039 */ #define CM_ADCDLEN_ORIGINAL 0x00000000 #define CM_ADCDLEN_EXTRA 0x00001000 #define CM_ADCDLEN_24K 0x00002000 #define CM_ADCDLEN_WEIGHT 0x00003000 #define CM_CH1_SRATE_176K 0x00000800 #define CM_CH1_SRATE_96K 0x00000800 /* model 055? */ #define CM_CH1_SRATE_88K 0x00000400 #define CM_CH0_SRATE_176K 0x00000200 #define CM_CH0_SRATE_96K 0x00000200 /* model 055? 
*/ #define CM_CH0_SRATE_88K 0x00000100 #define CM_CH0_SRATE_128K 0x00000300 #define CM_CH0_SRATE_MASK 0x00000300 #define CM_SPDIF_INVERSE2 0x00000080 /* model 055? */ #define CM_DBLSPDS 0x00000040 /* double SPDIF sample rate 88.2/96 */ #define CM_POLVALID 0x00000020 /* inverse SPDIF/IN valid bit */ #define CM_SPDLOCKED 0x00000010 #define CM_CH1FMT_MASK 0x0000000C /* bit 3: 16 bits, bit 2: stereo */ #define CM_CH1FMT_SHIFT 2 #define CM_CH0FMT_MASK 0x00000003 /* bit 1: 16 bits, bit 0: stereo */ #define CM_CH0FMT_SHIFT 0 #define CM_REG_INT_HLDCLR 0x0C #define CM_CHIP_MASK2 0xff000000 #define CM_CHIP_8768 0x20000000 #define CM_CHIP_055 0x08000000 #define CM_CHIP_039 0x04000000 #define CM_CHIP_039_6CH 0x01000000 #define CM_UNKNOWN_INT_EN 0x00080000 /* ? */ #define CM_TDMA_INT_EN 0x00040000 #define CM_CH1_INT_EN 0x00020000 #define CM_CH0_INT_EN 0x00010000 #define CM_REG_INT_STATUS 0x10 #define CM_INTR 0x80000000 #define CM_VCO 0x08000000 /* Voice Control? CMI8738 */ #define CM_MCBINT 0x04000000 /* Master Control Block abort cond.? */ #define CM_UARTINT 0x00010000 #define CM_LTDMAINT 0x00008000 #define CM_HTDMAINT 0x00004000 #define CM_XDO46 0x00000080 /* Modell 033? 
Direct programming EEPROM (read data register) */ #define CM_LHBTOG 0x00000040 /* High/Low status from DMA ctrl register */ #define CM_LEG_HDMA 0x00000020 /* Legacy is in High DMA channel */ #define CM_LEG_STEREO 0x00000010 /* Legacy is in Stereo mode */ #define CM_CH1BUSY 0x00000008 #define CM_CH0BUSY 0x00000004 #define CM_CHINT1 0x00000002 #define CM_CHINT0 0x00000001 #define CM_REG_LEGACY_CTRL 0x14 #define CM_NXCHG 0x80000000 /* don't map base reg dword->sample */ #define CM_VMPU_MASK 0x60000000 /* MPU401 i/o port address */ #define CM_VMPU_330 0x00000000 #define CM_VMPU_320 0x20000000 #define CM_VMPU_310 0x40000000 #define CM_VMPU_300 0x60000000 #define CM_ENWR8237 0x10000000 /* enable bus master to write 8237 base reg */ #define CM_VSBSEL_MASK 0x0C000000 /* SB16 base address */ #define CM_VSBSEL_220 0x00000000 #define CM_VSBSEL_240 0x04000000 #define CM_VSBSEL_260 0x08000000 #define CM_VSBSEL_280 0x0C000000 #define CM_FMSEL_MASK 0x03000000 /* FM OPL3 base address */ #define CM_FMSEL_388 0x00000000 #define CM_FMSEL_3C8 0x01000000 #define CM_FMSEL_3E0 0x02000000 #define CM_FMSEL_3E8 0x03000000 #define CM_ENSPDOUT 0x00800000 /* enable XSPDIF/OUT to I/O interface */ #define CM_SPDCOPYRHT 0x00400000 /* spdif in/out copyright bit */ #define CM_DAC2SPDO 0x00200000 /* enable wave+fm_midi -> SPDIF/OUT */ #define CM_INVIDWEN 0x00100000 /* internal vendor ID write enable, model 039? 
*/ #define CM_SETRETRY 0x00100000 /* 0: legacy i/o wait (default), 1: legacy i/o bus retry */ #define CM_C_EEACCESS 0x00080000 /* direct programming eeprom regs */ #define CM_C_EECS 0x00040000 #define CM_C_EEDI46 0x00020000 #define CM_C_EECK46 0x00010000 #define CM_CHB3D6C 0x00008000 /* 5.1 channels support */ #define CM_CENTR2LIN 0x00004000 /* line-in as center out */ #define CM_BASE2LIN 0x00002000 /* line-in as bass out */ #define CM_EXBASEN 0x00001000 /* external bass input enable */ #define CM_REG_MISC_CTRL 0x18 #define CM_PWD 0x80000000 /* power down */ #define CM_RESET 0x40000000 #define CM_SFIL_MASK 0x30000000 /* filter control at front end DAC, model 037? */ #define CM_VMGAIN 0x10000000 /* analog master amp +6dB, model 039? */ #define CM_TXVX 0x08000000 /* model 037? */ #define CM_N4SPK3D 0x04000000 /* copy front to rear */ #define CM_SPDO5V 0x02000000 /* 5V spdif output (1 = 0.5v (coax)) */ #define CM_SPDIF48K 0x01000000 /* write */ #define CM_SPATUS48K 0x01000000 /* read */ #define CM_ENDBDAC 0x00800000 /* enable double dac */ #define CM_XCHGDAC 0x00400000 /* 0: front=ch0, 1: front=ch1 */ #define CM_SPD32SEL 0x00200000 /* 0: 16bit SPDIF, 1: 32bit */ #define CM_SPDFLOOPI 0x00100000 /* int. SPDIF-OUT -> int. IN */ #define CM_FM_EN 0x00080000 /* enable legacy FM */ #define CM_AC3EN2 0x00040000 /* enable AC3: model 039 */ #define CM_ENWRASID 0x00010000 /* choose writable internal SUBID (audio) */ #define CM_VIDWPDSB 0x00010000 /* model 037? */ #define CM_SPDF_AC97 0x00008000 /* 0: SPDIF/OUT 44.1K, 1: 48K */ #define CM_MASK_EN 0x00004000 /* activate channel mask on legacy DMA */ #define CM_ENWRMSID 0x00002000 /* choose writable internal SUBID (modem) */ #define CM_VIDWPPRT 0x00002000 /* model 037? */ #define CM_SFILENB 0x00001000 /* filter stepping at front end DAC, model 037? */ #define CM_MMODE_MASK 0x00000E00 /* model DAA interface mode */ #define CM_SPDIF_SELECT2 0x00000100 /* for model > 039 ? 
*/ #define CM_ENCENTER 0x00000080 #define CM_FLINKON 0x00000040 /* force modem link detection on, model 037 */ #define CM_MUTECH1 0x00000040 /* mute PCI ch1 to DAC */ #define CM_FLINKOFF 0x00000020 /* force modem link detection off, model 037 */ #define CM_MIDSMP 0x00000010 /* 1/2 interpolation at front end DAC */ #define CM_UPDDMA_MASK 0x0000000C /* TDMA position update notification */ #define CM_UPDDMA_2048 0x00000000 #define CM_UPDDMA_1024 0x00000004 #define CM_UPDDMA_512 0x00000008 #define CM_UPDDMA_256 0x0000000C #define CM_TWAIT_MASK 0x00000003 /* model 037 */ #define CM_TWAIT1 0x00000002 /* FM i/o cycle, 0: 48, 1: 64 PCICLKs */ #define CM_TWAIT0 0x00000001 /* i/o cycle, 0: 4, 1: 6 PCICLKs */ #define CM_REG_TDMA_POSITION 0x1C #define CM_TDMA_CNT_MASK 0xFFFF0000 /* current byte/word count */ #define CM_TDMA_ADR_MASK 0x0000FFFF /* current address */ /* byte */ #define CM_REG_MIXER0 0x20 #define CM_REG_SBVR 0x20 /* write: sb16 version */ #define CM_REG_DEV 0x20 /* read: hardware device version */ #define CM_REG_MIXER21 0x21 #define CM_UNKNOWN_21_MASK 0x78 /* ? */ #define CM_X_ADPCM 0x04 /* SB16 ADPCM enable */ #define CM_PROINV 0x02 /* SBPro left/right channel switching */ #define CM_X_SB16 0x01 /* SB16 compatible */ #define CM_REG_SB16_DATA 0x22 #define CM_REG_SB16_ADDR 0x23 #define CM_REFFREQ_XIN (315*1000*1000)/22 /* 14.31818 Mhz reference clock frequency pin XIN */ #define CM_ADCMULT_XIN 512 /* Guessed (487 best for 44.1kHz, not for 88/176kHz) */ #define CM_TOLERANCE_RATE 0.001 /* Tolerance sample rate pitch (1000ppm) */ #define CM_MAXIMUM_RATE 80000000 /* Note more than 80MHz */ #define CM_REG_MIXER1 0x24 #define CM_FMMUTE 0x80 /* mute FM */ #define CM_FMMUTE_SHIFT 7 #define CM_WSMUTE 0x40 /* mute PCM */ #define CM_WSMUTE_SHIFT 6 #define CM_REAR2LIN 0x20 /* lin-in -> rear line out */ #define CM_REAR2LIN_SHIFT 5 #define CM_REAR2FRONT 0x10 /* exchange rear/front */ #define CM_REAR2FRONT_SHIFT 4 #define CM_WAVEINL 0x08 /* digital wave rec. 
left chan */ #define CM_WAVEINL_SHIFT 3 #define CM_WAVEINR 0x04 /* digical wave rec. right */ #define CM_WAVEINR_SHIFT 2 #define CM_X3DEN 0x02 /* 3D surround enable */ #define CM_X3DEN_SHIFT 1 #define CM_CDPLAY 0x01 /* enable SPDIF/IN PCM -> DAC */ #define CM_CDPLAY_SHIFT 0 #define CM_REG_MIXER2 0x25 #define CM_RAUXREN 0x80 /* AUX right capture */ #define CM_RAUXREN_SHIFT 7 #define CM_RAUXLEN 0x40 /* AUX left capture */ #define CM_RAUXLEN_SHIFT 6 #define CM_VAUXRM 0x20 /* AUX right mute */ #define CM_VAUXRM_SHIFT 5 #define CM_VAUXLM 0x10 /* AUX left mute */ #define CM_VAUXLM_SHIFT 4 #define CM_VADMIC_MASK 0x0e /* mic gain level (0-3) << 1 */ #define CM_VADMIC_SHIFT 1 #define CM_MICGAINZ 0x01 /* mic boost */ #define CM_MICGAINZ_SHIFT 0 #define CM_REG_MIXER3 0x24 #define CM_REG_AUX_VOL 0x26 #define CM_VAUXL_MASK 0xf0 #define CM_VAUXR_MASK 0x0f #define CM_REG_MISC 0x27 #define CM_UNKNOWN_27_MASK 0xd8 /* ? */ #define CM_XGPO1 0x20 // #define CM_XGPBIO 0x04 #define CM_MIC_CENTER_LFE 0x04 /* mic as center/lfe out? (model 039 or later?) */ #define CM_SPDIF_INVERSE 0x04 /* spdif input phase inverse (model 037) */ #define CM_SPDVALID 0x02 /* spdif input valid check */ #define CM_DMAUTO 0x01 /* SB16 DMA auto detect */ #define CM_REG_AC97 0x28 /* hmmm.. do we have ac97 link? */ /* * For CMI-8338 (0x28 - 0x2b) .. is this valid for CMI-8738 * or identical with AC97 codec? */ #define CM_REG_EXTERN_CODEC CM_REG_AC97 /* * MPU401 pci port index address 0x40 - 0x4f (CMI-8738 spec ver. 0.6) */ #define CM_REG_MPU_PCI 0x40 /* * FM pci port index address 0x50 - 0x5f (CMI-8738 spec ver. 0.6) */ #define CM_REG_FM_PCI 0x50 /* * access from SB-mixer port */ #define CM_REG_EXTENT_IND 0xf0 #define CM_VPHONE_MASK 0xe0 /* Phone volume control (0-3) << 5 */ #define CM_VPHONE_SHIFT 5 #define CM_VPHOM 0x10 /* Phone mute control */ #define CM_VSPKM 0x08 /* Speaker mute control, default high */ #define CM_RLOOPREN 0x04 /* Rec. R-channel enable */ #define CM_RLOOPLEN 0x02 /* Rec. 
L-channel enable */ #define CM_VADMIC3 0x01 /* Mic record boost */ /* * CMI-8338 spec ver 0.5 (this is not valid for CMI-8738): * the 8 registers 0xf8 - 0xff are used for programming m/n counter by the PLL * unit (readonly?). */ #define CM_REG_PLL 0xf8 /* * extended registers */ #define CM_REG_CH0_FRAME1 0x80 /* write: base address */ #define CM_REG_CH0_FRAME2 0x84 /* read: current address */ #define CM_REG_CH1_FRAME1 0x88 /* 0-15: count of samples at bus master; buffer size */ #define CM_REG_CH1_FRAME2 0x8C /* 16-31: count of samples at codec; fragment size */ #define CM_REG_EXT_MISC 0x90 #define CM_ADC48K44K 0x10000000 /* ADC parameters group, 0: 44k, 1: 48k */ #define CM_CHB3D8C 0x00200000 /* 7.1 channels support */ #define CM_SPD32FMT 0x00100000 /* SPDIF/IN 32k sample rate */ #define CM_ADC2SPDIF 0x00080000 /* ADC output to SPDIF/OUT */ #define CM_SHAREADC 0x00040000 /* DAC in ADC as Center/LFE */ #define CM_REALTCMP 0x00020000 /* monitor the CMPL/CMPR of ADC */ #define CM_INVLRCK 0x00010000 /* invert ZVPORT's LRCK */ #define CM_UNKNOWN_90_MASK 0x0000FFFF /* ? 
*/ /* * size of i/o region */ #define CM_EXTENT_CODEC 0x100 #define CM_EXTENT_MIDI 0x2 #define CM_EXTENT_SYNTH 0x4 /* * channels for playback / capture */ #define CM_CH_PLAY 0 #define CM_CH_CAPT 1 /* * flags to check device open/close */ #define CM_OPEN_NONE 0 #define CM_OPEN_CH_MASK 0x01 #define CM_OPEN_DAC 0x10 #define CM_OPEN_ADC 0x20 #define CM_OPEN_SPDIF 0x40 #define CM_OPEN_MCHAN 0x80 #define CM_OPEN_PLAYBACK (CM_CH_PLAY | CM_OPEN_DAC) #define CM_OPEN_PLAYBACK2 (CM_CH_CAPT | CM_OPEN_DAC) #define CM_OPEN_PLAYBACK_MULTI (CM_CH_PLAY | CM_OPEN_DAC | CM_OPEN_MCHAN) #define CM_OPEN_CAPTURE (CM_CH_CAPT | CM_OPEN_ADC) #define CM_OPEN_SPDIF_PLAYBACK (CM_CH_PLAY | CM_OPEN_DAC | CM_OPEN_SPDIF) #define CM_OPEN_SPDIF_CAPTURE (CM_CH_CAPT | CM_OPEN_ADC | CM_OPEN_SPDIF) #if CM_CH_PLAY == 1 #define CM_PLAYBACK_SRATE_176K CM_CH1_SRATE_176K #define CM_PLAYBACK_SPDF CM_SPDF_1 #define CM_CAPTURE_SPDF CM_SPDF_0 #else #define CM_PLAYBACK_SRATE_176K CM_CH0_SRATE_176K #define CM_PLAYBACK_SPDF CM_SPDF_0 #define CM_CAPTURE_SPDF CM_SPDF_1 #endif /* * driver data */ struct cmipci_pcm { struct snd_pcm_substream *substream; u8 running; /* dac/adc running? 
*/ u8 fmt; /* format bits */ u8 is_dac; u8 needs_silencing; unsigned int dma_size; /* in frames */ unsigned int shift; unsigned int ch; /* channel (0/1) */ unsigned int offset; /* physical address of the buffer */ }; /* mixer elements toggled/resumed during ac3 playback */ struct cmipci_mixer_auto_switches { const char *name; /* switch to toggle */ int toggle_on; /* value to change when ac3 mode */ }; static const struct cmipci_mixer_auto_switches cm_saved_mixer[] = { {"PCM Playback Switch", 0}, {"IEC958 Output Switch", 1}, {"IEC958 Mix Analog", 0}, // {"IEC958 Out To DAC", 1}, // no longer used {"IEC958 Loop", 0}, }; #define CM_SAVED_MIXERS ARRAY_SIZE(cm_saved_mixer) struct cmipci { struct snd_card *card; struct pci_dev *pci; unsigned int device; /* device ID */ int irq; unsigned long iobase; unsigned int ctrl; /* FUNCTRL0 current value */ struct snd_pcm *pcm; /* DAC/ADC PCM */ struct snd_pcm *pcm2; /* 2nd DAC */ struct snd_pcm *pcm_spdif; /* SPDIF */ int chip_version; int max_channels; unsigned int can_ac3_sw: 1; unsigned int can_ac3_hw: 1; unsigned int can_multi_ch: 1; unsigned int can_96k: 1; /* samplerate above 48k */ unsigned int do_soft_ac3: 1; unsigned int spdif_playback_avail: 1; /* spdif ready? */ unsigned int spdif_playback_enabled: 1; /* spdif switch enabled? 
 */
	int spdif_counter;	/* for software AC3 */
	unsigned int dig_status;
	unsigned int dig_pcm_status;

	struct snd_pcm_hardware *hw_info[3]; /* for playbacks */

	int opened[2];	/* open mode */
	struct mutex open_mutex;

	unsigned int mixer_insensitive: 1;
	struct snd_kcontrol *mixer_res_ctl[CM_SAVED_MIXERS];
	int mixer_res_status[CM_SAVED_MIXERS];

	struct cmipci_pcm channel[2];	/* ch0 - DAC, ch1 - ADC or 2nd DAC */

	/* external MIDI */
	struct snd_rawmidi *rmidi;

#ifdef SUPPORT_JOYSTICK
	struct gameport *gameport;
#endif

	spinlock_t reg_lock;

#ifdef CONFIG_PM
	unsigned int saved_regs[0x20];
	unsigned char saved_mixers[0x20];
#endif
};

/* read/write operations for dword register */
/* Write a 32-bit chip register at offset 'cmd' from the I/O base. */
static inline void snd_cmipci_write(struct cmipci *cm, unsigned int cmd,
				    unsigned int data)
{
	outl(data, cm->iobase + cmd);
}

/* Read a 32-bit chip register. */
static inline unsigned int snd_cmipci_read(struct cmipci *cm, unsigned int cmd)
{
	return inl(cm->iobase + cmd);
}

/* read/write operations for word register */
/* Write a 16-bit chip register. */
static inline void snd_cmipci_write_w(struct cmipci *cm, unsigned int cmd,
				      unsigned short data)
{
	outw(data, cm->iobase + cmd);
}

/* Read a 16-bit chip register. */
static inline unsigned short snd_cmipci_read_w(struct cmipci *cm, unsigned int cmd)
{
	return inw(cm->iobase + cmd);
}

/* read/write operations for byte register */
/* Write an 8-bit chip register. */
static inline void snd_cmipci_write_b(struct cmipci *cm, unsigned int cmd,
				      unsigned char data)
{
	outb(data, cm->iobase + cmd);
}

/* Read an 8-bit chip register. */
static inline unsigned char snd_cmipci_read_b(struct cmipci *cm, unsigned int cmd)
{
	return inb(cm->iobase + cmd);
}

/* bit operations for dword register */
/*
 * Set 'flag' bits in a 32-bit register.  Returns 1 if the register was
 * actually modified, 0 if the bits were already set (write skipped).
 * NOTE(review): no locking here - callers appear responsible for
 * holding reg_lock where atomicity matters; confirm at call sites.
 */
static int snd_cmipci_set_bit(struct cmipci *cm, unsigned int cmd, unsigned int flag)
{
	unsigned int val, oval;
	val = oval = inl(cm->iobase + cmd);
	val |= flag;
	if (val == oval)
		return 0;
	outl(val, cm->iobase + cmd);
	return 1;
}

/* Clear 'flag' bits in a 32-bit register; returns 1 when modified. */
static int snd_cmipci_clear_bit(struct cmipci *cm, unsigned int cmd, unsigned int flag)
{
	unsigned int val, oval;
	val = oval = inl(cm->iobase + cmd);
	val &= ~flag;
	if (val == oval)
		return 0;
	outl(val, cm->iobase + cmd);
	return 1;
}
/* bit operations for byte register */
/* Set 'flag' bits in an 8-bit register; returns 1 when modified. */
static int snd_cmipci_set_bit_b(struct cmipci *cm, unsigned int cmd,
				unsigned char flag)
{
	unsigned char val, oval;
	val = oval = inb(cm->iobase + cmd);
	val |= flag;
	if (val == oval)
		return 0;
	outb(val, cm->iobase + cmd);
	return 1;
}

/* Clear 'flag' bits in an 8-bit register; returns 1 when modified. */
static int snd_cmipci_clear_bit_b(struct cmipci *cm, unsigned int cmd,
				  unsigned char flag)
{
	unsigned char val, oval;
	val = oval = inb(cm->iobase + cmd);
	val &= ~flag;
	if (val == oval)
		return 0;
	outb(val, cm->iobase + cmd);
	return 1;
}

/*
 * PCM interface
 */

/*
 * calculate frequency
 */

/* Supported sample rates; the array index is the hardware frequency
   code written to FUNCTRL1 (DSFC/ASFC fields). */
static unsigned int rates[] = { 5512, 11025, 22050, 44100, 8000, 16000, 32000, 48000 };

/*
 * Translate a sample rate in Hz into its hardware frequency code
 * (index into rates[]).  Unsupported rates trigger snd_BUG() and
 * return code 0.
 */
static unsigned int snd_cmipci_rate_freq(unsigned int rate)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rates); i++) {
		if (rates[i] == rate)
			return i;
	}
	snd_BUG();
	return 0;
}

#ifdef USE_VAR48KRATE
/*
 * Determine PLL values for frequency setup, maybe the CMI8338 (CMI8738???)
 * does it this way .. maybe not.  Never get any information from C-Media about
 * that <werner@suse.de>.
 */
/*
 * Search R/M/N divider values that approximate 'rate' within
 * CM_TOLERANCE_RATE of the reference clock divided by 'adcmult'.
 * Returns non-zero when a combination was found; results are stored
 * through r/m/n (m/n are stored biased by -2, matching the search
 * range below).
 */
static int snd_cmipci_pll_rmn(unsigned int rate, unsigned int adcmult,
			      int *r, int *m, int *n)
{
	unsigned int delta, tolerance;
	int xm, xn, xr;

	/* Scale rate up until it fits the divider range; each doubling
	   bumps the R code by 1 (stored in bits 5+). */
	for (*r = 0; rate < CM_MAXIMUM_RATE/adcmult; *r += (1<<5))
		rate <<= 1;
	*n = -1;
	if (*r > 0xff)
		goto out;
	tolerance = rate*CM_TOLERANCE_RATE;

	/* Exhaustive search over the N (5-bit) and M (8-bit) ranges */
	for (xn = (1+2); xn < (0x1f+2); xn++) {
		for (xm = (1+2); xm < (0xff+2); xm++) {
			xr = ((CM_REFFREQ_XIN/adcmult) * xm) / xn;

			if (xr < rate)
				delta = rate - xr;
			else
				delta = xr - rate;

			/*
			 * If we found one, remember this,
			 * and try to find a closer one
			 */
			if (delta < tolerance) {
				tolerance = delta;
				*m = xm - 2;
				*n = xn - 2;
			}
		}
	}
out:
	return (*n > -1);
}

/*
 * Program pll register bits, I assume that the 8 registers 0xf8 up to 0xff
 * are mapped onto the 8 ADC/DAC sampling frequency which can be chosen
 * at the register CM_REG_FUNCTRL1 (0x04).
 * Problem: other ways are also possible (any information about that?)
 */
static void snd_cmipci_set_pll(struct cmipci *cm, unsigned int rate, unsigned int slot)
{
	unsigned int reg = CM_REG_PLL + slot;
	/*
	 * Guess that this programs at reg. 0x04 the pos 15:13/12:10
	 * for DSFC/ASFC (000 up to 111).
	 */

	/* FIXME: Init (Do we've to set an other register first before programming?) */

	/* FIXME: Is this correct? Or shouldn't the m/n/r values be used for that? */
	snd_cmipci_write_b(cm, reg, rate>>8);
	snd_cmipci_write_b(cm, reg, rate&0xff);

	/* FIXME: Setup (Do we've to set an other register first to enable this?) */
}
#endif /* USE_VAR48KRATE */

/* hw_params: just allocate the DMA buffer */
static int snd_cmipci_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

/*
 * hw_params for the second (channel B) playback device.
 * A multi-channel (>2) stream needs channel A as well, so it is reserved
 * here by marking it opened with CM_OPEN_PLAYBACK_MULTI; fails with
 * -EBUSY if channel A is already in use.
 */
static int snd_cmipci_playback2_hw_params(struct snd_pcm_substream *substream,
					  struct snd_pcm_hw_params *hw_params)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	if (params_channels(hw_params) > 2) {
		mutex_lock(&cm->open_mutex);
		if (cm->opened[CM_CH_PLAY]) {
			mutex_unlock(&cm->open_mutex);
			return -EBUSY;
		}
		/* reserve the channel A */
		cm->opened[CM_CH_PLAY] = CM_OPEN_PLAYBACK_MULTI;
		mutex_unlock(&cm->open_mutex);
	}
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

/* pulse the reset bit of the given channel in FUNCTRL0 */
static void snd_cmipci_ch_reset(struct cmipci *cm, int ch)
{
	int reset = CM_RST_CH0 << (cm->channel[ch].ch);
	snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | reset);
	snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~reset);
	udelay(10);
}

static int snd_cmipci_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}


/*
 */

/*
 * channel-count constraint lists; .count limits how much of hw_channels[]
 * is allowed: {1,2,4} / {1,2,4,6} / {1,2,4,6,8}
 */
static unsigned int hw_channels[] = {1, 2, 4, 6, 8};
static struct snd_pcm_hw_constraint_list hw_constraints_channels_4 = {
	.count = 3,
	.list = hw_channels,
	.mask = 0,
};
static struct snd_pcm_hw_constraint_list hw_constraints_channels_6 = {
	.count = 4,
	.list = hw_channels,
	.mask = 0,
};
static struct snd_pcm_hw_constraint_list hw_constraints_channels_8 = {
	.count = 5,
	.list = hw_channels,
	.mask = 0,
};

/*
 * Program the multi-channel routing bits for a DAC stream.
 * More than 2 channels requires multi-channel capable hardware, channel B
 * (rec->ch != 0) and stereo 16bit format (rec->fmt == 0x03); otherwise
 * -EINVAL is returned.
 */
static int set_dac_channels(struct cmipci *cm, struct cmipci_pcm *rec, int channels)
{
	if (channels > 2) {
		if (!cm->can_multi_ch || !rec->ch)
			return -EINVAL;
		if (rec->fmt != 0x03) /* stereo 16bit only */
			return -EINVAL;
	}

	if (cm->can_multi_ch) {
		spin_lock_irq(&cm->reg_lock);
		if (channels > 2) {
			snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_NXCHG);
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC);
		} else {
			snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_NXCHG);
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC);
		}
		if (channels == 8)
			snd_cmipci_set_bit(cm, CM_REG_EXT_MISC, CM_CHB3D8C);
		else
			snd_cmipci_clear_bit(cm, CM_REG_EXT_MISC, CM_CHB3D8C);
		if (channels == 6) {
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_CHB3D5C);
			snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_CHB3D6C);
		} else {
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_CHB3D5C);
			snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_CHB3D6C);
		}
		if (channels == 4)
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_CHB3D);
		else
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_CHB3D);
		spin_unlock_irq(&cm->reg_lock);
	}
	return 0;
}


/*
 * prepare playback/capture channel
 * channel to be used must have been set in rec->ch.
 */
static int snd_cmipci_pcm_prepare(struct cmipci *cm, struct cmipci_pcm *rec,
				  struct snd_pcm_substream *substream)
{
	unsigned int reg, freq, freq_ext, val;
	unsigned int period_size;
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* encode the stream format: bit0 = stereo, bit1 = 16bit or wider;
	 * rec->shift converts frames to the hardware's sample-count unit
	 */
	rec->fmt = 0;
	rec->shift = 0;
	if (snd_pcm_format_width(runtime->format) >= 16) {
		rec->fmt |= 0x02;
		if (snd_pcm_format_width(runtime->format) > 16)
			rec->shift++; /* 24/32bit */
	}
	if (runtime->channels > 1)
		rec->fmt |= 0x01;

	if (rec->is_dac && set_dac_channels(cm, rec, runtime->channels) < 0) {
		snd_printd("cannot set dac channels\n");
		return -EINVAL;
	}

	rec->offset = runtime->dma_addr;
	/* buffer and period sizes in frame */
	rec->dma_size = runtime->buffer_size << rec->shift;
	period_size = runtime->period_size << rec->shift;
	if (runtime->channels > 2) {
		/* multi-channels */
		rec->dma_size = (rec->dma_size * runtime->channels) / 2;
		period_size = (period_size * runtime->channels) / 2;
	}

	spin_lock_irq(&cm->reg_lock);

	/* set buffer address */
	reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
	snd_cmipci_write(cm, reg, rec->offset);
	/* program sample counts (registers hold count-1) */
	reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
	snd_cmipci_write_w(cm, reg, rec->dma_size - 1);
	snd_cmipci_write_w(cm, reg + 2, period_size - 1);

	/* set adc/dac flag */
	val = rec->ch ? CM_CHADC1 : CM_CHADC0;
	if (rec->is_dac)
		cm->ctrl &= ~val;
	else
		cm->ctrl |= val;
	snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
	//snd_printd("cmipci: functrl0 = %08x\n", cm->ctrl);

	/* set sample rate: rates up to 48k use the ASFC/DSFC index,
	 * higher rates use the extended SRATE bits in CM_REG_CHFORMAT
	 */
	freq = 0;
	freq_ext = 0;
	if (runtime->rate > 48000)
		switch (runtime->rate) {
		case 88200:
			freq_ext = CM_CH0_SRATE_88K;
			break;
		case 96000:
			freq_ext = CM_CH0_SRATE_96K;
			break;
		case 128000:
			freq_ext = CM_CH0_SRATE_128K;
			break;
		default:
			snd_BUG();
			break;
		}
	else
		freq = snd_cmipci_rate_freq(runtime->rate);
	val = snd_cmipci_read(cm, CM_REG_FUNCTRL1);
	if (rec->ch) {
		val &= ~CM_DSFC_MASK;
		val |= (freq << CM_DSFC_SHIFT) & CM_DSFC_MASK;
	} else {
		val &= ~CM_ASFC_MASK;
		val |= (freq << CM_ASFC_SHIFT) & CM_ASFC_MASK;
	}
	snd_cmipci_write(cm, CM_REG_FUNCTRL1, val);
	//snd_printd("cmipci: functrl1 = %08x\n", val);

	/* set format */
	val = snd_cmipci_read(cm, CM_REG_CHFORMAT);
	if (rec->ch) {
		val &= ~CM_CH1FMT_MASK;
		val |= rec->fmt << CM_CH1FMT_SHIFT;
	} else {
		val &= ~CM_CH0FMT_MASK;
		val |= rec->fmt << CM_CH0FMT_SHIFT;
	}
	if (cm->can_96k) {
		val &= ~(CM_CH0_SRATE_MASK << (rec->ch * 2));
		val |= freq_ext << (rec->ch * 2);
	}
	snd_cmipci_write(cm, CM_REG_CHFORMAT, val);
	//snd_printd("cmipci: chformat = %08x\n", val);

	if (!rec->is_dac && cm->chip_version) {
		if (runtime->rate > 44100)
			snd_cmipci_set_bit(cm, CM_REG_EXT_MISC, CM_ADC48K44K);
		else
			snd_cmipci_clear_bit(cm, CM_REG_EXT_MISC, CM_ADC48K44K);
	}

	rec->running = 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/*
 * PCM trigger/stop
 */
static int snd_cmipci_pcm_trigger(struct cmipci *cm, struct cmipci_pcm *rec, int cmd)
{
	unsigned int inthld, chen, reset, pause;
	int result = 0;

	/* per-channel bit masks, shifted by the channel number */
	inthld = CM_CH0_INT_EN << rec->ch;
	chen = CM_CHEN0 << rec->ch;
	reset = CM_RST_CH0 << rec->ch;
	pause = CM_PAUSE0 << rec->ch;

	spin_lock(&cm->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rec->running = 1;
		/* set interrupt */
		snd_cmipci_set_bit(cm, CM_REG_INT_HLDCLR, inthld);
		cm->ctrl |= chen;
		/* enable channel */
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		//snd_printd("cmipci: functrl0 = %08x\n", cm->ctrl);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		rec->running = 0;
		/* disable interrupt */
		snd_cmipci_clear_bit(cm, CM_REG_INT_HLDCLR, inthld);
		/* reset */
		cm->ctrl &= ~chen;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | reset);
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~reset);
		/* schedule the silence hack for DAC streams (see below) */
		rec->needs_silencing = rec->is_dac;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		cm->ctrl |= pause;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		cm->ctrl &= ~pause;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		break;
	default:
		result = -EINVAL;
		break;
	}
	spin_unlock(&cm->reg_lock);
	return result;
}

/*
 * return the current pointer
 */
static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci_pcm *rec,
						struct snd_pcm_substream *substream)
{
	size_t ptr;
	unsigned int reg, rem, tries;

	if (!rec->running)
		return 0;
#if 1 // this seems better..
	/* the FRAME2 word register counts remaining samples down;
	 * retry up to 3 times in case an in-flight update yields a bogus value
	 */
	reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
	for (tries = 0; tries < 3; tries++) {
		rem = snd_cmipci_read_w(cm, reg);
		if (rem < rec->dma_size)
			goto ok;
	}
	printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
	return SNDRV_PCM_POS_XRUN;
ok:
	ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
#else
	reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
	ptr = snd_cmipci_read(cm, reg) - rec->offset;
	ptr = bytes_to_frames(substream->runtime, ptr);
#endif
	if (substream->runtime->channels > 2)
		ptr = (ptr * 2) / substream->runtime->channels;
	return ptr;
}

/*
 * playback
 */

static int snd_cmipci_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_trigger(cm, &cm->channel[CM_CH_PLAY], cmd);
}

static snd_pcm_uframes_t snd_cmipci_playback_pointer(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_pointer(cm, &cm->channel[CM_CH_PLAY], substream);
}

/*
 * capture
 */

static int snd_cmipci_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_trigger(cm, &cm->channel[CM_CH_CAPT], cmd);
}

static snd_pcm_uframes_t snd_cmipci_capture_pointer(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_pointer(cm, &cm->channel[CM_CH_CAPT], substream);
}


/*
 * hw preparation for spdif
 */

static int snd_cmipci_spdif_default_info(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* read the 32 saved IEC958 status bits back as 4 status bytes */
static int snd_cmipci_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (chip->dig_status >> (i * 8)) & 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* pack the 4 status bytes into dig_status; returns 1 when changed */
static int snd_cmipci_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i, change;
	unsigned int val;

	val = 0;
	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		val |= (unsigned
int)ucontrol->value.iec958.status[i] << (i * 8);
	change = val != chip->dig_status;
	chip->dig_status = val;
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_cmipci_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_cmipci_spdif_default_info,
	.get =		snd_cmipci_spdif_default_get,
	.put =		snd_cmipci_spdif_default_put
};

static int snd_cmipci_spdif_mask_info(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* all 32 status bits are writable: report an all-ones mask */
static int snd_cmipci_spdif_mask_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0xff;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

static struct snd_kcontrol_new snd_cmipci_spdif_mask __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_cmipci_spdif_mask_info,
	.get =		snd_cmipci_spdif_mask_get,
};

static int snd_cmipci_spdif_stream_info(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* per-stream IEC958 status (dig_pcm_status) accessors */
static int snd_cmipci_spdif_stream_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (chip->dig_pcm_status >> (i * 8)) & 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static int snd_cmipci_spdif_stream_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i, change;
	unsigned int val;

	val = 0;
	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		val |= (unsigned int)ucontrol->value.iec958.status[i] << (i * 8);
	change = val != chip->dig_pcm_status;
	chip->dig_pcm_status = val;
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_cmipci_spdif_stream __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_cmipci_spdif_stream_info,
	.get =		snd_cmipci_spdif_stream_get,
	.put =		snd_cmipci_spdif_stream_put
};

/*
 */

/*
 * save mixer setting and mute for AC3 playback:
 * for each control in cm_saved_mixer[], remember the current value,
 * force it to its toggle_on state, and mark the control INACTIVE so
 * user space cannot change it while an AC3 stream runs.
 * Returns 0 or -ENOMEM.
 */
static int save_mixer_state(struct cmipci *cm)
{
	if (! cm->mixer_insensitive) {
		struct snd_ctl_elem_value *val;
		unsigned int i;

		/* GFP_ATOMIC: may be called from a non-sleepable context */
		val = kmalloc(sizeof(*val), GFP_ATOMIC);
		if (!val)
			return -ENOMEM;
		for (i = 0; i < CM_SAVED_MIXERS; i++) {
			struct snd_kcontrol *ctl = cm->mixer_res_ctl[i];
			if (ctl) {
				int event;
				memset(val, 0, sizeof(*val));
				ctl->get(ctl, val);
				cm->mixer_res_status[i] = val->value.integer.value[0];
				val->value.integer.value[0] = cm_saved_mixer[i].toggle_on;
				event = SNDRV_CTL_EVENT_MASK_INFO;
				if (cm->mixer_res_status[i] != val->value.integer.value[0]) {
					ctl->put(ctl, val); /* toggle */
					event |= SNDRV_CTL_EVENT_MASK_VALUE;
				}
				ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
				snd_ctl_notify(cm->card, event, &ctl->id);
			}
		}
		kfree(val);
		cm->mixer_insensitive = 1;
	}
	return 0;
}


/* restore the previously saved mixer status */
static void restore_mixer_state(struct cmipci *cm)
{
	if (cm->mixer_insensitive) {
		struct snd_ctl_elem_value *val;
		unsigned int i;

		val = kmalloc(sizeof(*val), GFP_KERNEL);
		if (!val)
			return;
		cm->mixer_insensitive = 0; /* at first clear this;
					      otherwise the changes will be ignored */
		for (i = 0; i < CM_SAVED_MIXERS; i++) {
			struct snd_kcontrol *ctl = cm->mixer_res_ctl[i];
			if (ctl) {
				int event;
				memset(val, 0, sizeof(*val));
				ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
				ctl->get(ctl, val);
				event = SNDRV_CTL_EVENT_MASK_INFO;
				if (val->value.integer.value[0] != cm->mixer_res_status[i]) {
					val->value.integer.value[0] =
cm->mixer_res_status[i];
					ctl->put(ctl, val);
					event |= SNDRV_CTL_EVENT_MASK_VALUE;
				}
				snd_ctl_notify(cm->card, event, &ctl->id);
			}
		}
		kfree(val);
	}
}

/* spinlock held! */
static void setup_ac3(struct cmipci *cm, struct snd_pcm_substream *subs, int do_ac3, int rate)
{
	if (do_ac3) {
		/* AC3EN for 037 */
		snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_AC3EN1);
		/* AC3EN for 039 */
		snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_AC3EN2);

		if (cm->can_ac3_hw) {
			/* SPD24SEL for 037, 0x02 */
			/* SPD24SEL for 039, 0x20, but cannot be set */
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
		} else { /* can_ac3_sw */
			/* SPD32SEL for 037 & 039, 0x20 */
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
			/* set 176K sample rate to fix 033 HW bug */
			if (cm->chip_version == 33) {
				if (rate >= 48000) {
					snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
				} else {
					snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
				}
			}
		}
	} else {
		snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_AC3EN1);
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_AC3EN2);

		if (cm->can_ac3_hw) {
			/* chip model >= 37 */
			if (snd_pcm_format_width(subs->runtime->format) > 16) {
				snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
				snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			} else {
				snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
				snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			}
		} else {
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
		}
	}
}

/*
 * Enable (up != 0) or disable S/PDIF output for the given playback stream.
 * For AC3 the mixer is muted/saved first (may return an error from
 * save_mixer_state()).
 */
static int setup_spdif_playback(struct cmipci *cm, struct snd_pcm_substream *subs, int up, int do_ac3)
{
	int rate, err;

	rate = subs->runtime->rate;

	if (up && do_ac3)
		if ((err = save_mixer_state(cm)) < 0)
			return err;

	spin_lock_irq(&cm->reg_lock);
	cm->spdif_playback_avail = up;
	if (up) {
		/* they are controlled via "IEC958 Output Switch" */
		/* snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); */
		/* snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_SPDO2DAC); */
		if (cm->spdif_playback_enabled)
			snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		setup_ac3(cm, subs, do_ac3, rate);

		if (rate == 48000 || rate == 96000)
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K | CM_SPDF_AC97);
		else
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K | CM_SPDF_AC97);
		if (rate > 48000)
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		else
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
	} else {
		/* they are controlled via "IEC958 Output Switch" */
		/* snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); */
		/* snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_SPDO2DAC); */
		snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		setup_ac3(cm, subs, 0, 0);
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/*
 * preparation
 */

/* playback - enable spdif only on the certain condition:
 * 44.1k-96k, S16_LE, stereo
 */
static int snd_cmipci_playback_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	int rate = substream->runtime->rate;
	int err, do_spdif, do_ac3 = 0;

	do_spdif = (rate >= 44100 && rate <= 96000 &&
		    substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE &&
		    substream->runtime->channels == 2);
	if (do_spdif && cm->can_ac3_hw)
		do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
	if ((err = setup_spdif_playback(cm, substream, do_spdif, do_ac3)) < 0)
		return err;
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}

/* playback (via device #2) - enable spdif always */
static int snd_cmipci_playback_spdif_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	int err, do_ac3;

	if (cm->can_ac3_hw)
		do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
	else
		do_ac3 = 1; /* doesn't matter */
	if ((err = setup_spdif_playback(cm, substream, 1, do_ac3)) < 0)
		return err;
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}

/*
 * Apparently, the samples last played on channel A stay in some buffer, even
 * after the channel is reset, and get added to the data for the rear DACs when
 * playing a multichannel stream on channel B.  This is likely to generate
 * wraparounds and thus distortions.
 * To avoid this, we play at least one zero sample after the actual stream has
 * stopped.
 */
static void snd_cmipci_silence_hack(struct cmipci *cm, struct cmipci_pcm *rec)
{
	struct snd_pcm_runtime *runtime = rec->substream->runtime;
	unsigned int reg, val;

	if (rec->needs_silencing && runtime && runtime->dma_area) {
		/* set up a small silence buffer */
		memset(runtime->dma_area, 0, PAGE_SIZE);
		reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
		val = ((PAGE_SIZE / 4) - 1) | (((PAGE_SIZE / 4) / 2 - 1) << 16);
		snd_cmipci_write(cm, reg, val);

		/* configure for 16 bits, 2 channels, 8 kHz */
		if (runtime->channels > 2)
			set_dac_channels(cm, rec, 2);
		spin_lock_irq(&cm->reg_lock);
		val = snd_cmipci_read(cm, CM_REG_FUNCTRL1);
		val &= ~(CM_ASFC_MASK << (rec->ch * 3));
		val |= (4 << CM_ASFC_SHIFT) << (rec->ch * 3);
		snd_cmipci_write(cm, CM_REG_FUNCTRL1, val);
		val = snd_cmipci_read(cm, CM_REG_CHFORMAT);
		val &= ~(CM_CH0FMT_MASK << (rec->ch * 2));
		val |= (3 << CM_CH0FMT_SHIFT) << (rec->ch * 2);
		if (cm->can_96k)
			val &= ~(CM_CH0_SRATE_MASK << (rec->ch * 2));
		snd_cmipci_write(cm, CM_REG_CHFORMAT, val);

		/* start stream (we don't need interrupts) */
		cm->ctrl |= CM_CHEN0 << rec->ch;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		spin_unlock_irq(&cm->reg_lock);

		msleep(1);

		/* stop and reset stream */
		spin_lock_irq(&cm->reg_lock);
		cm->ctrl &= ~(CM_CHEN0 << rec->ch);
		val = CM_RST_CH0 << rec->ch;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | val);
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~val);
		spin_unlock_irq(&cm->reg_lock);

		rec->needs_silencing = 0;
	}
}

/* hw_free for channel A playback: tear down spdif, restore mixer, silence */
static int snd_cmipci_playback_hw_free(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	setup_spdif_playback(cm, substream, 0, 0);
	restore_mixer_state(cm);
	snd_cmipci_silence_hack(cm, &cm->channel[0]);
	return snd_cmipci_hw_free(substream);
}

static int snd_cmipci_playback2_hw_free(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	snd_cmipci_silence_hack(cm, &cm->channel[1]);
	return snd_cmipci_hw_free(substream);
}

/* capture */
static int snd_cmipci_capture_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_CAPT], substream);
}

/* capture with spdif (via device #2) */
static int snd_cmipci_capture_spdif_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_CAPTURE_SPDF);
	if (cm->can_96k) {
		if (substream->runtime->rate > 48000)
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		else
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
	}
	if (snd_pcm_format_width(substream->runtime->format) > 16)
		snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
	else
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
	spin_unlock_irq(&cm->reg_lock);

	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_CAPT], substream);
}

static int snd_cmipci_capture_spdif_hw_free(struct snd_pcm_substream *subs)
{
	struct cmipci *cm = snd_pcm_substream_chip(subs);

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_CAPTURE_SPDF);
	snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
	spin_unlock_irq(&cm->reg_lock);

	return snd_cmipci_hw_free(subs);
}


/*
 * interrupt handler
 */
static irqreturn_t snd_cmipci_interrupt(int irq, void *dev_id)
{
	struct cmipci *cm = dev_id;
	unsigned int status, mask = 0;

	/* fastpath out, to ease interrupt sharing */
	status = snd_cmipci_read(cm, CM_REG_INT_STATUS);
	if (!(status & CM_INTR))
		return IRQ_NONE;

	/* acknowledge
interrupt */
	spin_lock(&cm->reg_lock);
	if (status & CM_CHINT0)
		mask |= CM_CH0_INT_EN;
	if (status & CM_CHINT1)
		mask |= CM_CH1_INT_EN;
	/* ack by toggling the channel interrupt-enable bits off and on */
	snd_cmipci_clear_bit(cm, CM_REG_INT_HLDCLR, mask);
	snd_cmipci_set_bit(cm, CM_REG_INT_HLDCLR, mask);
	spin_unlock(&cm->reg_lock);

	if (cm->rmidi && (status & CM_UARTINT))
		snd_mpu401_uart_interrupt(irq, cm->rmidi->private_data);

	if (cm->pcm) {
		if ((status & CM_CHINT0) && cm->channel[0].running)
			snd_pcm_period_elapsed(cm->channel[0].substream);
		if ((status & CM_CHINT1) && cm->channel[1].running)
			snd_pcm_period_elapsed(cm->channel[1].substream);
	}
	return IRQ_HANDLED;
}

/*
 * h/w infos
 */

/* playback on channel A */
static struct snd_pcm_hardware snd_cmipci_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* capture on channel B */
static struct snd_pcm_hardware snd_cmipci_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* playback on channel B - stereo 16bit only? */
static struct snd_pcm_hardware snd_cmipci_playback2 =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif playback on channel A */
static struct snd_pcm_hardware snd_cmipci_playback_spdif =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif playback on channel A (32bit, IEC958 subframes) */
static struct snd_pcm_hardware snd_cmipci_playback_iec958_subframe =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif capture on channel B */
static struct snd_pcm_hardware snd_cmipci_capture_spdif =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* discrete rate list used via SNDRV_PCM_RATE_KNOT on version-55 chips */
static unsigned int rate_constraints[] = { 5512, 8000, 11025, 16000, 22050,
			     32000, 44100, 48000, 88200, 96000, 128000 };
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
		.count = ARRAY_SIZE(rate_constraints),
		.list = rate_constraints,
		.mask = 0,
};

/*
 * check device open/close
 */

/*
 * Reserve a DMA channel for the given open mode.  Returns -EBUSY if the
 * channel is already in use.  Non-DAC modes also disable the dual-DAC bit.
 */
static int open_device_check(struct cmipci *cm, int mode, struct snd_pcm_substream *subs)
{
	int ch = mode & CM_OPEN_CH_MASK;

	/* FIXME: a file should wait until the device becomes free
	 * when it's opened on blocking mode.  however, since the current
	 * pcm framework doesn't pass file pointer before actually opened,
	 * we can't know whether blocking mode or not in open callback..
	 */
	mutex_lock(&cm->open_mutex);
	if (cm->opened[ch]) {
		mutex_unlock(&cm->open_mutex);
		return -EBUSY;
	}
	cm->opened[ch] = mode;
	cm->channel[ch].substream = subs;
	if (! (mode & CM_OPEN_DAC)) {
		/* disable dual DAC mode */
		cm->channel[ch].is_dac = 0;
		spin_lock_irq(&cm->reg_lock);
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC);
		spin_unlock_irq(&cm->reg_lock);
	}
	mutex_unlock(&cm->open_mutex);
	return 0;
}

/* release the channel reserved with the given mode (no-op for other modes) */
static void close_device_check(struct cmipci *cm, int mode)
{
	int ch = mode & CM_OPEN_CH_MASK;

	mutex_lock(&cm->open_mutex);
	if (cm->opened[ch] == mode) {
		if (cm->channel[ch].substream) {
			snd_cmipci_ch_reset(cm, ch);
			cm->channel[ch].running = 0;
			cm->channel[ch].substream = NULL;
		}
		cm->opened[ch] = 0;
		if (!
cm->channel[ch].is_dac) {
			/* enable dual DAC mode again */
			cm->channel[ch].is_dac = 1;
			spin_lock_irq(&cm->reg_lock);
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC);
			spin_unlock_irq(&cm->reg_lock);
		}
	}
	mutex_unlock(&cm->open_mutex);
}

/*
 */

static int snd_cmipci_playback_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_PLAYBACK, substream)) < 0)
		return err;
	runtime->hw = snd_cmipci_playback;
	if (cm->chip_version == 68) {
		runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000;
		runtime->hw.rate_max = 96000;
	} else if (cm->chip_version == 55) {
		err = snd_pcm_hw_constraint_list(runtime, 0,
			SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
		if (err < 0)
			return err;
		runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
		runtime->hw.rate_max = 128000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000);
	cm->dig_pcm_status = cm->dig_status;
	return 0;
}

static int snd_cmipci_capture_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_CAPTURE, substream)) < 0)
		return err;
	runtime->hw = snd_cmipci_capture;
	if (cm->chip_version == 68) {	// 8768 only supports 44k/48k recording
		/* NOTE(review): 41000 looks like a typo for 44100, but the
		 * rates mask below already restricts selection to 44.1k/48k,
		 * so the odd rate_min has no practical effect */
		runtime->hw.rate_min = 41000;
		runtime->hw.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
	} else if (cm->chip_version == 55) {
		err = snd_pcm_hw_constraint_list(runtime, 0,
			SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
		if (err < 0)
			return err;
		runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
		runtime->hw.rate_max = 128000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000);
	return 0;
}

static int snd_cmipci_playback2_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_PLAYBACK2, substream)) < 0) /* use channel B */
		return err;
	runtime->hw = snd_cmipci_playback2;
	mutex_lock(&cm->open_mutex);
	/* multi-channel playback is only possible while channel A is free */
	if (! cm->opened[CM_CH_PLAY]) {
		if (cm->can_multi_ch) {
			runtime->hw.channels_max = cm->max_channels;
			if (cm->max_channels == 4)
				snd_pcm_hw_constraint_list(runtime, 0,
					SNDRV_PCM_HW_PARAM_CHANNELS,
					&hw_constraints_channels_4);
			else if (cm->max_channels == 6)
				snd_pcm_hw_constraint_list(runtime, 0,
					SNDRV_PCM_HW_PARAM_CHANNELS,
					&hw_constraints_channels_6);
			else if (cm->max_channels == 8)
				snd_pcm_hw_constraint_list(runtime, 0,
					SNDRV_PCM_HW_PARAM_CHANNELS,
					&hw_constraints_channels_8);
		}
	}
	mutex_unlock(&cm->open_mutex);
	if (cm->chip_version == 68) {
		runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000;
		runtime->hw.rate_max = 96000;
	} else if (cm->chip_version == 55) {
		err = snd_pcm_hw_constraint_list(runtime, 0,
			SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
		if (err < 0)
			return err;
		runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
		runtime->hw.rate_max = 128000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000);
	return 0;
}

static int snd_cmipci_playback_spdif_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_SPDIF_PLAYBACK, substream)) < 0) /* use channel A */
		return err;
	if (cm->can_ac3_hw) {
		runtime->hw = snd_cmipci_playback_spdif;
		if (cm->chip_version >= 37) {
			runtime->hw.formats |= SNDRV_PCM_FMTBIT_S32_LE;
			snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
		}
		if (cm->can_96k) {
			runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
					     SNDRV_PCM_RATE_96000;
			runtime->hw.rate_max = 96000;
		}
	} else {
		runtime->hw = snd_cmipci_playback_iec958_subframe;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x40000);
	cm->dig_pcm_status = cm->dig_status;
	return 0;
}

static int snd_cmipci_capture_spdif_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_SPDIF_CAPTURE, substream)) < 0) /* use channel B */
		return err;
	runtime->hw = snd_cmipci_capture_spdif;
	if (cm->can_96k && !(cm->chip_version == 68)) {
		runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000;
		runtime->hw.rate_max = 96000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x40000);
	return 0;
}

/*
 */

static int snd_cmipci_playback_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_PLAYBACK);
	return 0;
}

static int snd_cmipci_capture_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_CAPTURE);
	return 0;
}

static int snd_cmipci_playback2_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_PLAYBACK2);
	/* also release channel A if it was reserved for multi-channel mode */
	close_device_check(cm, CM_OPEN_PLAYBACK_MULTI);
	return 0;
}

static int snd_cmipci_playback_spdif_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_SPDIF_PLAYBACK);
	return 0;
}

static int snd_cmipci_capture_spdif_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_SPDIF_CAPTURE);
	return 0;
}

/*
 */

static struct snd_pcm_ops snd_cmipci_playback_ops = {
	.open =		snd_cmipci_playback_open,
	.close =	snd_cmipci_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_playback_hw_free,
	.prepare =	snd_cmipci_playback_prepare,
	.trigger =	snd_cmipci_playback_trigger,
	.pointer =	snd_cmipci_playback_pointer,
};

static struct snd_pcm_ops snd_cmipci_capture_ops = {
	.open =
snd_cmipci_capture_open,	/* (table continued from previous line) */
	.close =	snd_cmipci_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_hw_free,
	.prepare =	snd_cmipci_capture_prepare,
	.trigger =	snd_cmipci_capture_trigger,
	.pointer =	snd_cmipci_capture_pointer,
};

/* the second playback stream runs on DMA channel B, hence it reuses
 * the capture callbacks for prepare/trigger/pointer */
static struct snd_pcm_ops snd_cmipci_playback2_ops = {
	.open =		snd_cmipci_playback2_open,
	.close =	snd_cmipci_playback2_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_playback2_hw_params,
	.hw_free =	snd_cmipci_playback2_hw_free,
	.prepare =	snd_cmipci_capture_prepare,	/* channel B */
	.trigger =	snd_cmipci_capture_trigger,	/* channel B */
	.pointer =	snd_cmipci_capture_pointer,	/* channel B */
};

static struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
	.open =		snd_cmipci_playback_spdif_open,
	.close =	snd_cmipci_playback_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_playback_hw_free,
	.prepare =	snd_cmipci_playback_spdif_prepare,	/* set up rate */
	.trigger =	snd_cmipci_playback_trigger,
	.pointer =	snd_cmipci_playback_pointer,
};

static struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
	.open =		snd_cmipci_capture_spdif_open,
	.close =	snd_cmipci_capture_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_capture_spdif_hw_free,
	.prepare =	snd_cmipci_capture_spdif_prepare,
	.trigger =	snd_cmipci_capture_trigger,
	.pointer =	snd_cmipci_capture_pointer,
};

/*
 * create the main PCM device (one playback + one capture substream)
 * and preallocate its DMA buffers
 */
static int __devinit snd_cmipci_pcm_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cmipci_capture_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI DAC/ADC");
	cm->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/*
 * create the second PCM device (playback only, channel B)
 */
static int __devinit snd_cmipci_pcm2_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 0, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback2_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI 2nd DAC");
	cm->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/*
 * create the S/PDIF PCM device (playback + capture)
 */
static int __devinit snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback_spdif_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cmipci_capture_spdif_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI IEC958");
	cm->pcm_spdif = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/*
 * mixer interface:
 * - CM8338/8738 has a compatible mixer interface with SB16, but
 * lack of some elements like tone control, i/o gain and AGC.
 * - Access to native registers:
 * - A 3D switch
 * - Output mute switches
 */

/* write one byte to the SB16-compatible mixer register set
 * (index port then data port) */
static void snd_cmipci_mixer_write(struct cmipci *s, unsigned char idx, unsigned char data)
{
	outb(idx, s->iobase + CM_REG_SB16_ADDR);
	outb(data, s->iobase + CM_REG_SB16_DATA);
}

/* read one byte from the SB16-compatible mixer register set */
static unsigned char snd_cmipci_mixer_read(struct cmipci *s, unsigned char idx)
{
	unsigned char v;

	outb(idx, s->iobase + CM_REG_SB16_ADDR);
	v = inb(s->iobase + CM_REG_SB16_DATA);
	return v;
}

/*
 * general mixer element: the register layout of one control, packed
 * into a kcontrol private_value by COMPOSE_SB_REG() and unpacked
 * again by cmipci_sb_reg_decode()
 */
struct cmipci_sb_reg {
	unsigned int left_reg, right_reg;	/* register indices (8 bits each) */
	unsigned int left_shift, right_shift;	/* bit positions (3 bits each) */
	unsigned int mask;			/* value mask (8 bits) */
	unsigned int invert: 1;			/* hardware value is inverted */
	unsigned int stereo: 1;			/* two channels? */
};

/* pack the fields above into a single private_value word:
 * bits 0-7 left_reg, 8-15 right_reg, 16-18 lshift, 19-21 rshift,
 * bit 22 invert, bit 23 stereo, bits 24-31 mask */
#define COMPOSE_SB_REG(lreg,rreg,lshift,rshift,mask,invert,stereo) \
 ((lreg) | ((rreg) << 8) | (lshift << 16) | (rshift << 19) | (mask << 24) | (invert << 22) | (stereo << 23))

#define CMIPCI_DOUBLE(xname, left_reg, right_reg, left_shift, right_shift, mask, invert, stereo) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_volume, \
  .get = snd_cmipci_get_volume, .put = snd_cmipci_put_volume, \
  .private_value = COMPOSE_SB_REG(left_reg, right_reg, left_shift, right_shift, mask, invert, stereo), \
}

/* convenience wrappers for the common SB16 control layouts */
#define CMIPCI_SB_VOL_STEREO(xname,reg,shift,mask) CMIPCI_DOUBLE(xname, reg, reg+1, shift, shift, mask, 0, 1)
#define CMIPCI_SB_VOL_MONO(xname,reg,shift,mask) CMIPCI_DOUBLE(xname, reg, reg, shift, shift, mask, 0, 0)
#define CMIPCI_SB_SW_STEREO(xname,lshift,rshift) CMIPCI_DOUBLE(xname, SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, lshift, rshift, 1, 0, 1)
#define CMIPCI_SB_SW_MONO(xname,shift) CMIPCI_DOUBLE(xname, SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, shift, shift, 1, 0, 0)

/* inverse of COMPOSE_SB_REG(): unpack a private_value word */
static void cmipci_sb_reg_decode(struct cmipci_sb_reg *r, unsigned long val)
{
	r->left_reg = val & 0xff;
	r->right_reg = (val >> 8) & 0xff;
	r->left_shift = (val >> 16) & 0x07;
	r->right_shift = (val >> 19) & 0x07;
	r->invert = (val >> 22) & 1;
	r->stereo = (val >> 23) & 1;
	r->mask = (val >> 24) & 0xff;
}

/* info callback: boolean when mask == 1, integer 0..mask otherwise */
static int snd_cmipci_info_volume(struct snd_kcontrol
*kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct cmipci_sb_reg reg;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	uinfo->type = reg.mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = reg.stereo + 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = reg.mask;
	return 0;
}

/* get callback: read the current value(s) from the SB16 mixer */
static int snd_cmipci_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	val = (snd_cmipci_mixer_read(cm, reg.left_reg) >> reg.left_shift) & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	ucontrol->value.integer.value[0] = val;
	if (reg.stereo) {
		val = (snd_cmipci_mixer_read(cm, reg.right_reg) >> reg.right_shift) & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		ucontrol->value.integer.value[1] = val;
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/* put callback: read-modify-write the affected bits only; returns
 * 1 if a register value actually changed */
static int snd_cmipci_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int change;
	int left, right, oleft, oright;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	left = ucontrol->value.integer.value[0] & reg.mask;
	if (reg.invert)
		left = reg.mask - left;
	left <<= reg.left_shift;
	if (reg.stereo) {
		right = ucontrol->value.integer.value[1] & reg.mask;
		if (reg.invert)
			right = reg.mask - right;
		right <<= reg.right_shift;
	} else
		right = 0;
	spin_lock_irq(&cm->reg_lock);
	oleft = snd_cmipci_mixer_read(cm, reg.left_reg);
	left |= oleft & ~(reg.mask << reg.left_shift);
	change = left != oleft;
	if (reg.stereo) {
		if (reg.left_reg != reg.right_reg) {
			snd_cmipci_mixer_write(cm, reg.left_reg, left);
			oright = snd_cmipci_mixer_read(cm, reg.right_reg);
		} else
			/* both channels live in the same register */
			oright = left;
		right |= oright & ~(reg.mask << reg.right_shift);
		change |= right != oright;
		snd_cmipci_mixer_write(cm, reg.right_reg, right);
	} else
		snd_cmipci_mixer_write(cm, reg.left_reg, left);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/*
 * input route (left,right) -> (left,right): a 4-element boolean
 * control describing which input channel feeds which ADC channel
 */
#define CMIPCI_SB_INPUT_SW(xname, left_shift, right_shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_input_sw, \
  .get = snd_cmipci_get_input_sw, .put = snd_cmipci_put_input_sw, \
  .private_value = COMPOSE_SB_REG(SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, left_shift, right_shift, 1, 0, 1), \
}

static int snd_cmipci_info_input_sw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 4;	/* LL, RL, LR, RR */
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;
	return 0;
}

static int snd_cmipci_get_input_sw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int val1, val2;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	val1 = snd_cmipci_mixer_read(cm, reg.left_reg);
	val2 = snd_cmipci_mixer_read(cm, reg.right_reg);
	spin_unlock_irq(&cm->reg_lock);
	ucontrol->value.integer.value[0] = (val1 >> reg.left_shift) & 1;
	ucontrol->value.integer.value[1] = (val2 >> reg.left_shift) & 1;
	ucontrol->value.integer.value[2] = (val1 >> reg.right_shift) & 1;
	ucontrol->value.integer.value[3] = (val2 >> reg.right_shift) & 1;
	return 0;
}

static int snd_cmipci_put_input_sw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int change;
	int val1, val2, oval1, oval2;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oval1 = snd_cmipci_mixer_read(cm, reg.left_reg);
	oval2 = snd_cmipci_mixer_read(cm, reg.right_reg);
	/* clear then re-set only the two routing bits in each register */
	val1 = oval1 & ~((1 << reg.left_shift) | (1 << reg.right_shift));
	val2 = oval2 & ~((1 << reg.left_shift) | (1 << reg.right_shift));
	val1 |= (ucontrol->value.integer.value[0] & 1) << reg.left_shift;
	val2 |= (ucontrol->value.integer.value[1] & 1) << reg.left_shift;
	val1 |= (ucontrol->value.integer.value[2] & 1) << reg.right_shift;
	val2 |= (ucontrol->value.integer.value[3] & 1) << reg.right_shift;
	change = val1 != oval1 || val2 != oval2;
	snd_cmipci_mixer_write(cm, reg.left_reg, val1);
	snd_cmipci_mixer_write(cm, reg.right_reg, val2);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/*
 * native mixer switches/volumes: these access the CM-specific
 * registers directly via inb/outb instead of the SB16 mixer ports
 */
#define CMIPCI_MIXER_SW_STEREO(xname, reg, lshift, rshift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, lshift, rshift, 1, invert, 1), \
}

#define CMIPCI_MIXER_SW_MONO(xname, reg, shift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, shift, shift, 1, invert, 0), \
}

#define CMIPCI_MIXER_VOL_STEREO(xname, reg, lshift, rshift, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, lshift, rshift, mask, 0, 1), \
}

#define CMIPCI_MIXER_VOL_MONO(xname, reg, shift, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, shift, shift, mask, 0, 0), \
}

static int snd_cmipci_info_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct cmipci_sb_reg reg;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	uinfo->type = reg.mask == 1 ?
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = reg.stereo + 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = reg.mask;
	return 0;
}

/* get callback for native-register controls: both channels are read
 * from a single byte-wide register */
static int snd_cmipci_get_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	unsigned char oreg, val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oreg = inb(cm->iobase + reg.left_reg);
	val = (oreg >> reg.left_shift) & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	ucontrol->value.integer.value[0] = val;
	if (reg.stereo) {
		val = (oreg >> reg.right_shift) & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		ucontrol->value.integer.value[1] = val;
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/* put callback for native-register controls; returns 1 when the
 * register value changed */
static int snd_cmipci_put_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	unsigned char oreg, nreg, val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oreg = inb(cm->iobase + reg.left_reg);
	val = ucontrol->value.integer.value[0] & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	nreg = oreg & ~(reg.mask << reg.left_shift);
	nreg |= (val << reg.left_shift);
	if (reg.stereo) {
		val = ucontrol->value.integer.value[1] & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		nreg &= ~(reg.mask << reg.right_shift);
		nreg |= (val << reg.right_shift);
	}
	outb(nreg, cm->iobase + reg.left_reg);
	spin_unlock_irq(&cm->reg_lock);
	return (nreg != oreg);
}

/*
 * special case - check mixer sensitivity: writes are ignored while
 * the mixer is in "insensitive" (non-audio/AC3) mode
 */
static int snd_cmipci_get_native_mixer_sensitive(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	//struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	return snd_cmipci_get_native_mixer(kcontrol, ucontrol);
}

static int snd_cmipci_put_native_mixer_sensitive(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci
*cm = snd_kcontrol_chip(kcontrol);
	if (cm->mixer_insensitive) {
		/* ignored */
		return 0;
	}
	return snd_cmipci_put_native_mixer(kcontrol, ucontrol);
}

/* the standard mixer controls, mixing SB16-compatible and native
 * register accesses */
static struct snd_kcontrol_new snd_cmipci_mixers[] __devinitdata = {
	CMIPCI_SB_VOL_STEREO("Master Playback Volume", SB_DSP4_MASTER_DEV, 3, 31),
	CMIPCI_MIXER_SW_MONO("3D Control - Switch", CM_REG_MIXER1, CM_X3DEN_SHIFT, 0),
	CMIPCI_SB_VOL_STEREO("PCM Playback Volume", SB_DSP4_PCM_DEV, 3, 31),
	//CMIPCI_MIXER_SW_MONO("PCM Playback Switch", CM_REG_MIXER1, CM_WSMUTE_SHIFT, 1),
	{ /* switch with sensitivity */
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "PCM Playback Switch",
		.info = snd_cmipci_info_native_mixer,
		.get = snd_cmipci_get_native_mixer_sensitive,
		.put = snd_cmipci_put_native_mixer_sensitive,
		.private_value = COMPOSE_SB_REG(CM_REG_MIXER1, CM_REG_MIXER1, CM_WSMUTE_SHIFT, CM_WSMUTE_SHIFT, 1, 1, 0),
	},
	CMIPCI_MIXER_SW_STEREO("PCM Capture Switch", CM_REG_MIXER1, CM_WAVEINL_SHIFT, CM_WAVEINR_SHIFT, 0),
	CMIPCI_SB_VOL_STEREO("Synth Playback Volume", SB_DSP4_SYNTH_DEV, 3, 31),
	CMIPCI_MIXER_SW_MONO("Synth Playback Switch", CM_REG_MIXER1, CM_FMMUTE_SHIFT, 1),
	CMIPCI_SB_INPUT_SW("Synth Capture Route", 6, 5),
	CMIPCI_SB_VOL_STEREO("CD Playback Volume", SB_DSP4_CD_DEV, 3, 31),
	CMIPCI_SB_SW_STEREO("CD Playback Switch", 2, 1),
	CMIPCI_SB_INPUT_SW("CD Capture Route", 2, 1),
	CMIPCI_SB_VOL_STEREO("Line Playback Volume", SB_DSP4_LINE_DEV, 3, 31),
	CMIPCI_SB_SW_STEREO("Line Playback Switch", 4, 3),
	CMIPCI_SB_INPUT_SW("Line Capture Route", 4, 3),
	CMIPCI_SB_VOL_MONO("Mic Playback Volume", SB_DSP4_MIC_DEV, 3, 31),
	CMIPCI_SB_SW_MONO("Mic Playback Switch", 0),
	CMIPCI_DOUBLE("Mic Capture Switch", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 0, 0, 1, 0, 0),
	CMIPCI_SB_VOL_MONO("Beep Playback Volume", SB_DSP4_SPEAKER_DEV, 6, 3),
	CMIPCI_MIXER_VOL_STEREO("Aux Playback Volume", CM_REG_AUX_VOL, 4, 0, 15),
	CMIPCI_MIXER_SW_STEREO("Aux Playback Switch", CM_REG_MIXER2, CM_VAUXLM_SHIFT, CM_VAUXRM_SHIFT, 0),
	CMIPCI_MIXER_SW_STEREO("Aux Capture Switch",
			       CM_REG_MIXER2, CM_RAUXLEN_SHIFT, CM_RAUXREN_SHIFT, 0),
	CMIPCI_MIXER_SW_MONO("Mic Boost Playback Switch", CM_REG_MIXER2, CM_MICGAINZ_SHIFT, 1),
	CMIPCI_MIXER_VOL_MONO("Mic Capture Volume", CM_REG_MIXER2, CM_VADMIC_SHIFT, 7),
	CMIPCI_SB_VOL_MONO("Phone Playback Volume", CM_REG_EXTENT_IND, 5, 7),
	CMIPCI_DOUBLE("Phone Playback Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 4, 4, 1, 0, 0),
	CMIPCI_DOUBLE("Beep Playback Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 3, 3, 1, 0, 0),
	CMIPCI_DOUBLE("Mic Boost Capture Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 0, 0, 1, 0, 0),
};

/*
 * other switches: simple on/off bits described by a
 * cmipci_switch_args attached to the kcontrol's private_value
 */
struct cmipci_switch_args {
	int reg;		/* register index */
	unsigned int mask;	/* mask bits */
	unsigned int mask_on;	/* mask bits to turn on */
	unsigned int is_byte: 1;		/* byte access? */
	unsigned int ac3_sensitive: 1;	/* access forbidden during
					 * non-audio operation? */
};

#define snd_cmipci_uswitch_info	snd_ctl_boolean_mono_info

/* common get helper: reports 1 only when all mask bits match mask_on */
static int _snd_cmipci_uswitch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol, struct cmipci_switch_args *args)
{
	unsigned int val;
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&cm->reg_lock);
	if (args->ac3_sensitive && cm->mixer_insensitive) {
		/* report "off" while in non-audio (AC3) mode */
		ucontrol->value.integer.value[0] = 0;
		spin_unlock_irq(&cm->reg_lock);
		return 0;
	}
	if (args->is_byte)
		val = inb(cm->iobase + args->reg);
	else
		val = snd_cmipci_read(cm, args->reg);
	ucontrol->value.integer.value[0] = ((val & args->mask) == args->mask_on) ?
1 : 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_uswitch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci_switch_args *args;
	args = (struct cmipci_switch_args *)kcontrol->private_value;
	if (snd_BUG_ON(!args))
		return -EINVAL;
	return _snd_cmipci_uswitch_get(kcontrol, ucontrol, args);
}

/* common put helper: sets mask_on bits (or the complementary bits
 * within mask when switching off); returns 1 on change */
static int _snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol, struct cmipci_switch_args *args)
{
	unsigned int val;
	int change;
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&cm->reg_lock);
	if (args->ac3_sensitive && cm->mixer_insensitive) {
		/* ignored */
		spin_unlock_irq(&cm->reg_lock);
		return 0;
	}
	if (args->is_byte)
		val = inb(cm->iobase + args->reg);
	else
		val = snd_cmipci_read(cm, args->reg);
	change = (val & args->mask) != (ucontrol->value.integer.value[0] ? args->mask_on : (args->mask & ~args->mask_on));
	if (change) {
		val &= ~args->mask;
		if (ucontrol->value.integer.value[0])
			val |= args->mask_on;
		else
			val |= (args->mask & ~args->mask_on);
		if (args->is_byte)
			outb((unsigned char)val, cm->iobase + args->reg);
		else
			snd_cmipci_write(cm, args->reg, val);
	}
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

static int snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci_switch_args *args;
	args = (struct cmipci_switch_args *)kcontrol->private_value;
	if (snd_BUG_ON(!args))
		return -EINVAL;
	return _snd_cmipci_uswitch_put(kcontrol, ucontrol, args);
}

/* define a static cmipci_switch_args instance for one switch */
#define DEFINE_SWITCH_ARG(sname, xreg, xmask, xmask_on, xis_byte, xac3) \
static struct cmipci_switch_args cmipci_switch_arg_##sname = { \
  .reg = xreg, \
  .mask = xmask, \
  .mask_on = xmask_on, \
  .is_byte = xis_byte, \
  .ac3_sensitive = xac3, \
}

/* single-bit switch: mask_on == mask */
#define DEFINE_BIT_SWITCH_ARG(sname, xreg, xmask, xis_byte, xac3) \
	DEFINE_SWITCH_ARG(sname, xreg, xmask, xmask, xis_byte, xac3)

#if 0 /* these will be controlled in pcm device */
DEFINE_BIT_SWITCH_ARG(spdif_in, CM_REG_FUNCTRL1, CM_SPDF_1, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_out, CM_REG_FUNCTRL1, CM_SPDF_0, 0, 0);
#endif
DEFINE_BIT_SWITCH_ARG(spdif_in_sel1, CM_REG_CHFORMAT, CM_SPDIF_SELECT1, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_in_sel2, CM_REG_MISC_CTRL, CM_SPDIF_SELECT2, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_enable, CM_REG_LEGACY_CTRL, CM_ENSPDOUT, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdo2dac, CM_REG_FUNCTRL1, CM_SPDO2DAC, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdi_valid, CM_REG_MISC, CM_SPDVALID, 1, 0);
DEFINE_BIT_SWITCH_ARG(spdif_copyright, CM_REG_LEGACY_CTRL, CM_SPDCOPYRHT, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_dac_out, CM_REG_LEGACY_CTRL, CM_DAC2SPDO, 0, 1);
DEFINE_SWITCH_ARG(spdo_5v, CM_REG_MISC_CTRL, CM_SPDO5V, 0, 0, 0); /* inverse: 0 = 5V */
// DEFINE_BIT_SWITCH_ARG(spdo_48k, CM_REG_MISC_CTRL, CM_SPDF_AC97|CM_SPDIF48K, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdif_loop, CM_REG_FUNCTRL1, CM_SPDFLOOP, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdi_monitor, CM_REG_MIXER1, CM_CDPLAY, 1, 0);
/* DEFINE_BIT_SWITCH_ARG(spdi_phase, CM_REG_CHFORMAT, CM_SPDIF_INVERSE, 0, 0); */
DEFINE_BIT_SWITCH_ARG(spdi_phase, CM_REG_MISC, CM_SPDIF_INVERSE, 1, 0);
DEFINE_BIT_SWITCH_ARG(spdi_phase2, CM_REG_CHFORMAT, CM_SPDIF_INVERSE2, 0, 0);
#if CM_CH_PLAY == 1
DEFINE_SWITCH_ARG(exchange_dac, CM_REG_MISC_CTRL, CM_XCHGDAC, 0, 0, 0); /* reversed */
#else
DEFINE_SWITCH_ARG(exchange_dac, CM_REG_MISC_CTRL, CM_XCHGDAC, CM_XCHGDAC, 0, 0);
#endif
DEFINE_BIT_SWITCH_ARG(fourch, CM_REG_MISC_CTRL, CM_N4SPK3D, 0, 0);
// DEFINE_BIT_SWITCH_ARG(line_rear, CM_REG_MIXER1, CM_REAR2LIN, 1, 0);
// DEFINE_BIT_SWITCH_ARG(line_bass, CM_REG_LEGACY_CTRL, CM_CENTR2LIN|CM_BASE2LIN, 0, 0);
// DEFINE_BIT_SWITCH_ARG(joystick, CM_REG_FUNCTRL1, CM_JYSTK_EN, 0, 0); /* now module option */
DEFINE_SWITCH_ARG(modem, CM_REG_MISC_CTRL, CM_FLINKON|CM_FLINKOFF, CM_FLINKON, 0, 0);

#define DEFINE_SWITCH(sname, stype, sarg) \
{ .name = sname, \
  .iface = stype, \
  .info = snd_cmipci_uswitch_info, \
  .get = snd_cmipci_uswitch_get, \
  .put = snd_cmipci_uswitch_put, \
  .private_value = (unsigned long)&cmipci_switch_arg_##sarg,\
}

#define DEFINE_CARD_SWITCH(sname, sarg) DEFINE_SWITCH(sname, SNDRV_CTL_ELEM_IFACE_CARD, sarg)
#define DEFINE_MIXER_SWITCH(sname, sarg) DEFINE_SWITCH(sname, SNDRV_CTL_ELEM_IFACE_MIXER, sarg)

/*
 * callbacks for spdif output switch
 * needs toggle two registers..
 */
static int snd_cmipci_spdout_enable_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	int changed;
	changed = _snd_cmipci_uswitch_get(kcontrol, ucontrol, &cmipci_switch_arg_spdif_enable);
	changed |= _snd_cmipci_uswitch_get(kcontrol, ucontrol, &cmipci_switch_arg_spdo2dac);
	return changed;
}

static int snd_cmipci_spdout_enable_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int changed;
	changed = _snd_cmipci_uswitch_put(kcontrol, ucontrol, &cmipci_switch_arg_spdif_enable);
	changed |= _snd_cmipci_uswitch_put(kcontrol, ucontrol, &cmipci_switch_arg_spdo2dac);
	if (changed) {
		/* additionally flip the playback-to-S/PDIF routing bit,
		 * but only while an S/PDIF-capable playback is available */
		if (ucontrol->value.integer.value[0]) {
			if (chip->spdif_playback_avail)
				snd_cmipci_set_bit(chip, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		} else {
			if (chip->spdif_playback_avail)
				snd_cmipci_clear_bit(chip, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		}
	}
	chip->spdif_playback_enabled = ucontrol->value.integer.value[0];
	return changed;
}

/* "Bass Output" is available only on model 039 and later */
static int snd_cmipci_line_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	static const char *const texts[3] = { "Line-In", "Rear Output", "Bass Output" };
	return snd_ctl_enum_info(uinfo, 1, cm->chip_version >= 39 ?
3 : 2, texts);
}

/* derive the current line-in jack mode from the routing bits:
 * 2 = bass output, 1 = rear output, 0 = plain line input */
static inline unsigned int get_line_in_mode(struct cmipci *cm)
{
	unsigned int val;
	if (cm->chip_version >= 39) {
		val = snd_cmipci_read(cm, CM_REG_LEGACY_CTRL);
		if (val & (CM_CENTR2LIN | CM_BASE2LIN))
			return 2;
	}
	val = snd_cmipci_read_b(cm, CM_REG_MIXER1);
	if (val & CM_REAR2LIN)
		return 1;
	return 0;
}

static int snd_cmipci_line_in_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&cm->reg_lock);
	ucontrol->value.enumerated.item[0] = get_line_in_mode(cm);
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_line_in_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	int change;

	spin_lock_irq(&cm->reg_lock);
	if (ucontrol->value.enumerated.item[0] == 2)
		change = snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_CENTR2LIN | CM_BASE2LIN);
	else
		change = snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_CENTR2LIN | CM_BASE2LIN);
	if (ucontrol->value.enumerated.item[0] == 1)
		change |= snd_cmipci_set_bit_b(cm, CM_REG_MIXER1, CM_REAR2LIN);
	else
		change |= snd_cmipci_clear_bit_b(cm, CM_REG_MIXER1, CM_REAR2LIN);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char *const texts[2] = { "Mic-In", "Center/LFE Output" };
	return snd_ctl_enum_info(uinfo, 1, 2, texts);
}

static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	/* same bit as spdi_phase */
	spin_lock_irq(&cm->reg_lock);
	ucontrol->value.enumerated.item[0] = (snd_cmipci_read_b(cm, CM_REG_MISC) & CM_SPDIF_INVERSE) ?
1 : 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_mic_in_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	int change;

	spin_lock_irq(&cm->reg_lock);
	if (ucontrol->value.enumerated.item[0])
		change = snd_cmipci_set_bit_b(cm, CM_REG_MISC, CM_SPDIF_INVERSE);
	else
		change = snd_cmipci_clear_bit_b(cm, CM_REG_MISC, CM_SPDIF_INVERSE);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/* both for CM8338/8738 */
static struct snd_kcontrol_new snd_cmipci_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("Four Channel Mode", fourch),
	{
		.name = "Line-In Mode",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_line_in_mode_info,
		.get = snd_cmipci_line_in_mode_get,
		.put = snd_cmipci_line_in_mode_put,
	},
};

/* for non-multichannel chips */
static struct snd_kcontrol_new snd_cmipci_nomulti_switch __devinitdata =
DEFINE_MIXER_SWITCH("Exchange DAC", exchange_dac);

/* only for CM8738 */
static struct snd_kcontrol_new snd_cmipci_8738_mixer_switches[] __devinitdata = {
#if 0 /* controlled in pcm device */
	DEFINE_MIXER_SWITCH("IEC958 In Record", spdif_in),
	DEFINE_MIXER_SWITCH("IEC958 Out", spdif_out),
	DEFINE_MIXER_SWITCH("IEC958 Out To DAC", spdo2dac),
#endif
	// DEFINE_MIXER_SWITCH("IEC958 Output Switch", spdif_enable),
	{
		.name = "IEC958 Output Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_uswitch_info,
		.get = snd_cmipci_spdout_enable_get,
		.put = snd_cmipci_spdout_enable_put,
	},
	DEFINE_MIXER_SWITCH("IEC958 In Valid", spdi_valid),
	DEFINE_MIXER_SWITCH("IEC958 Copyright", spdif_copyright),
	DEFINE_MIXER_SWITCH("IEC958 5V", spdo_5v),
	// DEFINE_MIXER_SWITCH("IEC958 In/Out 48KHz", spdo_48k),
	DEFINE_MIXER_SWITCH("IEC958 Loop", spdif_loop),
	DEFINE_MIXER_SWITCH("IEC958 In Monitor", spdi_monitor),
};

/* only for model 033/037 */
static struct snd_kcontrol_new snd_cmipci_old_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("IEC958 Mix Analog", spdif_dac_out),
	DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase),
	DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel1),
};

/* only for model 039 or later */
static struct snd_kcontrol_new snd_cmipci_extra_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel2),
	DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase2),
	{
		.name = "Mic-In Mode",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_mic_in_mode_info,
		.get = snd_cmipci_mic_in_mode_get,
		.put = snd_cmipci_mic_in_mode_put,
	}
};

/* card control switches */
static struct snd_kcontrol_new snd_cmipci_modem_switch __devinitdata =
DEFINE_CARD_SWITCH("Modem", modem);

/*
 * build all mixer controls according to the detected chip model
 * and capabilities
 */
static int __devinit snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
{
	struct snd_card *card;
	struct snd_kcontrol_new *sw;
	struct snd_kcontrol *kctl;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!cm || !cm->card))
		return -EINVAL;

	card = cm->card;

	strcpy(card->mixername, "CMedia PCI");

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_mixer_write(cm, 0x00, 0x00);		/* mixer reset */
	spin_unlock_irq(&cm->reg_lock);

	for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_mixers); idx++) {
		if (cm->chip_version == 68) {	// 8768 has no PCM volume
			if (!strcmp(snd_cmipci_mixers[idx].name, "PCM Playback Volume"))
				continue;
		}
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cmipci_mixers[idx], cm))) < 0)
			return err;
	}

	/* mixer switches */
	sw = snd_cmipci_mixer_switches;
	for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_mixer_switches); idx++, sw++) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
		if (err < 0)
			return err;
	}
	if (!
cm->can_multi_ch) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(&snd_cmipci_nomulti_switch, cm));
		if (err < 0)
			return err;
	}
	if (cm->device == PCI_DEVICE_ID_CMEDIA_CM8738 ||
	    cm->device == PCI_DEVICE_ID_CMEDIA_CM8738B) {
		/* CM8738-only switches */
		sw = snd_cmipci_8738_mixer_switches;
		for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_8738_mixer_switches); idx++, sw++) {
			err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
			if (err < 0)
				return err;
		}
		if (cm->can_ac3_hw) {
			/* IEC958 status controls, attached to the S/PDIF PCM device */
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
		}
		if (cm->chip_version <= 37) {
			sw = snd_cmipci_old_mixer_switches;
			for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_old_mixer_switches); idx++, sw++) {
				err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
				if (err < 0)
					return err;
			}
		}
	}
	if (cm->chip_version >= 39) {
		sw = snd_cmipci_extra_mixer_switches;
		for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_extra_mixer_switches); idx++, sw++) {
			err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
			if (err < 0)
				return err;
		}
	}

	/* card switches */
	/*
	 * newer chips don't have the register bits to force modem link
	 * detection; the bit that was FLINKON now mutes CH1
	 */
	if (cm->chip_version < 39) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(&snd_cmipci_modem_switch, cm));
		if (err < 0)
			return err;
	}

	/* remember the controls whose state must be saved/restored
	 * around non-audio (AC3) operation */
	for (idx = 0; idx < CM_SAVED_MIXERS; idx++) {
		struct snd_ctl_elem_id elem_id;
		struct snd_kcontrol *ctl;
		memset(&elem_id, 0, sizeof(elem_id));
		elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		strcpy(elem_id.name, cm_saved_mixer[idx].name);
		ctl = snd_ctl_find_id(cm->card, &elem_id);
		if (ctl)
			cm->mixer_res_ctl[idx] = ctl;
	}

	return 0;
}

/*
 * proc interface
 */
#ifdef CONFIG_PROC_FS
/* dump the chip registers (0x00-0x27 and 0x90-0x93) to the proc file */
static void snd_cmipci_proc_read(struct snd_info_entry *entry,
				 struct snd_info_buffer *buffer)
{
	struct cmipci *cm = entry->private_data;
	int i, v;

	snd_iprintf(buffer, "%s\n", cm->card->longname);
	for (i = 0; i < 0x94; i++) {
		if (i == 0x28)
			i = 0x90;	/* skip the reserved range 0x28-0x8f */
		v = inb(cm->iobase + i);
		if (i % 4 == 0)
			snd_iprintf(buffer, "\n%02x:", i);
		snd_iprintf(buffer, " %02x", v);
	}
	snd_iprintf(buffer, "\n");
}

static void __devinit snd_cmipci_proc_init(struct cmipci *cm)
{
	struct snd_info_entry *entry;

	if (! snd_card_proc_new(cm->card, "cmipci", &entry))
		snd_info_set_text_ops(entry, cm, snd_cmipci_proc_read);
}
#else /* !CONFIG_PROC_FS */
static inline void snd_cmipci_proc_init(struct cmipci *cm) {}
#endif

static DEFINE_PCI_DEVICE_TABLE(snd_cmipci_ids) = {
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0},
	{PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
	{0,},
};

/*
 * check chip version and capabilities
 * driver name is modified according to the chip model
 */
static void __devinit query_chip(struct cmipci *cm)
{
	unsigned int detect;

	/* check reg 0Ch, bit 24-31 */
	detect = snd_cmipci_read(cm, CM_REG_INT_HLDCLR) & CM_CHIP_MASK2;
	if (!
detect) {
		/* check reg 08h, bit 24-28 */
		detect = snd_cmipci_read(cm, CM_REG_CHFORMAT) & CM_CHIP_MASK1;
		switch (detect) {
		case 0:
			/* oldest model 033: AC3 in software unless disabled */
			cm->chip_version = 33;
			if (cm->do_soft_ac3)
				cm->can_ac3_sw = 1;
			else
				cm->can_ac3_hw = 1;
			break;
		case CM_CHIP_037:
			cm->chip_version = 37;
			cm->can_ac3_hw = 1;
			break;
		default:
			cm->chip_version = 39;
			cm->can_ac3_hw = 1;
			break;
		}
		cm->max_channels = 2;
	} else {
		/* newer, multi-channel capable models */
		if (detect & CM_CHIP_039) {
			cm->chip_version = 39;
			if (detect & CM_CHIP_039_6CH) /* 4 or 6 channels */
				cm->max_channels = 6;
			else
				cm->max_channels = 4;
		} else if (detect & CM_CHIP_8768) {
			cm->chip_version = 68;
			cm->max_channels = 8;
			cm->can_96k = 1;
		} else {
			cm->chip_version = 55;
			cm->max_channels = 6;
			cm->can_96k = 1;
		}
		cm->can_ac3_hw = 1;
		cm->can_multi_ch = 1;
	}
}

#ifdef SUPPORT_JOYSTICK
/* reserve the gameport I/O range (fixed port or auto-detected),
 * enable the joystick bit and register the gameport device */
static int __devinit snd_cmipci_create_gameport(struct cmipci *cm, int dev)
{
	static int ports[] = { 0x201, 0x200, 0 }; /* FIXME: majority is 0x201? */
	struct gameport *gp;
	struct resource *r = NULL;
	int i, io_port = 0;

	if (joystick_port[dev] == 0)
		return -ENODEV;

	if (joystick_port[dev] == 1) { /* auto-detect */
		for (i = 0; ports[i]; i++) {
			io_port = ports[i];
			r = request_region(io_port, 1, "CMIPCI gameport");
			if (r)
				break;
		}
	} else {
		io_port = joystick_port[dev];
		r = request_region(io_port, 1, "CMIPCI gameport");
	}

	if (!r) {
		printk(KERN_WARNING "cmipci: cannot reserve joystick ports\n");
		return -EBUSY;
	}

	cm->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "cmipci: cannot allocate memory for gameport\n");
		release_and_free_resource(r);
		return -ENOMEM;
	}
	gameport_set_name(gp, "C-Media Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(cm->pci));
	gameport_set_dev_parent(gp, &cm->pci->dev);
	gp->io = io_port;
	gameport_set_port_data(gp, r);

	snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN);

	gameport_register_port(cm->gameport);

	return 0;
}

static void snd_cmipci_free_gameport(struct cmipci *cm)
{
	if (cm->gameport) {
		struct resource *r =
gameport_get_port_data(cm->gameport); gameport_unregister_port(cm->gameport); cm->gameport = NULL; snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN); release_and_free_resource(r); } } #else static inline int snd_cmipci_create_gameport(struct cmipci *cm, int dev) { return -ENOSYS; } static inline void snd_cmipci_free_gameport(struct cmipci *cm) { } #endif static int snd_cmipci_free(struct cmipci *cm) { if (cm->irq >= 0) { snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); /* disable ints */ snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_write(cm, CM_REG_FUNCTRL0, 0); /* disable channels */ snd_cmipci_write(cm, CM_REG_FUNCTRL1, 0); /* reset mixer */ snd_cmipci_mixer_write(cm, 0, 0); free_irq(cm->irq, cm); } snd_cmipci_free_gameport(cm); pci_release_regions(cm->pci); pci_disable_device(cm->pci); kfree(cm); return 0; } static int snd_cmipci_dev_free(struct snd_device *device) { struct cmipci *cm = device->device_data; return snd_cmipci_free(cm); } static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) { long iosynth; unsigned int val; struct snd_opl3 *opl3; int err; if (!fm_port) goto disable_fm; if (cm->chip_version >= 39) { /* first try FM regs in PCI port range */ iosynth = cm->iobase + CM_REG_FM_PCI; err = snd_opl3_create(cm->card, iosynth, iosynth + 2, OPL3_HW_OPL3, 1, &opl3); } else { err = -EIO; } if (err < 0) { /* then try legacy ports */ val = snd_cmipci_read(cm, CM_REG_LEGACY_CTRL) & ~CM_FMSEL_MASK; iosynth = fm_port; switch (iosynth) { case 0x3E8: val |= CM_FMSEL_3E8; break; case 0x3E0: val |= CM_FMSEL_3E0; break; case 0x3C8: val |= CM_FMSEL_3C8; break; case 0x388: val |= CM_FMSEL_388; break; default: goto disable_fm; } snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); /* enable FM */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); if (snd_opl3_create(cm->card, iosynth, iosynth + 2, 
OPL3_HW_OPL3, 0, &opl3) < 0) { printk(KERN_ERR "cmipci: no OPL device at %#lx, " "skipping...\n", iosynth); goto disable_fm; } } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { printk(KERN_ERR "cmipci: cannot create OPL3 hwdep\n"); return err; } return 0; disable_fm: snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_FMSEL_MASK); snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); return 0; } static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci, int dev, struct cmipci **rcmipci) { struct cmipci *cm; int err; static struct snd_device_ops ops = { .dev_free = snd_cmipci_dev_free, }; unsigned int val; long iomidi = 0; int integrated_midi = 0; char modelstr[16]; int pcm_index, pcm_spdif_index; static DEFINE_PCI_DEVICE_TABLE(intel_82437vx) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX) }, { }, }; *rcmipci = NULL; if ((err = pci_enable_device(pci)) < 0) return err; cm = kzalloc(sizeof(*cm), GFP_KERNEL); if (cm == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&cm->reg_lock); mutex_init(&cm->open_mutex); cm->device = pci->device; cm->card = card; cm->pci = pci; cm->irq = -1; cm->channel[0].ch = 0; cm->channel[1].ch = 1; cm->channel[0].is_dac = cm->channel[1].is_dac = 1; /* dual DAC mode */ if ((err = pci_request_regions(pci, card->driver)) < 0) { kfree(cm); pci_disable_device(pci); return err; } cm->iobase = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_cmipci_interrupt, IRQF_SHARED, card->driver, cm)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_cmipci_free(cm); return -EBUSY; } cm->irq = pci->irq; pci_set_master(cm->pci); /* * check chip version, max channels and capabilities */ cm->chip_version = 0; cm->max_channels = 2; cm->do_soft_ac3 = soft_ac3[dev]; if (pci->device != PCI_DEVICE_ID_CMEDIA_CM8338A && pci->device != PCI_DEVICE_ID_CMEDIA_CM8338B) query_chip(cm); /* added -MCx suffix for chip supporting multi-channels */ if (cm->can_multi_ch) 
sprintf(cm->card->driver + strlen(cm->card->driver), "-MC%d", cm->max_channels); else if (cm->can_ac3_sw) strcpy(cm->card->driver + strlen(cm->card->driver), "-SWIEC"); cm->dig_status = SNDRV_PCM_DEFAULT_CON_SPDIF; cm->dig_pcm_status = SNDRV_PCM_DEFAULT_CON_SPDIF; #if CM_CH_PLAY == 1 cm->ctrl = CM_CHADC0; /* default FUNCNTRL0 */ #else cm->ctrl = CM_CHADC1; /* default FUNCNTRL0 */ #endif /* initialize codec registers */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_RESET); snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_RESET); snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); /* disable ints */ snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_write(cm, CM_REG_FUNCTRL0, 0); /* disable channels */ snd_cmipci_write(cm, CM_REG_FUNCTRL1, 0); snd_cmipci_write(cm, CM_REG_CHFORMAT, 0); snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC|CM_N4SPK3D); #if CM_CH_PLAY == 1 snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); #else snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); #endif if (cm->chip_version) { snd_cmipci_write_b(cm, CM_REG_EXT_MISC, 0x20); /* magic */ snd_cmipci_write_b(cm, CM_REG_EXT_MISC + 1, 0x09); /* more magic */ } /* Set Bus Master Request */ snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_BREQ); /* Assume TX and compatible chip set (Autodetection required for VX chip sets) */ switch (pci->device) { case PCI_DEVICE_ID_CMEDIA_CM8738: case PCI_DEVICE_ID_CMEDIA_CM8738B: if (!pci_dev_present(intel_82437vx)) snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_TXVX); break; default: break; } if (cm->chip_version < 68) { val = pci->device < 0x110 ? 
8338 : 8738; } else { switch (snd_cmipci_read_b(cm, CM_REG_INT_HLDCLR + 3) & 0x03) { case 0: val = 8769; break; case 2: val = 8762; break; default: switch ((pci->subsystem_vendor << 16) | pci->subsystem_device) { case 0x13f69761: case 0x584d3741: case 0x584d3751: case 0x584d3761: case 0x584d3771: case 0x72848384: val = 8770; break; default: val = 8768; break; } } } sprintf(card->shortname, "C-Media CMI%d", val); if (cm->chip_version < 68) sprintf(modelstr, " (model %d)", cm->chip_version); else modelstr[0] = '\0'; sprintf(card->longname, "%s%s at %#lx, irq %i", card->shortname, modelstr, cm->iobase, cm->irq); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cm, &ops)) < 0) { snd_cmipci_free(cm); return err; } if (cm->chip_version >= 39) { val = snd_cmipci_read_b(cm, CM_REG_MPU_PCI + 1); if (val != 0x00 && val != 0xff) { iomidi = cm->iobase + CM_REG_MPU_PCI; integrated_midi = 1; } } if (!integrated_midi) { val = 0; iomidi = mpu_port[dev]; switch (iomidi) { case 0x320: val = CM_VMPU_320; break; case 0x310: val = CM_VMPU_310; break; case 0x300: val = CM_VMPU_300; break; case 0x330: val = CM_VMPU_330; break; default: iomidi = 0; break; } if (iomidi > 0) { snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); /* enable UART */ snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_UART_EN); if (inb(iomidi + 1) == 0xff) { snd_printk(KERN_ERR "cannot enable MPU-401 port" " at %#lx\n", iomidi); snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_UART_EN); iomidi = 0; } } } if (cm->chip_version < 68) { err = snd_cmipci_create_fm(cm, fm_port[dev]); if (err < 0) return err; } /* reset mixer */ snd_cmipci_mixer_write(cm, 0, 0); snd_cmipci_proc_init(cm); /* create pcm devices */ pcm_index = pcm_spdif_index = 0; if ((err = snd_cmipci_pcm_new(cm, pcm_index)) < 0) return err; pcm_index++; if ((err = snd_cmipci_pcm2_new(cm, pcm_index)) < 0) return err; pcm_index++; if (cm->can_ac3_hw || cm->can_ac3_sw) { pcm_spdif_index = pcm_index; if ((err = snd_cmipci_pcm_spdif_new(cm, pcm_index)) < 0) return err; } 
/* create mixer interface & switches */ if ((err = snd_cmipci_mixer_new(cm, pcm_spdif_index)) < 0) return err; if (iomidi > 0) { if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_CMIPCI, iomidi, (integrated_midi ? MPU401_INFO_INTEGRATED : 0), cm->irq, 0, &cm->rmidi)) < 0) { printk(KERN_ERR "cmipci: no UART401 device at 0x%lx\n", iomidi); } } #ifdef USE_VAR48KRATE for (val = 0; val < ARRAY_SIZE(rates); val++) snd_cmipci_set_pll(cm, rates[val], val); /* * (Re-)Enable external switch spdo_48k */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K|CM_SPDF_AC97); #endif /* USE_VAR48KRATE */ if (snd_cmipci_create_gameport(cm, dev) < 0) snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN); snd_card_set_dev(card, &pci->dev); *rcmipci = cm; return 0; } /* */ MODULE_DEVICE_TABLE(pci, snd_cmipci_ids); static int __devinit snd_cmipci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct cmipci *cm; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (! 
enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci->device) { case PCI_DEVICE_ID_CMEDIA_CM8738: case PCI_DEVICE_ID_CMEDIA_CM8738B: strcpy(card->driver, "CMI8738"); break; case PCI_DEVICE_ID_CMEDIA_CM8338A: case PCI_DEVICE_ID_CMEDIA_CM8338B: strcpy(card->driver, "CMI8338"); break; default: strcpy(card->driver, "CMIPCI"); break; } if ((err = snd_cmipci_create(card, pci, dev, &cm)) < 0) { snd_card_free(card); return err; } card->private_data = cm; if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_cmipci_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM /* * power management */ static unsigned char saved_regs[] = { CM_REG_FUNCTRL1, CM_REG_CHFORMAT, CM_REG_LEGACY_CTRL, CM_REG_MISC_CTRL, CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_MIXER3, CM_REG_PLL, CM_REG_CH0_FRAME1, CM_REG_CH0_FRAME2, CM_REG_CH1_FRAME1, CM_REG_CH1_FRAME2, CM_REG_EXT_MISC, CM_REG_INT_STATUS, CM_REG_INT_HLDCLR, CM_REG_FUNCTRL0, }; static unsigned char saved_mixers[] = { SB_DSP4_MASTER_DEV, SB_DSP4_MASTER_DEV + 1, SB_DSP4_PCM_DEV, SB_DSP4_PCM_DEV + 1, SB_DSP4_SYNTH_DEV, SB_DSP4_SYNTH_DEV + 1, SB_DSP4_CD_DEV, SB_DSP4_CD_DEV + 1, SB_DSP4_LINE_DEV, SB_DSP4_LINE_DEV + 1, SB_DSP4_MIC_DEV, SB_DSP4_SPEAKER_DEV, CM_REG_EXTENT_IND, SB_DSP4_OUTPUT_SW, SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, }; static int snd_cmipci_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct cmipci *cm = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(cm->pcm); snd_pcm_suspend_all(cm->pcm2); snd_pcm_suspend_all(cm->pcm_spdif); /* save registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) cm->saved_regs[i] = snd_cmipci_read(cm, saved_regs[i]); for (i = 0; i < 
ARRAY_SIZE(saved_mixers); i++) cm->saved_mixers[i] = snd_cmipci_mixer_read(cm, saved_mixers[i]); /* disable ints */ snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_cmipci_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct cmipci *cm = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "cmipci: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); /* reset / initialize to a sane state */ snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_mixer_write(cm, 0, 0); /* restore registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) snd_cmipci_write(cm, saved_regs[i], cm->saved_regs[i]); for (i = 0; i < ARRAY_SIZE(saved_mixers); i++) snd_cmipci_mixer_write(cm, saved_mixers[i], cm->saved_mixers[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static struct pci_driver driver = { .name = "C-Media PCI", .id_table = snd_cmipci_ids, .probe = snd_cmipci_probe, .remove = __devexit_p(snd_cmipci_remove), #ifdef CONFIG_PM .suspend = snd_cmipci_suspend, .resume = snd_cmipci_resume, #endif }; static int __init alsa_card_cmipci_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_cmipci_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_cmipci_init) module_exit(alsa_card_cmipci_exit)
gpl-2.0