repo_name
string
path
string
copies
string
size
string
content
string
license
string
G33KS44n/mysql-5.6
regex/reginit.c
186
2157
/* Init cclasses array from ctypes */

#include <my_global.h>
#include <m_ctype.h>
#include <m_string.h>
#include "cclass.h"
#include "my_regex.h"

/* One-shot guard: the tables are built once per process. */
static my_bool regex_inited= 0;
extern my_regex_stack_check_t my_regex_enough_mem_in_stack;

/*
  Build the POSIX character-class tables ([:alnum:], [:digit:], ...) for
  the regex library from the ctype information of the given charset.

  @param cs    charset whose my_is*() classification drives the tables
  @param func  stack-check callback stored in my_regex_enough_mem_in_stack

  Each cclasses[i].chars ends up as a malloc'ed, NUL-terminated string
  holding every byte 1..255 that belongs to that class.  Subsequent calls
  are no-ops until my_regex_end() is called.
*/
void my_regex_init(const CHARSET_INFO *cs, my_regex_stack_check_t func)
{
  char buff[CCLASS_LAST][256];
  int count[CCLASS_LAST];
  uint i;

  if (!regex_inited)
  {
    regex_inited= 1;
    my_regex_enough_mem_in_stack= func;
    memset(&count, 0, sizeof(count));

    /* Classify every non-zero byte value into each class it belongs to. */
    for (i= 1; i <= 255; i++)
    {
      if (my_isalnum(cs, i))
        buff[CCLASS_ALNUM][count[CCLASS_ALNUM]++]= (char) i;
      if (my_isalpha(cs, i))
        buff[CCLASS_ALPHA][count[CCLASS_ALPHA]++]= (char) i;
      if (my_iscntrl(cs, i))
        buff[CCLASS_CNTRL][count[CCLASS_CNTRL]++]= (char) i;
      if (my_isdigit(cs, i))
        buff[CCLASS_DIGIT][count[CCLASS_DIGIT]++]= (char) i;
      if (my_isgraph(cs, i))
        buff[CCLASS_GRAPH][count[CCLASS_GRAPH]++]= (char) i;
      if (my_islower(cs, i))
        buff[CCLASS_LOWER][count[CCLASS_LOWER]++]= (char) i;
      if (my_isprint(cs, i))
        buff[CCLASS_PRINT][count[CCLASS_PRINT]++]= (char) i;
      if (my_ispunct(cs, i))
        buff[CCLASS_PUNCT][count[CCLASS_PUNCT]++]= (char) i;
      if (my_isspace(cs, i))
        buff[CCLASS_SPACE][count[CCLASS_SPACE]++]= (char) i;
      if (my_isupper(cs, i))
        buff[CCLASS_UPPER][count[CCLASS_UPPER]++]= (char) i;
      if (my_isxdigit(cs, i))
        buff[CCLASS_XDIGIT][count[CCLASS_XDIGIT]++]= (char) i;
    }

    /* [:blank:] is fixed by POSIX: space and horizontal tab only. */
    buff[CCLASS_BLANK][0]= ' ';
    buff[CCLASS_BLANK][1]= '\t';
    count[CCLASS_BLANK]= 2;

    /* Copy each class into a heap-allocated NUL-terminated string. */
    for (i= 0; i < CCLASS_LAST; i++)
    {
      char *tmp= (char*) malloc(count[i] + 1);
      if (!tmp)
      {
        /*
          This is very unlikely to happen as this function is called
          once at program startup
        */
        fprintf(stderr,
                "Fatal error: Can't allocate memory in regex_init\n");
        exit(1);
      }
      memcpy(tmp, buff[i], count[i] * sizeof(char));
      tmp[count[i]]= 0;
      cclasses[i].chars= tmp;
    }
  }
  return;
}

/*
  Release the character-class tables built by my_regex_init().
  Safe to call when my_regex_init() was never called (guarded by
  regex_inited).  Freed pointers are reset to NULL so a stale
  cclasses entry can never be dereferenced or double-freed.
*/
void my_regex_end(void)
{
  if (regex_inited)
  {
    int i;
    for (i= 0; i < CCLASS_LAST; i++)
    {
      free((char*) cclasses[i].chars);
      cclasses[i].chars= NULL;          /* don't leave dangling pointers */
    }
    my_regex_enough_mem_in_stack= NULL;
    regex_inited= 0;
  }
}
gpl-2.0
emxys1/imx6rex-bombardier-base-linux-3.14.28
drivers/input/touchscreen/tsc2007.c
954
12048
/* * drivers/input/touchscreen/tsc2007.c * * Copyright (c) 2008 MtekVision Co., Ltd. * Kwangwoo Lee <kwlee@mtekvision.com> * * Using code from: * - ads7846.c * Copyright (c) 2005 David Brownell * Copyright (c) 2006 Nokia Corporation * - corgi_ts.c * Copyright (C) 2004-2005 Richard Purdie * - omap_ts.[hc], ads7846.h, ts_osk.c * Copyright (C) 2002 MontaVista Software * Copyright (C) 2004 Texas Instruments * Copyright (C) 2005 Dirk Behme * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/i2c/tsc2007.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/of_gpio.h> #define TSC2007_MEASURE_TEMP0 (0x0 << 4) #define TSC2007_MEASURE_AUX (0x2 << 4) #define TSC2007_MEASURE_TEMP1 (0x4 << 4) #define TSC2007_ACTIVATE_XN (0x8 << 4) #define TSC2007_ACTIVATE_YN (0x9 << 4) #define TSC2007_ACTIVATE_YP_XN (0xa << 4) #define TSC2007_SETUP (0xb << 4) #define TSC2007_MEASURE_X (0xc << 4) #define TSC2007_MEASURE_Y (0xd << 4) #define TSC2007_MEASURE_Z1 (0xe << 4) #define TSC2007_MEASURE_Z2 (0xf << 4) #define TSC2007_POWER_OFF_IRQ_EN (0x0 << 2) #define TSC2007_ADC_ON_IRQ_DIS0 (0x1 << 2) #define TSC2007_ADC_OFF_IRQ_EN (0x2 << 2) #define TSC2007_ADC_ON_IRQ_DIS1 (0x3 << 2) #define TSC2007_12BIT (0x0 << 1) #define TSC2007_8BIT (0x1 << 1) #define MAX_12BIT ((1 << 12) - 1) #define ADC_ON_12BIT (TSC2007_12BIT | TSC2007_ADC_ON_IRQ_DIS0) #define READ_Y (ADC_ON_12BIT | TSC2007_MEASURE_Y) #define READ_Z1 (ADC_ON_12BIT | TSC2007_MEASURE_Z1) #define READ_Z2 (ADC_ON_12BIT | TSC2007_MEASURE_Z2) #define READ_X (ADC_ON_12BIT | TSC2007_MEASURE_X) #define PWRDOWN (TSC2007_12BIT | TSC2007_POWER_OFF_IRQ_EN) struct ts_event { u16 x; u16 y; u16 z1, z2; }; struct tsc2007 { struct input_dev *input; char phys[32]; struct 
i2c_client *client; u16 model; u16 x_plate_ohms; u16 max_rt; unsigned long poll_period; int fuzzx; int fuzzy; int fuzzz; unsigned gpio; int irq; wait_queue_head_t wait; bool stopped; int (*get_pendown_state)(struct device *); void (*clear_penirq)(void); }; static inline int tsc2007_xfer(struct tsc2007 *tsc, u8 cmd) { s32 data; u16 val; data = i2c_smbus_read_word_data(tsc->client, cmd); if (data < 0) { dev_err(&tsc->client->dev, "i2c io error: %d\n", data); return data; } /* The protocol and raw data format from i2c interface: * S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P * Where DataLow has [D11-D4], DataHigh has [D3-D0 << 4 | Dummy 4bit]. */ val = swab16(data) >> 4; dev_dbg(&tsc->client->dev, "data: 0x%x, val: 0x%x\n", data, val); return val; } static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc) { /* y- still on; turn on only y+ (and ADC) */ tc->y = tsc2007_xfer(tsc, READ_Y); /* turn y- off, x+ on, then leave in lowpower */ tc->x = tsc2007_xfer(tsc, READ_X); /* turn y+ off, x- on; we'll use formula #1 */ tc->z1 = tsc2007_xfer(tsc, READ_Z1); tc->z2 = tsc2007_xfer(tsc, READ_Z2); /* Prepare for next touch reading - power down ADC, enable PENIRQ */ tsc2007_xfer(tsc, PWRDOWN); } static u32 tsc2007_calculate_pressure(struct tsc2007 *tsc, struct ts_event *tc) { u32 rt = 0; /* range filtering */ if (tc->x == MAX_12BIT) tc->x = 0; if (likely(tc->x && tc->z1)) { /* compute touch pressure resistance using equation #1 */ rt = tc->z2 - tc->z1; rt *= tc->x; rt *= tsc->x_plate_ohms; rt /= tc->z1; rt = (rt + 2047) >> 12; } return rt; } static bool tsc2007_is_pen_down(struct tsc2007 *ts) { /* * NOTE: We can't rely on the pressure to determine the pen down * state, even though this controller has a pressure sensor. * The pressure value can fluctuate for quite a while after * lifting the pen and in some cases may not even settle at the * expected value. 
* * The only safe way to check for the pen up condition is in the * work function by reading the pen signal state (it's a GPIO * and IRQ). Unfortunately such callback is not always available, * in that case we assume that the pen is down and expect caller * to fall back on the pressure reading. */ if (!ts->get_pendown_state) return true; return ts->get_pendown_state(&ts->client->dev); } static irqreturn_t tsc2007_soft_irq(int irq, void *handle) { struct tsc2007 *ts = handle; struct input_dev *input = ts->input; struct ts_event tc; u32 rt; while (!ts->stopped && tsc2007_is_pen_down(ts)) { /* pen is down, continue with the measurement */ tsc2007_read_values(ts, &tc); rt = tsc2007_calculate_pressure(ts, &tc); if (!rt && !ts->get_pendown_state) { /* * If pressure reported is 0 and we don't have * callback to check pendown state, we have to * assume that pen was lifted up. */ break; } if (rt <= ts->max_rt) { dev_dbg(&ts->client->dev, "DOWN point(%4d,%4d), pressure (%4u)\n", tc.x, tc.y, rt); input_report_key(input, BTN_TOUCH, 1); input_report_abs(input, ABS_X, tc.x); input_report_abs(input, ABS_Y, tc.y); input_report_abs(input, ABS_PRESSURE, rt); input_sync(input); } else { /* * Sample found inconsistent by debouncing or pressure is * beyond the maximum. Don't report it to user space, * repeat at least once more the measurement. 
*/ dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt); } wait_event_timeout(ts->wait, ts->stopped, msecs_to_jiffies(ts->poll_period)); } dev_dbg(&ts->client->dev, "UP\n"); input_report_key(input, BTN_TOUCH, 0); input_report_abs(input, ABS_PRESSURE, 0); input_sync(input); if (ts->clear_penirq) ts->clear_penirq(); return IRQ_HANDLED; } static irqreturn_t tsc2007_hard_irq(int irq, void *handle) { struct tsc2007 *ts = handle; if (tsc2007_is_pen_down(ts)) return IRQ_WAKE_THREAD; if (ts->clear_penirq) ts->clear_penirq(); return IRQ_HANDLED; } static void tsc2007_stop(struct tsc2007 *ts) { ts->stopped = true; mb(); wake_up(&ts->wait); disable_irq(ts->irq); } static int tsc2007_open(struct input_dev *input_dev) { struct tsc2007 *ts = input_get_drvdata(input_dev); int err; ts->stopped = false; mb(); enable_irq(ts->irq); /* Prepare for touch readings - power down ADC and enable PENIRQ */ err = tsc2007_xfer(ts, PWRDOWN); if (err < 0) { tsc2007_stop(ts); return err; } return 0; } static void tsc2007_close(struct input_dev *input_dev) { struct tsc2007 *ts = input_get_drvdata(input_dev); tsc2007_stop(ts); } #ifdef CONFIG_OF static int tsc2007_get_pendown_state_gpio(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct tsc2007 *ts = i2c_get_clientdata(client); return !gpio_get_value(ts->gpio); } static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts) { struct device_node *np = client->dev.of_node; u32 val32; u64 val64; if (!np) { dev_err(&client->dev, "missing device tree data\n"); return -EINVAL; } if (!of_property_read_u32(np, "ti,max-rt", &val32)) ts->max_rt = val32; else ts->max_rt = MAX_12BIT; if (!of_property_read_u32(np, "ti,fuzzx", &val32)) ts->fuzzx = val32; if (!of_property_read_u32(np, "ti,fuzzy", &val32)) ts->fuzzy = val32; if (!of_property_read_u32(np, "ti,fuzzz", &val32)) ts->fuzzz = val32; if (!of_property_read_u64(np, "ti,poll-period", &val64)) ts->poll_period = val64; else ts->poll_period = 1; if 
(!of_property_read_u32(np, "ti,x-plate-ohms", &val32)) { ts->x_plate_ohms = val32; } else { dev_err(&client->dev, "missing ti,x-plate-ohms devicetree property."); return -EINVAL; } ts->gpio = of_get_gpio(np, 0); if (gpio_is_valid(ts->gpio)) ts->get_pendown_state = tsc2007_get_pendown_state_gpio; else dev_warn(&client->dev, "GPIO not specified in DT (of_get_gpio returned %d)\n", ts->gpio); return 0; } #else static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts) { dev_err(&client->dev, "platform data is required!\n"); return -EINVAL; } #endif static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts, const struct tsc2007_platform_data *pdata, const struct i2c_device_id *id) { ts->model = pdata->model; ts->x_plate_ohms = pdata->x_plate_ohms; ts->max_rt = pdata->max_rt ? : MAX_12BIT; ts->poll_period = pdata->poll_period ? : 1; ts->get_pendown_state = pdata->get_pendown_state; ts->clear_penirq = pdata->clear_penirq; ts->fuzzx = pdata->fuzzx; ts->fuzzy = pdata->fuzzy; ts->fuzzz = pdata->fuzzz; if (pdata->x_plate_ohms == 0) { dev_err(&client->dev, "x_plate_ohms is not set up in platform data"); return -EINVAL; } return 0; } static void tsc2007_call_exit_platform_hw(void *data) { struct device *dev = data; const struct tsc2007_platform_data *pdata = dev_get_platdata(dev); pdata->exit_platform_hw(); } static int tsc2007_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct tsc2007_platform_data *pdata = dev_get_platdata(&client->dev); struct tsc2007 *ts; struct input_dev *input_dev; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) return -EIO; ts = devm_kzalloc(&client->dev, sizeof(struct tsc2007), GFP_KERNEL); if (!ts) return -ENOMEM; if (pdata) err = tsc2007_probe_pdev(client, ts, pdata, id); else err = tsc2007_probe_dt(client, ts); if (err) return err; input_dev = devm_input_allocate_device(&client->dev); if (!input_dev) return -ENOMEM; i2c_set_clientdata(client, ts); 
ts->client = client; ts->irq = client->irq; ts->input = input_dev; init_waitqueue_head(&ts->wait); snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&client->dev)); input_dev->name = "TSC2007 Touchscreen"; input_dev->phys = ts->phys; input_dev->id.bustype = BUS_I2C; input_dev->open = tsc2007_open; input_dev->close = tsc2007_close; input_set_drvdata(input_dev, ts); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, ts->fuzzx, 0); input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, ts->fuzzy, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, ts->fuzzz, 0); if (pdata) { if (pdata->exit_platform_hw) { err = devm_add_action(&client->dev, tsc2007_call_exit_platform_hw, &client->dev); if (err) { dev_err(&client->dev, "Failed to register exit_platform_hw action, %d\n", err); return err; } } if (pdata->init_platform_hw) pdata->init_platform_hw(); } err = devm_request_threaded_irq(&client->dev, ts->irq, tsc2007_hard_irq, tsc2007_soft_irq, IRQF_ONESHOT, client->dev.driver->name, ts); if (err) { dev_err(&client->dev, "Failed to request irq %d: %d\n", ts->irq, err); return err; } tsc2007_stop(ts); err = input_register_device(input_dev); if (err) { dev_err(&client->dev, "Failed to register input device: %d\n", err); return err; } return 0; } static const struct i2c_device_id tsc2007_idtable[] = { { "tsc2007", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tsc2007_idtable); #ifdef CONFIG_OF static const struct of_device_id tsc2007_of_match[] = { { .compatible = "ti,tsc2007" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, tsc2007_of_match); #endif static struct i2c_driver tsc2007_driver = { .driver = { .owner = THIS_MODULE, .name = "tsc2007", .of_match_table = of_match_ptr(tsc2007_of_match), }, .id_table = tsc2007_idtable, .probe = tsc2007_probe, }; module_i2c_driver(tsc2007_driver); MODULE_AUTHOR("Kwangwoo Lee <kwlee@mtekvision.com>"); 
MODULE_DESCRIPTION("TSC2007 TouchScreen Driver"); MODULE_LICENSE("GPL");
gpl-2.0
aeroevan/htc_kernel_msm7x30
kernel/events/core.c
1210
172943
/* * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * For licensing details see kernel-base/COPYING */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/idr.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/hash.h> #include <linux/sysfs.h> #include <linux/dcache.h> #include <linux/percpu.h> #include <linux/ptrace.h> #include <linux/reboot.h> #include <linux/vmstat.h> #include <linux/device.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/rculist.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/anon_inodes.h> #include <linux/kernel_stat.h> #include <linux/perf_event.h> #include <linux/ftrace_event.h> #include <linux/hw_breakpoint.h> #include <asm/irq_regs.h> struct remote_function_call { struct task_struct *p; int (*func)(void *info); void *info; int ret; }; static void remote_function(void *data) { struct remote_function_call *tfc = data; struct task_struct *p = tfc->p; if (p) { tfc->ret = -EAGAIN; if (task_cpu(p) != smp_processor_id() || !task_curr(p)) return; } tfc->ret = tfc->func(tfc->info); } /** * task_function_call - call a function on the cpu on which a task runs * @p: the task to evaluate * @func: the function to be called * @info: the function call argument * * Calls the function @func when the task is currently running. 
This might * be on the current CPU, which just calls the function directly * * returns: @func return value, or * -ESRCH - when the process isn't running * -EAGAIN - when the process moved away */ static int task_function_call(struct task_struct *p, int (*func) (void *info), void *info) { struct remote_function_call data = { .p = p, .func = func, .info = info, .ret = -ESRCH, /* No such (running) process */ }; if (task_curr(p)) smp_call_function_single(task_cpu(p), remote_function, &data, 1); return data.ret; } /** * cpu_function_call - call a function on the cpu * @func: the function to be called * @info: the function call argument * * Calls the function @func on the remote cpu. * * returns: @func return value or -ENXIO when the cpu is offline */ static int cpu_function_call(int cpu, int (*func) (void *info), void *info) { struct remote_function_call data = { .p = NULL, .func = func, .info = info, .ret = -ENXIO, /* No such CPU */ }; smp_call_function_single(cpu, remote_function, &data, 1); return data.ret; } #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ PERF_FLAG_FD_OUTPUT |\ PERF_FLAG_PID_CGROUP) enum event_type_t { EVENT_FLEXIBLE = 0x1, EVENT_PINNED = 0x2, EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, }; /* * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ struct jump_label_key perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; static atomic_t nr_task_events __read_mostly; static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); static struct srcu_struct pmus_srcu; /* * perf event paranoia level: * -1 - not paranoid at all * 0 - disallow raw tracepoint access for unpriv * 1 - disallow cpu events for unpriv * 2 - disallow kernel profiling for unpriv */ int sysctl_perf_event_paranoid __read_mostly = 1; /* Minimum for 512 kiB + 1 user control page */ int sysctl_perf_event_mlock 
__read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ /* * max perf event sample rate */ #define DEFAULT_MAX_SAMPLE_RATE 100000 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); int perf_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write) return ret; max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); return 0; } static atomic64_t perf_event_id; static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, enum event_type_t event_type); static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, enum event_type_t event_type, struct task_struct *task); static void update_context_time(struct perf_event_context *ctx); static u64 perf_event_time(struct perf_event *event); void __weak perf_event_print_debug(void) { } extern __weak const char *perf_pmu_name(void) { return "pmu"; } static inline u64 perf_clock(void) { return local_clock(); } static inline struct perf_cpu_context * __get_cpu_context(struct perf_event_context *ctx) { return this_cpu_ptr(ctx->pmu->pmu_cpu_context); } #ifdef CONFIG_CGROUP_PERF /* * Must ensure cgroup is pinned (css_get) before calling * this function. In other words, we cannot call this function * if there is no cgroup event for the current CPU context. 
*/ static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task) { return container_of(task_subsys_state(task, perf_subsys_id), struct perf_cgroup, css); } static inline bool perf_cgroup_match(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); return !event->cgrp || event->cgrp == cpuctx->cgrp; } static inline void perf_get_cgroup(struct perf_event *event) { css_get(&event->cgrp->css); } static inline void perf_put_cgroup(struct perf_event *event) { css_put(&event->cgrp->css); } static inline void perf_detach_cgroup(struct perf_event *event) { perf_put_cgroup(event); event->cgrp = NULL; } static inline int is_cgroup_event(struct perf_event *event) { return event->cgrp != NULL; } static inline u64 perf_cgroup_event_time(struct perf_event *event) { struct perf_cgroup_info *t; t = per_cpu_ptr(event->cgrp->info, event->cpu); return t->time; } static inline void __update_cgrp_time(struct perf_cgroup *cgrp) { struct perf_cgroup_info *info; u64 now; now = perf_clock(); info = this_cpu_ptr(cgrp->info); info->time += now - info->timestamp; info->timestamp = now; } static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) { struct perf_cgroup *cgrp_out = cpuctx->cgrp; if (cgrp_out) __update_cgrp_time(cgrp_out); } static inline void update_cgrp_time_from_event(struct perf_event *event) { struct perf_cgroup *cgrp; /* * ensure we access cgroup data only when needed and * when we know the cgroup is pinned (css_get) */ if (!is_cgroup_event(event)) return; cgrp = perf_cgroup_from_task(current); /* * Do not update time when cgroup is not active */ if (cgrp == event->cgrp) __update_cgrp_time(event->cgrp); } static inline void perf_cgroup_set_timestamp(struct task_struct *task, struct perf_event_context *ctx) { struct perf_cgroup *cgrp; struct perf_cgroup_info *info; /* * ctx->lock held by caller * ensure we do not access cgroup data * unless we have the cgroup 
pinned (css_get) */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		perf_pmu_disable(cpuctx->ctx.pmu);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
		}

		perf_pmu_enable(cpuctx->ctx.pmu);
	}

	rcu_read_unlock();
	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

/*
 * Resolve the cgroup behind the directory fd @fd and attach it to
 * @event.  Fails with -EINVAL if @group_leader monitors a different
 * cgroup, since all events in a group must monitor the same one.
 */
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

/* Record cgroup-relative time-of-enable so readers see cgroup time. */
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

/* Consume a deferred enable: stamp the event (and siblings) enabled now. */
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}

#else /* !CONFIG_CGROUP_PERF */

/* No-op / trivial stubs when cgroup-perf support is compiled out. */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
}

/*
 * NOTE(review): this stub takes (pid_t pid, ...) while the real version
 * above takes (int fd, ...) — confirm callers pass the same argument
 * either way (both are plain ints, so this compiles, but it is fragile).
 */
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

/*
 * NOTE(review): signature differs from the CONFIG_CGROUP_PERF version,
 * which is (task, int mode) — verify no !CONFIG_CGROUP_PERF caller
 * relies on this (task, next) shape.
 */
void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}

#endif

/*
 * PMU disable/enable nest via a per-cpu counter: only the outermost
 * disable and the matching outermost enable touch the hardware.
 */
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

/* Take a context reference; warns if the refcount had already hit zero. */
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

/*
 * Drop a context reference; on the last put, release the parent context
 * reference and the task reference, then free via RCU.
 */
static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

/* Detach @ctx from its parent clone, dropping the parent reference. */
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		/* context may already be dying; only return it if still live */
		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	/* on success: returns with ctx->lock held and *flags saved */
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

/* Drop a pin taken by perf_pin_task_context(). */
static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/* Event time: cgroup time for cgroup events, otherwise context time. */
static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

/* Select the pinned or flexible group list for this event. */
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	/* first event in this context: make sure rotation is armed */
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

/* Precompute the static part of the sample header size. */
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

/* Precompute the size of the id header appended to samples. */
static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

/* Attach @event to its leader's sibling list and refresh header sizes. */
static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	/* one non-software sibling makes the whole group non-software */
	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

/*
 * Detach @event from its group: a sibling simply leaves the group,
 * while removing a leader promotes all its siblings to singletons.
 */
static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

/* An event is eligible on this CPU if its cpu filter and cgroup match. */
static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

/* Take @event off the PMU and account the stop time. */
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

/* Schedule out a whole group: the leader first, then every sibling. */
static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	/* releasing an active exclusive group frees the PMU for others */
	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, its safe to remove the event, us
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		/* disabling a group leader takes the whole group off */
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamp
	 * are identical (or very close). Given that tstamp is,
	 * already adjusted for cgroup, we could say that:
	 *     tstamp - ctx->timestamp
	 * is equivalent to
	 *     tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

/* Put @event on the PMU; returns -EAGAIN if the PMU refuses it. */
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Schedule in a whole group atomically using the PMU transaction
 * interface; on any sibling failure the partial group is unwound.
 */
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

/* List/group bookkeeping plus initial timestamps for a new ctx member. */
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *tsk);

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * In case we're installing a new context to an already running task,
	 * could also happen before perf_event_task_sched_in() on architectures
	 * which do context switches with IRQs enabled.
	 */
	if (ctx->task && !cpuctx->task_ctx)
		perf_event_context_sched_in(ctx, ctx->task);

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	if (!event_filter_match(event))
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, its safe to add the event, us holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	/* already inactive or better: nothing to enable */
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event, ctx);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		/* context not scheduled in: mark enabled locally, no cross-call */
		__perf_event_mark_enabled(event, ctx);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}

/* Bump the event limit and re-enable; only valid for sampling events. */
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}

/* Schedule out the pinned and/or flexible groups of @ctx per @event_type. */
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);
	update_cgrp_time_from_cpuctx(cpuctx);

	if (!ctx->nr_active)
		goto out;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
out:
	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

/*
 * Swap counts and times between an inherit_stat event and its counterpart
 * so that per-task statistics follow the task across a context swap.
 */
static void __perf_event_sync_stat(struct perf_event *event,
				   struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

/* Step to the following element of an embedded list. */
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Walk both contexts' event lists in lockstep and sync each inherit_stat
 * pair via __perf_event_sync_stat().
 */
static void perf_event_sync_stat(struct perf_event_context *ctx,
				 struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				 struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
				      struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

/*
 * Detach @task's context number @ctxn at context-switch time.  When the
 * incoming task's context is a clone of ours (same parent, same generation),
 * swap the context pointers between the two tasks instead of doing a full
 * deschedule/reschedule.
 */
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task);
}

/*
 * Deschedule the task context currently installed on this CPU, if it is
 * the expected one.
 */
static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

/*
 * Schedule in all eligible pinned groups; pinned groups that cannot get
 * on the PMU are moved to ERROR state (they must always run when on).
 */
static void ctx_pinned_sched_in(struct perf_event_context *ctx,
				struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

/*
 * Schedule in flexible groups until the PMU refuses one; after the first
 * failure, keep iterating only to maintain timestamps (can_add_hw == 0).
 */
static void ctx_flexible_sched_in(struct perf_event_context *ctx,
				  struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

/*
 * Schedule @ctx in on this CPU: pinned groups first (per @event_type),
 * then flexible groups.  Takes ctx->lock; IRQs must be off.
 */
static void ctx_sched_in(struct perf_event_context *ctx,
			 struct perf_cpu_context *cpuctx,
			 enum event_type_t event_type,
			 struct task_struct *task)
{
	u64 now;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

out:
	raw_spin_unlock(&ctx->lock);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}

static void task_ctx_sched_in(struct perf_event_context *ctx,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	ctx_sched_in(ctx, cpuctx, event_type, NULL);
	cpuctx->task_ctx = ctx;
}

/*
 * Install @ctx as this CPU's task context, interleaving cpu-pinned,
 * task-pinned, cpu-flexible and task-flexible groups in priority order.
 */
static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);

	cpuctx->task_ctx = ctx;

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
	perf_pmu_enable(ctx->pmu);
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(task);
}

/*
 * Compute the sample period that would yield attr.sample_freq samples/sec
 * given that @count events occurred in @nsec nanoseconds, using only
 * 64-bit arithmetic (bits are shifted out of the larger operands until the
 * products fit in a u64).
 */
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}

/*
 * Low-pass filter the freshly computed period into hwc->sample_period and,
 * if the pending period has drifted far (> 8 periods), restart the event
 * so the new period takes effect immediately.
 */
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}

/*
 * Per-tick housekeeping: unthrottle events and re-derive the sample period
 * of freq-based events from the events observed over the last @period ns.
 */
static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, period, delta);
	}
	raw_spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void
rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	/* remove: nothing left to rotate; take us off the rotation list. */
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	perf_pmu_disable(cpuctx->ctx.pmu);
	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
	if (ctx)
		perf_ctx_adjust_freq(ctx, interval);

	if (!rotate)
		goto done;

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
	if (ctx)
		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);

	perf_pmu_enable(cpuctx->ctx.pmu);
}

/*
 * Timer-tick entry point: rotate every cpu context whose interval is due.
 * Runs with IRQs disabled.
 */
void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

/*
 * Consume the event's enable_on_exec flag and mark it enabled.
 * Returns 1 if the event state actually changed, 0 otherwise.
 */
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current);
	task_ctx_sched_out(ctx, EVENT_ALL);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}

/* Total count: our own count plus all inherited children's counts. */
static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

/*
 * Read an up-to-date value for @event.  Active events are read via IPI on
 * the CPU they run on; inactive events only get their times refreshed.
 */
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}

/*
 * Callchain support
 */

struct callchain_cpus_entries {
	struct rcu_head rcu_head;
	/* old-style (pre-C99) flexible array member, one slot per CPU */
	struct perf_callchain_entry *cpu_entries[0];
};

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;

/* Weak stubs; architectures provide the real unwinders. */
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void
release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	/* Defer the kfree()s until all NMI/IRQ readers are done. */
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	/* kfree(NULL) is a no-op, so freeing unallocated slots is fine. */
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

/*
 * Refcounted lazy allocation of the per-cpu callchain buffers; only the
 * first user allocates.
 */
static int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;

		goto exit;
	}

	err = alloc_callchain_buffers();
	if (err)
		release_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

/*
 * Claim the recursion slot for the current context (task/softirq/irq/nmi).
 * Returns the slot index, or -1 if this context is already unwinding.
 */
static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

static struct perf_callchain_entry
*get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}

/*
 * Capture a callchain for @regs: kernel frames first (if in kernel mode),
 * then user frames.  Returns NULL on recursion or missing buffers.
 */
static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		/* Fall through to the user stack, if there is one. */
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

/*
 * Allocate and initialize a context; takes a reference on @task if given.
 */
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}

/*
 * Resolve @vpid (0 == current) to a task we are allowed to profile.
 * Returns the task with a reference held, or an ERR_PTR.
 */
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now.
	 */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;

errout:
	put_task_struct(task);
	return ERR_PTR(err);

}

/*
 * Returns a matching context with refcount and pincount.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow to attach a event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		get_ctx(ctx);

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task().
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_task_struct(task);
			kfree(ctx);

			/* Lost the race installing the context: go look it up again. */
			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}

static void perf_event_free_filter(struct perf_event *event);

/* RCU callback: final teardown of a perf_event after the grace period. */
static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void perf_buffer_put(struct perf_buffer *buffer);

/*
 * Drop everything the event holds (static keys, global counters, buffer,
 * cgroup, ctx reference) and schedule the RCU-deferred kfree.
 */
static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			jump_label_dec(&perf_sched_events);
		}
	}

	if (event->buffer) {
		perf_buffer_put(event->buffer);
		event->buffer = NULL;
	}

	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}

int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Remove from the PMU, can't get re-enabled since we got
	 * here because the last ref went.
	 */
	perf_event_disable(event);

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

	return perf_event_release_kernel(event);
}

/*
 * Aggregate the event's count plus all its inherited children's counts;
 * also returns the summed enabled/running times via out-parameters.
 */
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);

/*
 * Copy a PERF_FORMAT_GROUP read (leader + siblings) out to user space.
 * Returns bytes written or -EFAULT.
 */
static int perf_event_read_group(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}

/*
 * Copy a single-event read out to user space per @read_format.
 * Returns bytes written or -EFAULT.
 */
static int perf_event_read_one(struct perf_event *event,
			       u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}

/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on a event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_buffer *buffer;
	/*
	 * NOTE(review): POLL_HUP is the siginfo band code (from
	 * asm-generic/siginfo.h), not the poll(2) POLLHUP bitmask —
	 * confirm which default was intended here.
	 */
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer)
		events = atomic_xchg(&buffer->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}

/* Zero the event's count; also refreshes it first and updates the mmap page. */
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}

/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
task existence requirements of perf_event_enable/disable. */ static void perf_event_for_each_child(struct perf_event *event, void (*func)(struct perf_event *)) { struct perf_event *child; WARN_ON_ONCE(event->ctx->parent_ctx); mutex_lock(&event->child_mutex); func(event); list_for_each_entry(child, &event->child_list, child_list) func(child); mutex_unlock(&event->child_mutex); } static void perf_event_for_each(struct perf_event *event, void (*func)(struct perf_event *)) { struct perf_event_context *ctx = event->ctx; struct perf_event *sibling; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); event = event->group_leader; perf_event_for_each_child(event, func); func(event); list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each_child(event, func); mutex_unlock(&ctx->mutex); } static int perf_event_period(struct perf_event *event, u64 __user *arg) { struct perf_event_context *ctx = event->ctx; int ret = 0; u64 value; if (!is_sampling_event(event)) return -EINVAL; if (copy_from_user(&value, arg, sizeof(value))) return -EFAULT; if (!value) return -EINVAL; raw_spin_lock_irq(&ctx->lock); if (event->attr.freq) { if (value > sysctl_perf_event_sample_rate) { ret = -EINVAL; goto unlock; } event->attr.sample_freq = value; } else { event->attr.sample_period = value; event->hw.sample_period = value; } unlock: raw_spin_unlock_irq(&ctx->lock); return ret; } static const struct file_operations perf_fops; static struct perf_event *perf_fget_light(int fd, int *fput_needed) { struct file *file; file = fget_light(fd, fput_needed); if (!file) return ERR_PTR(-EBADF); if (file->f_op != &perf_fops) { fput_light(file, *fput_needed); *fput_needed = 0; return ERR_PTR(-EBADF); } return file->private_data; } static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event); static int perf_event_set_filter(struct perf_event *event, void __user *arg); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { 
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		/* arg == -1 detaches the event from any output buffer. */
		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	/* enable/disable/reset apply to the whole group when requested. */
	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}

/* Enable every event owned by the current task, children included. */
int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

/* Disable every event owned by the current task, children included. */
int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

/*
 * Hardware counter index published to userspace via the mmap page;
 * 0 means "no usable counter index".
 */
static int perf_event_index(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad.
 We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	userpg = buffer->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	/* Odd ->lock value tells userspace an update is in progress. */
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	/* Second increment makes ->lock even again: update complete. */
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}

static unsigned long perf_data_size(struct perf_buffer *buffer);

/*
 * Common buffer setup: clamp/default the wakeup watermark, record
 * writability and take the initial reference.
 */
static void
perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
{
	long max_size = perf_data_size(buffer);

	if (watermark)
		buffer->watermark = min(max_size, watermark);

	if (!buffer->watermark)
		buffer->watermark = max_size / 2;

	if (flags & PERF_BUFFER_WRITABLE)
		buffer->writable = 1;

	atomic_set(&buffer->refcount, 1);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */
static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	/* pgoff 0 is the control page; data pages start at offset 1. */
	if (pgoff > buffer->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(buffer->user_page);

	return virt_to_page(buffer->data_pages[pgoff - 1]);
}

/* Allocate one zeroed page, NUMA-local to @cpu when cpu != -1. */
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ?
cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

/*
 * Allocate a ring buffer backed by nr_pages individual order-0 pages
 * plus one control (user) page. Page-backed variant.
 */
static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	int i;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	buffer->user_page = perf_mmap_alloc_page(cpu);
	if (!buffer->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!buffer->data_pages[i])
			goto fail_data_pages;
	}

	buffer->nr_pages = nr_pages;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_data_pages:
	/* Unwind only the data pages allocated so far. */
	for (i--; i >= 0; i--)
		free_page((unsigned long)buffer->data_pages[i]);

	free_page((unsigned long)buffer->user_page);

fail_user_page:
	kfree(buffer);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	/* Clear ->mapping (set at fault time) before freeing the page. */
	page->mapping = NULL;
	__free_page(page);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	int i;

	perf_mmap_free_page((unsigned long)buffer->user_page);
	for (i = 0; i < buffer->nr_pages; i++)
		perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
	kfree(buffer);
}

/* Page-backed buffers always use order-0 pages. */
static inline int page_order(struct perf_buffer *buffer)
{
	return 0;
}

#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct perf_buffer *buffer)
{
	return buffer->page_order;
}

/* vmalloc variant: one contiguous area, indexed by page offset. */
static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(buffer)))
		return NULL;

	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

/* Deferred free: vfree() must not run from the contexts that drop refs. */
static void perf_buffer_free_work(struct work_struct *work)
{
	struct perf_buffer *buffer;
	void *base;
	int i, nr;

	buffer = container_of(work, struct perf_buffer, work);
	nr = 1 << page_order(buffer);

	base = buffer->user_page;
	/* nr data pages plus the control page. */
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(buffer);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	schedule_work(&buffer->work);
}

/*
 * Allocate the ring buffer as a single vmalloc area of nr_pages + 1
 * pages; nr_pages is rounded through ilog2 into page_order and
 * nr_pages is forced to 1 (one "big" data page).
 */
static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	INIT_WORK(&buffer->work, perf_buffer_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	buffer->user_page = all_buf;
	buffer->data_pages[0] = all_buf + PAGE_SIZE;
	buffer->page_order = ilog2(nr_pages);
	buffer->nr_pages = 1;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_all_buf:
	kfree(buffer);

fail:
	return NULL;
}

#endif

/* Total data size in bytes (excludes the control page). */
static unsigned long perf_data_size(struct perf_buffer *buffer)
{
	return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
}

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct perf_buffer *buffer;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		/* Only the control page (pgoff 0) may be made writable. */
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	if
 (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *buffer;

	buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
	perf_buffer_free(buffer);
}

/*
 * Take a reference on the event's buffer under RCU; returns NULL if
 * there is no buffer or its refcount already dropped to zero.
 */
static struct perf_buffer *perf_buffer_get(struct perf_event *event)
{
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer) {
		if (!atomic_inc_not_zero(&buffer->refcount))
			buffer = NULL;
	}
	rcu_read_unlock();

	return buffer;
}

/* Drop a reference; the last one frees after an RCU grace period. */
static void perf_buffer_put(struct perf_buffer *buffer)
{
	if (!atomic_dec_and_test(&buffer->refcount))
		return;

	call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

/*
 * Last unmap of the buffer: undo the locked-memory accounting done in
 * perf_mmap(), detach the buffer from the event and drop its reference.
 */
static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->buffer);
		struct user_struct *user = event->mmap_user;
		struct perf_buffer *buffer = event->buffer;

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->mmap_locked;
		rcu_assign_pointer(event->buffer, NULL);
		mutex_unlock(&event->mmap_mutex);

		perf_buffer_put(buffer);
		free_uid(user);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long
 locked, lock_limit;
	struct perf_buffer *buffer;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same buffer.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	/* First page is the control page; the rest are data pages. */
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have buffer pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->buffer) {
		/* Re-mmap of the same event must use the same size. */
		if (event->buffer->nr_pages == nr_pages)
			atomic_inc(&event->buffer->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	/* Pages beyond the per-user allowance count against RLIMIT_MEMLOCK. */
	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->buffer);

	if (vma->vm_flags & VM_WRITE)
		flags |= PERF_BUFFER_WRITABLE;

	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
				   event->cpu, flags);
	if (!buffer) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->buffer, buffer);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->locked_vm += event->mmap_locked;

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}

/* VFS ->fasync() hook: manage SIGIO delivery under the inode mutex. */
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */
void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

/* irq_work callback: run deferred disable/wakeup requested from NMI. */
static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

/*
 * Output
 */

/*
 * For a writable (overwrite-protected) buffer, check that writing
 * [offset, head) will not overrun the consumer's tail.
 */
static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!buffer->writable)
		return true;

	mask = perf_data_size(buffer) - 1;

	offset = (offset - tail) & mask;
	head = (head - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

/*
 * Wake up consumers; from NMI this must be deferred via irq_work
 * since we cannot take wait-queue locks there.
 */
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->buffer->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		irq_work_queue(&handle->event->pending);
	} else
		perf_event_wakeup(handle->event);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;

	preempt_disable();
	local_inc(&buffer->nest);
	handle->wakeup = local_read(&buffer->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;
	unsigned long head;

again:
	head = local_read(&buffer->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&buffer->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by atomic_dec_and_test() order the buffer->head read and this
	 * write.
 */
	buffer->user_page->data_head = head;

	/*
	 * Now check if we missed an update, rely on the (compiler)
	 * barrier in atomic_dec_and_test() to re-read buffer->head.
	 */
	if (unlikely(head != local_read(&buffer->head))) {
		local_inc(&buffer->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&buffer->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

/*
 * Copy @len bytes into the ring buffer, advancing page by page when
 * the current page's remaining space runs out.
 */
__always_inline void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct perf_buffer *buffer = handle->buffer;

			/* nr_pages is a power of two, so & works as modulo. */
			handle->page++;
			handle->page &= buffer->nr_pages - 1;
			handle->addr = buffer->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(buffer);
		}
	} while (len);
}

/*
 * Fill in the sample_id fields selected by attr.sample_type and grow
 * the record header accordingly.
 */
static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

/* Only applies when the event requested sample_id_all. */
static void perf_event_header__init_id(struct perf_event_header *header,
				       struct perf_sample_data *data,
				       struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

/* Emit the trailing sample_id block when the event asked for it. */
static void perf_event__output_id_sample(struct perf_event *event,
					 struct perf_output_handle *handle,
					 struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

/*
 * Reserve @size bytes in the event's ring buffer and set up @handle
 * for writing. Returns 0 on success, -ENOSPC when there is no buffer
 * or no room; a PERF_RECORD_LOST record is prepended when previous
 * writes were dropped.
 */
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct perf_buffer *buffer;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto out;

	handle->buffer = buffer;
	handle->event = event;
	handle->nmi = nmi;
	handle->sample = sample;

	if (!buffer->nr_pages)
		goto out;

	have_lost = local_read(&buffer->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer. So that all reads will be completed before the
		 * write is issued.
 */
		tail = ACCESS_ONCE(buffer->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&buffer->head);
		head += size;
		if (unlikely(!perf_output_space(buffer, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&buffer->head, offset, head) != offset);

	/* Arm the next wakeup once we cross the watermark. */
	if (head - local_read(&buffer->wakeup) > buffer->watermark)
		local_add(buffer->watermark, &buffer->wakeup);

	/* Translate the reserved offset into page index + in-page offset. */
	handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
	handle->page &= buffer->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
	handle->addr = buffer->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&buffer->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&buffer->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

/*
 * Finish a write started by perf_output_begin(): account wakeup_events
 * for sample records, publish the head and drop the RCU read lock.
 */
void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct perf_buffer *buffer = handle->buffer;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = local_inc_return(&buffer->events);
		if (events >= wakeup_events) {
			local_sub(wakeup_events, &buffer->events);
			local_inc(&buffer->wakeup);
		}
	}

	perf_output_put_handle(handle);
	rcu_read_unlock();
}

/* Emit the read_format values for a single (non-group) event. */
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format &
 PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	/* Refresh the leader's count unless it is the sampling event. */
	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now, ctx_time;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
 *
	 * we cannot simply called update_context_time()
	 * because of locking issue as we are called in
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES) {
		now = perf_clock();
		ctx_time = event->shadow_ctx_time + now;
		enabled = ctx_time - event->tstamp_enabled;
		running = ctx_time - event->tstamp_running;
	}

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

/*
 * Write one PERF_RECORD_SAMPLE to the ring buffer; field order must
 * match the sample_type bits laid out by perf_prepare_sample().
 */
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			/* One u64 for ->nr plus one per callchain entry. */
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			perf_output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			perf_output_copy(handle, data->raw->data,
					 data->raw->size);
		} else {
			/* Empty placeholder keeps the record u64-aligned. */
			struct {
				u32 size;
				u32 data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event
 *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		/* One u64 for ->nr plus one per resolved frame. */
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		/* Raw payloads must keep the record u64-aligned. */
		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}

/* Prepare, reserve and emit one sample record for @event. */
static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

/* Emit a PERF_RECORD_READ record carrying @task's counter values. */
static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking --
 fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

/* Emit one FORK/EXIT record into @event's buffer. */
static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size, 0, 0);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	/* Shared event_id: restore the size mutated by header init. */
	task_event->event_id.header.size = size;
}

/* Does @event want task (fork/exit/comm/mmap) records? */
static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

/* Fan the task event out to every PMU's cpu and task contexts. */
static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

/* Build and dispatch a PERF_RECORD_FORK or PERF_RECORD_EXIT event. */
static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	/* Nothing to do when nobody is listening for task records. */
	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

/* Emit one PERF_RECORD_COMM record into @event's buffer. */
static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size, 0, 0);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm, comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	/* Shared event_id: restore the size mutated by header init. */
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state <
 PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

/* Snapshot the task comm and fan the record out to all contexts. */
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	/* Pad the name so the record stays u64-aligned. */
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	/* exec() re-enables events that requested enable_on_exec. */
	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	}
 event_id;
};

/* Emit one PERF_RECORD_MMAP record into @event's buffer. */
static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size, 0, 0);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	/* Shared event_id: restore the size mutated by header init. */
	mmap_event->event_id.header.size = size;
}

/* Match on attr.mmap for executable vmas, attr.mmap_data otherwise. */
static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

/* Resolve a display name for the vma and fan the record out. */
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
*/ buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); if (!buf) { name = strncpy(tmp, "//enomem", sizeof(tmp)); goto got_name; } name = d_path(&file->f_path, buf, PATH_MAX); if (IS_ERR(name)) { name = strncpy(tmp, "//toolong", sizeof(tmp)); goto got_name; } } else { if (arch_vma_name(mmap_event->vma)) { name = strncpy(tmp, arch_vma_name(mmap_event->vma), sizeof(tmp)); goto got_name; } if (!vma->vm_mm) { name = strncpy(tmp, "[vdso]", sizeof(tmp)); goto got_name; } else if (vma->vm_start <= vma->vm_mm->start_brk && vma->vm_end >= vma->vm_mm->brk) { name = strncpy(tmp, "[heap]", sizeof(tmp)); goto got_name; } else if (vma->vm_start <= vma->vm_mm->start_stack && vma->vm_end >= vma->vm_mm->start_stack) { name = strncpy(tmp, "[stack]", sizeof(tmp)); goto got_name; } name = strncpy(tmp, "//anon", sizeof(tmp)); goto got_name; } got_name: size = ALIGN(strlen(name)+1, sizeof(u64)); mmap_event->file_name = name; mmap_event->file_size = size; mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); if (cpuctx->active_pmu != pmu) goto next; perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); ctxn = pmu->task_ctx_nr; if (ctxn < 0) goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (ctx) { perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); } next: put_cpu_ptr(pmu->pmu_cpu_context); } rcu_read_unlock(); kfree(buf); } void perf_event_mmap(struct vm_area_struct *vma) { struct perf_mmap_event mmap_event; if (!atomic_read(&nr_mmap_events)) return; mmap_event = (struct perf_mmap_event){ .vma = vma, /* .file_name */ /* .file_size */ .event_id = { .header = { .type = PERF_RECORD_MMAP, .misc = PERF_RECORD_MISC_USER, /* .size */ }, /* .pid */ /* .tid */ .start = vma->vm_start, .len = vma->vm_end - vma->vm_start, .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, }, }; perf_event_mmap_event(&mmap_event); } /* * IRQ throttle 
logging */

/*
 * Emit a PERF_RECORD_THROTTLE (or, when @enable is non-zero,
 * PERF_RECORD_UNTHROTTLE) record into the event's output buffer.
 */
static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	/* Account for any sample_id fields appended after the record. */
	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size, 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	/*
	 * Throttle when the interrupt rate exceeds the sysctl limit;
	 * hwc->interrupts is parked at MAX_INTERRUPTS until unthrottled.
	 */
	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
		if (throttle) {
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	} else
		hwc->interrupts++;

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		/* Re-tune the sampling period toward the requested frequency. */
		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	if (event->fasync && event->pending_kill) {
		if (nmi) {
			/* Can't wake up from NMI context; defer via irq_work. */
			event->pending_wakeup = 1;
			irq_work_queue(&event->pending);
		} else
			perf_event_wakeup(event);
	}

	return ret;
}

/*
 * Record an overflow with throttling enabled.  Returns non-zero when
 * the event should be stopped (throttled or event_limit reached).
 */
int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

/* Per-CPU hash table of active software events. */
struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

/*
 * Advance period_left past zero and return how many whole sample
 * periods elapsed.  Lock-free: retries the cmpxchg on concurrent
 * updates of period_left.
 */
static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}

/*
 * Report @overflow overflows (computed from the period when zero),
 * throttling after the first one.
 */
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

/*
 * Count @nr occurrences on @event and trigger sampling overflows
 * when the period is exhausted.
 */
static void perf_swevent_event(struct perf_event *event, u64 nr, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	/* Fast path: period 1 means every occurrence overflows. */
	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

/*
 * Non-zero when @event is stopped or excludes the privilege level
 * @regs was captured in.
 */
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if
(perf_exclude_event(event, regs))
		return 0;

	return 1;
}

/* Hash a (type, event_id) pair into the swevent hash table. */
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
*/ hlist = rcu_dereference_protected(swhash->swevent_hlist, lockdep_is_held(&event->ctx->lock)); if (!hlist) return NULL; return __find_swevent_head(hlist, type, event_id); } static void do_perf_sw_event(enum perf_type_id type, u32 event_id, u64 nr, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct perf_event *event; struct hlist_node *node; struct hlist_head *head; rcu_read_lock(); head = find_swevent_head_rcu(swhash, type, event_id); if (!head) goto end; hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_event(event, nr, nmi, data, regs); } end: rcu_read_unlock(); } int perf_swevent_get_recursion_context(void) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); return get_recursion_context(swhash->recursion); } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); inline void perf_swevent_put_recursion_context(int rctx) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); put_recursion_context(swhash->recursion, rctx); } void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct perf_sample_data data; int rctx; preempt_disable_notrace(); rctx = perf_swevent_get_recursion_context(); if (rctx < 0) return; perf_sample_data_init(&data, addr); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); perf_swevent_put_recursion_context(rctx); preempt_enable_notrace(); } static void perf_swevent_read(struct perf_event *event) { } static int perf_swevent_add(struct perf_event *event, int flags) { struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct hw_perf_event *hwc = &event->hw; struct hlist_head *head; if (is_sampling_event(event)) { hwc->last_period = hwc->sample_period; perf_swevent_set_period(event); } hwc->state = !(flags & PERF_EF_START); head = find_swevent_head(swhash, event); if (WARN_ON_ONCE(!head)) return 
-EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

/* Drop the table; readers may still hold it until the RCU grace period. */
static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

/* Release refs on one CPU (cpu-bound event) or all possible CPUs. */
static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	/* Lazily allocate the hash table on first use of an online CPU. */
	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err
= swevent_hlist_get_cpu(event, cpu); if (err) { failed_cpu = cpu; goto fail; } } put_online_cpus(); return 0; fail: for_each_possible_cpu(cpu) { if (cpu == failed_cpu) break; swevent_hlist_put_cpu(event, cpu); } put_online_cpus(); return err; } struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { u64 event_id = event->attr.config; WARN_ON(event->parent); jump_label_dec(&perf_swevent_enabled[event_id]); swevent_hlist_put(event); } static int perf_swevent_init(struct perf_event *event) { int event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; switch (event_id) { case PERF_COUNT_SW_CPU_CLOCK: case PERF_COUNT_SW_TASK_CLOCK: return -ENOENT; default: break; } if (event_id >= PERF_COUNT_SW_MAX) return -ENOENT; if (!event->parent) { int err; err = swevent_hlist_get(event); if (err) return err; jump_label_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } return 0; } static struct pmu perf_swevent = { .task_ctx_nr = perf_sw_context, .event_init = perf_swevent_init, .add = perf_swevent_add, .del = perf_swevent_del, .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, }; #ifdef CONFIG_EVENT_TRACING static int perf_tp_filter_match(struct perf_event *event, struct perf_sample_data *data) { void *record = data->raw->data; if (likely(!event->filter) || filter_match_preds(event->filter, record)) return 1; return 0; } static int perf_tp_event_match(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { if (event->hw.state & PERF_HES_STOPPED) return 0; /* * All tracepoints are from kernel-space. 
*/ if (event->attr.exclude_kernel) return 0; if (!perf_tp_filter_match(event, data)) return 0; return 1; } void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx) { struct perf_sample_data data; struct perf_event *event; struct hlist_node *node; struct perf_raw_record raw = { .size = entry_size, .data = record, }; perf_sample_data_init(&data, addr); data.raw = &raw; hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, 1, &data, regs); } perf_swevent_put_recursion_context(rctx); } EXPORT_SYMBOL_GPL(perf_tp_event); static void tp_perf_event_destroy(struct perf_event *event) { perf_trace_destroy(event); } static int perf_tp_event_init(struct perf_event *event) { int err; if (event->attr.type != PERF_TYPE_TRACEPOINT) return -ENOENT; err = perf_trace_init(event); if (err) return err; event->destroy = tp_perf_event_destroy; return 0; } static struct pmu perf_tracepoint = { .task_ctx_nr = perf_sw_context, .event_init = perf_tp_event_init, .add = perf_trace_add, .del = perf_trace_del, .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, }; static inline void perf_tp_register(void) { perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); } static int perf_event_set_filter(struct perf_event *event, void __user *arg) { char *filter_str; int ret; if (event->attr.type != PERF_TYPE_TRACEPOINT) return -EINVAL; filter_str = strndup_user(arg, PAGE_SIZE); if (IS_ERR(filter_str)) return PTR_ERR(filter_str); ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); kfree(filter_str); return ret; } static void perf_event_free_filter(struct perf_event *event) { ftrace_profile_free_filter(event); } #else static inline void perf_tp_register(void) { } static int perf_event_set_filter(struct perf_event *event, void __user *arg) { return -ENOENT; } static void 
perf_event_free_filter(struct perf_event *event) { } #endif /* CONFIG_EVENT_TRACING */ #ifdef CONFIG_HAVE_HW_BREAKPOINT void perf_bp_event(struct perf_event *bp, void *data) { struct perf_sample_data sample; struct pt_regs *regs = data; perf_sample_data_init(&sample, bp->attr.bp_addr); if (!bp->hw.state && !perf_exclude_event(bp, regs)) perf_swevent_event(bp, 1, 1, &sample, regs); } #endif /* * hrtimer based swevent callback */ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) { enum hrtimer_restart ret = HRTIMER_RESTART; struct perf_sample_data data; struct pt_regs *regs; struct perf_event *event; u64 period; event = container_of(hrtimer, struct perf_event, hw.hrtimer); if (event->state != PERF_EVENT_STATE_ACTIVE) return HRTIMER_NORESTART; event->pmu->read(event); perf_sample_data_init(&data, 0); data.period = event->hw.last_period; regs = get_irq_regs(); if (regs && !perf_exclude_event(event, regs)) { if (!(event->attr.exclude_idle && current->pid == 0)) if (perf_event_overflow(event, 0, &data, regs)) ret = HRTIMER_NORESTART; } period = max_t(u64, 10000, event->hw.sample_period); hrtimer_forward_now(hrtimer, ns_to_ktime(period)); return ret; } static void perf_swevent_start_hrtimer(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; s64 period; if (!is_sampling_event(event)) return; period = local64_read(&hwc->period_left); if (period) { if (period < 0) period = 10000; local64_set(&hwc->period_left, 0); } else { period = max_t(u64, 10000, hwc->sample_period); } __hrtimer_start_range_ns(&hwc->hrtimer, ns_to_ktime(period), 0, HRTIMER_MODE_REL_PINNED, 0); } static void perf_swevent_cancel_hrtimer(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; if (is_sampling_event(event)) { ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); local64_set(&hwc->period_left, ktime_to_ns(remaining)); hrtimer_cancel(&hwc->hrtimer); } } static void perf_swevent_init_hrtimer(struct perf_event *event) { struct hw_perf_event 
*hwc = &event->hw; if (!is_sampling_event(event)) return; hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; /* * Since hrtimers have a fixed rate, we can do a static freq->period * mapping and avoid the whole period adjust feedback stuff. */ if (event->attr.freq) { long freq = event->attr.sample_freq; event->attr.sample_period = NSEC_PER_SEC / freq; hwc->sample_period = event->attr.sample_period; local64_set(&hwc->period_left, hwc->sample_period); event->attr.freq = 0; } } /* * Software event: cpu wall time clock */ static void cpu_clock_event_update(struct perf_event *event) { s64 prev; u64 now; now = local_clock(); prev = local64_xchg(&event->hw.prev_count, now); local64_add(now - prev, &event->count); } static void cpu_clock_event_start(struct perf_event *event, int flags) { local64_set(&event->hw.prev_count, local_clock()); perf_swevent_start_hrtimer(event); } static void cpu_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); cpu_clock_event_update(event); } static int cpu_clock_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) cpu_clock_event_start(event, flags); return 0; } static void cpu_clock_event_del(struct perf_event *event, int flags) { cpu_clock_event_stop(event, flags); } static void cpu_clock_event_read(struct perf_event *event) { cpu_clock_event_update(event); } static int cpu_clock_event_init(struct perf_event *event) { if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) return -ENOENT; perf_swevent_init_hrtimer(event); return 0; } static struct pmu perf_cpu_clock = { .task_ctx_nr = perf_sw_context, .event_init = cpu_clock_event_init, .add = cpu_clock_event_add, .del = cpu_clock_event_del, .start = cpu_clock_event_start, .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, }; /* * Software event: task time clock */ static void task_clock_event_update(struct 
perf_event *event, u64 now) { u64 prev; s64 delta; prev = local64_xchg(&event->hw.prev_count, now); delta = now - prev; local64_add(delta, &event->count); } static void task_clock_event_start(struct perf_event *event, int flags) { local64_set(&event->hw.prev_count, event->ctx->time); perf_swevent_start_hrtimer(event); } static void task_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); task_clock_event_update(event, event->ctx->time); } static int task_clock_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) task_clock_event_start(event, flags); return 0; } static void task_clock_event_del(struct perf_event *event, int flags) { task_clock_event_stop(event, PERF_EF_UPDATE); } static void task_clock_event_read(struct perf_event *event) { u64 now = perf_clock(); u64 delta = now - event->ctx->timestamp; u64 time = event->ctx->time + delta; task_clock_event_update(event, time); } static int task_clock_event_init(struct perf_event *event) { if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) return -ENOENT; perf_swevent_init_hrtimer(event); return 0; } static struct pmu perf_task_clock = { .task_ctx_nr = perf_sw_context, .event_init = task_clock_event_init, .add = task_clock_event_add, .del = task_clock_event_del, .start = task_clock_event_start, .stop = task_clock_event_stop, .read = task_clock_event_read, }; static void perf_pmu_nop_void(struct pmu *pmu) { } static int perf_pmu_nop_int(struct pmu *pmu) { return 0; } static void perf_pmu_start_txn(struct pmu *pmu) { perf_pmu_disable(pmu); } static int perf_pmu_commit_txn(struct pmu *pmu) { perf_pmu_enable(pmu); return 0; } static void perf_pmu_cancel_txn(struct pmu *pmu) { perf_pmu_enable(pmu); } /* * Ensures all contexts with the same task_ctx_nr have the same * pmu_cpu_context too. 
*/ static void *find_pmu_context(int ctxn) { struct pmu *pmu; if (ctxn < 0) return NULL; list_for_each_entry(pmu, &pmus, entry) { if (pmu->task_ctx_nr == ctxn) return pmu->pmu_cpu_context; } return NULL; } static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) { int cpu; for_each_possible_cpu(cpu) { struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); if (cpuctx->active_pmu == old_pmu) cpuctx->active_pmu = pmu; } } static void free_pmu_context(struct pmu *pmu) { struct pmu *i; mutex_lock(&pmus_lock); /* * Like a real lame refcount. */ list_for_each_entry(i, &pmus, entry) { if (i->pmu_cpu_context == pmu->pmu_cpu_context) { update_pmu_context(i, pmu); goto out; } } free_percpu(pmu->pmu_cpu_context); out: mutex_unlock(&pmus_lock); } static struct idr pmu_idr; static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *page) { struct pmu *pmu = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); } static struct device_attribute pmu_dev_attrs[] = { __ATTR_RO(type), __ATTR_NULL, }; static int pmu_bus_running; static struct bus_type pmu_bus = { .name = "event_source", .dev_attrs = pmu_dev_attrs, }; static void pmu_dev_release(struct device *dev) { kfree(dev); } static int pmu_dev_alloc(struct pmu *pmu) { int ret = -ENOMEM; pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!pmu->dev) goto out; device_initialize(pmu->dev); ret = dev_set_name(pmu->dev, "%s", pmu->name); if (ret) goto free_dev; dev_set_drvdata(pmu->dev, pmu); pmu->dev->bus = &pmu_bus; pmu->dev->release = pmu_dev_release; ret = device_add(pmu->dev); if (ret) goto free_dev; out: return ret; free_dev: put_device(pmu->dev); goto out; } static struct lock_class_key cpuctx_mutex; int perf_pmu_register(struct pmu *pmu, char *name, int type) { int cpu, ret; mutex_lock(&pmus_lock); ret = -ENOMEM; pmu->pmu_disable_count = alloc_percpu(int); if (!pmu->pmu_disable_count) goto unlock; pmu->type = -1; if (!name) goto 
skip_type; pmu->name = name; if (type < 0) { int err = idr_pre_get(&pmu_idr, GFP_KERNEL); if (!err) goto free_pdc; err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); if (err) { ret = err; goto free_pdc; } } pmu->type = type; if (pmu_bus_running) { ret = pmu_dev_alloc(pmu); if (ret) goto free_idr; } skip_type: pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); if (pmu->pmu_cpu_context) goto got_cpu_context; pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); if (!pmu->pmu_cpu_context) goto free_dev; for_each_possible_cpu(cpu) { struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); __perf_event_init_context(&cpuctx->ctx); lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; cpuctx->jiffies_interval = 1; INIT_LIST_HEAD(&cpuctx->rotation_list); cpuctx->active_pmu = pmu; } got_cpu_context: if (!pmu->start_txn) { if (pmu->pmu_enable) { /* * If we have pmu_enable/pmu_disable calls, install * transaction stubs that use that to try and batch * hardware accesses. */ pmu->start_txn = perf_pmu_start_txn; pmu->commit_txn = perf_pmu_commit_txn; pmu->cancel_txn = perf_pmu_cancel_txn; } else { pmu->start_txn = perf_pmu_nop_void; pmu->commit_txn = perf_pmu_nop_int; pmu->cancel_txn = perf_pmu_nop_void; } } if (!pmu->pmu_enable) { pmu->pmu_enable = perf_pmu_nop_void; pmu->pmu_disable = perf_pmu_nop_void; } list_add_rcu(&pmu->entry, &pmus); ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; free_dev: device_del(pmu->dev); put_device(pmu->dev); free_idr: if (pmu->type >= PERF_TYPE_MAX) idr_remove(&pmu_idr, pmu->type); free_pdc: free_percpu(pmu->pmu_disable_count); goto unlock; } void perf_pmu_unregister(struct pmu *pmu) { mutex_lock(&pmus_lock); list_del_rcu(&pmu->entry); mutex_unlock(&pmus_lock); /* * We dereference the pmu list under both SRCU and regular RCU, so * synchronize against both of those. 
*/ synchronize_srcu(&pmus_srcu); synchronize_rcu(); free_percpu(pmu->pmu_disable_count); if (pmu->type >= PERF_TYPE_MAX) idr_remove(&pmu_idr, pmu->type); device_del(pmu->dev); put_device(pmu->dev); free_pmu_context(pmu); } struct pmu *perf_init_event(struct perf_event *event) { struct pmu *pmu = NULL; int idx; int ret; idx = srcu_read_lock(&pmus_srcu); rcu_read_lock(); pmu = idr_find(&pmu_idr, event->attr.type); rcu_read_unlock(); if (pmu) { ret = pmu->event_init(event); if (ret) pmu = ERR_PTR(ret); goto unlock; } list_for_each_entry_rcu(pmu, &pmus, entry) { ret = pmu->event_init(event); if (!ret) goto unlock; if (ret != -ENOENT) { pmu = ERR_PTR(ret); goto unlock; } } pmu = ERR_PTR(-ENOENT); unlock: srcu_read_unlock(&pmus_srcu, idx); return pmu; } /* * Allocate and initialize a event structure */ static struct perf_event * perf_event_alloc(struct perf_event_attr *attr, int cpu, struct task_struct *task, struct perf_event *group_leader, struct perf_event *parent_event, perf_overflow_handler_t overflow_handler) { struct pmu *pmu; struct perf_event *event; struct hw_perf_event *hwc; long err; if ((unsigned)cpu >= nr_cpu_ids) { if (!task || cpu != -1) return ERR_PTR(-EINVAL); } event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return ERR_PTR(-ENOMEM); /* * Single events are their own group leaders, with an * empty sibling list: */ if (!group_leader) group_leader = event; mutex_init(&event->child_mutex); INIT_LIST_HEAD(&event->child_list); INIT_LIST_HEAD(&event->group_entry); INIT_LIST_HEAD(&event->event_entry); INIT_LIST_HEAD(&event->sibling_list); init_waitqueue_head(&event->waitq); init_irq_work(&event->pending, perf_pending_event); mutex_init(&event->mmap_mutex); event->cpu = cpu; event->attr = *attr; event->group_leader = group_leader; event->pmu = NULL; event->oncpu = -1; event->parent = parent_event; event->ns = get_pid_ns(current->nsproxy->pid_ns); event->id = atomic64_inc_return(&perf_event_id); event->state = PERF_EVENT_STATE_INACTIVE; if (task) { 
event->attach_state = PERF_ATTACH_TASK; #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * hw_breakpoint is a bit difficult here.. */ if (attr->type == PERF_TYPE_BREAKPOINT) event->hw.bp_target = task; #endif } if (!overflow_handler && parent_event) overflow_handler = parent_event->overflow_handler; event->overflow_handler = overflow_handler; if (attr->disabled) event->state = PERF_EVENT_STATE_OFF; pmu = NULL; hwc = &event->hw; hwc->sample_period = attr->sample_period; if (attr->freq && attr->sample_freq) hwc->sample_period = 1; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); /* * we currently do not support PERF_FORMAT_GROUP on inherited events */ if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) goto done; pmu = perf_init_event(event); done: err = 0; if (!pmu) err = -EINVAL; else if (IS_ERR(pmu)) err = PTR_ERR(pmu); if (err) { if (event->ns) put_pid_ns(event->ns); kfree(event); return ERR_PTR(err); } event->pmu = pmu; if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) jump_label_inc(&perf_sched_events); if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) atomic_inc(&nr_comm_events); if (event->attr.task) atomic_inc(&nr_task_events); if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { err = get_callchain_buffers(); if (err) { free_event(event); return ERR_PTR(err); } } } return event; } static int perf_copy_attr(struct perf_event_attr __user *uattr, struct perf_event_attr *attr) { u32 size; int ret; if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) return -EFAULT; /* * zero the full structure, so that a short copy will be nice. 
*/ memset(attr, 0, sizeof(*attr)); ret = get_user(size, &uattr->size); if (ret) return ret; if (size > PAGE_SIZE) /* silly large */ goto err_size; if (!size) /* abi compat */ size = PERF_ATTR_SIZE_VER0; if (size < PERF_ATTR_SIZE_VER0) goto err_size; /* * If we're handed a bigger struct than we know of, * ensure all the unknown bits are 0 - i.e. new * user-space does not rely on any kernel feature * extensions we dont know about yet. */ if (size > sizeof(*attr)) { unsigned char __user *addr; unsigned char __user *end; unsigned char val; addr = (void __user *)uattr + sizeof(*attr); end = (void __user *)uattr + size; for (; addr < end; addr++) { ret = get_user(val, addr); if (ret) return ret; if (val) goto err_size; } size = sizeof(*attr); } ret = copy_from_user(attr, uattr, size); if (ret) return -EFAULT; /* * If the type exists, the corresponding creation will verify * the attr->config. */ if (attr->type >= PERF_TYPE_MAX) return -EINVAL; if (attr->__reserved_1) return -EINVAL; if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) return -EINVAL; if (attr->read_format & ~(PERF_FORMAT_MAX-1)) return -EINVAL; out: return ret; err_size: put_user(sizeof(*attr), &uattr->size); ret = -E2BIG; goto out; } static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { struct perf_buffer *buffer = NULL, *old_buffer = NULL; int ret = -EINVAL; if (!output_event) goto set; /* don't allow circular references */ if (event == output_event) goto out; /* * Don't allow cross-cpu buffers */ if (output_event->cpu != event->cpu) goto out; /* * If its not a per-cpu buffer, it must be the same task. 
*/ if (output_event->cpu == -1 && output_event->ctx != event->ctx) goto out; set: mutex_lock(&event->mmap_mutex); /* Can't redirect output if we've got an active mmap() */ if (atomic_read(&event->mmap_count)) goto unlock; if (output_event) { /* get the buffer we want to redirect to */ buffer = perf_buffer_get(output_event); if (!buffer) goto unlock; } old_buffer = event->buffer; rcu_assign_pointer(event->buffer, buffer); ret = 0; unlock: mutex_unlock(&event->mmap_mutex); if (old_buffer) perf_buffer_put(old_buffer); out: return ret; } /** * sys_perf_event_open - open a performance event, associate it to a task/cpu * * @attr_uptr: event_id type attributes for monitoring/sampling * @pid: target pid * @cpu: target cpu * @group_fd: group leader event fd */ SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { struct perf_event *group_leader = NULL, *output_event = NULL; struct perf_event *event, *sibling; struct perf_event_attr attr; struct perf_event_context *ctx; struct file *event_file = NULL; struct file *group_file = NULL; struct task_struct *task = NULL; struct pmu *pmu; int event_fd; int move_group = 0; int fput_needed = 0; int err; /* for future expandability... */ if (flags & ~PERF_FLAG_ALL) return -EINVAL; err = perf_copy_attr(attr_uptr, &attr); if (err) return err; if (!attr.exclude_kernel) { if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; } if (attr.freq) { if (attr.sample_freq > sysctl_perf_event_sample_rate) return -EINVAL; } /* * In cgroup mode, the pid argument is used to pass the fd * opened to the cgroup directory in cgroupfs. The cpu argument * designates the cpu on which to monitor threads from that * cgroup. 
*/ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) return -EINVAL; event_fd = get_unused_fd_flags(O_RDWR); if (event_fd < 0) return event_fd; if (group_fd != -1) { group_leader = perf_fget_light(group_fd, &fput_needed); if (IS_ERR(group_leader)) { err = PTR_ERR(group_leader); goto err_fd; } group_file = group_leader->filp; if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) group_leader = NULL; } if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { task = find_lively_task_by_vpid(pid); if (IS_ERR(task)) { err = PTR_ERR(task); goto err_group_fd; } } event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); if (IS_ERR(event)) { err = PTR_ERR(event); goto err_task; } if (flags & PERF_FLAG_PID_CGROUP) { err = perf_cgroup_connect(pid, event, &attr, group_leader); if (err) goto err_alloc; /* * one more event: * - that has cgroup constraint on event->cpu * - that may need work on context switch */ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); jump_label_inc(&perf_sched_events); } /* * Special case software events and allow them to be part of * any hardware group. */ pmu = event->pmu; if (group_leader && (is_software_event(event) != is_software_event(group_leader))) { if (is_software_event(event)) { /* * If event and group_leader are not both a software * event, and event is, then group leader is not. * * Allow the addition of software events to !software * groups, this is safe because software events never * fail to schedule. */ pmu = group_leader->pmu; } else if (is_software_event(group_leader) && (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { /* * In case the group is a pure software group, and we * try to add a hardware event, move the whole group to * the hardware context. 
*/ move_group = 1; } } /* * Get the target context (task or percpu): */ ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_alloc; } if (task) { put_task_struct(task); task = NULL; } /* * Look up the group leader (we will attach this event to it): */ if (group_leader) { err = -EINVAL; /* * Do not allow a recursive hierarchy (this new sibling * becoming part of another group-sibling): */ if (group_leader->group_leader != group_leader) goto err_context; /* * Do not allow to attach to a group in a different * task or CPU context: */ if (move_group) { if (group_leader->ctx->type != ctx->type) goto err_context; } else { if (group_leader->ctx != ctx) goto err_context; } /* * Only a group leader can be exclusive or pinned */ if (attr.exclusive || attr.pinned) goto err_context; } if (output_event) { err = perf_event_set_output(event, output_event); if (err) goto err_context; } event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); if (IS_ERR(event_file)) { err = PTR_ERR(event_file); goto err_context; } if (move_group) { struct perf_event_context *gctx = group_leader->ctx; mutex_lock(&gctx->mutex); perf_remove_from_context(group_leader); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling); put_ctx(gctx); } mutex_unlock(&gctx->mutex); put_ctx(gctx); } event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); if (move_group) { perf_install_in_context(ctx, group_leader, cpu); get_ctx(ctx); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_install_in_context(ctx, sibling, cpu); get_ctx(ctx); } } perf_install_in_context(ctx, event, cpu); ++ctx->generation; perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); event->owner = current; mutex_lock(&current->perf_event_mutex); list_add_tail(&event->owner_entry, &current->perf_event_list); mutex_unlock(&current->perf_event_mutex); /* * Precalculate sample_data sizes */ 
perf_event__header_size(event); perf_event__id_header_size(event); /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in * perf_group_detach(). */ fput_light(group_file, fput_needed); fd_install(event_fd, event_file); return event_fd; err_context: perf_unpin_context(ctx); put_ctx(ctx); err_alloc: free_event(event); err_task: if (task) put_task_struct(task); err_group_fd: fput_light(group_file, fput_needed); err_fd: put_unused_fd(event_fd); return err; } /** * perf_event_create_kernel_counter * * @attr: attributes of the counter to create * @cpu: cpu in which the counter is bound * @task: task to profile (NULL for percpu) */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, struct task_struct *task, perf_overflow_handler_t overflow_handler) { struct perf_event_context *ctx; struct perf_event *event; int err; /* * Get the target context (task or percpu): */ event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); if (IS_ERR(event)) { err = PTR_ERR(event); goto err; } ctx = find_get_context(event->pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_free; } event->filp = NULL; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); perf_install_in_context(ctx, event, cpu); ++ctx->generation; perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); return event; err_free: free_event(event); err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); static void sync_child_event(struct perf_event *child_event, struct task_struct *child) { struct perf_event *parent_event = child_event->parent; u64 child_val; if (child_event->attr.inherit_stat) perf_event_read_event(child_event, child); child_val = perf_event_count(child_event); /* * Add back the child's count to the parent's count: */ atomic64_add(child_val, &parent_event->child_count); 
atomic64_add(child_event->total_time_enabled, &parent_event->child_total_time_enabled); atomic64_add(child_event->total_time_running, &parent_event->child_total_time_running); /* * Remove this event from the parent's list */ WARN_ON_ONCE(parent_event->ctx->parent_ctx); mutex_lock(&parent_event->child_mutex); list_del_init(&child_event->child_list); mutex_unlock(&parent_event->child_mutex); /* * Release the parent event, if this was the last * reference to it. */ fput(parent_event->filp); } static void __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { if (child_event->parent) { raw_spin_lock_irq(&child_ctx->lock); perf_group_detach(child_event); raw_spin_unlock_irq(&child_ctx->lock); } perf_remove_from_context(child_event); /* * It can happen that the parent exits first, and has events * that are still around due to the child reference. These * events need to be zapped. */ if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); } } static void perf_event_exit_task_context(struct task_struct *child, int ctxn) { struct perf_event *child_event, *tmp; struct perf_event_context *child_ctx; unsigned long flags; if (likely(!child->perf_event_ctxp[ctxn])) { perf_event_task(child, NULL, 0); return; } local_irq_save(flags); /* * We can't reschedule here because interrupts are disabled, * and either child is current or it is a task that can't be * scheduled, so we are now safe from rescheduling changing * our context. */ child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); task_ctx_sched_out(child_ctx, EVENT_ALL); /* * Take the context lock here so that if find_get_context is * reading child->perf_event_ctxp, we wait until it has * incremented the context's refcount before we do put_ctx below. 
*/ raw_spin_lock(&child_ctx->lock); child->perf_event_ctxp[ctxn] = NULL; /* * If this context is a clone; unclone it so it can't get * swapped to another process while we're removing all * the events from it. */ unclone_ctx(child_ctx); update_context_time(child_ctx); raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Report the task dead after unscheduling the events so that we * won't get any samples after PERF_RECORD_EXIT. We can however still * get a few PERF_RECORD_READ events. */ perf_event_task(child, child_ctx, 0); /* * We can recurse on the same lock type through: * * __perf_event_exit_task() * sync_child_event() * fput(parent_event->filp) * perf_release() * mutex_lock(&ctx->mutex) * * But since its the parent context it won't be the same instance. */ mutex_lock(&child_ctx->mutex); again: list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, group_entry) __perf_event_exit_task(child_event, child_ctx, child); list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, group_entry) __perf_event_exit_task(child_event, child_ctx, child); /* * If the last event was a group event, it will have appended all * its siblings to the list, but we obtained 'tmp' before that which * will still point to the list head terminating the iteration. */ if (!list_empty(&child_ctx->pinned_groups) || !list_empty(&child_ctx->flexible_groups)) goto again; mutex_unlock(&child_ctx->mutex); put_ctx(child_ctx); } /* * When a child task exits, feed back event values to parent events. */ void perf_event_exit_task(struct task_struct *child) { struct perf_event *event, *tmp; int ctxn; mutex_lock(&child->perf_event_mutex); list_for_each_entry_safe(event, tmp, &child->perf_event_list, owner_entry) { list_del_init(&event->owner_entry); /* * Ensure the list deletion is visible before we clear * the owner, closes a race against perf_release() where * we need to serialize on the owner->perf_event_mutex. 
*/ smp_wmb(); event->owner = NULL; } mutex_unlock(&child->perf_event_mutex); for_each_task_context_nr(ctxn) perf_event_exit_task_context(child, ctxn); } static void perf_free_event(struct perf_event *event, struct perf_event_context *ctx) { struct perf_event *parent = event->parent; if (WARN_ON_ONCE(!parent)) return; mutex_lock(&parent->child_mutex); list_del_init(&event->child_list); mutex_unlock(&parent->child_mutex); fput(parent->filp); perf_group_detach(event); list_del_event(event, ctx); free_event(event); } /* * free an unexposed, unused context as created by inheritance by * perf_event_init_task below, used by fork() in case of fail. */ void perf_event_free_task(struct task_struct *task) { struct perf_event_context *ctx; struct perf_event *event, *tmp; int ctxn; for_each_task_context_nr(ctxn) { ctx = task->perf_event_ctxp[ctxn]; if (!ctx) continue; mutex_lock(&ctx->mutex); again: list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) perf_free_event(event, ctx); list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) perf_free_event(event, ctx); if (!list_empty(&ctx->pinned_groups) || !list_empty(&ctx->flexible_groups)) goto again; mutex_unlock(&ctx->mutex); put_ctx(ctx); } } void perf_event_delayed_put(struct task_struct *task) { int ctxn; for_each_task_context_nr(ctxn) WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); } /* * inherit a event from parent task to child task: */ static struct perf_event * inherit_event(struct perf_event *parent_event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, struct perf_event *group_leader, struct perf_event_context *child_ctx) { struct perf_event *child_event; unsigned long flags; /* * Instead of creating recursive hierarchies of events, * we link inherited events back to the original parent, * which has a filp for sure, which we use as the reference * count: */ if (parent_event->parent) parent_event = parent_event->parent; child_event = 
perf_event_alloc(&parent_event->attr, parent_event->cpu, child, group_leader, parent_event, NULL); if (IS_ERR(child_event)) return child_event; get_ctx(child_ctx); /* * Make the child state follow the state of the parent event, * not its attr.disabled bit. We hold the parent's mutex, * so we won't race with perf_event_{en, dis}able_family. */ if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) child_event->state = PERF_EVENT_STATE_INACTIVE; else child_event->state = PERF_EVENT_STATE_OFF; if (parent_event->attr.freq) { u64 sample_period = parent_event->hw.sample_period; struct hw_perf_event *hwc = &child_event->hw; hwc->sample_period = sample_period; hwc->last_period = sample_period; local64_set(&hwc->period_left, sample_period); } child_event->ctx = child_ctx; child_event->overflow_handler = parent_event->overflow_handler; /* * Precalculate sample_data sizes */ perf_event__header_size(child_event); perf_event__id_header_size(child_event); /* * Link it up in the child's context: */ raw_spin_lock_irqsave(&child_ctx->lock, flags); add_event_to_ctx(child_event, child_ctx); raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Get a reference to the parent filp - we will fput it * when the child event exits. 
This is safe to do because * we are in the parent and we know that the filp still * exists and has a nonzero count: */ atomic_long_inc(&parent_event->filp->f_count); /* * Link this into the parent event's child list */ WARN_ON_ONCE(parent_event->ctx->parent_ctx); mutex_lock(&parent_event->child_mutex); list_add_tail(&child_event->child_list, &parent_event->child_list); mutex_unlock(&parent_event->child_mutex); return child_event; } static int inherit_group(struct perf_event *parent_event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, struct perf_event_context *child_ctx) { struct perf_event *leader; struct perf_event *sub; struct perf_event *child_ctr; leader = inherit_event(parent_event, parent, parent_ctx, child, NULL, child_ctx); if (IS_ERR(leader)) return PTR_ERR(leader); list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { child_ctr = inherit_event(sub, parent, parent_ctx, child, leader, child_ctx); if (IS_ERR(child_ctr)) return PTR_ERR(child_ctr); } return 0; } static int inherit_task_group(struct perf_event *event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, int ctxn, int *inherited_all) { int ret; struct perf_event_context *child_ctx; if (!event->attr.inherit) { *inherited_all = 0; return 0; } child_ctx = child->perf_event_ctxp[ctxn]; if (!child_ctx) { /* * This is executed from the parent task context, so * inherit events that have been marked for cloning. * First allocate and initialize a context for the * child. 
*/ child_ctx = alloc_perf_context(event->pmu, child); if (!child_ctx) return -ENOMEM; child->perf_event_ctxp[ctxn] = child_ctx; } ret = inherit_group(event, parent, parent_ctx, child, child_ctx); if (ret) *inherited_all = 0; return ret; } /* * Initialize the perf_event context in task_struct */ int perf_event_init_context(struct task_struct *child, int ctxn) { struct perf_event_context *child_ctx, *parent_ctx; struct perf_event_context *cloned_ctx; struct perf_event *event; struct task_struct *parent = current; int inherited_all = 1; unsigned long flags; int ret = 0; if (likely(!parent->perf_event_ctxp[ctxn])) return 0; /* * If the parent's context is a clone, pin it so it won't get * swapped under us. */ parent_ctx = perf_pin_task_context(parent, ctxn); /* * No need to check if parent_ctx != NULL here; since we saw * it non-NULL earlier, the only reason for it to become NULL * is if we exit, and since we're currently in the middle of * a fork we can't be exiting at the same time. */ /* * Lock the parent list. No need to lock the child - not PID * hashed yet and not running, so nobody can access it. */ mutex_lock(&parent_ctx->mutex); /* * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: */ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) break; } /* * We can't hold ctx->lock when iterating the ->flexible_group list due * to allocations, but we need to prevent rotation because * rotate_ctx() will change the list from interrupt context. 
*/ raw_spin_lock_irqsave(&parent_ctx->lock, flags); parent_ctx->rotate_disable = 1; raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) break; } raw_spin_lock_irqsave(&parent_ctx->lock, flags); parent_ctx->rotate_disable = 0; child_ctx = child->perf_event_ctxp[ctxn]; if (child_ctx && inherited_all) { /* * Mark the child context as a clone of the parent * context, or of whatever the parent is a clone of. * * Note that if the parent is a clone, the holding of * parent_ctx->lock avoids it from being uncloned. */ cloned_ctx = parent_ctx->parent_ctx; if (cloned_ctx) { child_ctx->parent_ctx = cloned_ctx; child_ctx->parent_gen = parent_ctx->parent_gen; } else { child_ctx->parent_ctx = parent_ctx; child_ctx->parent_gen = parent_ctx->generation; } get_ctx(child_ctx->parent_ctx); } raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); mutex_unlock(&parent_ctx->mutex); perf_unpin_context(parent_ctx); put_ctx(parent_ctx); return ret; } /* * Initialize the perf_event context in task_struct */ int perf_event_init_task(struct task_struct *child) { int ctxn, ret; memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); mutex_init(&child->perf_event_mutex); INIT_LIST_HEAD(&child->perf_event_list); for_each_task_context_nr(ctxn) { ret = perf_event_init_context(child, ctxn); if (ret) return ret; } return 0; } static void __init perf_event_init_all_cpus(void) { struct swevent_htable *swhash; int cpu; for_each_possible_cpu(cpu) { swhash = &per_cpu(swevent_htable, cpu); mutex_init(&swhash->hlist_mutex); INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); } } static void __cpuinit perf_event_init_cpu(int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); if (swhash->hlist_refcount > 0) { struct swevent_hlist *hlist; hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, 
cpu_to_node(cpu)); WARN_ON(!hlist); rcu_assign_pointer(swhash->swevent_hlist, hlist); } mutex_unlock(&swhash->hlist_mutex); } #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC static void perf_pmu_rotate_stop(struct pmu *pmu) { struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); WARN_ON(!irqs_disabled()); list_del_init(&cpuctx->rotation_list); } static void __perf_event_exit_context(void *__info) { struct perf_event_context *ctx = __info; struct perf_event *event, *tmp; perf_pmu_rotate_stop(ctx->pmu); list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) __perf_remove_from_context(event); list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) __perf_remove_from_context(event); } static void perf_event_exit_cpu_context(int cpu) { struct perf_event_context *ctx; struct pmu *pmu; int idx; idx = srcu_read_lock(&pmus_srcu); list_for_each_entry_rcu(pmu, &pmus, entry) { ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; mutex_lock(&ctx->mutex); smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); mutex_unlock(&ctx->mutex); } srcu_read_unlock(&pmus_srcu, idx); } static void perf_event_exit_cpu(int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); swevent_hlist_release(swhash); mutex_unlock(&swhash->hlist_mutex); perf_event_exit_cpu_context(cpu); } #else static inline void perf_event_exit_cpu(int cpu) { } #endif static int perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) { int cpu; for_each_online_cpu(cpu) perf_event_exit_cpu(cpu); return NOTIFY_OK; } /* * Run the perf reboot notifier at the very last possible moment so that * the generic watchdog code runs as long as possible. 
*/ static struct notifier_block perf_reboot_notifier = { .notifier_call = perf_reboot, .priority = INT_MIN, }; static int __cpuinit perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: case CPU_DOWN_FAILED: perf_event_init_cpu(cpu); break; case CPU_UP_CANCELED: case CPU_DOWN_PREPARE: perf_event_exit_cpu(cpu); break; default: break; } return NOTIFY_OK; } void __init perf_event_init(void) { int ret; idr_init(&pmu_idr); perf_event_init_all_cpus(); init_srcu_struct(&pmus_srcu); perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); perf_pmu_register(&perf_cpu_clock, NULL, -1); perf_pmu_register(&perf_task_clock, NULL, -1); perf_tp_register(); perf_cpu_notifier(perf_cpu_notify); register_reboot_notifier(&perf_reboot_notifier); ret = init_hw_breakpoint(); WARN(ret, "hw_breakpoint initialization failed with: %d", ret); } static int __init perf_event_sysfs_init(void) { struct pmu *pmu; int ret; mutex_lock(&pmus_lock); ret = bus_register(&pmu_bus); if (ret) goto unlock; list_for_each_entry(pmu, &pmus, entry) { if (!pmu->name || pmu->type < 0) continue; ret = pmu_dev_alloc(pmu); WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); } pmu_bus_running = 1; ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; } device_initcall(perf_event_sysfs_init); #ifdef CONFIG_CGROUP_PERF static struct cgroup_subsys_state *perf_cgroup_create( struct cgroup_subsys *ss, struct cgroup *cont) { struct perf_cgroup *jc; jc = kzalloc(sizeof(*jc), GFP_KERNEL); if (!jc) return ERR_PTR(-ENOMEM); jc->info = alloc_percpu(struct perf_cgroup_info); if (!jc->info) { kfree(jc); return ERR_PTR(-ENOMEM); } return &jc->css; } static void perf_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct perf_cgroup *jc; jc = container_of(cgroup_subsys_state(cont, perf_subsys_id), struct perf_cgroup, css); free_percpu(jc->info); kfree(jc); } static int 
__perf_cgroup_move(void *info) { struct task_struct *task = info; perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); return 0; } static void perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task) { task_function_call(task, __perf_cgroup_move, task); } static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, struct cgroup *old_cgrp, struct task_struct *task) { /* * cgroup_exit() is called in the copy_process() failure path. * Ignore this case since the task hasn't ran yet, this avoids * trying to poke a half freed task state from generic code. */ if (!(task->flags & PF_EXITING)) return; perf_cgroup_attach_task(cgrp, task); } struct cgroup_subsys perf_subsys = { .name = "perf_event", .subsys_id = perf_subsys_id, .create = perf_cgroup_create, .destroy = perf_cgroup_destroy, .exit = perf_cgroup_exit, .attach_task = perf_cgroup_attach_task, }; #endif /* CONFIG_CGROUP_PERF */
gpl-2.0
sagigrimberg/linux
arch/arm/mach-mmp/flint.c
4026
2907
/* * linux/arch/arm/mach-mmp/flint.c * * Support for the Marvell Flint Development Platform. * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/smc91x.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/interrupt.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/addr-map.h> #include <mach/mfp-mmp2.h> #include <mach/mmp2.h> #include <mach/irqs.h> #include "common.h" #define FLINT_NR_IRQS (MMP_NR_IRQS + 48) static unsigned long flint_pin_config[] __initdata = { /* UART1 */ GPIO45_UART1_RXD, GPIO46_UART1_TXD, /* UART2 */ GPIO47_UART2_RXD, GPIO48_UART2_TXD, /* SMC */ GPIO151_SMC_SCLK, GPIO145_SMC_nCS0, GPIO146_SMC_nCS1, GPIO152_SMC_BE0, GPIO153_SMC_BE1, GPIO154_SMC_IRQ, GPIO113_SMC_RDY, /*Ethernet*/ GPIO155_GPIO, /* DFI */ GPIO168_DFI_D0, GPIO167_DFI_D1, GPIO166_DFI_D2, GPIO165_DFI_D3, GPIO107_DFI_D4, GPIO106_DFI_D5, GPIO105_DFI_D6, GPIO104_DFI_D7, GPIO111_DFI_D8, GPIO164_DFI_D9, GPIO163_DFI_D10, GPIO162_DFI_D11, GPIO161_DFI_D12, GPIO110_DFI_D13, GPIO109_DFI_D14, GPIO108_DFI_D15, GPIO143_ND_nCS0, GPIO144_ND_nCS1, GPIO147_ND_nWE, GPIO148_ND_nRE, GPIO150_ND_ALE, GPIO149_ND_CLE, GPIO112_ND_RDY0, GPIO160_ND_RDY1, }; static struct pxa_gpio_platform_data mmp2_gpio_pdata = { .irq_base = MMP_GPIO_TO_IRQ(0), }; static struct smc91x_platdata flint_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_resources[] = { [0] = { .start = SMC_CS1_PHYS_BASE + 0x300, .end = SMC_CS1_PHYS_BASE + 0xfffff, .flags = IORESOURCE_MEM, }, [1] = { .start = MMP_GPIO_TO_IRQ(155), .end = MMP_GPIO_TO_IRQ(155), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct platform_device smc91x_device = { 
.name = "smc91x", .id = 0, .dev = { .platform_data = &flint_smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static void __init flint_init(void) { mfp_config(ARRAY_AND_SIZE(flint_pin_config)); /* on-chip devices */ mmp2_add_uart(1); mmp2_add_uart(2); platform_device_add_data(&mmp2_device_gpio, &mmp2_gpio_pdata, sizeof(struct pxa_gpio_platform_data)); platform_device_register(&mmp2_device_gpio); /* off-chip devices */ platform_device_register(&smc91x_device); } MACHINE_START(FLINT, "Flint Development Platform") .map_io = mmp_map_io, .nr_irqs = FLINT_NR_IRQS, .init_irq = mmp2_init_irq, .init_time = mmp2_timer_init, .init_machine = flint_init, .restart = mmp_restart, MACHINE_END
gpl-2.0
ztemt/Z5_H112_kernel
arch/arm/plat-mxc/iomux-v1.c
5562
4884
/* * arch/arm/plat-mxc/iomux-v1.c * * Copyright (C) 2004 Sascha Hauer, Synertronixx GmbH * Copyright (C) 2009 Uwe Kleine-Koenig, Pengutronix * * Common code for i.MX1, i.MX21 and i.MX27 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <mach/iomux-v1.h> static void __iomem *imx_iomuxv1_baseaddr; static unsigned imx_iomuxv1_numports; static inline unsigned long imx_iomuxv1_readl(unsigned offset) { return __raw_readl(imx_iomuxv1_baseaddr + offset); } static inline void imx_iomuxv1_writel(unsigned long val, unsigned offset) { __raw_writel(val, imx_iomuxv1_baseaddr + offset); } static inline void imx_iomuxv1_rmwl(unsigned offset, unsigned long mask, unsigned long value) { unsigned long reg = imx_iomuxv1_readl(offset); reg &= ~mask; reg |= value; imx_iomuxv1_writel(reg, offset); } static inline void imx_iomuxv1_set_puen( unsigned int port, unsigned int pin, int on) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_PUEN(port), mask, on ? mask : 0); } static inline void imx_iomuxv1_set_ddir( unsigned int port, unsigned int pin, int out) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_DDIR(port), mask, out ? 
mask : 0); } static inline void imx_iomuxv1_set_gpr( unsigned int port, unsigned int pin, int af) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_GPR(port), mask, af ? mask : 0); } static inline void imx_iomuxv1_set_gius( unsigned int port, unsigned int pin, int inuse) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_GIUS(port), mask, inuse ? mask : 0); } static inline void imx_iomuxv1_set_ocr( unsigned int port, unsigned int pin, unsigned int ocr) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = ocr << shift; unsigned long offset = pin < 16 ? MXC_OCR1(port) : MXC_OCR2(port); imx_iomuxv1_rmwl(offset, mask, value); } static inline void imx_iomuxv1_set_iconfa( unsigned int port, unsigned int pin, unsigned int aout) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = aout << shift; unsigned long offset = pin < 16 ? MXC_ICONFA1(port) : MXC_ICONFA2(port); imx_iomuxv1_rmwl(offset, mask, value); } static inline void imx_iomuxv1_set_iconfb( unsigned int port, unsigned int pin, unsigned int bout) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = bout << shift; unsigned long offset = pin < 16 ? MXC_ICONFB1(port) : MXC_ICONFB2(port); imx_iomuxv1_rmwl(offset, mask, value); } int mxc_gpio_mode(int gpio_mode) { unsigned int pin = gpio_mode & GPIO_PIN_MASK; unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT; unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT; unsigned int aout = (gpio_mode >> GPIO_AOUT_SHIFT) & 3; unsigned int bout = (gpio_mode >> GPIO_BOUT_SHIFT) & 3; if (port >= imx_iomuxv1_numports) return -EINVAL; /* Pullup enable */ imx_iomuxv1_set_puen(port, pin, gpio_mode & GPIO_PUEN); /* Data direction */ imx_iomuxv1_set_ddir(port, pin, gpio_mode & GPIO_OUT); /* Primary / alternate function */ imx_iomuxv1_set_gpr(port, pin, gpio_mode & GPIO_AF); /* use as gpio? 
*/ imx_iomuxv1_set_gius(port, pin, !(gpio_mode & (GPIO_PF | GPIO_AF))); imx_iomuxv1_set_ocr(port, pin, ocr); imx_iomuxv1_set_iconfa(port, pin, aout); imx_iomuxv1_set_iconfb(port, pin, bout); return 0; } EXPORT_SYMBOL(mxc_gpio_mode); static int imx_iomuxv1_setup_multiple(const int *list, unsigned count) { size_t i; int ret = 0; for (i = 0; i < count; ++i) { ret = mxc_gpio_mode(list[i]); if (ret) return ret; } return ret; } int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count, const char *label) { int ret; ret = imx_iomuxv1_setup_multiple(pin_list, count); return ret; } EXPORT_SYMBOL(mxc_gpio_setup_multiple_pins); int __init imx_iomuxv1_init(void __iomem *base, int numports) { imx_iomuxv1_baseaddr = base; imx_iomuxv1_numports = numports; return 0; }
gpl-2.0
byeonggon/project-pika-lynx
net/irda/irnet/irnet_ppp.c
8378
33314
/* * IrNET protocol module : Synchronous PPP over an IrDA socket. * * Jean II - HPL `00 - <jt@hpl.hp.com> * * This file implement the PPP interface and /dev/irnet character device. * The PPP interface hook to the ppp_generic module, handle all our * relationship to the PPP code in the kernel (and by extension to pppd), * and exchange PPP frames with this module (send/receive). * The /dev/irnet device is used primarily for 2 functions : * 1) as a stub for pppd (the ppp daemon), so that we can appropriately * generate PPP sessions (we pretend we are a tty). * 2) as a control channel (write commands, read events) */ #include <linux/sched.h> #include <linux/slab.h> #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ /* Generic PPP callbacks (to call us) */ static const struct ppp_channel_ops irnet_ppp_ops = { .start_xmit = ppp_irnet_send, .ioctl = ppp_irnet_ioctl }; /************************* CONTROL CHANNEL *************************/ /* * When a pppd instance is not active on /dev/irnet, it acts as a control * channel. * Writing allow to set up the IrDA destination of the IrNET channel, * and any application may be read events happening in IrNET... */ /*------------------------------------------------------------------*/ /* * Write is used to send a command to configure a IrNET channel * before it is open by pppd. The syntax is : "command argument" * Currently there is only two defined commands : * o name : set the requested IrDA nickname of the IrNET peer. * o addr : set the requested IrDA address of the IrNET peer. * Note : the code is crude, but effective... */ static inline ssize_t irnet_ctrl_write(irnet_socket * ap, const char __user *buf, size_t count) { char command[IRNET_MAX_COMMAND]; char * start; /* Current command being processed */ char * next; /* Next command to process */ int length; /* Length of current command */ DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check for overflow... 
*/ DABORT(count >= IRNET_MAX_COMMAND, -ENOMEM, CTRL_ERROR, "Too much data !!!\n"); /* Get the data in the driver */ if(copy_from_user(command, buf, count)) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } /* Safe terminate the string */ command[count] = '\0'; DEBUG(CTRL_INFO, "Command line received is ``%s'' (%Zd).\n", command, count); /* Check every commands in the command line */ next = command; while(next != NULL) { /* Look at the next command */ start = next; /* Scrap whitespaces before the command */ start = skip_spaces(start); /* ',' is our command separator */ next = strchr(start, ','); if(next) { *next = '\0'; /* Terminate command */ length = next - start; /* Length */ next++; /* Skip the '\0' */ } else length = strlen(start); DEBUG(CTRL_INFO, "Found command ``%s'' (%d).\n", start, length); /* Check if we recognised one of the known command * We can't use "switch" with strings, so hack with "continue" */ /* First command : name -> Requested IrDA nickname */ if(!strncmp(start, "name", 4)) { /* Copy the name only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { /* Strip out trailing whitespaces */ while(isspace(start[length - 1])) length--; DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); /* Copy the name for later reuse */ memcpy(ap->rname, start + 5, length - 5); ap->rname[length - 5] = '\0'; } else ap->rname[0] = '\0'; DEBUG(CTRL_INFO, "Got rname = ``%s''\n", ap->rname); /* Restart the loop */ continue; } /* Second command : addr, daddr -> Requested IrDA destination address * Also process : saddr -> Requested IrDA source address */ if((!strncmp(start, "addr", 4)) || (!strncmp(start, "daddr", 5)) || (!strncmp(start, "saddr", 5))) { __u32 addr = DEV_ADDR_ANY; /* Copy the address only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { char * begp = start + 5; char * endp; /* Scrap whitespaces before the command */ begp = 
skip_spaces(begp); /* Convert argument to a number (last arg is the base) */ addr = simple_strtoul(begp, &endp, 16); /* Has it worked ? (endp should be start + length) */ DABORT(endp <= (start + 5), -EINVAL, CTRL_ERROR, "Invalid address.\n"); } /* Which type of address ? */ if(start[0] == 's') { /* Save it */ ap->rsaddr = addr; DEBUG(CTRL_INFO, "Got rsaddr = %08x\n", ap->rsaddr); } else { /* Save it */ ap->rdaddr = addr; DEBUG(CTRL_INFO, "Got rdaddr = %08x\n", ap->rdaddr); } /* Restart the loop */ continue; } /* Other possible command : connect N (number of retries) */ /* No command matched -> Failed... */ DABORT(1, -EINVAL, CTRL_ERROR, "Not a recognised IrNET command.\n"); } /* Success : we have parsed all commands successfully */ return count; } #ifdef INITIAL_DISCOVERY /*------------------------------------------------------------------*/ /* * Function irnet_get_discovery_log (self) * * Query the content on the discovery log if not done * * This function query the current content of the discovery log * at the startup of the event channel and save it in the internal struct. */ static void irnet_get_discovery_log(irnet_socket * ap) { __u16 mask = irlmp_service_to_hint(S_LAN); /* Ask IrLMP for the current discovery log */ ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if(ap->discoveries == NULL) ap->disco_number = -1; DEBUG(CTRL_INFO, "Got the log (0x%p), size is %d\n", ap->discoveries, ap->disco_number); } /*------------------------------------------------------------------*/ /* * Function irnet_read_discovery_log (self, event) * * Read the content on the discovery log * * This function dump the current content of the discovery log * at the startup of the event channel. * Return 1 if wrote an event on the control channel... 
* * State of the ap->disco_XXX variables : * Socket creation : discoveries = NULL ; disco_index = 0 ; disco_number = 0 * While reading : discoveries = ptr ; disco_index = X ; disco_number = Y * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1 */ static inline int irnet_read_discovery_log(irnet_socket * ap, char * event) { int done_event = 0; DENTER(CTRL_TRACE, "(ap=0x%p, event=0x%p)\n", ap, event); /* Test if we have some work to do or we have already finished */ if(ap->disco_number == -1) { DEBUG(CTRL_INFO, "Already done\n"); return 0; } /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Check if we have more item to dump */ if(ap->disco_index < ap->disco_number) { /* Write an event */ sprintf(event, "Found %08x (%s) behind %08x {hints %02X-%02X}\n", ap->discoveries[ap->disco_index].daddr, ap->discoveries[ap->disco_index].info, ap->discoveries[ap->disco_index].saddr, ap->discoveries[ap->disco_index].hints[0], ap->discoveries[ap->disco_index].hints[1]); DEBUG(CTRL_INFO, "Writing discovery %d : %s\n", ap->disco_index, ap->discoveries[ap->disco_index].info); /* We have an event */ done_event = 1; /* Next discovery */ ap->disco_index++; } /* Check if we have done the last item */ if(ap->disco_index >= ap->disco_number) { /* No more items : remove the log and signal termination */ DEBUG(CTRL_INFO, "Cleaning up log (0x%p)\n", ap->discoveries); if(ap->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(ap->discoveries); ap->discoveries = NULL; } ap->disco_number = -1; } return done_event; } #endif /* INITIAL_DISCOVERY */ /*------------------------------------------------------------------*/ /* * Read is used to get IrNET events */ static inline ssize_t irnet_ctrl_read(irnet_socket * ap, struct file * file, char __user * buf, size_t count) { DECLARE_WAITQUEUE(wait, current); char event[64]; /* Max event is 61 char */ ssize_t ret = 0; 
DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check if we can write an event out in one go */ DABORT(count < sizeof(event), -EOVERFLOW, CTRL_ERROR, "Buffer to small.\n"); #ifdef INITIAL_DISCOVERY /* Check if we have read the log */ if(irnet_read_discovery_log(ap, event)) { /* We have an event !!! Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return strlen(event); } #endif /* INITIAL_DISCOVERY */ /* Put ourselves on the wait queue to be woken up */ add_wait_queue(&irnet_events.rwait, &wait); current->state = TASK_INTERRUPTIBLE; for(;;) { /* If there is unread events */ ret = 0; if(ap->event_index != irnet_events.index) break; ret = -EAGAIN; if(file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if(signal_pending(current)) break; /* Yield and wait to be woken up */ schedule(); } current->state = TASK_RUNNING; remove_wait_queue(&irnet_events.rwait, &wait); /* Did we got it ? */ if(ret != 0) { /* No, return the error code */ DEXIT(CTRL_TRACE, " - ret %Zd\n", ret); return ret; } /* Which event is it ? 
*/ switch(irnet_events.log[ap->event_index].event) { case IRNET_DISCOVER: sprintf(event, "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_EXPIRE: sprintf(event, "Expired %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_CONNECT_TO: sprintf(event, "Connected to %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_CONNECT_FROM: sprintf(event, "Connection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_REQUEST_FROM: sprintf(event, "Request from %08x (%s) behind %08x\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr); break; case IRNET_NOANSWER_FROM: sprintf(event, "No-answer from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_BLOCKED_LINK: sprintf(event, "Blocked link with %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_FROM: sprintf(event, "Disconnection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_TO: sprintf(event, 
"Disconnected to %08x (%s)\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name); break; default: sprintf(event, "Bug\n"); } /* Increment our event index */ ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS; DEBUG(CTRL_INFO, "Event is :%s", event); /* Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return strlen(event); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet. * Just check if there are new events... */ static inline unsigned int irnet_ctrl_poll(irnet_socket * ap, struct file * file, poll_table * wait) { unsigned int mask; DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); poll_wait(file, &irnet_events.rwait, wait); mask = POLLOUT | POLLWRNORM; /* If there is unread events */ if(ap->event_index != irnet_events.index) mask |= POLLIN | POLLRDNORM; #ifdef INITIAL_DISCOVERY if(ap->disco_number != -1) { /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Recheck */ if(ap->disco_number != -1) mask |= POLLIN | POLLRDNORM; } #endif /* INITIAL_DISCOVERY */ DEXIT(CTRL_TRACE, " - mask=0x%X\n", mask); return mask; } /*********************** FILESYSTEM CALLBACKS ***********************/ /* * Implement the usual open, read, write functions that will be called * by the file system when some action is performed on /dev/irnet. * Most of those actions will in fact be performed by "pppd" or * the control channel, we just act as a redirector... */ /*------------------------------------------------------------------*/ /* * Open : when somebody open /dev/irnet * We basically create a new instance of irnet and initialise it. 
*/ static int dev_irnet_open(struct inode * inode, struct file * file) { struct irnet_socket * ap; int err; DENTER(FS_TRACE, "(file=0x%p)\n", file); #ifdef SECURE_DEVIRNET /* This could (should?) be enforced by the permissions on /dev/irnet. */ if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ /* Allocate a private structure for this IrNET instance */ ap = kzalloc(sizeof(*ap), GFP_KERNEL); DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); /* initialize the irnet structure */ ap->file = file; /* PPP channel setup */ ap->ppp_open = 0; ap->chan.private = ap; ap->chan.ops = &irnet_ppp_ops; ap->chan.mtu = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->chan.hdrlen = 2 + TTP_MAX_HEADER; /* for A/C + Max IrDA hdr */ /* PPP parameters */ ap->mru = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->xaccm[0] = ~0U; ap->xaccm[3] = 0x60000000U; ap->raccm = ~0U; /* Setup the IrDA part... */ err = irda_irnet_create(ap); if(err) { DERROR(FS_ERROR, "Can't setup IrDA link...\n"); kfree(ap); return err; } /* For the control channel */ ap->event_index = irnet_events.index; /* Cancel all past events */ mutex_init(&ap->lock); /* Put our stuff where we will be able to find it later */ file->private_data = ap; DEXIT(FS_TRACE, " - ap=0x%p\n", ap); return 0; } /*------------------------------------------------------------------*/ /* * Close : when somebody close /dev/irnet * Destroy the instance of /dev/irnet */ static int dev_irnet_close(struct inode * inode, struct file * file) { irnet_socket * ap = file->private_data; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); DABORT(ap == NULL, 0, FS_ERROR, "ap is NULL !!!\n"); /* Detach ourselves */ file->private_data = NULL; /* Close IrDA stuff */ irda_irnet_destroy(ap); /* Disconnect from the generic PPP layer if not already done */ if(ap->ppp_open) { DERROR(FS_ERROR, "Channel still registered - deregistering !\n"); ap->ppp_open = 0; ppp_unregister_channel(&ap->chan); } kfree(ap); DEXIT(FS_TRACE, 
"\n"); return 0; } /*------------------------------------------------------------------*/ /* * Write does nothing. * (we receive packet from ppp_generic through ppp_irnet_send()) */ static ssize_t dev_irnet_write(struct file * file, const char __user *buf, size_t count, loff_t * ppos) { irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_write(ap, buf, count); } /*------------------------------------------------------------------*/ /* * Read doesn't do much either. * (pppd poll us, but ultimately reads through /dev/ppp) */ static ssize_t dev_irnet_read(struct file * file, char __user * buf, size_t count, loff_t * ppos) { irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_read(ap, file, buf, count); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet */ static unsigned int dev_irnet_poll(struct file * file, poll_table * wait) { irnet_socket * ap = file->private_data; unsigned int mask; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); mask = POLLOUT | POLLWRNORM; DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(!ap->ppp_open) mask |= irnet_ctrl_poll(ap, file, wait); DEXIT(FS_TRACE, " - mask=0x%X\n", mask); return mask; } /*------------------------------------------------------------------*/ /* * IOCtl : Called when someone does some ioctls on /dev/irnet * This is the way pppd configure us and control us while the PPP * instance is active. 
*/ static long dev_irnet_ioctl( struct file * file, unsigned int cmd, unsigned long arg) { irnet_socket * ap = file->private_data; int err; int val; void __user *argp = (void __user *)arg; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p, cmd=0x%X)\n", file, ap, cmd); /* Basic checks... */ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); #ifdef SECURE_DEVIRNET if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ err = -EFAULT; switch(cmd) { /* Set discipline (should be N_SYNC_PPP or N_TTY) */ case TIOCSETD: if(get_user(val, (int __user *)argp)) break; if((val == N_SYNC_PPP) || (val == N_PPP)) { DEBUG(FS_INFO, "Entering PPP discipline.\n"); /* PPP channel setup (ap->chan in configured in dev_irnet_open())*/ if (mutex_lock_interruptible(&ap->lock)) return -EINTR; err = ppp_register_channel(&ap->chan); if(err == 0) { /* Our ppp side is active */ ap->ppp_open = 1; DEBUG(FS_INFO, "Trying to establish a connection.\n"); /* Setup the IrDA link now - may fail... */ irda_irnet_connect(ap); } else DERROR(FS_ERROR, "Can't setup PPP channel...\n"); mutex_unlock(&ap->lock); } else { /* In theory, should be N_TTY */ DEBUG(FS_INFO, "Exiting PPP discipline.\n"); /* Disconnect from the generic PPP layer */ if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open) { ap->ppp_open = 0; ppp_unregister_channel(&ap->chan); } else DERROR(FS_ERROR, "Channel not registered !\n"); err = 0; mutex_unlock(&ap->lock); } break; /* Query PPP channel and unit number */ case PPPIOCGCHAN: if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; mutex_unlock(&ap->lock); break; case PPPIOCGUNIT: if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) err = 0; mutex_unlock(&ap->lock); break; /* All these ioctls can be passed both directly and from ppp_generic, * so we just deal with them in one place... 
*/ case PPPIOCGFLAGS: case PPPIOCSFLAGS: case PPPIOCGASYNCMAP: case PPPIOCSASYNCMAP: case PPPIOCGRASYNCMAP: case PPPIOCSRASYNCMAP: case PPPIOCGXASYNCMAP: case PPPIOCSXASYNCMAP: case PPPIOCGMRU: case PPPIOCSMRU: DEBUG(FS_INFO, "Standard PPP ioctl.\n"); if(!capable(CAP_NET_ADMIN)) err = -EPERM; else { if (mutex_lock_interruptible(&ap->lock)) return -EINTR; err = ppp_irnet_ioctl(&ap->chan, cmd, arg); mutex_unlock(&ap->lock); } break; /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ /* Get termios */ case TCGETS: DEBUG(FS_INFO, "Get termios.\n"); if (mutex_lock_interruptible(&ap->lock)) return -EINTR; #ifndef TCGETS2 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) err = 0; #else if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) err = 0; #endif mutex_unlock(&ap->lock); break; /* Set termios */ case TCSETSF: DEBUG(FS_INFO, "Set termios.\n"); if (mutex_lock_interruptible(&ap->lock)) return -EINTR; #ifndef TCGETS2 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) err = 0; #else if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) err = 0; #endif mutex_unlock(&ap->lock); break; /* Set DTR/RTS */ case TIOCMBIS: case TIOCMBIC: /* Set exclusive/non-exclusive mode */ case TIOCEXCL: case TIOCNXCL: DEBUG(FS_INFO, "TTY compatibility.\n"); err = 0; break; case TCGETA: DEBUG(FS_INFO, "TCGETA\n"); break; case TCFLSH: DEBUG(FS_INFO, "TCFLSH\n"); /* Note : this will flush buffers in PPP, so it *must* be done * We should also worry that we don't accept junk here and that * we get rid of our own buffers */ #ifdef FLUSH_TO_PPP if (mutex_lock_interruptible(&ap->lock)) return -EINTR; ppp_output_wakeup(&ap->chan); mutex_unlock(&ap->lock); #endif /* FLUSH_TO_PPP */ err = 0; break; case FIONREAD: DEBUG(FS_INFO, "FIONREAD\n"); val = 0; if(put_user(val, (int __user *)argp)) break; err = 0; break; default: DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", 
cmd); err = -ENOTTY; } DEXIT(FS_TRACE, " - err = 0x%X\n", err); return err; } /************************** PPP CALLBACKS **************************/ /* * This are the functions that the generic PPP driver in the kernel * will call to communicate to us. */ /*------------------------------------------------------------------*/ /* * Prepare the ppp frame for transmission over the IrDA socket. * We make sure that the header space is enough, and we change ppp header * according to flags passed by pppd. * This is not a callback, but just a helper function used in ppp_irnet_send() */ static inline struct sk_buff * irnet_prepare_skb(irnet_socket * ap, struct sk_buff * skb) { unsigned char * data; int proto; /* PPP protocol */ int islcp; /* Protocol == LCP */ int needaddr; /* Need PPP address */ DENTER(PPP_TRACE, "(ap=0x%p, skb=0x%p)\n", ap, skb); /* Extract PPP protocol from the frame */ data = skb->data; proto = (data[0] << 8) + data[1]; /* LCP packets with codes between 1 (configure-request) * and 7 (code-reject) must be sent as though no options * have been negotiated. */ islcp = (proto == PPP_LCP) && (1 <= data[2]) && (data[2] <= 7); /* compress protocol field if option enabled */ if((data[0] == 0) && (ap->flags & SC_COMP_PROT) && (!islcp)) skb_pull(skb,1); /* Check if we need address/control fields */ needaddr = 2*((ap->flags & SC_COMP_AC) == 0 || islcp); /* Is the skb headroom large enough to contain all IrDA-headers? */ if((skb_headroom(skb) < (ap->max_header_size + needaddr)) || (skb_shared(skb))) { struct sk_buff * new_skb; DEBUG(PPP_INFO, "Reallocating skb\n"); /* Create a new skb */ new_skb = skb_realloc_headroom(skb, ap->max_header_size + needaddr); /* We have to free the original skb anyway */ dev_kfree_skb(skb); /* Did the realloc succeed ? 
*/ DABORT(new_skb == NULL, NULL, PPP_ERROR, "Could not realloc skb\n"); /* Use the new skb instead */ skb = new_skb; } /* prepend address/control fields if necessary */ if(needaddr) { skb_push(skb, 2); skb->data[0] = PPP_ALLSTATIONS; skb->data[1] = PPP_UI; } DEXIT(PPP_TRACE, "\n"); return skb; } /*------------------------------------------------------------------*/ /* * Send a packet to the peer over the IrTTP connection. * Returns 1 iff the packet was accepted. * Returns 0 iff packet was not consumed. * If the packet was not accepted, we will call ppp_output_wakeup * at some later time to reactivate flow control in ppp_generic. */ static int ppp_irnet_send(struct ppp_channel * chan, struct sk_buff * skb) { irnet_socket * self = (struct irnet_socket *) chan->private; int ret; DENTER(PPP_TRACE, "(channel=0x%p, ap/self=0x%p)\n", chan, self); /* Check if things are somewhat valid... */ DASSERT(self != NULL, 0, PPP_ERROR, "Self is NULL !!!\n"); /* Check if we are connected */ if(!(test_bit(0, &self->ttp_open))) { #ifdef CONNECT_IN_SEND /* Let's try to connect one more time... */ /* Note : we won't be connected after this call, but we should be * ready for next packet... */ /* If we are already connecting, this will fail */ irda_irnet_connect(self); #endif /* CONNECT_IN_SEND */ DEBUG(PPP_INFO, "IrTTP not ready ! (%ld-%ld)\n", self->ttp_open, self->ttp_connect); /* Note : we can either drop the packet or block the packet. * * Blocking the packet allow us a better connection time, * because by calling ppp_output_wakeup() we can have * ppp_generic resending the LCP request immediately to us, * rather than waiting for one of pppd periodic transmission of * LCP request. * * On the other hand, if we block all packet, all those periodic * transmissions of pppd accumulate in ppp_generic, creating a * backlog of LCP request. When we eventually connect later on, * we have to transmit all this backlog before we can connect * proper (if we don't timeout before). 
* * The current strategy is as follow : * While we are attempting to connect, we block packets to get * a better connection time. * If we fail to connect, we drain the queue and start dropping packets */ #ifdef BLOCK_WHEN_CONNECT /* If we are attempting to connect */ if(test_bit(0, &self->ttp_connect)) { /* Blocking packet, ppp_generic will retry later */ return 0; } #endif /* BLOCK_WHEN_CONNECT */ /* Dropping packet, pppd will retry later */ dev_kfree_skb(skb); return 1; } /* Check if the queue can accept any packet, otherwise block */ if(self->tx_flow != FLOW_START) DRETURN(0, PPP_INFO, "IrTTP queue full (%d skbs)...\n", skb_queue_len(&self->tsap->tx_queue)); /* Prepare ppp frame for transmission */ skb = irnet_prepare_skb(self, skb); DABORT(skb == NULL, 1, PPP_ERROR, "Prepare skb for Tx failed.\n"); /* Send the packet to IrTTP */ ret = irttp_data_request(self->tsap, skb); if(ret < 0) { /* * > IrTTPs tx queue is full, so we just have to * > drop the frame! You might think that we should * > just return -1 and don't deallocate the frame, * > but that is dangerous since it's possible that * > we have replaced the original skb with a new * > one with larger headroom, and that would really * > confuse do_dev_queue_xmit() in dev.c! I have * > tried :-) DB * Correction : we verify the flow control above (self->tx_flow), * so we come here only if IrTTP doesn't like the packet (empty, * too large, IrTTP not connected). In those rare cases, it's ok * to drop it, we don't want to see it here again... * Jean II */ DERROR(PPP_ERROR, "IrTTP doesn't like this packet !!! (0x%X)\n", ret); /* irttp_data_request already free the packet */ } DEXIT(PPP_TRACE, "\n"); return 1; /* Packet has been consumed */ } /*------------------------------------------------------------------*/ /* * Take care of the ioctls that ppp_generic doesn't want to deal with... * Note : we are also called from dev_irnet_ioctl(). 
*/ static int ppp_irnet_ioctl(struct ppp_channel * chan, unsigned int cmd, unsigned long arg) { irnet_socket * ap = (struct irnet_socket *) chan->private; int err; int val; u32 accm[8]; void __user *argp = (void __user *)arg; DENTER(PPP_TRACE, "(channel=0x%p, ap=0x%p, cmd=0x%X)\n", chan, ap, cmd); /* Basic checks... */ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); err = -EFAULT; switch(cmd) { /* PPP flags */ case PPPIOCGFLAGS: val = ap->flags | ap->rbits; if(put_user(val, (int __user *) argp)) break; err = 0; break; case PPPIOCSFLAGS: if(get_user(val, (int __user *) argp)) break; ap->flags = val & ~SC_RCV_BITS; ap->rbits = val & SC_RCV_BITS; err = 0; break; /* Async map stuff - all dummy to please pppd */ case PPPIOCGASYNCMAP: if(put_user(ap->xaccm[0], (u32 __user *) argp)) break; err = 0; break; case PPPIOCSASYNCMAP: if(get_user(ap->xaccm[0], (u32 __user *) argp)) break; err = 0; break; case PPPIOCGRASYNCMAP: if(put_user(ap->raccm, (u32 __user *) argp)) break; err = 0; break; case PPPIOCSRASYNCMAP: if(get_user(ap->raccm, (u32 __user *) argp)) break; err = 0; break; case PPPIOCGXASYNCMAP: if(copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) break; err = 0; break; case PPPIOCSXASYNCMAP: if(copy_from_user(accm, argp, sizeof(accm))) break; accm[2] &= ~0x40000000U; /* can't escape 0x5e */ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */ memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); err = 0; break; /* Max PPP frame size */ case PPPIOCGMRU: if(put_user(ap->mru, (int __user *) argp)) break; err = 0; break; case PPPIOCSMRU: if(get_user(val, (int __user *) argp)) break; if(val < PPP_MRU) val = PPP_MRU; ap->mru = val; err = 0; break; default: DEBUG(PPP_INFO, "Unsupported ioctl (0x%X)\n", cmd); err = -ENOIOCTLCMD; } DEXIT(PPP_TRACE, " - err = 0x%X\n", err); return err; } /************************** INITIALISATION **************************/ /* * Module initialisation and all that jazz... 
*/ /*------------------------------------------------------------------*/ /* * Hook our device callbacks in the filesystem, to connect our code * to /dev/irnet */ static inline int __init ppp_irnet_init(void) { int err = 0; DENTER(MODULE_TRACE, "()\n"); /* Allocate ourselves as a minor in the misc range */ err = misc_register(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Cleanup at exit... */ static inline void __exit ppp_irnet_cleanup(void) { DENTER(MODULE_TRACE, "()\n"); /* De-allocate /dev/irnet minor in misc range */ misc_deregister(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Module main entry point */ static int __init irnet_init(void) { int err; /* Initialise both parts... */ err = irda_irnet_init(); if(!err) err = ppp_irnet_init(); return err; } /*------------------------------------------------------------------*/ /* * Module exit */ static void __exit irnet_cleanup(void) { irda_irnet_cleanup(); ppp_irnet_cleanup(); } /*------------------------------------------------------------------*/ /* * Module magic */ module_init(irnet_init); module_exit(irnet_cleanup); MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>"); MODULE_DESCRIPTION("IrNET : Synchronous PPP over IrDA"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(10, 187);
gpl-2.0
chil360/chil360-kernel
drivers/net/irda/toim3232-sir.c
8890
12466
/********************************************************************* * * Filename: toim3232-sir.c * Version: 1.0 * Description: Implementation of dongles based on the Vishay/Temic * TOIM3232 SIR Endec chipset. Currently only the * IRWave IR320ST-2 is tested, although it should work * with any TOIM3232 or TOIM4232 chipset based RS232 * dongle with minimal modification. * Based heavily on the Tekram driver (tekram.c), * with thanks to Dag Brattli and Martin Diehl. * Status: Experimental. * Author: David Basden <davidb-irda@rcpt.to> * Created at: Thu Feb 09 23:47:32 2006 * * Copyright (c) 2006 David Basden. * Copyright (c) 1998-1999 Dag Brattli, * Copyright (c) 2002 Martin Diehl, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * This driver has currently only been tested on the IRWave IR320ST-2 * * PROTOCOL: * * The protocol for talking to the TOIM3232 is quite easy, and is * designed to interface with RS232 with only level convertors. The * BR/~D line on the chip is brought high to signal 'command mode', * where a command byte is sent to select the baudrate of the RS232 * interface and the pulse length of the IRDA output. When BR/~D * is brought low, the dongle then changes to the selected baudrate, * and the RS232 interface is used for data until BR/~D is brought * high again. The initial speed for the TOIMx323 after RESET is * 9600 baud. The baudrate for command-mode is the last selected * baud-rate, or 9600 after a RESET. 
* * The dongle I have (below) adds some extra hardware on the front end, * but this is mostly directed towards pariasitic power from the RS232 * line rather than changing very much about how to communicate with * the TOIM3232. * * The protocol to talk to the TOIM4232 chipset seems to be almost * identical to the TOIM3232 (and the 4232 datasheet is more detailed) * so this code will probably work on that as well, although I haven't * tested it on that hardware. * * Target dongle variations that might be common: * * DTR and RTS function: * The data sheet for the 4232 has a sample implementation that hooks the * DTR and RTS lines to the RESET and BaudRate/~Data lines of the * chip (through line-converters). Given both DTR and RTS would have to * be held low in normal operation, and the TOIMx232 requires +5V to * signal ground, most dongle designers would almost certainly choose * an implementation that kept at least one of DTR or RTS high in * normal operation to provide power to the dongle, but will likely * vary between designs. * * User specified command bits: * There are two user-controllable output lines from the TOIMx232 that * can be set low or high by setting the appropriate bits in the * high-nibble of the command byte (when setting speed and pulse length). * These might be used to switch on and off added hardware or extra * dongle features. * * * Target hardware: IRWave IR320ST-2 * * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver. * It uses a hex inverter and some discrete components to buffer and * line convert the RS232 down to 5V. * * The dongle is powered through a voltage regulator, fed by a large * capacitor. To switch the dongle on, DTR is brought high to charge * the capacitor and drive the voltage regulator. DTR isn't associated * with any control lines on the TOIM3232. 
Parisitic power is also taken * from the RTS, TD and RD lines when brought high, but through resistors. * When DTR is low, the circuit might lose power even with RTS high. * * RTS is inverted and attached to the BR/~D input pin. When RTS * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode. * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command * mode'. * * For some unknown reason, the RESET line isn't actually connected * to anything. This means to reset the dongle to get it to a known * state (9600 baud) you must drop DTR and RTS low, wait for the power * capacitor to discharge, and then bring DTR (and RTS for data mode) * high again, and wait for the capacitor to charge, the power supply * to stabilise, and the oscillator clock to stabilise. * * Fortunately, if the current baudrate is known, the chipset can * easily change speed by entering command mode without having to * reset the dongle first. * * Major Components: * * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings * to IRDA pulse timings * - 3.6864MHz crystal to drive TOIM3232 clock oscillator * - DM74lS04M Inverting Hex line buffer for RS232 input buffering * and level conversion * - PJ2951AC 150mA voltage regulator * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver * */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <net/irda/irda.h> #include "sir-dev.h" static int toim3232delay = 150; /* default is 150 ms */ module_param(toim3232delay, int, 0); MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); #if 0 static int toim3232flipdtr = 0; /* default is DTR high to reset */ module_param(toim3232flipdtr, int, 0); MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); static int toim3232fliprts = 0; /* default is RTS high for baud change */ module_param(toim3232fliptrs, int, 0); MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); #endif 
static int toim3232_open(struct sir_dev *); static int toim3232_close(struct sir_dev *); static int toim3232_change_speed(struct sir_dev *, unsigned); static int toim3232_reset(struct sir_dev *); #define TOIM3232_115200 0x00 #define TOIM3232_57600 0x01 #define TOIM3232_38400 0x02 #define TOIM3232_19200 0x03 #define TOIM3232_9600 0x06 #define TOIM3232_2400 0x0A #define TOIM3232_PW 0x10 /* Pulse select bit */ static struct dongle_driver toim3232 = { .owner = THIS_MODULE, .driver_name = "Vishay TOIM3232", .type = IRDA_TOIM3232_DONGLE, .open = toim3232_open, .close = toim3232_close, .reset = toim3232_reset, .set_speed = toim3232_change_speed, }; static int __init toim3232_sir_init(void) { if (toim3232delay < 1 || toim3232delay > 500) toim3232delay = 200; IRDA_DEBUG(1, "%s - using %d ms delay\n", toim3232.driver_name, toim3232delay); return irda_register_dongle(&toim3232); } static void __exit toim3232_sir_cleanup(void) { irda_unregister_dongle(&toim3232); } static int toim3232_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Pull the lines high to start with. * * For the IR320ST-2, we need to charge the main supply capacitor to * switch the device on. We keep DTR high throughout to do this. * When RTS, TD and RD are high, they will also trickle-charge the * cap. RTS is high for data transmission, and low for baud rate select. * -- DGB */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* The TOI3232 supports many speeds between 1200bps and 115000bps. * We really only care about those supported by the IRDA spec, but * 38400 seems to be implemented in many places */ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; /* From the tekram driver. 
Not sure what a reasonable value is -- DGB */ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int toim3232_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function toim3232change_speed (dev, state, speed) * * Set the speed for the TOIM3232 based dongle. Warning, this * function must be called with a process context! * * Algorithm * 1. keep DTR high but clear RTS to bring into baud programming mode * 2. wait at least 7us to enter programming mode * 3. send control word to set baud rate and timing * 4. wait at least 1us * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver) * 6. should take effect immediately (although probably worth waiting) */ #define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) { unsigned state = dev->fsm.substate; unsigned delay = 0; u8 byte; static int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__); switch(state) { case SIRDEV_STATE_DONGLE_SPEED: /* Figure out what we are going to send as a control byte */ switch (speed) { case 2400: byte = TOIM3232_PW|TOIM3232_2400; break; default: speed = 9600; ret = -EINVAL; /* fall thru */ case 9600: byte = TOIM3232_PW|TOIM3232_9600; break; case 19200: byte = TOIM3232_PW|TOIM3232_19200; break; case 38400: byte = TOIM3232_PW|TOIM3232_38400; break; case 57600: byte = TOIM3232_PW|TOIM3232_57600; break; case 115200: byte = TOIM3232_115200; break; } /* Set DTR, Clear RTS: Go into baud programming mode */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Wait at least 7us */ udelay(14); /* Write control byte */ sirdev_raw_write(dev, &byte, 1); dev->speed = speed; state = TOIM3232_STATE_WAIT_SPEED; delay = toim3232delay; break; case TOIM3232_STATE_WAIT_SPEED: /* Have transmitted control byte * Wait for 'at least 1us' */ 
udelay(14); /* Set DTR, Set RTS: Go into normal data mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait (TODO: check this is needed) */ udelay(50); break; default: printk(KERN_ERR "%s - undefined state %d\n", __func__, state); ret = -EINVAL; break; } dev->fsm.substate = state; return (delay > 0) ? delay : ret; } /* * Function toim3232reset (driver) * * This function resets the toim3232 dongle. Warning, this function * must be called with a process context!! * * What we should do is: * 0. Pull RESET high * 1. Wait for at least 7us * 2. Pull RESET low * 3. Wait for at least 7us * 4. Pull BR/~D high * 5. Wait for at least 7us * 6. Send control byte to set baud rate * 7. Wait at least 1us after stop bit * 8. Pull BR/~D low * 9. Should then be in data mode * * Because the IR320ST-2 doesn't have the RESET line connected for some reason, * we'll have to do something else. * * The default speed after a RESET is 9600, so lets try just bringing it up in * data mode after switching it off, waiting for the supply capacitor to * discharge, and then switch it back on. This isn't actually pulling RESET * high, but it seems to have the same effect. * * This behaviour will probably work on dongles that have the RESET line connected, * but if not, add a flag for the IR320ST-2, and implment the above-listed proper * behaviour. * * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we * need to have pull RTS low */ static int toim3232_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Switch off both DTR and RTS to switch off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); /* Should sleep a while. 
This might be evil doing it this way.*/ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(50)); /* Set DTR, Set RTS (data mode) */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait at least 10 ms for power to stabilize again */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); /* Speed should now be 9600 */ dev->speed = 9600; return 0; } MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>"); MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */ module_init(toim3232_sir_init); module_exit(toim3232_sir_cleanup);
gpl-2.0
MoKee/android_kernel_amazon_otter-common
crypto/async_tx/async_memset.c
9914
2645
/* * memory fill offload engine support * * Copyright © 2006, Intel Corporation. * * Dan Williams <dan.j.williams@intel.com> * * with architecture considerations by: * Neil Brown <neilb@suse.de> * Jeff Garzik <jeff@garzik.org> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/async_tx.h> /** * async_memset - attempt to fill memory with a dma engine. * @dest: destination page * @val: fill value * @offset: offset in pages to start transaction * @len: length in bytes * * honored flags: ASYNC_TX_ACK */ struct dma_async_tx_descriptor * async_memset(struct page *dest, int val, unsigned int offset, size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, &dest, 1, NULL, 0, len); struct dma_device *device = chan ? 
chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; if (device && is_dma_fill_aligned(device, offset, 0, len)) { dma_addr_t dma_dest; unsigned long dma_prep_flags = 0; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, offset, len, DMA_FROM_DEVICE); tx = device->device_prep_dma_memset(chan, dma_dest, val, len, dma_prep_flags); } if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); async_tx_submit(chan, tx, submit); } else { /* run the memset synchronously */ void *dest_buf; pr_debug("%s: (sync) len: %zu\n", __func__, len); dest_buf = page_address(dest) + offset; /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); memset(dest_buf, val, len); async_tx_sync_epilog(submit); } return tx; } EXPORT_SYMBOL_GPL(async_memset); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous memset api"); MODULE_LICENSE("GPL");
gpl-2.0
JmzTaylor/android_kernel_htc_a32e
arch/sh/boards/mach-sdk7786/irq.c
13242
1090
/* * SDK7786 FPGA IRQ Controller Support. * * Copyright (C) 2010 Matt Fleming * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/irq.h> #include <mach/fpga.h> #include <mach/irq.h> enum { ATA_IRQ_BIT = 1, SPI_BUSY_BIT = 2, LIRQ5_BIT = 3, LIRQ6_BIT = 4, LIRQ7_BIT = 5, LIRQ8_BIT = 6, KEY_IRQ_BIT = 7, PEN_IRQ_BIT = 8, ETH_IRQ_BIT = 9, RTC_ALARM_BIT = 10, CRYSTAL_FAIL_BIT = 12, ETH_PME_BIT = 14, }; void __init sdk7786_init_irq(void) { unsigned int tmp; /* Enable priority encoding for all IRLs */ fpga_write_reg(fpga_read_reg(INTMSR) | 0x0303, INTMSR); /* Clear FPGA interrupt status registers */ fpga_write_reg(0x0000, INTASR); fpga_write_reg(0x0000, INTBSR); /* Unmask FPGA interrupts */ tmp = fpga_read_reg(INTAMR); tmp &= ~(1 << ETH_IRQ_BIT); fpga_write_reg(tmp, INTAMR); plat_irq_setup_pins(IRQ_MODE_IRL7654_MASK); plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK); }
gpl-2.0
RidaShamasneh/nethunter_kernel_g5
arch/powerpc/boot/ebony.c
14010
2504
/* * Copyright 2007 David Gibson, IBM Corporation. * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" static u8 *ebony_mac0, *ebony_mac1; #define EBONY_FPGA_PATH "/plb/opb/ebc/fpga" #define EBONY_FPGA_FLASH_SEL 0x01 #define EBONY_SMALL_FLASH_PATH "/plb/opb/ebc/small-flash" static void ebony_flashsel_fixup(void) { void *devp; u32 reg[3] = {0x0, 0x0, 0x80000}; u8 *fpga; u8 fpga_reg0 = 0x0; devp = finddevice(EBONY_FPGA_PATH); if (!devp) fatal("Couldn't locate FPGA node %s\n\r", EBONY_FPGA_PATH); if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga)) fatal("%s has missing or invalid virtual-reg property\n\r", EBONY_FPGA_PATH); fpga_reg0 = in_8(fpga); devp = finddevice(EBONY_SMALL_FLASH_PATH); if (!devp) fatal("Couldn't locate small flash node %s\n\r", EBONY_SMALL_FLASH_PATH); if (getprop(devp, "reg", reg, sizeof(reg)) != sizeof(reg)) fatal("%s has reg property of unexpected size\n\r", EBONY_SMALL_FLASH_PATH); /* Invert address bit 14 (IBM-endian) if FLASH_SEL fpga bit is set */ if (fpga_reg0 & EBONY_FPGA_FLASH_SEL) reg[1] ^= 0x80000; setprop(devp, "reg", reg, sizeof(reg)); } static void ebony_fixups(void) { // FIXME: sysclk should be derived by reading the FPGA registers unsigned long sysclk = 33000000; ibm440gp_fixup_clocks(sysclk, 6 * 1843200); 
ibm4xx_sdram_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", ebony_mac0); dt_fixup_mac_address_by_alias("ethernet1", ebony_mac1); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ebony_flashsel_fixup(); } void ebony_init(void *mac0, void *mac1) { platform_ops.fixups = ebony_fixups; platform_ops.exit = ibm44x_dbcr_reset; ebony_mac0 = mac0; ebony_mac1 = mac1; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
greguu/linux-4.9-rc3-c3x00
arch/x86/kernel/asm-offsets_32.c
187
2891
#ifndef __LINUX_KBUILD_H # error "Please do not build this file directly, build asm-offsets.c instead" #endif #include <asm/ucontext.h> #include <linux/lguest.h> #include "../../../drivers/lguest/lg.h" #define __SYSCALL_I386(nr, sym, qual) [nr] = 1, static char syscalls[] = { #include <asm/syscalls_32.h> }; /* workaround for a warning with -Wmissing-prototypes */ void foo(void); void foo(void) { OFFSET(CPUINFO_x86, cpuinfo_x86, x86); OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); BLANK(); OFFSET(PT_EBX, pt_regs, bx); OFFSET(PT_ECX, pt_regs, cx); OFFSET(PT_EDX, pt_regs, dx); OFFSET(PT_ESI, pt_regs, si); OFFSET(PT_EDI, pt_regs, di); OFFSET(PT_EBP, pt_regs, bp); OFFSET(PT_EAX, pt_regs, ax); OFFSET(PT_DS, pt_regs, ds); OFFSET(PT_ES, pt_regs, es); OFFSET(PT_FS, pt_regs, fs); OFFSET(PT_GS, pt_regs, gs); OFFSET(PT_ORIG_EAX, pt_regs, orig_ax); OFFSET(PT_EIP, pt_regs, ip); OFFSET(PT_CS, pt_regs, cs); OFFSET(PT_EFLAGS, pt_regs, flags); OFFSET(PT_OLDESP, pt_regs, sp); OFFSET(PT_OLDSS, pt_regs, ss); BLANK(); OFFSET(saved_context_gdt_desc, saved_context, gdt_desc); BLANK(); /* Offset from the sysenter stack to tss.sp0 */ DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - offsetofend(struct tss_struct, SYSENTER_stack)); /* Offset from cpu_tss to SYSENTER_stack */ OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); /* Size of SYSENTER_stack */ DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); OFFSET(stack_canary_offset, stack_canary, canary); #endif #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) BLANK(); OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); 
OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending); BLANK(); OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc); OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3); OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp); OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc); OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc); OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt); OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum); OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); #endif BLANK(); DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); DEFINE(NR_syscalls, sizeof(syscalls)); }
gpl-2.0
shubhangi-shrivastava/drm-intel-nightly
drivers/tty/sysrq.c
187
26434
/* * Linux Magic System Request Key Hacks * * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> * based on ideas by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz> * * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> * overhauled to use key registration * based upon discusions in irc://irc.openprojects.net/#kernelnewbies * * Copyright (c) 2010 Dmitry Torokhov * Input handler conversion */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/reboot.h> #include <linux/sysrq.h> #include <linux/kbd_kern.h> #include <linux/proc_fs.h> #include <linux/nmi.h> #include <linux/quotaops.h> #include <linux/perf_event.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/suspend.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/spinlock.h> #include <linux/vt_kern.h> #include <linux/workqueue.h> #include <linux/hrtimer.h> #include <linux/oom.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/uaccess.h> #include <linux/moduleparam.h> #include <linux/jiffies.h> #include <linux/syscalls.h> #include <linux/of.h> #include <linux/rcupdate.h> #include <asm/ptrace.h> #include <asm/irq_regs.h> /* Whether we react on sysrq keys or just ignore them */ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static bool __read_mostly sysrq_always_enabled; static bool sysrq_on(void) { return sysrq_enabled || sysrq_always_enabled; } /* * A value of 1 means 'all', other nonzero values are an op mask: */ static bool sysrq_on_mask(int mask) { return sysrq_always_enabled || sysrq_enabled == 1 || (sysrq_enabled & mask); } static int __init sysrq_always_enabled_setup(char *str) { sysrq_always_enabled = true; pr_info("sysrq always enabled.\n"); return 1; } __setup("sysrq_always_enabled", 
sysrq_always_enabled_setup); static void sysrq_handle_loglevel(int key) { int i; i = key - '0'; console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; pr_info("Loglevel set to %d\n", i); console_loglevel = i; } static struct sysrq_key_op sysrq_loglevel_op = { .handler = sysrq_handle_loglevel, .help_msg = "loglevel(0-9)", .action_msg = "Changing Loglevel", .enable_mask = SYSRQ_ENABLE_LOG, }; #ifdef CONFIG_VT static void sysrq_handle_SAK(int key) { struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; schedule_work(SAK_work); } static struct sysrq_key_op sysrq_SAK_op = { .handler = sysrq_handle_SAK, .help_msg = "sak(k)", .action_msg = "SAK", .enable_mask = SYSRQ_ENABLE_KEYBOARD, }; #else #define sysrq_SAK_op (*(struct sysrq_key_op *)NULL) #endif #ifdef CONFIG_VT static void sysrq_handle_unraw(int key) { vt_reset_unicode(fg_console); } static struct sysrq_key_op sysrq_unraw_op = { .handler = sysrq_handle_unraw, .help_msg = "unraw(r)", .action_msg = "Keyboard mode set to system default", .enable_mask = SYSRQ_ENABLE_KEYBOARD, }; #else #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL) #endif /* CONFIG_VT */ static void sysrq_handle_crash(int key) { char *killer = NULL; panic_on_oops = 1; /* force panic */ wmb(); *killer = 1; } static struct sysrq_key_op sysrq_crash_op = { .handler = sysrq_handle_crash, .help_msg = "crash(c)", .action_msg = "Trigger a crash", .enable_mask = SYSRQ_ENABLE_DUMP, }; static void sysrq_handle_reboot(int key) { lockdep_off(); local_irq_enable(); emergency_restart(); } static struct sysrq_key_op sysrq_reboot_op = { .handler = sysrq_handle_reboot, .help_msg = "reboot(b)", .action_msg = "Resetting", .enable_mask = SYSRQ_ENABLE_BOOT, }; static void sysrq_handle_sync(int key) { emergency_sync(); } static struct sysrq_key_op sysrq_sync_op = { .handler = sysrq_handle_sync, .help_msg = "sync(s)", .action_msg = "Emergency Sync", .enable_mask = SYSRQ_ENABLE_SYNC, }; static void sysrq_handle_show_timers(int key) { sysrq_timer_list_show(); } static struct 
sysrq_key_op sysrq_show_timers_op = { .handler = sysrq_handle_show_timers, .help_msg = "show-all-timers(q)", .action_msg = "Show clockevent devices & pending hrtimers (no others)", }; static void sysrq_handle_mountro(int key) { emergency_remount(); } static struct sysrq_key_op sysrq_mountro_op = { .handler = sysrq_handle_mountro, .help_msg = "unmount(u)", .action_msg = "Emergency Remount R/O", .enable_mask = SYSRQ_ENABLE_REMOUNT, }; #ifdef CONFIG_LOCKDEP static void sysrq_handle_showlocks(int key) { debug_show_all_locks(); } static struct sysrq_key_op sysrq_showlocks_op = { .handler = sysrq_handle_showlocks, .help_msg = "show-all-locks(d)", .action_msg = "Show Locks Held", }; #else #define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL) #endif #ifdef CONFIG_SMP static DEFINE_SPINLOCK(show_lock); static void showacpu(void *dummy) { unsigned long flags; /* Idle CPUs have no interesting backtrace. */ if (idle_cpu(smp_processor_id())) return; spin_lock_irqsave(&show_lock, flags); pr_info("CPU%d:\n", smp_processor_id()); show_stack(NULL, NULL); spin_unlock_irqrestore(&show_lock, flags); } static void sysrq_showregs_othercpus(struct work_struct *dummy) { smp_call_function(showacpu, NULL, 0); } static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); static void sysrq_handle_showallcpus(int key) { /* * Fall back to the workqueue based printing if the * backtrace printing did not succeed or the * architecture has no support for it: */ if (!trigger_all_cpu_backtrace()) { struct pt_regs *regs = get_irq_regs(); if (regs) { pr_info("CPU%d:\n", smp_processor_id()); show_regs(regs); } schedule_work(&sysrq_showallcpus); } } static struct sysrq_key_op sysrq_showallcpus_op = { .handler = sysrq_handle_showallcpus, .help_msg = "show-backtrace-all-active-cpus(l)", .action_msg = "Show backtrace of all active CPUs", .enable_mask = SYSRQ_ENABLE_DUMP, }; #endif static void sysrq_handle_showregs(int key) { struct pt_regs *regs = get_irq_regs(); if (regs) show_regs(regs); 
perf_event_print_debug(); } static struct sysrq_key_op sysrq_showregs_op = { .handler = sysrq_handle_showregs, .help_msg = "show-registers(p)", .action_msg = "Show Regs", .enable_mask = SYSRQ_ENABLE_DUMP, }; static void sysrq_handle_showstate(int key) { show_state(); show_workqueue_state(); } static struct sysrq_key_op sysrq_showstate_op = { .handler = sysrq_handle_showstate, .help_msg = "show-task-states(t)", .action_msg = "Show State", .enable_mask = SYSRQ_ENABLE_DUMP, }; static void sysrq_handle_showstate_blocked(int key) { show_state_filter(TASK_UNINTERRUPTIBLE); } static struct sysrq_key_op sysrq_showstate_blocked_op = { .handler = sysrq_handle_showstate_blocked, .help_msg = "show-blocked-tasks(w)", .action_msg = "Show Blocked State", .enable_mask = SYSRQ_ENABLE_DUMP, }; #ifdef CONFIG_TRACING #include <linux/ftrace.h> static void sysrq_ftrace_dump(int key) { ftrace_dump(DUMP_ALL); } static struct sysrq_key_op sysrq_ftrace_dump_op = { .handler = sysrq_ftrace_dump, .help_msg = "dump-ftrace-buffer(z)", .action_msg = "Dump ftrace buffer", .enable_mask = SYSRQ_ENABLE_DUMP, }; #else #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL) #endif static void sysrq_handle_showmem(int key) { show_mem(0); } static struct sysrq_key_op sysrq_showmem_op = { .handler = sysrq_handle_showmem, .help_msg = "show-memory-usage(m)", .action_msg = "Show Memory", .enable_mask = SYSRQ_ENABLE_DUMP, }; /* * Signal sysrq helper function. Sends a signal to all user processes. 
*/ static void send_sig_all(int sig) { struct task_struct *p; read_lock(&tasklist_lock); for_each_process(p) { if (p->flags & PF_KTHREAD) continue; if (is_global_init(p)) continue; do_send_sig_info(sig, SEND_SIG_FORCED, p, true); } read_unlock(&tasklist_lock); } static void sysrq_handle_term(int key) { send_sig_all(SIGTERM); console_loglevel = CONSOLE_LOGLEVEL_DEBUG; } static struct sysrq_key_op sysrq_term_op = { .handler = sysrq_handle_term, .help_msg = "terminate-all-tasks(e)", .action_msg = "Terminate All Tasks", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; static void moom_callback(struct work_struct *ignored) { const gfp_t gfp_mask = GFP_KERNEL; struct oom_control oc = { .zonelist = node_zonelist(first_memory_node, gfp_mask), .nodemask = NULL, .gfp_mask = gfp_mask, .order = -1, }; mutex_lock(&oom_lock); if (!out_of_memory(&oc)) pr_info("OOM request ignored because killer is disabled\n"); mutex_unlock(&oom_lock); } static DECLARE_WORK(moom_work, moom_callback); static void sysrq_handle_moom(int key) { schedule_work(&moom_work); } static struct sysrq_key_op sysrq_moom_op = { .handler = sysrq_handle_moom, .help_msg = "memory-full-oom-kill(f)", .action_msg = "Manual OOM execution", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; #ifdef CONFIG_BLOCK static void sysrq_handle_thaw(int key) { emergency_thaw_all(); } static struct sysrq_key_op sysrq_thaw_op = { .handler = sysrq_handle_thaw, .help_msg = "thaw-filesystems(j)", .action_msg = "Emergency Thaw of all frozen filesystems", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; #endif static void sysrq_handle_kill(int key) { send_sig_all(SIGKILL); console_loglevel = CONSOLE_LOGLEVEL_DEBUG; } static struct sysrq_key_op sysrq_kill_op = { .handler = sysrq_handle_kill, .help_msg = "kill-all-tasks(i)", .action_msg = "Kill All Tasks", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; static void sysrq_handle_unrt(int key) { normalize_rt_tasks(); } static struct sysrq_key_op sysrq_unrt_op = { .handler = sysrq_handle_unrt, .help_msg = "nice-all-RT-tasks(n)", 
.action_msg = "Nice All RT Tasks", .enable_mask = SYSRQ_ENABLE_RTNICE, }; /* Key Operations table and lock */ static DEFINE_SPINLOCK(sysrq_key_table_lock); static struct sysrq_key_op *sysrq_key_table[36] = { &sysrq_loglevel_op, /* 0 */ &sysrq_loglevel_op, /* 1 */ &sysrq_loglevel_op, /* 2 */ &sysrq_loglevel_op, /* 3 */ &sysrq_loglevel_op, /* 4 */ &sysrq_loglevel_op, /* 5 */ &sysrq_loglevel_op, /* 6 */ &sysrq_loglevel_op, /* 7 */ &sysrq_loglevel_op, /* 8 */ &sysrq_loglevel_op, /* 9 */ /* * a: Don't use for system provided sysrqs, it is handled specially on * sparc and will never arrive. */ NULL, /* a */ &sysrq_reboot_op, /* b */ &sysrq_crash_op, /* c & ibm_emac driver debug */ &sysrq_showlocks_op, /* d */ &sysrq_term_op, /* e */ &sysrq_moom_op, /* f */ /* g: May be registered for the kernel debugger */ NULL, /* g */ NULL, /* h - reserved for help */ &sysrq_kill_op, /* i */ #ifdef CONFIG_BLOCK &sysrq_thaw_op, /* j */ #else NULL, /* j */ #endif &sysrq_SAK_op, /* k */ #ifdef CONFIG_SMP &sysrq_showallcpus_op, /* l */ #else NULL, /* l */ #endif &sysrq_showmem_op, /* m */ &sysrq_unrt_op, /* n */ /* o: This will often be registered as 'Off' at init time */ NULL, /* o */ &sysrq_showregs_op, /* p */ &sysrq_show_timers_op, /* q */ &sysrq_unraw_op, /* r */ &sysrq_sync_op, /* s */ &sysrq_showstate_op, /* t */ &sysrq_mountro_op, /* u */ /* v: May be registered for frame buffer console restore */ NULL, /* v */ &sysrq_showstate_blocked_op, /* w */ /* x: May be registered on mips for TLB dump */ /* x: May be registered on ppc/powerpc for xmon */ /* x: May be registered on sparc64 for global PMU dump */ NULL, /* x */ /* y: May be registered on sparc64 for global register dump */ NULL, /* y */ &sysrq_ftrace_dump_op, /* z */ }; /* key2index calculation, -1 on invalid index */ static int sysrq_key_table_key2index(int key) { int retval; if ((key >= '0') && (key <= '9')) retval = key - '0'; else if ((key >= 'a') && (key <= 'z')) retval = key + 10 - 'a'; else retval = -1; return retval; } 
/* * get and put functions for the table, exposed to modules. */ struct sysrq_key_op *__sysrq_get_key_op(int key) { struct sysrq_key_op *op_p = NULL; int i; i = sysrq_key_table_key2index(key); if (i != -1) op_p = sysrq_key_table[i]; return op_p; } static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p) { int i = sysrq_key_table_key2index(key); if (i != -1) sysrq_key_table[i] = op_p; } void __handle_sysrq(int key, bool check_mask) { struct sysrq_key_op *op_p; int orig_log_level; int i; rcu_sysrq_start(); rcu_read_lock(); /* * Raise the apparent loglevel to maximum so that the sysrq header * is shown to provide the user with positive feedback. We do not * simply emit this at KERN_EMERG as that would change message * routing in the consumers of /proc/kmsg. */ orig_log_level = console_loglevel; console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; pr_info("SysRq : "); op_p = __sysrq_get_key_op(key); if (op_p) { /* * Should we check for enabled operations (/proc/sysrq-trigger * should not) and is the invoked operation enabled? 
*/ if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { pr_cont("%s\n", op_p->action_msg); console_loglevel = orig_log_level; op_p->handler(key); } else { pr_cont("This sysrq operation is disabled.\n"); } } else { pr_cont("HELP : "); /* Only print the help msg once per handler */ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) { if (sysrq_key_table[i]) { int j; for (j = 0; sysrq_key_table[i] != sysrq_key_table[j]; j++) ; if (j != i) continue; pr_cont("%s ", sysrq_key_table[i]->help_msg); } } pr_cont("\n"); console_loglevel = orig_log_level; } rcu_read_unlock(); rcu_sysrq_end(); } void handle_sysrq(int key) { if (sysrq_on()) __handle_sysrq(key, true); } EXPORT_SYMBOL(handle_sysrq); #ifdef CONFIG_INPUT static int sysrq_reset_downtime_ms; /* Simple translation table for the SysRq keys */ static const unsigned char sysrq_xlate[KEY_CNT] = "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */ "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */ "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ "\r\000/"; /* 0x60 - 0x6f */ struct sysrq_state { struct input_handle handle; struct work_struct reinject_work; unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; unsigned int alt; unsigned int alt_use; bool active; bool need_reinject; bool reinjecting; /* reset sequence handling */ bool reset_canceled; bool reset_requested; unsigned long reset_keybit[BITS_TO_LONGS(KEY_CNT)]; int reset_seq_len; int reset_seq_cnt; int reset_seq_version; struct timer_list keyreset_timer; }; #define SYSRQ_KEY_RESET_MAX 20 /* Should be plenty */ static unsigned short sysrq_reset_seq[SYSRQ_KEY_RESET_MAX]; static unsigned int sysrq_reset_seq_len; static unsigned int sysrq_reset_seq_version = 1; static void sysrq_parse_reset_sequence(struct sysrq_state *state) { int i; unsigned short key; state->reset_seq_cnt = 0; for (i = 0; i < 
sysrq_reset_seq_len; i++) { key = sysrq_reset_seq[i]; if (key == KEY_RESERVED || key > KEY_MAX) break; __set_bit(key, state->reset_keybit); state->reset_seq_len++; if (test_bit(key, state->key_down)) state->reset_seq_cnt++; } /* Disable reset until old keys are not released */ state->reset_canceled = state->reset_seq_cnt != 0; state->reset_seq_version = sysrq_reset_seq_version; } static void sysrq_do_reset(unsigned long _state) { struct sysrq_state *state = (struct sysrq_state *) _state; state->reset_requested = true; sys_sync(); kernel_restart(NULL); } static void sysrq_handle_reset_request(struct sysrq_state *state) { if (state->reset_requested) __handle_sysrq(sysrq_xlate[KEY_B], false); if (sysrq_reset_downtime_ms) mod_timer(&state->keyreset_timer, jiffies + msecs_to_jiffies(sysrq_reset_downtime_ms)); else sysrq_do_reset((unsigned long)state); } static void sysrq_detect_reset_sequence(struct sysrq_state *state, unsigned int code, int value) { if (!test_bit(code, state->reset_keybit)) { /* * Pressing any key _not_ in reset sequence cancels * the reset sequence. Also cancelling the timer in * case additional keys were pressed after a reset * has been requested. */ if (value && state->reset_seq_cnt) { state->reset_canceled = true; del_timer(&state->keyreset_timer); } } else if (value == 0) { /* * Key release - all keys in the reset sequence need * to be pressed and held for the reset timeout * to hold. 
*/ del_timer(&state->keyreset_timer); if (--state->reset_seq_cnt == 0) state->reset_canceled = false; } else if (value == 1) { /* key press, not autorepeat */ if (++state->reset_seq_cnt == state->reset_seq_len && !state->reset_canceled) { sysrq_handle_reset_request(state); } } } #ifdef CONFIG_OF static void sysrq_of_get_keyreset_config(void) { u32 key; struct device_node *np; struct property *prop; const __be32 *p; np = of_find_node_by_path("/chosen/linux,sysrq-reset-seq"); if (!np) { pr_debug("No sysrq node found"); return; } /* Reset in case a __weak definition was present */ sysrq_reset_seq_len = 0; of_property_for_each_u32(np, "keyset", prop, p, key) { if (key == KEY_RESERVED || key > KEY_MAX || sysrq_reset_seq_len == SYSRQ_KEY_RESET_MAX) break; sysrq_reset_seq[sysrq_reset_seq_len++] = (unsigned short)key; } /* Get reset timeout if any. */ of_property_read_u32(np, "timeout-ms", &sysrq_reset_downtime_ms); } #else static void sysrq_of_get_keyreset_config(void) { } #endif static void sysrq_reinject_alt_sysrq(struct work_struct *work) { struct sysrq_state *sysrq = container_of(work, struct sysrq_state, reinject_work); struct input_handle *handle = &sysrq->handle; unsigned int alt_code = sysrq->alt_use; if (sysrq->need_reinject) { /* we do not want the assignment to be reordered */ sysrq->reinjecting = true; mb(); /* Simulate press and release of Alt + SysRq */ input_inject_event(handle, EV_KEY, alt_code, 1); input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); input_inject_event(handle, EV_KEY, alt_code, 0); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); mb(); sysrq->reinjecting = false; } } static bool sysrq_handle_keypress(struct sysrq_state *sysrq, unsigned int code, int value) { bool was_active = sysrq->active; bool suppress; switch (code) { case KEY_LEFTALT: case KEY_RIGHTALT: if (!value) { /* One of ALTs is being released */ if (sysrq->active && code == 
sysrq->alt_use) sysrq->active = false; sysrq->alt = KEY_RESERVED; } else if (value != 2) { sysrq->alt = code; sysrq->need_reinject = false; } break; case KEY_SYSRQ: if (value == 1 && sysrq->alt != KEY_RESERVED) { sysrq->active = true; sysrq->alt_use = sysrq->alt; /* * If nothing else will be pressed we'll need * to re-inject Alt-SysRq keysroke. */ sysrq->need_reinject = true; } /* * Pretend that sysrq was never pressed at all. This * is needed to properly handle KGDB which will try * to release all keys after exiting debugger. If we * do not clear key bit it KGDB will end up sending * release events for Alt and SysRq, potentially * triggering print screen function. */ if (sysrq->active) clear_bit(KEY_SYSRQ, sysrq->handle.dev->key); break; default: if (sysrq->active && value && value != 2) { sysrq->need_reinject = false; __handle_sysrq(sysrq_xlate[code], true); } break; } suppress = sysrq->active; if (!sysrq->active) { /* * See if reset sequence has changed since the last time. */ if (sysrq->reset_seq_version != sysrq_reset_seq_version) sysrq_parse_reset_sequence(sysrq); /* * If we are not suppressing key presses keep track of * keyboard state so we can release keys that have been * pressed before entering SysRq mode. */ if (value) set_bit(code, sysrq->key_down); else clear_bit(code, sysrq->key_down); if (was_active) schedule_work(&sysrq->reinject_work); /* Check for reset sequence */ sysrq_detect_reset_sequence(sysrq, code, value); } else if (value == 0 && test_and_clear_bit(code, sysrq->key_down)) { /* * Pass on release events for keys that was pressed before * entering SysRq mode. */ suppress = false; } return suppress; } static bool sysrq_filter(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct sysrq_state *sysrq = handle->private; bool suppress; /* * Do not filter anything if we are in the process of re-injecting * Alt+SysRq combination. 
*/ if (sysrq->reinjecting) return false; switch (type) { case EV_SYN: suppress = false; break; case EV_KEY: suppress = sysrq_handle_keypress(sysrq, code, value); break; default: suppress = sysrq->active; break; } return suppress; } static int sysrq_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct sysrq_state *sysrq; int error; sysrq = kzalloc(sizeof(struct sysrq_state), GFP_KERNEL); if (!sysrq) return -ENOMEM; INIT_WORK(&sysrq->reinject_work, sysrq_reinject_alt_sysrq); sysrq->handle.dev = dev; sysrq->handle.handler = handler; sysrq->handle.name = "sysrq"; sysrq->handle.private = sysrq; setup_timer(&sysrq->keyreset_timer, sysrq_do_reset, (unsigned long)sysrq); error = input_register_handle(&sysrq->handle); if (error) { pr_err("Failed to register input sysrq handler, error %d\n", error); goto err_free; } error = input_open_device(&sysrq->handle); if (error) { pr_err("Failed to open input device, error %d\n", error); goto err_unregister; } return 0; err_unregister: input_unregister_handle(&sysrq->handle); err_free: kfree(sysrq); return error; } static void sysrq_disconnect(struct input_handle *handle) { struct sysrq_state *sysrq = handle->private; input_close_device(handle); cancel_work_sync(&sysrq->reinject_work); del_timer_sync(&sysrq->keyreset_timer); input_unregister_handle(handle); kfree(sysrq); } /* * We are matching on KEY_LEFTALT instead of KEY_SYSRQ because not all * keyboards have SysRq key predefined and so user may add it to keymap * later, but we expect all such keyboards to have left alt. 
*/ static const struct input_device_id sysrq_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { BIT_MASK(KEY_LEFTALT) }, }, { }, }; static struct input_handler sysrq_handler = { .filter = sysrq_filter, .connect = sysrq_connect, .disconnect = sysrq_disconnect, .name = "sysrq", .id_table = sysrq_ids, }; static bool sysrq_handler_registered; static inline void sysrq_register_handler(void) { int error; sysrq_of_get_keyreset_config(); error = input_register_handler(&sysrq_handler); if (error) pr_err("Failed to register input handler, error %d", error); else sysrq_handler_registered = true; } static inline void sysrq_unregister_handler(void) { if (sysrq_handler_registered) { input_unregister_handler(&sysrq_handler); sysrq_handler_registered = false; } } static int sysrq_reset_seq_param_set(const char *buffer, const struct kernel_param *kp) { unsigned long val; int error; error = kstrtoul(buffer, 0, &val); if (error < 0) return error; if (val > KEY_MAX) return -EINVAL; *((unsigned short *)kp->arg) = val; sysrq_reset_seq_version++; return 0; } static const struct kernel_param_ops param_ops_sysrq_reset_seq = { .get = param_get_ushort, .set = sysrq_reset_seq_param_set, }; #define param_check_sysrq_reset_seq(name, p) \ __param_check(name, p, unsigned short) module_param_array_named(reset_seq, sysrq_reset_seq, sysrq_reset_seq, &sysrq_reset_seq_len, 0644); module_param_named(sysrq_downtime_ms, sysrq_reset_downtime_ms, int, 0644); #else static inline void sysrq_register_handler(void) { } static inline void sysrq_unregister_handler(void) { } #endif /* CONFIG_INPUT */ int sysrq_toggle_support(int enable_mask) { bool was_enabled = sysrq_on(); sysrq_enabled = enable_mask; if (was_enabled != sysrq_on()) { if (sysrq_on()) sysrq_register_handler(); else sysrq_unregister_handler(); } return 0; } static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, struct sysrq_key_op *remove_op_p) { int 
retval; spin_lock(&sysrq_key_table_lock); if (__sysrq_get_key_op(key) == remove_op_p) { __sysrq_put_key_op(key, insert_op_p); retval = 0; } else { retval = -1; } spin_unlock(&sysrq_key_table_lock); /* * A concurrent __handle_sysrq either got the old op or the new op. * Wait for it to go away before returning, so the code for an old * op is not freed (eg. on module unload) while it is in use. */ synchronize_rcu(); return retval; } int register_sysrq_key(int key, struct sysrq_key_op *op_p) { return __sysrq_swap_key_ops(key, op_p, NULL); } EXPORT_SYMBOL(register_sysrq_key); int unregister_sysrq_key(int key, struct sysrq_key_op *op_p) { return __sysrq_swap_key_ops(key, NULL, op_p); } EXPORT_SYMBOL(unregister_sysrq_key); #ifdef CONFIG_PROC_FS /* * writing 'C' to /proc/sysrq-trigger is like sysrq-C */ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { char c; if (get_user(c, buf)) return -EFAULT; __handle_sysrq(c, false); } return count; } static const struct file_operations proc_sysrq_trigger_operations = { .write = write_sysrq_trigger, .llseek = noop_llseek, }; static void sysrq_init_procfs(void) { if (!proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations)) pr_err("Failed to register proc interface\n"); } #else static inline void sysrq_init_procfs(void) { } #endif /* CONFIG_PROC_FS */ static int __init sysrq_init(void) { sysrq_init_procfs(); if (sysrq_on()) sysrq_register_handler(); return 0; } module_init(sysrq_init);
gpl-2.0
ammula88/linux
arch/m32r/kernel/smp.c
443
23707
/* * linux/arch/m32r/kernel/smp.c * * M32R SMP support routines. * * Copyright (c) 2001, 2002 Hitoshi Yamamoto * * Taken from i386 version. * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> * * This code is released under the GNU General Public License version 2 or * later. */ #undef DEBUG_SMP #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/profile.h> #include <linux/cpu.h> #include <asm/cacheflush.h> #include <asm/pgalloc.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/m32r.h> #include <asm/tlbflush.h> /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Data structures and variables */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* * For flush_cache_all() */ static DEFINE_SPINLOCK(flushcache_lock); static volatile unsigned long flushcache_cpumask = 0; /* * For flush_tlb_others() */ static cpumask_t flush_cpumask; static struct mm_struct *flush_mm; static struct vm_area_struct *flush_vma; static volatile unsigned long flush_va; static DEFINE_SPINLOCK(tlbstate_lock); #define FLUSH_ALL 0xffffffff DECLARE_PER_CPU(int, prof_multiplier); DECLARE_PER_CPU(int, prof_old_multiplier); DECLARE_PER_CPU(int, prof_counter); extern spinlock_t ipi_lock[]; /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Function Prototypes */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ void smp_reschedule_interrupt(void); void smp_flush_cache_all_interrupt(void); static void flush_tlb_all_ipi(void *); static void flush_tlb_others(cpumask_t, struct mm_struct *, struct vm_area_struct *, unsigned long); void smp_invalidate_interrupt(void); static void stop_this_cpu(void *); void smp_ipi_timer_interrupt(struct pt_regs *); void 
smp_local_timer_interrupt(void); static void send_IPI_allbutself(int, int); static void send_IPI_mask(const struct cpumask *, int, int); /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Rescheduling request Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* * Name: smp_send_reschedule * * Description: This routine requests other CPU to execute rescheduling. * 1.Send 'RESCHEDULE_IPI' to other CPU. * Request other CPU to execute 'smp_reschedule_interrupt()'. * * Born on Date: 2002.02.05 * * Arguments: cpu_id - Target CPU ID * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_send_reschedule(int cpu_id) { WARN_ON(cpu_is_offline(cpu_id)); send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1); } /*==========================================================================* * Name: smp_reschedule_interrupt * * Description: This routine executes on CPU which received * 'RESCHEDULE_IPI'. * * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_reschedule_interrupt(void) { scheduler_ipi(); } /*==========================================================================* * Name: smp_flush_cache_all * * Description: This routine sends a 'INVALIDATE_CACHE_IPI' to all other * CPUs in the system. 
* * Born on Date: 2003-05-28 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_flush_cache_all(void) { cpumask_t cpumask; unsigned long *mask; preempt_disable(); cpumask_copy(&cpumask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpumask); spin_lock(&flushcache_lock); mask=cpumask_bits(&cpumask); atomic_or(*mask, (atomic_t *)&flushcache_cpumask); send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); _flush_cache_copyback_all(); while (flushcache_cpumask) mb(); spin_unlock(&flushcache_lock); preempt_enable(); } void smp_flush_cache_all_interrupt(void) { _flush_cache_copyback_all(); clear_bit(smp_processor_id(), &flushcache_cpumask); } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* TLB flush request Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* * Name: smp_flush_tlb_all * * Description: This routine flushes all processes TLBs. * 1.Request other CPU to execute 'flush_tlb_all_ipi()'. * 2.Execute 'do_flush_tlb_all_local()'. * * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_flush_tlb_all(void) { unsigned long flags; preempt_disable(); local_irq_save(flags); __flush_tlb_all(); local_irq_restore(flags); smp_call_function(flush_tlb_all_ipi, NULL, 1); preempt_enable(); } /*==========================================================================* * Name: flush_tlb_all_ipi * * Description: This routine flushes all local TLBs. * 1.Execute 'do_flush_tlb_all_local()'. 
* * Born on Date: 2002.02.05 * * Arguments: *info - not used * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ static void flush_tlb_all_ipi(void *info) { __flush_tlb_all(); } /*==========================================================================* * Name: smp_flush_tlb_mm * * Description: This routine flushes the specified mm context TLB's. * * Born on Date: 2002.02.05 * * Arguments: *mm - a pointer to the mm struct for flush TLB * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_flush_tlb_mm(struct mm_struct *mm) { int cpu_id; cpumask_t cpu_mask; unsigned long *mmc; unsigned long flags; preempt_disable(); cpu_id = smp_processor_id(); mmc = &mm->context[cpu_id]; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(cpu_id, &cpu_mask); if (*mmc != NO_CONTEXT) { local_irq_save(flags); *mmc = NO_CONTEXT; if (mm == current->mm) activate_context(mm); else cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); local_irq_restore(flags); } if (!cpumask_empty(&cpu_mask)) flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); preempt_enable(); } /*==========================================================================* * Name: smp_flush_tlb_range * * Description: This routine flushes a range of pages. 
* * Born on Date: 2002.02.05 * * Arguments: *mm - a pointer to the mm struct for flush TLB * start - not used * end - not used * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { smp_flush_tlb_mm(vma->vm_mm); } /*==========================================================================* * Name: smp_flush_tlb_page * * Description: This routine flushes one page. * * Born on Date: 2002.02.05 * * Arguments: *vma - a pointer to the vma struct include va * va - virtual address for flush TLB * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; int cpu_id; cpumask_t cpu_mask; unsigned long *mmc; unsigned long flags; preempt_disable(); cpu_id = smp_processor_id(); mmc = &mm->context[cpu_id]; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(cpu_id, &cpu_mask); #ifdef DEBUG_SMP if (!mm) BUG(); #endif if (*mmc != NO_CONTEXT) { local_irq_save(flags); va &= PAGE_MASK; va |= (*mmc & MMU_CONTEXT_ASID_MASK); __flush_tlb_page(va); local_irq_restore(flags); } if (!cpumask_empty(&cpu_mask)) flush_tlb_others(cpu_mask, mm, vma, va); preempt_enable(); } /*==========================================================================* * Name: flush_tlb_others * * Description: This routine requests other CPU to execute flush TLB. * 1.Setup parameters. * 2.Send 'INVALIDATE_TLB_IPI' to other CPU. * Request other CPU to execute 'smp_invalidate_interrupt()'. * 3.Wait for other CPUs operation finished. 
* * Born on Date: 2002.02.05 * * Arguments: cpumask - bitmap of target CPUs * *mm - a pointer to the mm struct for flush TLB * *vma - a pointer to the vma struct include va * va - virtual address for flush TLB * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long va) { unsigned long *mask; #ifdef DEBUG_SMP unsigned long flags; __save_flags(flags); if (!(flags & 0x0040)) /* Interrupt Disable NONONO */ BUG(); #endif /* DEBUG_SMP */ /* * A couple of (to be removed) sanity checks: * * - we do not send IPIs to not-yet booted CPUs. * - current CPU must not be in mask * - mask must exist :) */ BUG_ON(cpumask_empty(&cpumask)); BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask)); BUG_ON(!mm); /* If a CPU which we ran on has gone down, OK. */ cpumask_and(&cpumask, &cpumask, cpu_online_mask); if (cpumask_empty(&cpumask)) return; /* * i'm not happy about this global shared spinlock in the * MM hot path, but we'll see how contended it is. * Temporarily this turns IRQs off, so that lockups are * detected by the NMI watchdog. */ spin_lock(&tlbstate_lock); flush_mm = mm; flush_vma = vma; flush_va = va; mask=cpumask_bits(&cpumask); atomic_or(*mask, (atomic_t *)&flush_cpumask); /* * We have to send the IPI only to * CPUs affected. */ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); while (!cpumask_empty(&flush_cpumask)) { /* nothing. lockup detection does not belong here */ mb(); } flush_mm = NULL; flush_vma = NULL; flush_va = 0; spin_unlock(&tlbstate_lock); } /*==========================================================================* * Name: smp_invalidate_interrupt * * Description: This routine executes on CPU which received * 'INVALIDATE_TLB_IPI'. * 1.Flush local TLB. 
* 2.Report flush TLB process was finished. * * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_invalidate_interrupt(void) { int cpu_id = smp_processor_id(); unsigned long *mmc = &flush_mm->context[cpu_id]; if (!cpumask_test_cpu(cpu_id, &flush_cpumask)) return; if (flush_va == FLUSH_ALL) { *mmc = NO_CONTEXT; if (flush_mm == current->active_mm) activate_context(flush_mm); else cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm)); } else { unsigned long va = flush_va; if (*mmc != NO_CONTEXT) { va &= PAGE_MASK; va |= (*mmc & MMU_CONTEXT_ASID_MASK); __flush_tlb_page(va); } } cpumask_clear_cpu(cpu_id, &flush_cpumask); } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Stop CPU request Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* * Name: smp_send_stop * * Description: This routine requests stop all CPUs. * 1.Request other CPU to execute 'stop_this_cpu()'. * * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_send_stop(void) { smp_call_function(stop_this_cpu, NULL, 0); } /*==========================================================================* * Name: stop_this_cpu * * Description: This routine halt CPU. 
* * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ static void stop_this_cpu(void *dummy) { int cpu_id = smp_processor_id(); /* * Remove this CPU: */ set_cpu_online(cpu_id, false); /* * PSW IE = 1; * IMASK = 0; * goto SLEEP */ local_irq_disable(); outl(0, M32R_ICU_IMASK_PORTL); inl(M32R_ICU_IMASK_PORTL); /* dummy read */ local_irq_enable(); for ( ; ; ); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_IPI_mask(mask, CALL_FUNCTION_IPI, 0); } void arch_send_call_function_single_ipi(int cpu) { send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0); } /*==========================================================================* * Name: smp_call_function_interrupt * * Description: This routine executes on CPU which received * 'CALL_FUNCTION_IPI'. * * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_call_function_interrupt(void) { irq_enter(); generic_smp_call_function_interrupt(); irq_exit(); } void smp_call_function_single_interrupt(void) { irq_enter(); generic_smp_call_function_single_interrupt(); irq_exit(); } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Timer Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* * Name: smp_send_timer * * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs * in the system. 
* * Born on Date: 2002.02.05 * * Arguments: NONE * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_send_timer(void) { send_IPI_allbutself(LOCAL_TIMER_IPI, 1); } /*==========================================================================* * Name: smp_send_timer * * Description: This routine executes on CPU which received * 'LOCAL_TIMER_IPI'. * * Born on Date: 2002.02.05 * * Arguments: *regs - a pointer to the saved regster info * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ void smp_ipi_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs; old_regs = set_irq_regs(regs); irq_enter(); smp_local_timer_interrupt(); irq_exit(); set_irq_regs(old_regs); } /*==========================================================================* * Name: smp_local_timer_interrupt * * Description: Local timer interrupt handler. It does both profiling and * process statistics/rescheduling. * We do profiling in every local tick, statistics/rescheduling * happen only every 'profiling multiplier' ticks. The default * multiplier is 1 and it can be changed by writing the new * multiplier value into /proc/profile. * * Born on Date: 2002.02.05 * * Arguments: *regs - a pointer to the saved regster info * * Returns: void (cannot fail) * * Original: arch/i386/kernel/apic.c * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * 2003-06-24 hy use per_cpu structure. 
*==========================================================================*/ void smp_local_timer_interrupt(void) { int user = user_mode(get_irq_regs()); int cpu_id = smp_processor_id(); /* * The profiling function is SMP safe. (nothing can mess * around with "current", and the profiling counters are * updated with atomic operations). This is especially * useful with a profiling multiplier != 1 */ profile_tick(CPU_PROFILING); if (--per_cpu(prof_counter, cpu_id) <= 0) { /* * The multiplier may have changed since the last time we got * to this point as a result of the user writing to * /proc/profile. In this case we need to adjust the APIC * timer accordingly. * * Interrupts are already masked off at this point. */ per_cpu(prof_counter, cpu_id) = per_cpu(prof_multiplier, cpu_id); if (per_cpu(prof_counter, cpu_id) != per_cpu(prof_old_multiplier, cpu_id)) { per_cpu(prof_old_multiplier, cpu_id) = per_cpu(prof_counter, cpu_id); } update_process_times(user); } } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /* Send IPI Routines */ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*==========================================================================* * Name: send_IPI_allbutself * * Description: This routine sends a IPI to all other CPUs in the system. * * Born on Date: 2002.02.05 * * Arguments: ipi_num - Number of IPI * try - 0 : Send IPI certainly. * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. 
* * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ static void send_IPI_allbutself(int ipi_num, int try) { cpumask_t cpumask; cpumask_copy(&cpumask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpumask); send_IPI_mask(&cpumask, ipi_num, try); } /*==========================================================================* * Name: send_IPI_mask * * Description: This routine sends a IPI to CPUs in the system. * * Born on Date: 2002.02.05 * * Arguments: cpu_mask - Bitmap of target CPUs logical ID * ipi_num - Number of IPI * try - 0 : Send IPI certainly. * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. * * Returns: void (cannot fail) * * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) { cpumask_t physid_mask, tmp; int cpu_id, phys_id; int num_cpus = num_online_cpus(); if (num_cpus <= 1) /* NO MP */ return; cpumask_and(&tmp, cpumask, cpu_online_mask); BUG_ON(!cpumask_equal(cpumask, &tmp)); cpumask_clear(&physid_mask); for_each_cpu(cpu_id, cpumask) { if ((phys_id = cpu_to_physid(cpu_id)) != -1) cpumask_set_cpu(phys_id, &physid_mask); } send_IPI_mask_phys(&physid_mask, ipi_num, try); } /*==========================================================================* * Name: send_IPI_mask_phys * * Description: This routine sends a IPI to other CPUs in the system. * * Born on Date: 2002.02.05 * * Arguments: cpu_mask - Bitmap of target CPUs physical ID * ipi_num - Number of IPI * try - 0 : Send IPI certainly. * !0 : The following IPI is not sent when Target CPU * has not received the before IPI. * * Returns: IPICRi regster value. 
* * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num, int try) { spinlock_t *ipilock; volatile unsigned long *ipicr_addr; unsigned long ipicr_val; unsigned long my_physid_mask; unsigned long mask = cpumask_bits(physid_mask)[0]; if (mask & ~physids_coerce(phys_cpu_present_map)) BUG(); if (ipi_num >= NR_IPIS || ipi_num < 0) BUG(); mask <<= IPI_SHIFT; ipilock = &ipi_lock[ipi_num]; ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR + (ipi_num << 2)); my_physid_mask = ~(1 << smp_processor_id()); /* * lock ipi_lock[i] * check IPICRi == 0 * write IPICRi (send IPIi) * unlock ipi_lock[i] */ spin_lock(ipilock); __asm__ __volatile__ ( ";; CHECK IPICRi == 0 \n\t" ".fillinsn \n" "1: \n\t" "ld %0, @%1 \n\t" "and %0, %4 \n\t" "beqz %0, 2f \n\t" "bnez %3, 3f \n\t" "bra 1b \n\t" ";; WRITE IPICRi (send IPIi) \n\t" ".fillinsn \n" "2: \n\t" "st %2, @%1 \n\t" ".fillinsn \n" "3: \n\t" : "=&r"(ipicr_val) : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask) : "memory" ); spin_unlock(ipilock); return ipicr_val; }
gpl-2.0
rhcp011235/sgh-t989_kernel
drivers/usb/serial/mct_u232.c
1979
28443
/* * MCT (Magic Control Technology Corp.) USB RS232 Converter Driver * * Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is largely derived from the Belkin USB Serial Adapter Driver * (see belkin_sa.[ch]). All of the information about the device was acquired * by using SniffUSB on Windows98. For technical details see mct_u232.h. * * William G. Greathouse and Greg Kroah-Hartman provided great help on how to * do the reverse engineering and how to write a USB serial device driver. * * TO BE DONE, TO BE CHECKED: * DTR/RTS signal handling may be incomplete or incorrect. I have mainly * implemented what I have seen with SniffUSB or found in belkin_sa.c. * For further TODOs check also belkin_sa.c. * * TEST STATUS: * Basic tests have been performed with minicom/zmodem transfers and * modem dialing under Linux 2.4.0-test10 (for me it works fine). * * 04-Nov-2003 Bill Marr <marr at flex dot com> * - Mimic Windows driver by sending 2 USB 'device request' messages * following normal 'baud rate change' message. This allows data to be * transmitted to RS-232 devices which don't assert the 'CTS' signal. * * 10-Nov-2001 Wolfgang Grandegger * - Fixed an endianess problem with the baudrate selection for PowerPC. * * 06-Dec-2001 Martin Hamilton <martinh@gnu.org> * - Added support for the Belkin F5U109 DB9 adaptor * * 30-May-2001 Greg Kroah-Hartman * - switched from using spinlock to a semaphore, which fixes lots of * problems. * * 04-May-2001 Stelian Pop * - Set the maximum bulk output size for Sitecom U232-P25 model to 16 bytes * instead of the device reported 32 (using 32 bytes causes many data * loss, Windows driver uses 16 too). 
* * 02-May-2001 Stelian Pop * - Fixed the baud calculation for Sitecom U232-P25 model * * 08-Apr-2001 gb * - Identify version on module load. * * 06-Jan-2001 Cornel Ciocirlan * - Added support for Sitecom U232-P25 model (Product Id 0x0230) * - Added support for D-Link DU-H3SP USB BAY (Product Id 0x0200) * * 29-Nov-2000 Greg Kroah-Hartman * - Added device id table to fit with 2.4.0-test11 structure. * - took out DEAL_WITH_TWO_INT_IN_ENDPOINTS #define as it's not needed * (lots of things will change if/when the usb-serial core changes to * handle these issues. * * 27-Nov-2000 Wolfgang Grandegge * A version for kernel 2.4.0-test10 released to the Linux community * (via linux-usb-devel). */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/ioctl.h> #include "mct_u232.h" /* * Version Information */ #define DRIVER_VERSION "z2.1" /* Linux in-kernel version */ #define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>" #define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver" static int debug; /* * Function prototypes */ static int mct_u232_startup(struct usb_serial *serial); static void mct_u232_release(struct usb_serial *serial); static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); static void mct_u232_read_int_callback(struct urb *urb); static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); static void mct_u232_break_ctl(struct tty_struct *tty, int break_state); static int mct_u232_tiocmget(struct tty_struct *tty); static 
int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int mct_u232_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static int mct_u232_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); static void mct_u232_throttle(struct tty_struct *tty); static void mct_u232_unthrottle(struct tty_struct *tty); /* * All of the device info needed for the MCT USB-RS232 converter. */ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, { USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); static struct usb_driver mct_u232_driver = { .name = "mct_u232", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table_combined, .no_dynamic_id = 1, }; static struct usb_serial_driver mct_u232_device = { .driver = { .owner = THIS_MODULE, .name = "mct_u232", }, .description = "MCT U232", .usb_driver = &mct_u232_driver, .id_table = id_table_combined, .num_ports = 1, .open = mct_u232_open, .close = mct_u232_close, .dtr_rts = mct_u232_dtr_rts, .throttle = mct_u232_throttle, .unthrottle = mct_u232_unthrottle, .read_int_callback = mct_u232_read_int_callback, .set_termios = mct_u232_set_termios, .break_ctl = mct_u232_break_ctl, .tiocmget = mct_u232_tiocmget, .tiocmset = mct_u232_tiocmset, .attach = mct_u232_startup, .release = mct_u232_release, .ioctl = mct_u232_ioctl, .get_icount = mct_u232_get_icount, }; struct mct_u232_private { spinlock_t lock; unsigned int control_state; /* Modem Line Setting (TIOCM) */ unsigned char last_lcr; /* Line Control Register */ unsigned char last_lsr; /* Line Status Register */ unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ struct async_icount icount; wait_queue_head_t 
msr_wait; /* for handling sleeping while waiting for msr change to happen */ }; #define THROTTLED 0x01 /* * Handle vendor specific USB requests */ #define WDR_TIMEOUT 5000 /* default urb timeout */ /* * Later day 2.6.0-test kernels have new baud rates like B230400 which * we do not know how to support. We ignore them for the moment. */ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result) { *result = value; if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID || le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) { switch (value) { case 300: return 0x01; case 600: return 0x02; /* this one not tested */ case 1200: return 0x03; case 2400: return 0x04; case 4800: return 0x06; case 9600: return 0x08; case 19200: return 0x09; case 38400: return 0x0a; case 57600: return 0x0b; case 115200: return 0x0c; default: *result = 9600; return 0x08; } } else { /* FIXME: Can we use any divider - should we do divider = 115200/value; real baud = 115200/divider */ switch (value) { case 300: break; case 600: break; case 1200: break; case 2400: break; case 4800: break; case 9600: break; case 19200: break; case 38400: break; case 57600: break; case 115200: break; default: value = 9600; *result = 9600; } return 115200/value; } } static int mct_u232_set_baud_rate(struct tty_struct *tty, struct usb_serial *serial, struct usb_serial_port *port, speed_t value) { unsigned int divisor; int rc; unsigned char *buf; unsigned char cts_enable_byte = 0; speed_t speed; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; divisor = mct_u232_calculate_baud_rate(serial, value, &speed); put_unaligned_le32(cpu_to_le32(divisor), buf); rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_BAUD_RATE_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, WDR_TIMEOUT); if (rc < 0) /*FIXME: What value speed results */ dev_err(&port->dev, "Set BAUD 
RATE %d failed (error = %d)\n", value, rc); else tty_encode_baud_rate(tty, speed, speed); dbg("set_baud_rate: value: 0x%x, divisor: 0x%x", value, divisor); /* Mimic the MCT-supplied Windows driver (version 1.21P.0104), which always sends two extra USB 'device request' messages after the 'baud rate change' message. The actual functionality of the request codes in these messages is not fully understood but these particular codes are never seen in any operation besides a baud rate change. Both of these messages send a single byte of data. In the first message, the value of this byte is always zero. The second message has been determined experimentally to control whether data will be transmitted to a device which is not asserting the 'CTS' signal. If the second message's data byte is zero, data will be transmitted even if 'CTS' is not asserted (i.e. no hardware flow control). if the second message's data byte is nonzero (a value of 1 is used by this driver), data will not be transmitted to a device which is not asserting 'CTS'. 
*/ buf[0] = 0; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_UNKNOWN1_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST, rc); if (port && C_CRTSCTS(tty)) cts_enable_byte = 1; dbg("set_baud_rate: send second control message, data = %02X", cts_enable_byte); buf[0] = cts_enable_byte; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_CTS_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_CTS_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); kfree(buf); return rc; } /* mct_u232_set_baud_rate */ static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[0] = lcr; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_LINE_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&serial->dev->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); dbg("set_line_ctrl: 0x%x", lcr); kfree(buf); return rc; } /* mct_u232_set_line_ctrl */ static int mct_u232_set_modem_ctrl(struct usb_serial *serial, unsigned int control_state) { int rc; unsigned char mcr; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; mcr = MCT_U232_MCR_NONE; if (control_state & TIOCM_DTR) mcr |= MCT_U232_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= MCT_U232_MCR_RTS; buf[0] = mcr; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_MODEM_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&serial->dev->dev, "Set MODEM CTRL 
0x%x failed (error = %d)\n", mcr, rc); dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); kfree(buf); return rc; } /* mct_u232_set_modem_ctrl */ static int mct_u232_get_modem_stat(struct usb_serial *serial, unsigned char *msr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) { *msr = 0; return -ENOMEM; } rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCT_U232_GET_MODEM_STAT_REQUEST, MCT_U232_GET_REQUEST_TYPE, 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, WDR_TIMEOUT); if (rc < 0) { dev_err(&serial->dev->dev, "Get MODEM STATus failed (error = %d)\n", rc); *msr = 0; } else { *msr = buf[0]; } dbg("get_modem_stat: 0x%x", *msr); kfree(buf); return rc; } /* mct_u232_get_modem_stat */ static void mct_u232_msr_to_icount(struct async_icount *icount, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DDSR) icount->dsr++; if (msr & MCT_U232_MSR_DCTS) icount->cts++; if (msr & MCT_U232_MSR_DRI) icount->rng++; if (msr & MCT_U232_MSR_DCD) icount->dcd++; } /* mct_u232_msr_to_icount */ static void mct_u232_msr_to_state(unsigned int *control_state, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DSR) *control_state |= TIOCM_DSR; else *control_state &= ~TIOCM_DSR; if (msr & MCT_U232_MSR_CTS) *control_state |= TIOCM_CTS; else *control_state &= ~TIOCM_CTS; if (msr & MCT_U232_MSR_RI) *control_state |= TIOCM_RI; else *control_state &= ~TIOCM_RI; if (msr & MCT_U232_MSR_CD) *control_state |= TIOCM_CD; else *control_state &= ~TIOCM_CD; dbg("msr_to_state: msr=0x%x ==> state=0x%x", msr, *control_state); } /* mct_u232_msr_to_state */ /* * Driver's tty interface functions */ static int mct_u232_startup(struct usb_serial *serial) { struct mct_u232_private *priv; struct usb_serial_port *port, *rport; priv = kzalloc(sizeof(struct mct_u232_private), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); init_waitqueue_head(&priv->msr_wait); 
	usb_set_serial_port_data(serial->port[0], priv);
	init_waitqueue_head(&serial->port[0]->write_wait);

	/* Puh, that's dirty */
	/* Steal the second port's interrupt-in urb and use it as port 0's
	 * read urb; the device delivers bulk-style data on it. */
	port = serial->port[0];
	rport = serial->port[1];
	/* No unlinking, it wasn't submitted yet. */
	usb_free_urb(port->read_urb);
	port->read_urb = rport->interrupt_in_urb;
	rport->interrupt_in_urb = NULL;
	port->read_urb->context = port;

	return 0;
} /* mct_u232_startup */


/* Free the per-port private data; the usb-serial core frees the urbs. */
static void mct_u232_release(struct usb_serial *serial)
{
	struct mct_u232_private *priv;
	int i;

	dbg("%s", __func__);

	for (i = 0; i < serial->num_ports; ++i) {
		/* My special items, the standard routines free my urbs */
		priv = usb_get_serial_port_data(serial->port[i]);
		kfree(priv);
	}
} /* mct_u232_release */

/*
 * Open the port: reset line/modem control to a defined state, read the
 * initial modem status, then start the read and interrupt urbs.
 */
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	int retval = 0;
	unsigned int control_state;
	unsigned long flags;
	unsigned char last_lcr;
	unsigned char last_msr;

	dbg("%s port %d", __func__, port->number);

	/* Compensate for a hardware bug: although the Sitecom U232-P25
	 * device reports a maximum output packet size of 32 bytes,
	 * it seems to be able to accept only 16 bytes (and that's what
	 * SniffUSB says too...)
	 */
	if (le16_to_cpu(serial->dev->descriptor.idProduct)
						== MCT_U232_SITECOM_PID)
		port->bulk_out_size = 16;

	/* Do a defined restart: the normal serial device seems to
	 * always turn on DTR and RTS here, so do the same. I'm not
	 * sure if this is really necessary. But it should not harm
	 * either.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	if (tty && (tty->termios->c_cflag & CBAUD))
		priv->control_state = TIOCM_DTR | TIOCM_RTS;
	else
		priv->control_state = 0;

	/* default to 8N1 */
	priv->last_lcr = (MCT_U232_DATA_BITS_8 |
			  MCT_U232_PARITY_NONE |
			  MCT_U232_STOP_BITS_1);
	control_state = priv->control_state;
	last_lcr = priv->last_lcr;
	spin_unlock_irqrestore(&priv->lock, flags);
	/* the control messages sleep, so they run outside the spinlock */
	mct_u232_set_modem_ctrl(serial, control_state);
	mct_u232_set_line_ctrl(serial, last_lcr);

	/* Read modem status and update control state */
	mct_u232_get_modem_stat(serial, &last_msr);
	spin_lock_irqsave(&priv->lock, flags);
	priv->last_msr = last_msr;
	mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
	spin_unlock_irqrestore(&priv->lock, flags);

	port->read_urb->dev = port->serial->dev;
	retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
	if (retval) {
		dev_err(&port->dev,
			"usb_submit_urb(read bulk) failed pipe 0x%x err %d\n",
			port->read_urb->pipe, retval);
		goto error;
	}

	port->interrupt_in_urb->dev = port->serial->dev;
	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (retval) {
		/* undo the read urb submission above */
		usb_kill_urb(port->read_urb);
		dev_err(&port->dev,
			"usb_submit_urb(read int) failed pipe 0x%x err %d",
			port->interrupt_in_urb->pipe, retval);
		goto error;
	}
	return 0;

error:
	return retval;
} /* mct_u232_open */

/* Raise or drop DTR/RTS on open/close, guarded against disconnect. */
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
{
	unsigned int control_state;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);

	mutex_lock(&port->serial->disc_mutex);
	if (!port->serial->disconnected) {
		/* drop DTR and RTS */
		spin_lock_irq(&priv->lock);
		if (on)
			priv->control_state |= TIOCM_DTR | TIOCM_RTS;
		else
			priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
		control_state = priv->control_state;
		spin_unlock_irq(&priv->lock);
		mct_u232_set_modem_ctrl(port->serial, control_state);
	}
	mutex_unlock(&port->serial->disc_mutex);
}

/* Stop all in-flight urbs when the port is closed. */
static void mct_u232_close(struct usb_serial_port *port)
{
	dbg("%s port %d", __func__, port->number);

	if (port->serial->dev) {
		/* shutdown our urbs */
		usb_kill_urb(port->write_urb);
		usb_kill_urb(port->read_urb);
		usb_kill_urb(port->interrupt_in_urb);
	}
} /* mct_u232_close */


/*
 * Completion handler for both the interrupt-in urb and the "stolen"
 * bulk-style read urb (see mct_u232_startup): payloads longer than two
 * bytes are received data for the tty, two-byte payloads carry MSR/LSR
 * status.  Resubmits the urb on every non-fatal completion.
 */
static void mct_u232_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	struct usb_serial *serial = port->serial;
	struct tty_struct *tty;
	unsigned char *data = urb->transfer_buffer;
	int retval;
	int status = urb->status;
	unsigned long flags;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d",
		    __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d",
		    __func__, status);
		goto exit;
	}

	if (!serial) {
		dbg("%s - bad serial pointer, exiting", __func__);
		return;
	}

	dbg("%s - port %d", __func__, port->number);
	usb_serial_debug_data(debug, &port->dev, __func__,
					urb->actual_length, data);

	/*
	 * Work-a-round: handle the 'usual' bulk-in pipe here
	 */
	if (urb->transfer_buffer_length > 2) {
		if (urb->actual_length) {
			tty = tty_port_tty_get(&port->port);
			if (tty) {
				tty_insert_flip_string(tty, data,
						urb->actual_length);
				tty_flip_buffer_push(tty);
			}
			tty_kref_put(tty);
		}
		goto exit;
	}

	/*
	 * The interrupt-in pipe signals exceptional conditions (modem line
	 * signal changes and errors). data[0] holds MSR, data[1] holds LSR.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	priv->last_msr = data[MCT_U232_MSR_INDEX];

	/* Record Control Line states */
	mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
	mct_u232_msr_to_icount(&priv->icount, priv->last_msr);

#if 0
	/* Not yet handled. See belkin_sa.c for further information */
	/* Now to report any errors */
	priv->last_lsr = data[MCT_U232_LSR_INDEX];
	/*
	 * fill in the flip buffer here, but I do not know the relation
	 * to the current/next receive buffer or characters.  I need
	 * to look in to this before committing any code.
	 */
	if (priv->last_lsr & MCT_U232_LSR_ERR) {
		tty = tty_port_tty_get(&port->port);
		/* Overrun Error */
		if (priv->last_lsr & MCT_U232_LSR_OE) {
		}
		/* Parity Error */
		if (priv->last_lsr & MCT_U232_LSR_PE) {
		}
		/* Framing Error */
		if (priv->last_lsr & MCT_U232_LSR_FE) {
		}
		/* Break Indicator */
		if (priv->last_lsr & MCT_U232_LSR_BI) {
		}
		tty_kref_put(tty);
	}
#endif
	/* wake any TIOCMIWAIT sleeper in mct_u232_ioctl() */
	wake_up_interruptible(&priv->msr_wait);
	spin_unlock_irqrestore(&priv->lock, flags);
exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&port->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
} /* mct_u232_read_int_callback */

/*
 * Apply new termios settings: baud rate, then the line control register
 * (parity, word size, stop bits), handling B0 transitions via DTR/RTS.
 */
static void mct_u232_set_termios(struct tty_struct *tty,
				 struct usb_serial_port *port,
				 struct ktermios *old_termios)
{
	struct usb_serial *serial = port->serial;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	struct ktermios *termios = tty->termios;
	unsigned int cflag = termios->c_cflag;
	unsigned int old_cflag = old_termios->c_cflag;
	unsigned long flags;
	unsigned int control_state;
	unsigned char last_lcr;

	/* get a local copy of the current port settings */
	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	spin_unlock_irqrestore(&priv->lock, flags);
	last_lcr = 0;

	/*
	 * Update baud rate.
	 * Do not attempt to cache old rates and skip settings,
	 * disconnects screw such tricks up completely.
	 * Premature optimization is the root of all evil.
	 */

	/* reassert DTR and RTS on transition from B0 */
	if ((old_cflag & CBAUD) == B0) {
		dbg("%s: baud was B0", __func__);
		control_state |= TIOCM_DTR | TIOCM_RTS;
		mct_u232_set_modem_ctrl(serial, control_state);
	}

	mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty));

	if ((cflag & CBAUD) == B0) {
		dbg("%s: baud is B0", __func__);
		/* Drop RTS and DTR */
		control_state &= ~(TIOCM_DTR | TIOCM_RTS);
		mct_u232_set_modem_ctrl(serial, control_state);
	}

	/*
	 * Update line control register (LCR)
	 */

	/* set the parity */
	if (cflag & PARENB)
		last_lcr |= (cflag & PARODD) ?
			MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN;
	else
		last_lcr |= MCT_U232_PARITY_NONE;

	/* set the number of data bits */
	switch (cflag & CSIZE) {
	case CS5:
		last_lcr |= MCT_U232_DATA_BITS_5; break;
	case CS6:
		last_lcr |= MCT_U232_DATA_BITS_6; break;
	case CS7:
		last_lcr |= MCT_U232_DATA_BITS_7; break;
	case CS8:
		last_lcr |= MCT_U232_DATA_BITS_8; break;
	default:
		dev_err(&port->dev,
			"CSIZE was not CS5-CS8, using default of 8\n");
		last_lcr |= MCT_U232_DATA_BITS_8;
		break;
	}

	/* mark/space parity is not supported by the hardware */
	termios->c_cflag &= ~CMSPAR;

	/* set the number of stop bits */
	last_lcr |= (cflag & CSTOPB) ?
		MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1;

	mct_u232_set_line_ctrl(serial, last_lcr);

	/* save off the modified port settings */
	spin_lock_irqsave(&priv->lock, flags);
	priv->control_state = control_state;
	priv->last_lcr = last_lcr;
	spin_unlock_irqrestore(&priv->lock, flags);
} /* mct_u232_set_termios */

/*
 * Set or clear break: re-send the last LCR with the break bit OR'd in.
 * priv->last_lcr never carries the break bit, so sending it unmodified
 * clears the break condition.
 */
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	unsigned char lcr;
	unsigned long flags;

	dbg("%sstate=%d", __func__, break_state);

	spin_lock_irqsave(&priv->lock, flags);
	lcr = priv->last_lcr;

	if (break_state)
		lcr |= MCT_U232_SET_BREAK;
	spin_unlock_irqrestore(&priv->lock, flags);

	mct_u232_set_line_ctrl(serial, lcr);
} /* mct_u232_break_ctl */

/* Report the cached TIOCM modem-line state. */
static int mct_u232_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	unsigned int control_state;
	unsigned long flags;

	dbg("%s", __func__);

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	spin_unlock_irqrestore(&priv->lock, flags);

	return control_state;
}

/* Set/clear DTR and RTS per the TIOCMSET/TIOCMBIS/TIOCMBIC request. */
static int mct_u232_tiocmset(struct tty_struct *tty,
			      unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	struct mct_u232_private *priv =
		usb_get_serial_port_data(port);
	unsigned int control_state;
	unsigned long flags;

	dbg("%s", __func__);

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;

	if (set & TIOCM_RTS)
		control_state |= TIOCM_RTS;
	if (set & TIOCM_DTR)
		control_state |= TIOCM_DTR;
	if (clear & TIOCM_RTS)
		control_state &= ~TIOCM_RTS;
	if (clear & TIOCM_DTR)
		control_state &= ~TIOCM_DTR;

	priv->control_state = control_state;
	spin_unlock_irqrestore(&priv->lock, flags);
	return mct_u232_set_modem_ctrl(serial, control_state);
}

/* tty layer asks us to stop input: drop RTS if hardware flow control is on. */
static void mct_u232_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	unsigned int control_state;

	dbg("%s - port %d", __func__, port->number);

	spin_lock_irq(&priv->lock);
	priv->rx_flags |= THROTTLED;
	if (C_CRTSCTS(tty)) {
		priv->control_state &= ~TIOCM_RTS;
		control_state = priv->control_state;
		spin_unlock_irq(&priv->lock);
		/* sleeps, so must run outside the spinlock */
		(void) mct_u232_set_modem_ctrl(port->serial, control_state);
	} else {
		spin_unlock_irq(&priv->lock);
	}
}

/* tty layer resumes input: reassert RTS if we throttled with CRTSCTS. */
static void mct_u232_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	unsigned int control_state;

	dbg("%s - port %d", __func__, port->number);

	spin_lock_irq(&priv->lock);
	if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) {
		priv->rx_flags &= ~THROTTLED;
		priv->control_state |= TIOCM_RTS;
		control_state = priv->control_state;
		spin_unlock_irq(&priv->lock);
		(void) mct_u232_set_modem_ctrl(port->serial, control_state);
	} else {
		spin_unlock_irq(&priv->lock);
	}
}

/*
 * Handle TIOCMIWAIT: sleep on priv->msr_wait (woken by the interrupt
 * callback) until one of the modem lines named in @arg changes.
 */
static int mct_u232_ioctl(struct tty_struct *tty,
			unsigned int cmd, unsigned long arg)
{
	DEFINE_WAIT(wait);
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
	struct async_icount cnow, cprev;
	unsigned long flags;

	dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);

	switch (cmd) {

	case TIOCMIWAIT:
		dbg("%s (%d) TIOCMIWAIT", __func__, port->number);

		spin_lock_irqsave(&mct_u232_port->lock, flags);
		cprev = mct_u232_port->icount;
		spin_unlock_irqrestore(&mct_u232_port->lock, flags);
		for ( ; ; ) {
			prepare_to_wait(&mct_u232_port->msr_wait,
					&wait, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&mct_u232_port->msr_wait, &wait);
			/* see if a signal did it */
			if (signal_pending(current))
				return -ERESTARTSYS;
			spin_lock_irqsave(&mct_u232_port->lock, flags);
			cnow = mct_u232_port->icount;
			spin_unlock_irqrestore(&mct_u232_port->lock, flags);
			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
				return -EIO; /* no change => error */
			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
			    ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
			    ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
				return 0;
			}
			cprev = cnow;
		}
	}
	return -ENOIOCTLCMD;
}

/* Copy the accumulated line-change counters out for TIOCGICOUNT. */
static int mct_u232_get_icount(struct tty_struct *tty,
			struct serial_icounter_struct *icount)
{
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
	struct async_icount *ic = &mct_u232_port->icount;
	unsigned long flags;

	spin_lock_irqsave(&mct_u232_port->lock, flags);

	icount->cts = ic->cts;
	icount->dsr = ic->dsr;
	icount->rng = ic->rng;
	icount->dcd = ic->dcd;
	icount->rx = ic->rx;
	icount->tx = ic->tx;
	icount->frame = ic->frame;
	icount->overrun = ic->overrun;
	icount->parity = ic->parity;
	icount->brk = ic->brk;
	icount->buf_overrun = ic->buf_overrun;

	spin_unlock_irqrestore(&mct_u232_port->lock, flags);

	dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d",
		__func__, port->number, icount->rx, icount->tx);
	return 0;
}

/* Module entry point: register the serial driver, then the usb driver. */
static int __init mct_u232_init(void)
{
	int retval;
	retval = usb_serial_register(&mct_u232_device);
	if (retval)
		goto failed_usb_serial_register;
	retval = usb_register(&mct_u232_driver);
	if (retval)
		goto failed_usb_register;
	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	return 0;
failed_usb_register:
	usb_serial_deregister(&mct_u232_device);
failed_usb_serial_register:
	return retval;
}

/* Module exit: unregister in the reverse order of registration. */
static void __exit mct_u232_exit(void)
{
	usb_deregister(&mct_u232_driver);
	usb_serial_deregister(&mct_u232_device);
}

module_init(mct_u232_init);
module_exit(mct_u232_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
taoguan/linux
arch/frv/kernel/sysctl.c
2235
4475
/* sysctl.c: implementation of /proc/sys files relating to FRV specifically
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/uaccess.h>

/* keywords accepted by /proc/sys/frv/cache-mode */
static const char frv_cache_wback[] = "wback";
static const char frv_cache_wthru[] = "wthru";

/*
 * Switch the data cache between write-back and write-through.
 * The dcache is disabled, flushed with dcef, reconfigured via the
 * HSR0 CBM field and then re-enabled, all with interrupts off.
 */
static void frv_change_dcache_mode(unsigned long newmode)
{
	unsigned long flags, hsr0;

	local_irq_save(flags);

	hsr0 = __get_HSR(0);
	hsr0 &= ~HSR0_DCE;
	__set_HSR(0, hsr0);

	/* flush the entire dcache before changing the caching mode */
	asm volatile(" dcef @(gr0,gr0),#1 \n"
		     " membar \n"
		     : : : "memory"
		     );

	hsr0 = (hsr0 & ~HSR0_CBM) | newmode;
	__set_HSR(0, hsr0);

	hsr0 |= HSR0_DCE;
	__set_HSR(0, hsr0);

	local_irq_restore(flags);

	//printk("HSR0 now %08lx\n", hsr0);
}

/*****************************************************************************/
/*
 * handle requests to dynamically switch the write caching mode delivered by /proc
 */
/*
 * sysctl handler for /proc/sys/frv/cache-mode.
 * Write "wback" or "wthru" (trailing newline allowed) to change the mode;
 * reading returns the current mode as one of those keywords plus '\n'.
 */
static int procctl_frv_cachemode(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	unsigned long hsr0;
	char buff[8];
	int len;

	len = *lenp;

	if (write) {
		/* potential state change */
		if (len <= 1 || len > sizeof(buff) - 1)
			return -EINVAL;
		if (copy_from_user(buff, buffer, len) != 0)
			return -EFAULT;
		/* strip an optional trailing newline, then NUL-terminate */
		if (buff[len - 1] == '\n')
			buff[len - 1] = '\0';
		else
			buff[len] = '\0';

		if (strcmp(buff, frv_cache_wback) == 0) {
			/* switch dcache into write-back mode */
			frv_change_dcache_mode(HSR0_CBM_COPY_BACK);
			return 0;
		}

		if (strcmp(buff, frv_cache_wthru) == 0) {
			/* switch dcache into write-through mode */
			frv_change_dcache_mode(HSR0_CBM_WRITE_THRU);
			return 0;
		}

		return -EINVAL;
	}

	/* read the state */
	if (*ppos > 0) {
		/* whole value was returned on the first read */
		*lenp = 0;
		return 0;
	}

	hsr0 = __get_HSR(0);
	switch (hsr0 & HSR0_CBM) {
	case HSR0_CBM_WRITE_THRU:
		memcpy(buff, frv_cache_wthru, sizeof(frv_cache_wthru) - 1);
		buff[sizeof(frv_cache_wthru) - 1] = '\n';
		len = sizeof(frv_cache_wthru);
		break;
	default:
		memcpy(buff, frv_cache_wback, sizeof(frv_cache_wback) - 1);
		buff[sizeof(frv_cache_wback) - 1] = '\n';
		len = sizeof(frv_cache_wback);
		break;
	}

	if (len > *lenp)
		len = *lenp;

	if (copy_to_user(buffer, buff, len) != 0)
		return -EFAULT;

	*lenp = len;
	*ppos = len;
	return 0;

} /* end procctl_frv_cachemode() */

/*****************************************************************************/
/*
 * permit the mm_struct the nominated process is using have its MMU context ID pinned
 */
#ifdef CONFIG_MMU
/*
 * sysctl handler for /proc/sys/frv/pin-cxnr.
 * Write a decimal PID to pin that process's MMU context; reading returns
 * the currently pinned context number.
 */
static int procctl_frv_pin_cxnr(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	pid_t pid;
	char buff[16], *p;
	int len;

	len = *lenp;

	if (write) {
		/* potential state change */
		if (len <= 1 || len > sizeof(buff) - 1)
			return -EINVAL;
		if (copy_from_user(buff, buffer, len) != 0)
			return -EFAULT;
		if (buff[len - 1] == '\n')
			buff[len - 1] = '\0';
		else
			buff[len] = '\0';

		pid = simple_strtoul(buff, &p, 10);
		if (*p)
			return -EINVAL;

		return cxn_pin_by_pid(pid);
	}

	/* read the currently pinned CXN */
	if (*ppos > 0) {
		*lenp = 0;
		return 0;
	}

	len = snprintf(buff, sizeof(buff), "%d\n", cxn_pinned);
	if (len > *lenp)
		len = *lenp;

	if (copy_to_user(buffer, buff, len) != 0)
		return -EFAULT;

	*lenp = len;
	*ppos = len;
	return 0;

} /* end procctl_frv_pin_cxnr() */
#endif

/*
 * FR-V specific sysctls
 */
static struct ctl_table frv_table[] = {
	{
		.procname	= "cache-mode",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0644,
		.proc_handler	= procctl_frv_cachemode,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "pin-cxnr",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0644,
		.proc_handler	= procctl_frv_pin_cxnr
	},
#endif
	{}
};

/*
 * Use a temporary sysctl number. Horrid, but will be cleaned up in 2.6
 * when all the PM interfaces exist nicely.
 */
static struct ctl_table frv_dir_table[] = {
	{
		.procname	= "frv",
		.mode		= 0555,
		.child		= frv_table
	},
	{}
};

/*
 * Initialize power interface
 */
static int __init frv_sysctl_init(void)
{
	register_sysctl_table(frv_dir_table);
	return 0;
}

__initcall(frv_sysctl_init);
gpl-2.0
munjeni/stock_jb_kexec_kernel_for_locked_bootloader
arch/powerpc/platforms/pseries/reconfig.c
2491
12454
/* * pSeries_reconfig.c - support for dynamic reconfiguration (including PCI * Hotplug and Dynamic Logical Partitioning on RPA platforms). * * Copyright (C) 2005 Nathan Lynch * Copyright (C) 2005 IBM Corporation * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/kref.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/uaccess.h> #include <asm/pSeries_reconfig.h> #include <asm/mmu.h> /* * Routines for "runtime" addition and removal of device tree nodes. */ #ifdef CONFIG_PROC_DEVICETREE /* * Add a node to /proc/device-tree. */ static void add_node_proc_entries(struct device_node *np) { struct proc_dir_entry *ent; ent = proc_mkdir(strrchr(np->full_name, '/') + 1, np->parent->pde); if (ent) proc_device_tree_add_node(np, ent); } static void remove_node_proc_entries(struct device_node *np) { struct property *pp = np->properties; struct device_node *parent = np->parent; while (pp) { remove_proc_entry(pp->name, np->pde); pp = pp->next; } if (np->pde) remove_proc_entry(np->pde->name, parent->pde); } #else /* !CONFIG_PROC_DEVICETREE */ static void add_node_proc_entries(struct device_node *np) { return; } static void remove_node_proc_entries(struct device_node *np) { return; } #endif /* CONFIG_PROC_DEVICETREE */ /** * derive_parent - basically like dirname(1) * @path: the full_name of a node to be added to the tree * * Returns the node which should be the parent of the node * described by path. E.g., for path = "/foo/bar", returns * the node with full_name = "/foo". 
*/ static struct device_node *derive_parent(const char *path) { struct device_node *parent = NULL; char *parent_path = "/"; size_t parent_path_len = strrchr(path, '/') - path + 1; /* reject if path is "/" */ if (!strcmp(path, "/")) return ERR_PTR(-EINVAL); if (strrchr(path, '/') != path) { parent_path = kmalloc(parent_path_len, GFP_KERNEL); if (!parent_path) return ERR_PTR(-ENOMEM); strlcpy(parent_path, path, parent_path_len); } parent = of_find_node_by_path(parent_path); if (!parent) return ERR_PTR(-EINVAL); if (strcmp(parent_path, "/")) kfree(parent_path); return parent; } BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); int pSeries_reconfig_notifier_register(struct notifier_block *nb) { return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); } void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); } static int pSeries_reconfig_add_node(const char *path, struct property *proplist) { struct device_node *np; int err = -ENOMEM; np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) goto out_err; np->full_name = kstrdup(path, GFP_KERNEL); if (!np->full_name) goto out_err; np->properties = proplist; of_node_set_flag(np, OF_DYNAMIC); kref_init(&np->kref); np->parent = derive_parent(path); if (IS_ERR(np->parent)) { err = PTR_ERR(np->parent); goto out_err; } err = blocking_notifier_call_chain(&pSeries_reconfig_chain, PSERIES_RECONFIG_ADD, np); if (err == NOTIFY_BAD) { printk(KERN_ERR "Failed to add device node %s\n", path); err = -ENOMEM; /* For now, safe to assume kmalloc failure */ goto out_err; } of_attach_node(np); add_node_proc_entries(np); of_node_put(np->parent); return 0; out_err: if (np) { of_node_put(np->parent); kfree(np->full_name); kfree(np); } return err; } static int pSeries_reconfig_remove_node(struct device_node *np) { struct device_node *parent, *child; parent = of_get_parent(np); if (!parent) return -EINVAL; if ((child = of_get_next_child(np, NULL))) { 
of_node_put(child); of_node_put(parent); return -EBUSY; } remove_node_proc_entries(np); blocking_notifier_call_chain(&pSeries_reconfig_chain, PSERIES_RECONFIG_REMOVE, np); of_detach_node(np); of_node_put(parent); of_node_put(np); /* Must decrement the refcount */ return 0; } /* * /proc/powerpc/ofdt - yucky binary interface for adding and removing * OF device nodes. Should be deprecated as soon as we get an * in-kernel wrapper for the RTAS ibm,configure-connector call. */ static void release_prop_list(const struct property *prop) { struct property *next; for (; prop; prop = next) { next = prop->next; kfree(prop->name); kfree(prop->value); kfree(prop); } } /** * parse_next_property - process the next property from raw input buffer * @buf: input buffer, must be nul-terminated * @end: end of the input buffer + 1, for validation * @name: return value; set to property name in buf * @length: return value; set to length of value * @value: return value; set to the property value in buf * * Note that the caller must make copies of the name and value returned, * this function does no allocation or copying of the data. Return value * is set to the next name in buf, or NULL on error. 
*/ static char * parse_next_property(char *buf, char *end, char **name, int *length, unsigned char **value) { char *tmp; *name = buf; tmp = strchr(buf, ' '); if (!tmp) { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } *tmp = '\0'; if (++tmp >= end) { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } /* now we're on the length */ *length = -1; *length = simple_strtoul(tmp, &tmp, 10); if (*length == -1) { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } if (*tmp != ' ' || ++tmp >= end) { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } /* now we're on the value */ *value = tmp; tmp += *length; if (tmp > end) { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } else if (tmp < end && *tmp != ' ' && *tmp != '\0') { printk(KERN_ERR "property parse failed in %s at line %d\n", __func__, __LINE__); return NULL; } tmp++; /* and now we should be on the next name, or the end */ return tmp; } static struct property *new_property(const char *name, const int length, const unsigned char *value, struct property *last) { struct property *new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL))) goto cleanup; if (!(new->value = kmalloc(length + 1, GFP_KERNEL))) goto cleanup; strcpy(new->name, name); memcpy(new->value, value, length); *(((char *)new->value) + length) = 0; new->length = length; new->next = last; return new; cleanup: kfree(new->name); kfree(new->value); kfree(new); return NULL; } static int do_add_node(char *buf, size_t bufsize) { char *path, *end, *name; struct device_node *np; struct property *prop = NULL; unsigned char* value; int length, rv = 0; end = buf + bufsize; path = buf; buf = strchr(buf, ' '); if (!buf) return -EINVAL; *buf = '\0'; buf++; if ((np = 
of_find_node_by_path(path))) { of_node_put(np); return -EINVAL; } /* rv = build_prop_list(tmp, bufsize - (tmp - buf), &proplist); */ while (buf < end && (buf = parse_next_property(buf, end, &name, &length, &value))) { struct property *last = prop; prop = new_property(name, length, value, last); if (!prop) { rv = -ENOMEM; prop = last; goto out; } } if (!buf) { rv = -EINVAL; goto out; } rv = pSeries_reconfig_add_node(path, prop); out: if (rv) release_prop_list(prop); return rv; } static int do_remove_node(char *buf) { struct device_node *node; int rv = -ENODEV; if ((node = of_find_node_by_path(buf))) rv = pSeries_reconfig_remove_node(node); of_node_put(node); return rv; } static char *parse_node(char *buf, size_t bufsize, struct device_node **npp) { char *handle_str; phandle handle; *npp = NULL; handle_str = buf; buf = strchr(buf, ' '); if (!buf) return NULL; *buf = '\0'; buf++; handle = simple_strtoul(handle_str, NULL, 0); *npp = of_find_node_by_phandle(handle); return buf; } static int do_add_property(char *buf, size_t bufsize) { struct property *prop = NULL; struct device_node *np; unsigned char *value; char *name, *end; int length; end = buf + bufsize; buf = parse_node(buf, bufsize, &np); if (!np) return -ENODEV; if (parse_next_property(buf, end, &name, &length, &value) == NULL) return -EINVAL; prop = new_property(name, length, value, NULL); if (!prop) return -ENOMEM; prom_add_property(np, prop); return 0; } static int do_remove_property(char *buf, size_t bufsize) { struct device_node *np; char *tmp; struct property *prop; buf = parse_node(buf, bufsize, &np); if (!np) return -ENODEV; tmp = strchr(buf,' '); if (tmp) *tmp = '\0'; if (strlen(buf) == 0) return -EINVAL; prop = of_find_property(np, buf, NULL); return prom_remove_property(np, prop); } static int do_update_property(char *buf, size_t bufsize) { struct device_node *np; unsigned char *value; char *name, *end, *next_prop; int rc, length; struct property *newprop, *oldprop; buf = parse_node(buf, bufsize, 
&np); end = buf + bufsize; if (!np) return -ENODEV; next_prop = parse_next_property(buf, end, &name, &length, &value); if (!next_prop) return -EINVAL; newprop = new_property(name, length, value, NULL); if (!newprop) return -ENOMEM; if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size")) slb_set_size(*(int *)value); oldprop = of_find_property(np, name,NULL); if (!oldprop) { if (strlen(name)) return prom_add_property(np, newprop); return -ENODEV; } rc = prom_update_property(np, newprop, oldprop); if (rc) return rc; /* For memory under the ibm,dynamic-reconfiguration-memory node * of the device tree, adding and removing memory is just an update * to the ibm,dynamic-memory property instead of adding/removing a * memory node in the device tree. For these cases we still need to * involve the notifier chain. */ if (!strcmp(name, "ibm,dynamic-memory")) { int action; next_prop = parse_next_property(next_prop, end, &name, &length, &value); if (!next_prop) return -EINVAL; if (!strcmp(name, "add")) action = PSERIES_DRCONF_MEM_ADD; else action = PSERIES_DRCONF_MEM_REMOVE; rc = blocking_notifier_call_chain(&pSeries_reconfig_chain, action, value); if (rc == NOTIFY_BAD) { rc = prom_update_property(np, oldprop, newprop); return -ENOMEM; } } return 0; } /** * ofdt_write - perform operations on the Open Firmware device tree * * @file: not used * @buf: command and arguments * @count: size of the command buffer * @off: not used * * Operations supported at this time are addition and removal of * whole nodes along with their properties. Operations on individual * properties are not implemented (yet). 
*/ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { int rv = 0; char *kbuf; char *tmp; if (!(kbuf = kmalloc(count + 1, GFP_KERNEL))) { rv = -ENOMEM; goto out; } if (copy_from_user(kbuf, buf, count)) { rv = -EFAULT; goto out; } kbuf[count] = '\0'; tmp = strchr(kbuf, ' '); if (!tmp) { rv = -EINVAL; goto out; } *tmp = '\0'; tmp++; if (!strcmp(kbuf, "add_node")) rv = do_add_node(tmp, count - (tmp - kbuf)); else if (!strcmp(kbuf, "remove_node")) rv = do_remove_node(tmp); else if (!strcmp(kbuf, "add_property")) rv = do_add_property(tmp, count - (tmp - kbuf)); else if (!strcmp(kbuf, "remove_property")) rv = do_remove_property(tmp, count - (tmp - kbuf)); else if (!strcmp(kbuf, "update_property")) rv = do_update_property(tmp, count - (tmp - kbuf)); else rv = -EINVAL; out: kfree(kbuf); return rv ? rv : count; } static const struct file_operations ofdt_fops = { .write = ofdt_write, .llseek = noop_llseek, }; /* create /proc/powerpc/ofdt write-only by root */ static int proc_ppc64_create_ofdt(void) { struct proc_dir_entry *ent; if (!machine_is(pseries)) return 0; ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops); if (ent) ent->size = 0; return 0; } __initcall(proc_ppc64_create_ofdt);
gpl-2.0
atilag/hammerhead-nexus5-kernel
drivers/net/ethernet/intel/e1000e/82571.c
2747
55875
/******************************************************************************* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ /* * 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) * 82571EB Gigabit Ethernet Controller (Fiber) * 82571EB Dual Port Gigabit Mezzanine Adapter * 82571EB Quad Port Gigabit Mezzanine Adapter * 82571PT Gigabit PT Quad Port Server ExpressModule * 82572EI Gigabit Ethernet Controller (Copper) * 82572EI Gigabit Ethernet Controller (Fiber) * 82572EI Gigabit Ethernet Controller * 82573V Gigabit Ethernet Controller (Copper) * 82573E Gigabit Ethernet Controller (Copper) * 82573L Gigabit Ethernet Controller * 82574L Gigabit Network Connection * 82583V Gigabit Network Connection */ #include "e1000.h" #define ID_LED_RESERVED_F746 0xF746 #define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_OFF1_ON2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 #define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ #define E1000_BASE1000T_STATUS 10 #define E1000_IDLE_ERROR_COUNT_MASK 0xFF #define E1000_RECEIVE_ERROR_COUNTER 21 #define E1000_RECEIVE_ERROR_MAX 0xFFFF #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); static s32 e1000_setup_link_82571(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); static void e1000_clear_vfta_82571(struct e1000_hw *hw); static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); static s32 
e1000_led_on_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); /** * e1000_init_phy_params_82571 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; return 0; } phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82571; switch (hw->mac.type) { case e1000_82571: case e1000_82572: phy->type = e1000_phy_igp_2; break; case e1000_82573: phy->type = e1000_phy_m88; break; case e1000_82574: case e1000_82583: phy->type = e1000_phy_bm; phy->ops.acquire = e1000_get_hw_semaphore_82574; phy->ops.release = e1000_put_hw_semaphore_82574; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; break; default: return -E1000_ERR_PHY; break; } /* This can only be done after all function pointers are setup. 
*/ ret_val = e1000_get_phy_id_82571(hw); if (ret_val) { e_dbg("Error getting PHY ID\n"); return ret_val; } /* Verify phy id */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: if (phy->id != IGP01E1000_I_PHY_ID) ret_val = -E1000_ERR_PHY; break; case e1000_82573: if (phy->id != M88E1111_I_PHY_ID) ret_val = -E1000_ERR_PHY; break; case e1000_82574: case e1000_82583: if (phy->id != BME1000_E_PHY_ID_R2) ret_val = -E1000_ERR_PHY; break; default: ret_val = -E1000_ERR_PHY; break; } if (ret_val) e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); return ret_val; } /** * e1000_init_nvm_params_82571 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); u16 size; nvm->opcode_bits = 8; nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: nvm->page_size = 8; nvm->address_bits = 8; break; default: nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; break; } switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: if (((eecd >> 15) & 0x3) == 0x3) { nvm->type = e1000_nvm_flash_hw; nvm->word_size = 2048; /* * Autonomous Flash update bit must be cleared due * to Flash update issue. */ eecd &= ~E1000_EECD_AUPDEN; ew32(EECD, eecd); break; } /* Fall Through */ default: nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); /* * Added to a constant, "size" becomes the left-shift value * for setting word_size. 
*/ size += NVM_WORD_SIZE_BASE_SHIFT; /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; nvm->word_size = 1 << size; break; } /* Function Pointers */ switch (hw->mac.type) { case e1000_82574: case e1000_82583: nvm->ops.acquire = e1000_get_hw_semaphore_82574; nvm->ops.release = e1000_put_hw_semaphore_82574; break; default: break; } return 0; } /** * e1000_init_mac_params_82571 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 swsm = 0; u32 swsm2 = 0; bool force_clear_smbi = false; /* Set media type and media-dependent function pointers */ switch (hw->adapter->pdev->device) { case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82572EI_FIBER: case E1000_DEV_ID_82571EB_QUAD_FIBER: hw->phy.media_type = e1000_media_type_fiber; mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_82571; mac->ops.check_for_link = e1000e_check_for_fiber_link; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; break; case E1000_DEV_ID_82571EB_SERDES: case E1000_DEV_ID_82571EB_SERDES_DUAL: case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82572EI_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_82571; mac->ops.check_for_link = e1000_check_for_serdes_link_82571; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; break; default: hw->phy.media_type = e1000_media_type_copper; mac->ops.setup_physical_interface = e1000_setup_copper_link_82571; mac->ops.check_for_link = e1000e_check_for_copper_link; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; break; } /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Adaptive IFS supported */ mac->adaptive_ifs = true; /* MAC-specific function pointers */ switch (hw->mac.type) { case e1000_82573: 
mac->ops.set_lan_id = e1000_set_lan_id_single_port; mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; mac->ops.led_on = e1000e_led_on_generic; mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; /* * ARC supported; valid only if manageability features are * enabled. */ mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? true : false; break; case e1000_82574: case e1000_82583: mac->ops.set_lan_id = e1000_set_lan_id_single_port; mac->ops.check_mng_mode = e1000_check_mng_mode_82574; mac->ops.led_on = e1000_led_on_82574; break; default: mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; mac->ops.led_on = e1000e_led_on_generic; mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; break; } /* * Ensure that the inter-port SWSM.SMBI lock bit is clear before * first NVM or PHY access. This should be done for single-port * devices, and for one port only on dual-port devices so that * for those devices we can still use the SMBI lock to synchronize * inter-port accesses to the PHY & NVM. */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: swsm2 = er32(SWSM2); if (!(swsm2 & E1000_SWSM2_LOCK)) { /* Only do this for the first interface on this card */ ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); force_clear_smbi = true; } else { force_clear_smbi = false; } break; default: force_clear_smbi = true; break; } if (force_clear_smbi) { /* Make sure SWSM.SMBI is clear */ swsm = er32(SWSM); if (swsm & E1000_SWSM_SMBI) { /* This bit should not be set on a first interface, and * indicates that the bootagent or EFI code has * improperly left this bit enabled */ e_dbg("Please update your 82571 Bootagent\n"); } ew32(SWSM, swsm & ~E1000_SWSM_SMBI); } /* * Initialize device specific counter of SMBI acquisition * timeouts. 
*/ hw->dev_spec.e82571.smb_counter = 0; return 0; } static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; static int global_quad_port_a; /* global port a indication */ struct pci_dev *pdev = adapter->pdev; int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; s32 rc; rc = e1000_init_mac_params_82571(hw); if (rc) return rc; rc = e1000_init_nvm_params_82571(hw); if (rc) return rc; rc = e1000_init_phy_params_82571(hw); if (rc) return rc; /* tag quad port adapters first, it's used below */ switch (pdev->device) { case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: case E1000_DEV_ID_82571PT_QUAD_COPPER: adapter->flags |= FLAG_IS_QUAD_PORT; /* mark the first port */ if (global_quad_port_a == 0) adapter->flags |= FLAG_IS_QUAD_PORT_A; /* Reset for multiple quad port adapters */ global_quad_port_a++; if (global_quad_port_a == 4) global_quad_port_a = 0; break; default: break; } switch (adapter->hw.mac.type) { case e1000_82571: /* these dual ports don't have WoL on port B at all */ if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || (pdev->device == E1000_DEV_ID_82571EB_SERDES) || (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && (is_port_b)) adapter->flags &= ~FLAG_HAS_WOL; /* quad ports only support WoL on port A */ if (adapter->flags & FLAG_IS_QUAD_PORT && (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) adapter->flags &= ~FLAG_HAS_WOL; /* Does not support WoL on any port */ if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) adapter->flags &= ~FLAG_HAS_WOL; break; case e1000_82573: if (pdev->device == E1000_DEV_ID_82573L) { adapter->flags |= FLAG_HAS_JUMBO_FRAMES; adapter->max_hw_frame_size = DEFAULT_JUMBO; } break; default: break; } return 0; } /** * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. 
 **/
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_id = 0;

	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		/*
		 * The 82571 firmware may still be configuring the PHY.
		 * In this case, we cannot access the PHY until the
		 * configuration is done.  So we explicitly set the
		 * PHY ID.
		 */
		phy->id = IGP01E1000_I_PHY_ID;
		break;
	case e1000_82573:
		return e1000e_get_phy_id(hw);
		break;
	case e1000_82574:
	case e1000_82583:
		/* Read the two ID halves and merge: PHY_ID1 is the upper
		 * word, PHY_ID2 the lower word plus the revision bits. */
		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
		if (ret_val)
			return ret_val;
		phy->id = (u32)(phy_id << 16);
		udelay(20);
		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
		if (ret_val)
			return ret_val;
		phy->id |= (u32)(phy_id);
		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
		break;
	default:
		return -E1000_ERR_PHY;
		break;
	}

	return 0;
}

/**
 * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
	u32 swsm;
	s32 sw_timeout = hw->nvm.word_size + 1;
	s32 fw_timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/*
	 * If we have timedout 3 times on trying to acquire
	 * the inter-port SMBI semaphore, there is old code
	 * operating on the other port, and it is not
	 * releasing SMBI. Modify the number of times that
	 * we try for the semaphore to interwork with this
	 * older code.
	 */
	if (hw->dev_spec.e82571.smb_counter > 2)
		sw_timeout = 1;

	/* Get the SW semaphore */
	while (i < sw_timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == sw_timeout) {
		/* NOTE: deliberately continues to the FW semaphore even
		 * when SMBI could not be taken; only the counter is bumped. */
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		hw->dev_spec.e82571.smb_counter++;
	}
	/* Get the FW semaphore. */
	for (i = 0; i < fw_timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == fw_timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_82571(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * e1000_put_hw_semaphore_82571 - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	ew32(SWSM, swsm);
}

/**
 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore during reset.
 *
 **/
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;
	s32 i = 0;

	extcnf_ctrl = er32(EXTCNF_CTRL);
	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
	do {
		/* Write the ownership request, then read back to see
		 * whether the hardware granted it. */
		ew32(EXTCNF_CTRL, extcnf_ctrl);
		extcnf_ctrl = er32(EXTCNF_CTRL);

		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
			break;

		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;

		usleep_range(2000, 4000);
		i++;
	} while (i < MDIO_OWNERSHIP_TIMEOUT);

	if (i == MDIO_OWNERSHIP_TIMEOUT) {
		/* Release semaphores */
		e1000_put_hw_semaphore_82573(hw);
		e_dbg("Driver can't access the PHY\n");
		return -E1000_ERR_PHY;
	}

	return 0;
}

/**
 * e1000_put_hw_semaphore_82573 - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used during reset.
 *
 **/
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	extcnf_ctrl = er32(EXTCNF_CTRL);
	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
	ew32(EXTCNF_CTRL, extcnf_ctrl);
}

/* Serializes 82574/82583 semaphore use across this driver instance. */
static DEFINE_MUTEX(swflag_mutex);

/**
 * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM.
* **/ static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) { s32 ret_val; mutex_lock(&swflag_mutex); ret_val = e1000_get_hw_semaphore_82573(hw); if (ret_val) mutex_unlock(&swflag_mutex); return ret_val; } /** * e1000_put_hw_semaphore_82574 - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM * **/ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) { e1000_put_hw_semaphore_82573(hw); mutex_unlock(&swflag_mutex); } /** * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D0 state according to the active flag. * LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) { u16 data = er32(POEMB); if (active) data |= E1000_PHY_CTRL_D0A_LPLU; else data &= ~E1000_PHY_CTRL_D0A_LPLU; ew32(POEMB, data); return 0; } /** * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * The low power link up (lplu) state is set to the power management level D3 * when active is true, else clear lplu for D3. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. 
**/ static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) { u16 data = er32(POEMB); if (!active) { data &= ~E1000_PHY_CTRL_NOND0A_LPLU; } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { data |= E1000_PHY_CTRL_NOND0A_LPLU; } ew32(POEMB, data); return 0; } /** * e1000_acquire_nvm_82571 - Request for access to the EEPROM * @hw: pointer to the HW structure * * To gain access to the EEPROM, first we must obtain a hardware semaphore. * Then for non-82573 hardware, set the EEPROM access request bit and wait * for EEPROM access grant bit. If the access grant bit is not set, release * hardware semaphore. **/ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) { s32 ret_val; ret_val = e1000_get_hw_semaphore_82571(hw); if (ret_val) return ret_val; switch (hw->mac.type) { case e1000_82573: break; default: ret_val = e1000e_acquire_nvm(hw); break; } if (ret_val) e1000_put_hw_semaphore_82571(hw); return ret_val; } /** * e1000_release_nvm_82571 - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. **/ static void e1000_release_nvm_82571(struct e1000_hw *hw) { e1000e_release_nvm(hw); e1000_put_hw_semaphore_82571(hw); } /** * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * For non-82573 silicon, write data to EEPROM at offset using SPI interface. * * If e1000e_update_nvm_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. 
 **/
static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
				 u16 *data)
{
	s32 ret_val;

	/* Dispatch to the NVM backend appropriate for the MAC type. */
	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
		break;
	case e1000_82571:
	case e1000_82572:
		ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
		break;
	default:
		ret_val = -E1000_ERR_NVM;
		break;
	}

	return ret_val;
}

/**
 * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.
 **/
static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
{
	u32 eecd;
	s32 ret_val;
	u16 i;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		return ret_val;

	/*
	 * If our nvm is an EEPROM, then we're done
	 * otherwise, commit the checksum to the flash NVM.
	 */
	if (hw->nvm.type != e1000_nvm_flash_hw)
		return 0;

	/* Check for pending operations. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		usleep_range(1000, 2000);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	/* Reset the firmware if using STM opcode. */
	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
		/*
		 * The enabling of and the actual reset must be done
		 * in two write cycles.
		 */
		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
		e1e_flush();
		ew32(HICR, E1000_HICR_FW_RESET);
	}

	/* Commit the write to flash */
	eecd = er32(EECD) | E1000_EECD_FLUPD;
	ew32(EECD, eecd);

	/* Poll until the hardware clears FLUPD, i.e. the commit is done. */
	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
		usleep_range(1000, 2000);
		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
			break;
	}

	if (i == E1000_FLASH_UPDATES)
		return -E1000_ERR_NVM;

	return 0;
}

/**
 * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
{
	/* Flash-based NVM gets a checksum repair pass first. */
	if (hw->nvm.type == e1000_nvm_flash_hw)
		e1000_fix_nvm_checksum_82571(hw);

	return e1000e_validate_nvm_checksum_generic(hw);
}

/**
 * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
 * @hw: pointer to the HW structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * After checking for invalid values, poll the EEPROM to ensure the previous
 * command has completed before trying to write the next word.  After write
 * poll for completion.
 *
 * If e1000e_update_nvm_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
				      u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eewr = 0;
	s32 ret_val = 0;

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		/* Pack data word, word address, and start bit into EEWR. */
		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
		       E1000_NVM_RW_REG_START;

		/* Wait for any previous write to finish before issuing. */
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
		if (ret_val)
			break;

		ew32(EEWR, eewr);

		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
		if (ret_val)
			break;
	}

	return ret_val;
}

/**
 * e1000_get_cfg_done_82571 - Poll for configuration done
 * @hw: pointer to the HW structure
 *
 * Reads the management control register for the config done bit to be set.
 **/
static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;

	while (timeout) {
		if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout) {
		e_dbg("MNG configuration cycle has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

/**
 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When activating LPLU
 * this function also disables smart speed and vice versa.  LPLU will not be
 * activated unless the device autonegotiation advertisement meets standards
 * of either 10 or 10/100 or 10/100/1000 at all duplexes.  This is a function
 * pointer entry point only called by PHY setup routines.
**/ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) return ret_val; if (active) { data |= IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } return 0; } /** * e1000_reset_hw_82571 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { u32 ctrl, ctrl_ext; s32 ret_val; /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	/*
	 * Must acquire the MDIO ownership before MAC reset.
	 * Ownership defaults to firmware after a reset.
	 */
	switch (hw->mac.type) {
	case e1000_82573:
		ret_val = e1000_get_hw_semaphore_82573(hw);
		break;
	case e1000_82574:
	case e1000_82583:
		ret_val = e1000_get_hw_semaphore_82574(hw);
		break;
	default:
		break;
	}
	/*
	 * NOTE(review): for mac types not handled above, ret_val still holds
	 * the e1000e_disable_pcie_master() result here, so this message can
	 * fire for a PCI-E master disable failure as well — confirm intent.
	 */
	if (ret_val)
		e_dbg("Cannot acquire MDIO ownership\n");

	ctrl = er32(CTRL);

	e_dbg("Issuing a global reset to MAC\n");
	ew32(CTRL, ctrl | E1000_CTRL_RST);

	/* Must release MDIO ownership and mutex after MAC reset. */
	switch (hw->mac.type) {
	case e1000_82574:
	case e1000_82583:
		e1000_put_hw_semaphore_82574(hw);
		break;
	default:
		break;
	}

	if (hw->nvm.type == e1000_nvm_flash_hw) {
		udelay(10);
		ctrl_ext = er32(CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
		ew32(CTRL_EXT, ctrl_ext);
		e1e_flush();
	}

	ret_val = e1000e_get_auto_rd_done(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/*
	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
	 * Need to wait for Phy configuration completion before accessing
	 * NVM and Phy.
	 */
	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		msleep(25);
		break;
	default:
		break;
	}

	/* Clear any pending interrupt events. */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	if (hw->mac.type == e1000_82571) {
		/* Install any alternate MAC address into RAR0 */
		ret_val = e1000_check_alt_mac_addr_generic(hw);
		if (ret_val)
			return ret_val;

		e1000e_set_laa_state_82571(hw, true);
	}

	/* Reinitialize the 82571 serdes link state machine */
	if (hw->phy.media_type == e1000_media_type_internal_serdes)
		hw->mac.serdes_link_state = e1000_serdes_link_down;

	return 0;
}

/**
 * e1000_init_hw_82571 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 e1000_init_hw_82571(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg_data;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	e1000_initialize_hw_bits_82571(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val)
		e_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */

	/* Disabling VLAN filtering */
	e_dbg("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address. */
	/*
	 * If, however, a locally administered address was assigned to the
	 * 82571, we must reserve a RAR for it to work around an issue where
	 * resetting one port will reload the MAC on the other port.
	 */
	if (e1000e_get_laa_state_82571(hw))
		rar_count--;
	e1000e_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy */
	reg_data = er32(TXDCTL(0));
	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
		   E1000_TXDCTL_FULL_TX_DESC_WB |
		   E1000_TXDCTL_COUNT_DESC;
	ew32(TXDCTL(0), reg_data);

	/* ...for both queues.
*/
	switch (mac->type) {
	case e1000_82573:
		e1000e_enable_tx_pkt_filtering(hw);
		/* fall through */
	case e1000_82574:
	case e1000_82583:
		/* 82573/74/83: allow L1 ASPM entry without L0s on Rx. */
		reg_data = er32(GCR);
		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
		ew32(GCR, reg_data);
		break;
	default:
		/* Other parts: apply the same write-back policy to queue 1. */
		reg_data = er32(TXDCTL(1));
		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
			   E1000_TXDCTL_FULL_TX_DESC_WB |
			   E1000_TXDCTL_COUNT_DESC;
		ew32(TXDCTL(1), reg_data);
		break;
	}

	/*
	 * Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82571(hw);

	return ret_val;
}

/**
 * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
 * @hw: pointer to the HW structure
 *
 * Initializes required hardware-dependent bits needed for normal operation.
 **/
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
{
	u32 reg;

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL(0));
	reg |= (1 << 22);
	ew32(TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL(1));
	reg |= (1 << 22);
	ew32(TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC(0));
	reg &= ~(0xF << 27);	/* 30:27 */
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
		break;
	case e1000_82574:
	case e1000_82583:
		reg |= (1 << 26);
		break;
	default:
		break;
	}
	ew32(TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = er32(TARC(1));
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		reg &= ~((1 << 29) | (1 << 30));
		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
		/* Bit 28 depends on whether multiple Tx requests are enabled. */
		if (er32(TCTL) & E1000_TCTL_MULR)
			reg &= ~(1 << 28);
		else
			reg |= (1 << 28);
		ew32(TARC(1), reg);
		break;
	default:
		break;
	}

	/* Device Control */
	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		reg = er32(CTRL);
		reg &= ~(1 << 29);
		ew32(CTRL, reg);
		break;
	default:
		break;
	}

	/* Extended Device Control */
	switch (hw->mac.type)
{
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		reg = er32(CTRL_EXT);
		reg &= ~(1 << 23);
		reg |= (1 << 22);
		ew32(CTRL_EXT, reg);
		break;
	default:
		break;
	}

	/* 82571 only: enable packet buffer ECC correction. */
	if (hw->mac.type == e1000_82571) {
		reg = er32(PBA_ECC);
		reg |= E1000_PBA_ECC_CORR_EN;
		ew32(PBA_ECC, reg);
	}

	/*
	 * Workaround for hardware errata.
	 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
	 */
	if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
		reg = er32(CTRL_EXT);
		reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
		ew32(CTRL_EXT, reg);
	}

	/* PCI-Ex Control Registers */
	switch (hw->mac.type) {
	case e1000_82574:
	case e1000_82583:
		reg = er32(GCR);
		reg |= (1 << 22);
		ew32(GCR, reg);

		/*
		 * Workaround for hardware errata.
		 * apply workaround for hardware errata documented in errata
		 * docs Fixes issue where some error prone or unreliable PCIe
		 * completions are occurring, particularly with ASPM enabled.
		 * Without fix, issue can cause Tx timeouts.
		 */
		reg = er32(GCR2);
		reg |= 1;
		ew32(GCR2, reg);
		break;
	default:
		break;
	}
}

/**
 * e1000_clear_vfta_82571 - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0, except for the entry/bit protecting the
 * manageability unit's VLAN ID on 82573/74/83.
 **/
static void e1000_clear_vfta_82571(struct e1000_hw *hw)
{
	u32 offset;
	u32 vfta_value = 0;
	u32 vfta_offset = 0;
	u32 vfta_bit_in_reg = 0;

	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		if (hw->mng_cookie.vlan_id != 0) {
			/*
			 * The VFTA is a 4096b bit-field, each identifying
			 * a single VLAN ID. The following operations
			 * determine which 32b entry (i.e. offset) into the
			 * array we want to set the VLAN ID (i.e. bit) of
			 * the manageability unit.
			 */
			vfta_offset = (hw->mng_cookie.vlan_id >>
				       E1000_VFTA_ENTRY_SHIFT) &
			    E1000_VFTA_ENTRY_MASK;
			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
					       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
		}
		break;
	default:
		break;
	}
	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		/*
		 * If the offset we want to clear is the same offset of the
		 * manageability VLAN ID, then clear all bits except that of
		 * the manageability unit.
		 */
		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
		e1e_flush();
	}
}

/**
 * e1000_check_mng_mode_82574 - Check manageability is enabled
 * @hw: pointer to the HW structure
 *
 * Reads the NVM Initialization Control Word 2 and returns true
 * (>0) if any manageability is enabled, else false (0).
 **/
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
{
	u16 data;

	/*
	 * NOTE(review): the e1000_read_nvm() return value is ignored here;
	 * on a read failure 'data' may be uninitialized — confirm whether
	 * the read can fail on these parts.
	 */
	e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
}

/**
 * e1000_led_on_82574 - Turn LED on
 * @hw: pointer to the HW structure
 *
 * Turn LED on.
 **/
static s32 e1000_led_on_82574(struct e1000_hw *hw)
{
	u32 ctrl;
	u32 i;

	ctrl = hw->mac.ledctl_mode2;
	if (!(E1000_STATUS_LU & er32(STATUS))) {
		/*
		 * If no link, then turn LED on by setting the invert bit
		 * for each LED that's "on" (0x0E) in ledctl_mode2.
		 */
		for (i = 0; i < 4; i++)
			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			    E1000_LEDCTL_MODE_LED_ON)
				ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
	}
	ew32(LEDCTL, ctrl);

	return 0;
}

/**
 * e1000_check_phy_82574 - check 82574 phy hung state
 * @hw: pointer to the HW structure
 *
 * Returns whether phy is hung or not
 **/
bool e1000_check_phy_82574(struct e1000_hw *hw)
{
	u16 status_1kbt = 0;
	u16 receive_errors = 0;
	s32 ret_val = 0;

	/*
	 * Read PHY Receive Error counter first; if it is max - all F's - then
	 * read the Base1000T status register. If both are max then PHY is hung.
*/
	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
	if (ret_val)
		return false;
	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
		if (ret_val)
			return false;
		/* Idle-error count also saturated -> declare the PHY hung. */
		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
		    E1000_IDLE_ERROR_COUNT_MASK)
			return true;
	}
	return false;
}

/**
 * e1000_setup_link_82571 - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control. Calls the appropriate media-specific link configuration
 * function. Assuming the adapter has a valid link partner, a valid link
 * should be established. Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
	/*
	 * 82573 does not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		if (hw->fc.requested_mode == e1000_fc_default)
			hw->fc.requested_mode = e1000_fc_full;
		break;
	default:
		break;
	}

	return e1000e_setup_link_generic(hw);
}

/**
 * e1000_setup_copper_link_82571 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.
**/ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); switch (hw->phy.type) { case e1000_phy_m88: case e1000_phy_bm: ret_val = e1000e_copper_link_setup_m88(hw); break; case e1000_phy_igp_2: ret_val = e1000e_copper_link_setup_igp(hw); break; default: return -E1000_ERR_PHY; break; } if (ret_val) return ret_val; return e1000e_setup_copper_link(hw); } /** * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes * @hw: pointer to the HW structure * * Configures collision distance and flow control for fiber and serdes links. * Upon successful setup, poll for link. **/ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) { switch (hw->mac.type) { case e1000_82571: case e1000_82572: /* * If SerDes loopback mode is entered, there is no form * of reset to take the adapter out of that mode. So we * have to explicitly take the adapter out of loopback * mode. This prevents drivers from twiddling their thumbs * if another tool failed to take it out of loopback mode. */ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); break; default: break; } return e1000e_setup_fiber_serdes_link(hw); } /** * e1000_check_for_serdes_link_82571 - Check for link (Serdes) * @hw: pointer to the HW structure * * Reports the link state as up or down. * * If autonegotiation is supported by the link partner, the link state is * determined by the result of autonegotiation. This is the most likely case. * If autonegotiation is not supported by the link partner, and the link * has a valid signal, force the link up. 
*
 * The link state is represented internally here by 4 states:
 *
 * 1) down
 * 2) autoneg_progress
 * 3) autoneg_complete (the link successfully autonegotiated)
 * 4) forced_up (the link has been forced up, it did not autonegotiate)
 *
 **/
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	u32 txcw;
	u32 i;
	s32 ret_val = 0;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {

		/* Receiver is synchronized with no invalid bits. */
		switch (mac->serdes_link_state) {
		case e1000_serdes_link_autoneg_complete:
			if (!(status & E1000_STATUS_LU)) {
				/*
				 * We have lost link, retry autoneg before
				 * reporting link failure
				 */
				mac->serdes_link_state =
				    e1000_serdes_link_autoneg_progress;
				mac->serdes_has_link = false;
				e_dbg("AN_UP     -> AN_PROG\n");
			} else {
				mac->serdes_has_link = true;
			}
			break;

		case e1000_serdes_link_forced_up:
			/*
			 * If we are receiving /C/ ordered sets, re-enable
			 * auto-negotiation in the TXCW register and disable
			 * forced link in the Device Control register in an
			 * attempt to auto-negotiate with our link partner.
			 * If the partner code word is null, stop forcing
			 * and restart auto negotiation.
			 */
			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
				/* Enable autoneg, and unforce link up */
				ew32(TXCW, mac->txcw);
				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
				mac->serdes_link_state =
				    e1000_serdes_link_autoneg_progress;
				mac->serdes_has_link = false;
				e_dbg("FORCED_UP -> AN_PROG\n");
			} else {
				mac->serdes_has_link = true;
			}
			break;

		case e1000_serdes_link_autoneg_progress:
			if (rxcw & E1000_RXCW_C) {
				/*
				 * We received /C/ ordered sets, meaning the
				 * link partner has autonegotiated, and we can
				 * trust the Link Up (LU) status bit.
				 */
				if (status & E1000_STATUS_LU) {
					mac->serdes_link_state =
					    e1000_serdes_link_autoneg_complete;
					e_dbg("AN_PROG   -> AN_UP\n");
					mac->serdes_has_link = true;
				} else {
					/* Autoneg completed, but failed. */
					mac->serdes_link_state =
					    e1000_serdes_link_down;
					e_dbg("AN_PROG   -> DOWN\n");
				}
			} else {
				/*
				 * The link partner did not autoneg.
				 * Force link up and full duplex, and change
				 * state to forced.
				 */
				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
				ew32(CTRL, ctrl);

				/* Configure Flow Control after link up. */
				ret_val = e1000e_config_fc_after_link_up(hw);
				if (ret_val) {
					e_dbg("Error config flow control\n");
					break;
				}
				mac->serdes_link_state =
				    e1000_serdes_link_forced_up;
				mac->serdes_has_link = true;
				e_dbg("AN_PROG   -> FORCED_UP\n");
			}
			break;

		case e1000_serdes_link_down:
		default:
			/*
			 * The link was down but the receiver has now gained
			 * valid sync, so lets see if we can bring the link
			 * up.
			 */
			ew32(TXCW, mac->txcw);
			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
			mac->serdes_link_state =
			    e1000_serdes_link_autoneg_progress;
			mac->serdes_has_link = false;
			e_dbg("DOWN      -> AN_PROG\n");
			break;
		}
	} else {
		if (!(rxcw & E1000_RXCW_SYNCH)) {
			/* No sync at all -> link is down regardless of state. */
			mac->serdes_has_link = false;
			mac->serdes_link_state = e1000_serdes_link_down;
			e_dbg("ANYSTATE  -> DOWN\n");
		} else {
			/*
			 * Check several times, if Sync and Config
			 * both are consistently 1 then simply ignore
			 * the Invalid bit and restart Autoneg
			 */
			for (i = 0; i < AN_RETRY_COUNT; i++) {
				udelay(10);
				rxcw = er32(RXCW);
				if ((rxcw & E1000_RXCW_IV) &&
				    !((rxcw & E1000_RXCW_SYNCH) &&
				      (rxcw & E1000_RXCW_C))) {
					mac->serdes_has_link = false;
					mac->serdes_link_state =
					    e1000_serdes_link_down;
					e_dbg("ANYSTATE  -> DOWN\n");
					break;
				}
			}

			if (i == AN_RETRY_COUNT) {
				/* Sync/Config stayed set -> restart autoneg. */
				txcw = er32(TXCW);
				txcw |= E1000_TXCW_ANE;
				ew32(TXCW, txcw);
				mac->serdes_link_state =
				    e1000_serdes_link_autoneg_progress;
				mac->serdes_has_link = false;
				e_dbg("ANYSTATE  -> AN_PROG\n");
			}
		}
	}

	return ret_val;
}

/**
 * e1000_valid_led_default_82571 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.
If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	switch (hw->mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		/* 82573/74/83 use their own default for the reserved value. */
		if (*data == ID_LED_RESERVED_F746)
			*data = ID_LED_DEFAULT_82573;
		break;
	default:
		if (*data == ID_LED_RESERVED_0000 ||
		    *data == ID_LED_RESERVED_FFFF)
			*data = ID_LED_DEFAULT;
		break;
	}

	return 0;
}

/**
 * e1000e_get_laa_state_82571 - Get locally administered address state
 * @hw: pointer to the HW structure
 *
 * Retrieve and return the current locally administered address state.
 **/
bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
{
	/* The LAA workaround only exists for the 82571. */
	if (hw->mac.type != e1000_82571)
		return false;

	return hw->dev_spec.e82571.laa_is_present;
}

/**
 * e1000e_set_laa_state_82571 - Set locally administered address state
 * @hw: pointer to the HW structure
 * @state: enable/disable locally administered address
 *
 * Enable/Disable the current locally administered address state.
 **/
void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
{
	if (hw->mac.type != e1000_82571)
		return;

	hw->dev_spec.e82571.laa_is_present = state;

	/* If workaround is activated... */
	if (state)
		/*
		 * Hold a copy of the LAA in RAR[14] This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed, the actual LAA is in one of the RARs and no
		 * incoming packets directed to this port are dropped.
		 * Eventually the LAA will be in RAR[0] and RAR[14].
		 */
		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
}

/**
 * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Verifies that the EEPROM has completed the update. After updating the
 * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If
 * the checksum fix is not implemented, we need to set the bit and update
 * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
 * we need to return bad checksum.
 **/
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 data;

	/* Only applies to hardware-controlled flash NVM. */
	if (nvm->type != e1000_nvm_flash_hw)
		return 0;

	/*
	 * Check bit 4 of word 10h. If it is 0, firmware is done updating
	 * 10h-12h. Checksum may need to be fixed.
	 */
	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
	if (ret_val)
		return ret_val;

	if (!(data & 0x10)) {
		/*
		 * Read 0x23 and check bit 15. This bit is a 1
		 * when the checksum has already been fixed. If
		 * the checksum is still wrong and this bit is a
		 * 1, we need to return bad checksum. Otherwise,
		 * we need to set this bit to a 1 and update the
		 * checksum.
		 */
		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
		if (ret_val)
			return ret_val;

		if (!(data & 0x8000)) {
			data |= 0x8000;
			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_update_nvm_checksum(hw);
		}
	}

	return 0;
}

/**
 * e1000_read_mac_addr_82571 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82571) {
		s32 ret_val = 0;

		/*
		 * If there's an alternate MAC address place it in RAR0
		 * so that it will override the Si installed default perm
		 * address.
		 */
		ret_val = e1000_check_alt_mac_addr_generic(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000_read_mac_addr_generic(hw);
}

/**
 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during
 * a driver unload, or wake on lan is not enabled, remove the link.
**/
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	struct e1000_mac_info *mac = &hw->mac;

	if (!phy->ops.check_reset_block)
		return;

	/* If the management interface is not enabled, then power down */
	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/**
 * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 * The statistics registers are clear-on-read, so reading suffices.
 **/
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
{
	e1000e_clear_hw_cntrs_base(hw);

	/* Rx/Tx packet size histogram counters */
	er32(PRC64);
	er32(PRC127);
	er32(PRC255);
	er32(PRC511);
	er32(PRC1023);
	er32(PRC1522);
	er32(PTC64);
	er32(PTC127);
	er32(PTC255);
	er32(PTC511);
	er32(PTC1023);
	er32(PTC1522);

	/* Error and management counters */
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	/* Interrupt assertion/cause counters */
	er32(IAC);
	er32(ICRXOC);

	er32(ICRXPTC);
	er32(ICRXATC);
	er32(ICTXPTC);
	er32(ICTXATC);
	er32(ICTXQEC);
	er32(ICTXQMTC);
	er32(ICRXDMTC);
}

static const struct e1000_mac_operations e82571_mac_ops = {
	/* .check_mng_mode: mac type dependent */
	/* .check_for_link: media type dependent */
	.id_led_init		= e1000e_id_led_init_generic,
	.cleanup_led		= e1000e_cleanup_led_generic,
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
	.get_bus_info		= e1000e_get_bus_info_pcie,
	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
	/* .get_link_up_info: media type dependent */
	/* .led_on: mac type dependent */
	.led_off		= e1000e_led_off_generic,
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.write_vfta		= e1000_write_vfta_generic,
	.clear_vfta		= e1000_clear_vfta_82571,
	.reset_hw		= e1000_reset_hw_82571,
	.init_hw		= e1000_init_hw_82571,
	.setup_link		= e1000_setup_link_82571,
	/* .setup_physical_interface: media type dependent */
	.setup_led		= e1000e_setup_led_generic,
	.config_collision_dist	= e1000e_config_collision_dist_generic,
	.read_mac_addr		= e1000_read_mac_addr_82571,
};
/* PHY operation tables: one per PHY family found on 8257x parts. */
static const struct e1000_phy_operations e82_phy_ops_igp = {
	.acquire		= e1000_get_hw_semaphore_82571,
	.check_polarity		= e1000_check_polarity_igp,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit			= NULL,
	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
	.get_cfg_done		= e1000_get_cfg_done_82571,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.get_info		= e1000e_get_phy_info_igp,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_put_hw_semaphore_82571,
	.reset			= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_reg		= e1000e_write_phy_reg_igp,
	.cfg_on_link_up      	= NULL,
};

static const struct e1000_phy_operations e82_phy_ops_m88 = {
	.acquire		= e1000_get_hw_semaphore_82571,
	.check_polarity		= e1000_check_polarity_m88,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit			= e1000e_phy_sw_reset,
	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
	.get_cfg_done		= e1000e_get_cfg_done,
	.get_cable_length	= e1000e_get_cable_length_m88,
	.get_info		= e1000e_get_phy_info_m88,
	.read_reg		= e1000e_read_phy_reg_m88,
	.release		= e1000_put_hw_semaphore_82571,
	.reset			= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_reg		= e1000e_write_phy_reg_m88,
	.cfg_on_link_up      	= NULL,
};

static const struct e1000_phy_operations e82_phy_ops_bm = {
	.acquire		= e1000_get_hw_semaphore_82571,
	.check_polarity		= e1000_check_polarity_m88,
	.check_reset_block	= e1000e_check_reset_block_generic,
	.commit			= e1000e_phy_sw_reset,
	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
	.get_cfg_done		= e1000e_get_cfg_done,
	.get_cable_length	= e1000e_get_cable_length_m88,
	.get_info		= e1000e_get_phy_info_m88,
	.read_reg		= e1000e_read_phy_reg_bm2,
	.release		= e1000_put_hw_semaphore_82571,
	.reset			= e1000e_phy_hw_reset_generic,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
	.write_reg		= e1000e_write_phy_reg_bm2,
	.cfg_on_link_up      	= NULL,
};

static const struct e1000_nvm_operations e82571_nvm_ops = {
	.acquire		= e1000_acquire_nvm_82571,
	.read			= e1000e_read_nvm_eerd,
	.release		= e1000_release_nvm_82571,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_82571,
	.valid_led_default	= e1000_valid_led_default_82571,
	.validate		= e1000_validate_nvm_checksum_82571,
	.write			= e1000_write_nvm_82571,
};

/* Per-MAC-type board descriptors consumed by the e1000e core. */
const struct e1000_info e1000_82571_info = {
	.mac			= e1000_82571,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_RESET_OVERWRITES_LAA /* errata */
				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
				  | FLAG_APME_CHECK_PORT_B,
	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
				  | FLAG2_DMA_BURST,
	.pba			= 38,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_igp,
	.nvm_ops		= &e82571_nvm_ops,
};

const struct e1000_info e1000_82572_info = {
	.mac			= e1000_82572,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
				  | FLAG2_DMA_BURST,
	.pba			= 38,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_igp,
	.nvm_ops		= &e82571_nvm_ops,
};

const struct e1000_info e1000_82573_info = {
	.mac			= e1000_82573,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_HAS_AMT
				  | FLAG_HAS_SWSM_ON_LOAD,
	.flags2			= FLAG2_DISABLE_ASPM_L1
				  | FLAG2_DISABLE_ASPM_L0S,
	.pba			= 20,
	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_m88,
	.nvm_ops		= &e82571_nvm_ops,
};

const struct e1000_info e1000_82574_info = {
	.mac			= e1000_82574,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_MSIX
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_HAS_AMT
				  | FLAG_HAS_CTRLEXT_ON_LOAD,
	.flags2			= FLAG2_CHECK_PHY_HANG
				  | FLAG2_DISABLE_ASPM_L0S
				  | FLAG2_NO_DISABLE_RX,
	.pba			= 32,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_bm,
	.nvm_ops		= &e82571_nvm_ops,
};

const struct e1000_info e1000_82583_info = {
	.mac			= e1000_82583,
	.flags			= FLAG_HAS_HW_VLAN_FILTER
				  | FLAG_HAS_WOL
				  | FLAG_APME_IN_CTRL3
				  | FLAG_HAS_SMART_POWER_DOWN
				  | FLAG_HAS_AMT
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_HAS_CTRLEXT_ON_LOAD,
	.flags2			= FLAG2_DISABLE_ASPM_L0S
				  | FLAG2_NO_DISABLE_RX,
	.pba			= 32,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_82571,
	.mac_ops		= &e82571_mac_ops,
	.phy_ops		= &e82_phy_ops_bm,
	.nvm_ops		= &e82571_nvm_ops,
};
gpl-2.0
manveru0/FeaCore_Phoenix_S3
drivers/media/dvb/frontends/s921.c
3259
11854
/* * Sharp VA3A5JZ921 One Seg Broadcast Module driver * This device is labeled as just S. 921 at the top of the frontend can * * Copyright (C) 2009-2010 Mauro Carvalho Chehab <mchehab@redhat.com> * Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com> * * Developed for Leadership SBTVD 1seg device sold in Brazil * * Frontend module based on cx24123 driver, getting some info from * the old s921 driver. * * FIXME: Need to port to DVB v5.2 API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "s921.h" static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); #define rc(args...) do { \ printk(KERN_ERR "s921: " args); \ } while (0) #define dprintk(args...) \ do { \ if (debug) { \ printk(KERN_DEBUG "s921: %s: ", __func__); \ printk(args); \ } \ } while (0) struct s921_state { struct i2c_adapter *i2c; const struct s921_config *config; struct dvb_frontend frontend; /* The Demod can't easily provide these, we cache them */ u32 currentfreq; }; /* * Various tuner defaults need to be established for a given frequency kHz. * fixme: The bounds on the bands do not match the doc in real life. * fixme: Some of them have been moved, other might need adjustment. 
*/ static struct s921_bandselect_val { u32 freq_low; u8 band_reg; } s921_bandselect[] = { { 0, 0x7b }, { 485140000, 0x5b }, { 515140000, 0x3b }, { 545140000, 0x1b }, { 599140000, 0xfb }, { 623140000, 0xdb }, { 659140000, 0xbb }, { 713140000, 0x9b }, }; struct regdata { u8 reg; u8 data; }; static struct regdata s921_init[] = { { 0x01, 0x80 }, /* Probably, a reset sequence */ { 0x01, 0x40 }, { 0x01, 0x80 }, { 0x01, 0x40 }, { 0x02, 0x00 }, { 0x03, 0x40 }, { 0x04, 0x01 }, { 0x05, 0x00 }, { 0x06, 0x00 }, { 0x07, 0x00 }, { 0x08, 0x00 }, { 0x09, 0x00 }, { 0x0a, 0x00 }, { 0x0b, 0x5a }, { 0x0c, 0x00 }, { 0x0d, 0x00 }, { 0x0f, 0x00 }, { 0x13, 0x1b }, { 0x14, 0x80 }, { 0x15, 0x40 }, { 0x17, 0x70 }, { 0x18, 0x01 }, { 0x19, 0x12 }, { 0x1a, 0x01 }, { 0x1b, 0x12 }, { 0x1c, 0xa0 }, { 0x1d, 0x00 }, { 0x1e, 0x0a }, { 0x1f, 0x08 }, { 0x20, 0x40 }, { 0x21, 0xff }, { 0x22, 0x4c }, { 0x23, 0x4e }, { 0x24, 0x4c }, { 0x25, 0x00 }, { 0x26, 0x00 }, { 0x27, 0xf4 }, { 0x28, 0x60 }, { 0x29, 0x88 }, { 0x2a, 0x40 }, { 0x2b, 0x40 }, { 0x2c, 0xff }, { 0x2d, 0x00 }, { 0x2e, 0xff }, { 0x2f, 0x00 }, { 0x30, 0x20 }, { 0x31, 0x06 }, { 0x32, 0x0c }, { 0x34, 0x0f }, { 0x37, 0xfe }, { 0x38, 0x00 }, { 0x39, 0x63 }, { 0x3a, 0x10 }, { 0x3b, 0x10 }, { 0x47, 0x00 }, { 0x49, 0xe5 }, { 0x4b, 0x00 }, { 0x50, 0xc0 }, { 0x52, 0x20 }, { 0x54, 0x5a }, { 0x55, 0x5b }, { 0x56, 0x40 }, { 0x57, 0x70 }, { 0x5c, 0x50 }, { 0x5d, 0x00 }, { 0x62, 0x17 }, { 0x63, 0x2f }, { 0x64, 0x6f }, { 0x68, 0x00 }, { 0x69, 0x89 }, { 0x6a, 0x00 }, { 0x6b, 0x00 }, { 0x6c, 0x00 }, { 0x6d, 0x00 }, { 0x6e, 0x00 }, { 0x70, 0x10 }, { 0x71, 0x00 }, { 0x75, 0x00 }, { 0x76, 0x30 }, { 0x77, 0x01 }, { 0xaf, 0x00 }, { 0xb0, 0xa0 }, { 0xb2, 0x3d }, { 0xb3, 0x25 }, { 0xb4, 0x8b }, { 0xb5, 0x4b }, { 0xb6, 0x3f }, { 0xb7, 0xff }, { 0xb8, 0xff }, { 0xb9, 0xfc }, { 0xba, 0x00 }, { 0xbb, 0x00 }, { 0xbc, 0x00 }, { 0xd0, 0x30 }, { 0xe4, 0x84 }, { 0xf0, 0x48 }, { 0xf1, 0x19 }, { 0xf2, 0x5a }, { 0xf3, 0x8e }, { 0xf4, 0x2d }, { 0xf5, 0x07 }, { 0xf6, 0x5a }, { 
0xf7, 0xba }, { 0xf8, 0xd7 }, }; static struct regdata s921_prefreq[] = { { 0x47, 0x60 }, { 0x68, 0x00 }, { 0x69, 0x89 }, { 0xf0, 0x48 }, { 0xf1, 0x19 }, }; static struct regdata s921_postfreq[] = { { 0xf5, 0xae }, { 0xf6, 0xb7 }, { 0xf7, 0xba }, { 0xf8, 0xd7 }, { 0x68, 0x0a }, { 0x69, 0x09 }, }; static int s921_i2c_writereg(struct s921_state *state, u8 i2c_addr, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = i2c_addr, .flags = 0, .buf = buf, .len = 2 }; int rc; rc = i2c_transfer(state->i2c, &msg, 1); if (rc != 1) { printk("%s: writereg rcor(rc == %i, reg == 0x%02x," " data == 0x%02x)\n", __func__, rc, reg, data); return rc; } return 0; } static int s921_i2c_writeregdata(struct s921_state *state, u8 i2c_addr, struct regdata *rd, int size) { int i, rc; for (i = 0; i < size; i++) { rc = s921_i2c_writereg(state, i2c_addr, rd[i].reg, rd[i].data); if (rc < 0) return rc; } return 0; } static int s921_i2c_readreg(struct s921_state *state, u8 i2c_addr, u8 reg) { u8 val; int rc; struct i2c_msg msg[] = { { .addr = i2c_addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = i2c_addr, .flags = I2C_M_RD, .buf = &val, .len = 1 } }; rc = i2c_transfer(state->i2c, msg, 2); if (rc != 2) { rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc); return rc; } return val; } #define s921_readreg(state, reg) \ s921_i2c_readreg(state, state->config->demod_address, reg) #define s921_writereg(state, reg, val) \ s921_i2c_writereg(state, state->config->demod_address, reg, val) #define s921_writeregdata(state, regdata) \ s921_i2c_writeregdata(state, state->config->demod_address, \ regdata, ARRAY_SIZE(regdata)) static int s921_pll_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct s921_state *state = fe->demodulator_priv; int band, rc, i; unsigned long f_offset; u8 f_switch; u64 offset; dprintk("frequency=%i\n", p->frequency); for (band = 0; band < ARRAY_SIZE(s921_bandselect); band++) if (p->frequency < s921_bandselect[band].freq_low) break; band--; 
if (band < 0) { rc("%s: frequency out of range\n", __func__); return -EINVAL; } f_switch = s921_bandselect[band].band_reg; offset = ((u64)p->frequency) * 258; do_div(offset, 6000000); f_offset = ((unsigned long)offset) + 2321; rc = s921_writeregdata(state, s921_prefreq); if (rc < 0) return rc; rc = s921_writereg(state, 0xf2, (f_offset >> 8) & 0xff); if (rc < 0) return rc; rc = s921_writereg(state, 0xf3, f_offset & 0xff); if (rc < 0) return rc; rc = s921_writereg(state, 0xf4, f_switch); if (rc < 0) return rc; rc = s921_writeregdata(state, s921_postfreq); if (rc < 0) return rc; for (i = 0 ; i < 6; i++) { rc = s921_readreg(state, 0x80); dprintk("status 0x80: %02x\n", rc); } rc = s921_writereg(state, 0x01, 0x40); if (rc < 0) return rc; rc = s921_readreg(state, 0x01); dprintk("status 0x01: %02x\n", rc); rc = s921_readreg(state, 0x80); dprintk("status 0x80: %02x\n", rc); rc = s921_readreg(state, 0x80); dprintk("status 0x80: %02x\n", rc); rc = s921_readreg(state, 0x32); dprintk("status 0x32: %02x\n", rc); dprintk("pll tune band=%d, pll=%d\n", f_switch, (int)f_offset); return 0; } static int s921_initfe(struct dvb_frontend *fe) { struct s921_state *state = fe->demodulator_priv; int rc; dprintk("\n"); rc = s921_writeregdata(state, s921_init); if (rc < 0) return rc; return 0; } static int s921_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct s921_state *state = fe->demodulator_priv; int regstatus, rc; *status = 0; rc = s921_readreg(state, 0x81); if (rc < 0) return rc; regstatus = rc << 8; rc = s921_readreg(state, 0x82); if (rc < 0) return rc; regstatus |= rc; dprintk("status = %04x\n", regstatus); /* Full Sync - We don't know what each bit means on regs 0x81/0x82 */ if ((regstatus & 0xff) == 0x40) { *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } else if (regstatus & 0x40) { /* This is close to Full Sync, but not enough to get useful info */ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC; } 
return 0; } static int s921_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { fe_status_t status; struct s921_state *state = fe->demodulator_priv; int rc; /* FIXME: Use the proper register for it... 0x80? */ rc = s921_read_status(fe, &status); if (rc < 0) return rc; *strength = (status & FE_HAS_LOCK) ? 0xffff : 0; dprintk("strength = 0x%04x\n", *strength); rc = s921_readreg(state, 0x01); dprintk("status 0x01: %02x\n", rc); rc = s921_readreg(state, 0x80); dprintk("status 0x80: %02x\n", rc); rc = s921_readreg(state, 0x32); dprintk("status 0x32: %02x\n", rc); return 0; } static int s921_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct s921_state *state = fe->demodulator_priv; int rc; dprintk("\n"); /* FIXME: We don't know how to use non-auto mode */ rc = s921_pll_tune(fe, p); if (rc < 0) return rc; state->currentfreq = p->frequency; return 0; } static int s921_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct s921_state *state = fe->demodulator_priv; /* FIXME: Probably it is possible to get it from regs f1 and f2 */ p->frequency = state->currentfreq; return 0; } static int s921_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { int rc = 0; dprintk("\n"); if (params != NULL) rc = s921_set_frontend(fe, params); if (!(mode_flags & FE_TUNE_MODE_ONESHOT)) s921_read_status(fe, status); return rc; } static int s921_get_algo(struct dvb_frontend *fe) { return 1; /* FE_ALGO_HW */ } static void s921_release(struct dvb_frontend *fe) { struct s921_state *state = fe->demodulator_priv; dprintk("\n"); kfree(state); } static struct dvb_frontend_ops s921_ops; struct dvb_frontend *s921_attach(const struct s921_config *config, struct i2c_adapter *i2c) { /* allocate memory for the internal state */ struct s921_state *state = kzalloc(sizeof(struct s921_state), GFP_KERNEL); dprintk("\n"); if (state == NULL) { rc("Unable to 
kzalloc\n"); goto rcor; } /* setup the state */ state->config = config; state->i2c = i2c; /* create dvb_frontend */ memcpy(&state->frontend.ops, &s921_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; rcor: kfree(state); return NULL; } EXPORT_SYMBOL(s921_attach); static struct dvb_frontend_ops s921_ops = { /* Use dib8000 values per default */ .info = { .name = "Sharp S921", .type = FE_OFDM, .frequency_min = 470000000, /* * Max should be 770MHz instead, according with Sharp docs, * but Leadership doc says it works up to 806 MHz. This is * required to get channel 69, used in Brazil */ .frequency_max = 806000000, .frequency_tolerance = 0, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = s921_release, .init = s921_initfe, .set_frontend = s921_set_frontend, .get_frontend = s921_get_frontend, .read_status = s921_read_status, .read_signal_strength = s921_read_signal_strength, .tune = s921_tune, .get_frontend_algo = s921_get_algo, }; MODULE_DESCRIPTION("DVB Frontend module for Sharp S921 hardware"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Douglas Landgraf <dougsland@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
bigbiff/i717-GB-Kernel
drivers/watchdog/w83977f_wdt.c
4283
12330
/*
 * W83977F Watchdog Timer Driver for Winbond W83977F I/O Chip
 *
 * (c) Copyright 2005  Jose Goncalves <jose.goncalves@inov.pt>
 *
 * Based on w83877f_wdt.c by Scott Jennings,
 * and wdt977.c by Woody Suwalski
 *
 * -----------------------
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/system.h>

#define WATCHDOG_VERSION  "1.00"
#define WATCHDOG_NAME     "W83977F WDT"
#define PFX WATCHDOG_NAME ": "
#define DRIVER_VERSION    WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"

/* SuperIO chip is programmed through an index/data port pair */
#define IO_INDEX_PORT     0x3F0
#define IO_DATA_PORT      (IO_INDEX_PORT+1)

/* Magic bytes that unlock/lock the SuperIO configuration registers */
#define UNLOCK_DATA       0x87
#define LOCK_DATA         0xAA
#define DEVICE_REGISTER   0x07

#define	DEFAULT_TIMEOUT   45		/* default timeout in seconds */

static	int timeout = DEFAULT_TIMEOUT;
static	int timeoutW;			/* timeout in watchdog counter units */
static	unsigned long timer_alive;	/* bit 0 set while /dev/watchdog is open */
static	int testmode;
static	char expect_close;		/* set to 42 when 'V' was written (magic close) */
static	DEFINE_SPINLOCK(spinlock);	/* serializes all index/data port sequences */

module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
		"Watchdog timeout in seconds (15..7635), default="
				__MODULE_STRING(DEFAULT_TIMEOUT) ")");
module_param(testmode, int, 0);
MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");

static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

/*
 * Start the watchdog
 */

static int wdt_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&spinlock, flags);

	/* Unlock the SuperIO chip */
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);

	/*
	 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
	 * F2 has the timeout in watchdog counter units.
	 * F3 is set to enable watchdog LED blink at timeout.
	 * F4 is used to just clear the TIMEOUT'ed state (bit 0).
	 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);
	outb_p(0xF2, IO_INDEX_PORT);
	outb_p(timeoutW, IO_DATA_PORT);
	outb_p(0xF3, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);
	outb_p(0xF4, IO_INDEX_PORT);
	outb_p(0x00, IO_DATA_PORT);

	/* Set device Aux2 active */
	outb_p(0x30, IO_INDEX_PORT);
	outb_p(0x01, IO_DATA_PORT);

	/*
	 * Select device Aux1 (dev=7) to set GP16 as the watchdog output
	 * (in reg E6) and GP13 as the watchdog LED output (in reg E3).
	 * Map GP16 at pin 119.
	 * In test mode watch the bit 0 on F4 to indicate "triggered" or
	 * check watchdog LED on SBC.
	 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x07, IO_DATA_PORT);
	if (!testmode) {
		unsigned pin_map;

		outb_p(0xE6, IO_INDEX_PORT);
		outb_p(0x0A, IO_DATA_PORT);
		outb_p(0x2C, IO_INDEX_PORT);
		pin_map = inb_p(IO_DATA_PORT);
		pin_map |= 0x10;
		pin_map &= ~(0x20);
		outb_p(0x2C, IO_INDEX_PORT);
		outb_p(pin_map, IO_DATA_PORT);
	}
	outb_p(0xE3, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);

	/* Set device Aux1 active */
	outb_p(0x30, IO_INDEX_PORT);
	outb_p(0x01, IO_DATA_PORT);

	/* Lock the SuperIO chip */
	outb_p(LOCK_DATA, IO_INDEX_PORT);

	spin_unlock_irqrestore(&spinlock, flags);

	printk(KERN_INFO PFX "activated.\n");

	return 0;
}

/*
 * Stop the watchdog
 */

static int wdt_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&spinlock, flags);

	/* Unlock the SuperIO chip */
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);

	/*
	 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
	 * F2 is reset to its default value (watchdog timer disabled).
	 * F3 is reset to its default state.
	 * F4 clears the TIMEOUT'ed state (bit 0) - back to default.
	 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);
	outb_p(0xF2, IO_INDEX_PORT);
	outb_p(0xFF, IO_DATA_PORT);
	outb_p(0xF3, IO_INDEX_PORT);
	outb_p(0x00, IO_DATA_PORT);
	outb_p(0xF4, IO_INDEX_PORT);
	outb_p(0x00, IO_DATA_PORT);
	outb_p(0xF2, IO_INDEX_PORT);
	outb_p(0x00, IO_DATA_PORT);

	/*
	 * Select device Aux1 (dev=7) to set GP16 (in reg E6) and
	 * Gp13 (in reg E3) as inputs.
	 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x07, IO_DATA_PORT);
	if (!testmode) {
		outb_p(0xE6, IO_INDEX_PORT);
		outb_p(0x01, IO_DATA_PORT);
	}
	outb_p(0xE3, IO_INDEX_PORT);
	outb_p(0x01, IO_DATA_PORT);

	/* Lock the SuperIO chip */
	outb_p(LOCK_DATA, IO_INDEX_PORT);

	spin_unlock_irqrestore(&spinlock, flags);

	printk(KERN_INFO PFX "shutdown.\n");

	return 0;
}

/*
 * Send a keepalive ping to the watchdog
 * This is done by simply re-writing the timeout to reg. 0xF2
 */

static int wdt_keepalive(void)
{
	unsigned long flags;

	spin_lock_irqsave(&spinlock, flags);

	/* Unlock the SuperIO chip */
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);

	/* Select device Aux2 (device=8) to kick watchdog reg F2 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);
	outb_p(0xF2, IO_INDEX_PORT);
	outb_p(timeoutW, IO_DATA_PORT);

	/* Lock the SuperIO chip */
	outb_p(LOCK_DATA, IO_INDEX_PORT);

	spin_unlock_irqrestore(&spinlock, flags);

	return 0;
}

/*
 * Set the watchdog timeout value.
 * Accepts 15..7635 seconds; updates both the user-visible "timeout"
 * (rounded to what the hardware will actually do) and "timeoutW"
 * (the raw counter value written to reg F2).  Returns -EINVAL if out
 * of range.
 */

static int wdt_set_timeout(int t)
{
	int tmrval;

	/*
	 * Convert seconds to watchdog counter time units, rounding up.
	 * On PCM-5335 watchdog units are 30 seconds/step with 15 sec
	 * startup value. This information is supplied in the PCM-5335
	 * manual and was checked by me on a real board. This is a bit
	 * strange because W83977f datasheet says counter unit is in
	 * minutes!
	 */
	if (t < 15)
		return -EINVAL;

	tmrval = ((t + 15) + 29) / 30;

	if (tmrval > 255)
		return -EINVAL;

	/*
	 * timeout is the timeout in seconds,
	 * timeoutW is the timeout in watchdog counter units.
	 */
	timeoutW = tmrval;
	timeout = (timeoutW * 30) - 15;
	return 0;
}

/*
 * Get the watchdog status: reads reg F4 and maps its bit 0 (the
 * TIMEOUT'ed state) to WDIOF_CARDRESET.
 */

static int wdt_get_status(int *status)
{
	int new_status;
	unsigned long flags;

	spin_lock_irqsave(&spinlock, flags);

	/* Unlock the SuperIO chip */
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);

	/* Select device Aux2 (device=8) to read watchdog reg F4 */
	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
	outb_p(0x08, IO_DATA_PORT);
	outb_p(0xF4, IO_INDEX_PORT);
	new_status = inb_p(IO_DATA_PORT);

	/* Lock the SuperIO chip */
	outb_p(LOCK_DATA, IO_INDEX_PORT);

	spin_unlock_irqrestore(&spinlock, flags);

	*status = 0;
	if (new_status & 1)
		*status |= WDIOF_CARDRESET;

	return 0;
}

/*
 *	/dev/watchdog handling
 */

static int wdt_open(struct inode *inode, struct file *file)
{
	/* If the watchdog is alive we don't need to start it again */
	if (test_and_set_bit(0, &timer_alive))
		return -EBUSY;

	if (nowayout)
		__module_get(THIS_MODULE);

	wdt_start();
	return nonseekable_open(inode, file);
}

static int wdt_release(struct inode *inode, struct file *file)
{
	/*
	 * Shut off the timer.
	 * Lock it in if it's a module and we set nowayout
	 */
	/* expect_close == 42 only if the magic 'V' was the last write */
	if (expect_close == 42) {
		wdt_stop();
		clear_bit(0, &timer_alive);
	} else {
		wdt_keepalive();
		printk(KERN_CRIT PFX
			"unexpected close, not stopping watchdog!\n");
	}
	expect_close = 0;
	return 0;
}

/*
 *      wdt_write:
 *      @file: file handle to the watchdog
 *      @buf: buffer to write (unused as data does not matter here
 *      @count: count of bytes
 *      @ppos: pointer to the position to write. No seeks allowed
 *
 *      A write to a watchdog device is defined as a keepalive signal. Any
 *      write of data will do, as we we don't define content meaning.
 */

static ssize_t wdt_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	/* See if we got the magic character 'V' and reload the timer */
	if (count) {
		if (!nowayout) {
			size_t ofs;

			/* note: just in case someone wrote the
			   magic character long ago */
			expect_close = 0;

			/* scan to see whether or not we got
			   the magic character */
			for (ofs = 0; ofs != count; ofs++) {
				char c;
				if (get_user(c, buf + ofs))
					return -EFAULT;
				if (c == 'V')
					expect_close = 42;
			}
		}

		/* someone wrote to us, we should restart timer */
		wdt_keepalive();
	}
	return count;
}

/*
 *      wdt_ioctl:
 *      @inode: inode of the device
 *      @file: file handle to the device
 *      @cmd: watchdog command
 *      @arg: argument pointer
 *
 *      The watchdog API defines a common set of functions for all
 *      watchdogs according to their available features.
 */

static const struct watchdog_info ident = {
	.options =	WDIOF_SETTIMEOUT |
			WDIOF_MAGICCLOSE |
			WDIOF_KEEPALIVEPING,
	.firmware_version =	1,
	.identity = WATCHDOG_NAME,
};

static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int status;
	int new_options, retval = -EINVAL;
	int new_timeout;
	union {
		struct watchdog_info __user *ident;
		int __user *i;
	} uarg;

	uarg.i = (int __user *)arg;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(uarg.ident, &ident,
						sizeof(ident)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
		wdt_get_status(&status);
		return put_user(status, uarg.i);

	case WDIOC_GETBOOTSTATUS:
		return put_user(0, uarg.i);

	case WDIOC_SETOPTIONS:
		if (get_user(new_options, uarg.i))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			wdt_stop();
			retval = 0;
		}

		if (new_options & WDIOS_ENABLECARD) {
			wdt_start();
			retval = 0;
		}

		return retval;

	case WDIOC_KEEPALIVE:
		wdt_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, uarg.i))
			return -EFAULT;

		if (wdt_set_timeout(new_timeout))
			return -EINVAL;

		wdt_keepalive();
		/* Fall */

	case WDIOC_GETTIMEOUT:
		return put_user(timeout, uarg.i);

	default:
		return -ENOTTY;

	}
}

/* Stop the watchdog on an orderly shutdown/halt so it cannot fire
   during the reboot sequence. */
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt_stop();
	return NOTIFY_DONE;
}

static const struct file_operations wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= wdt_write,
	.unlocked_ioctl	= wdt_ioctl,
	.open		= wdt_open,
	.release	= wdt_release,
};

static struct miscdevice wdt_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &wdt_fops,
};

static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};

static int __init w83977f_wdt_init(void)
{
	int rc;

	printk(KERN_INFO PFX DRIVER_VERSION);

	/*
	 * Check that the timeout value is within it's range;
	 * if not reset to the default
	 */
	if (wdt_set_timeout(timeout)) {
		wdt_set_timeout(DEFAULT_TIMEOUT);
		printk(KERN_INFO PFX
		    "timeout value must be 15 <= timeout <= 7635, using %d\n",
							DEFAULT_TIMEOUT);
	}

	if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) {
		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
			IO_INDEX_PORT);
		rc = -EIO;
		goto err_out;
	}

	rc = register_reboot_notifier(&wdt_notifier);
	if (rc) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", rc);
		goto err_out_region;
	}

	rc = misc_register(&wdt_miscdev);
	if (rc) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
						wdt_miscdev.minor, rc);
		goto err_out_reboot;
	}

	printk(KERN_INFO PFX
		"initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
					timeout, nowayout, testmode);

	return 0;

err_out_reboot:
	unregister_reboot_notifier(&wdt_notifier);
err_out_region:
	release_region(IO_INDEX_PORT, 2);
err_out:
	return rc;
}

static void __exit w83977f_wdt_exit(void)
{
	wdt_stop();
	misc_deregister(&wdt_miscdev);
	unregister_reboot_notifier(&wdt_notifier);
	release_region(IO_INDEX_PORT, 2);
}

module_init(w83977f_wdt_init);
module_exit(w83977f_wdt_exit);

MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>");
MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
CM-Tab-S/android_kernel_samsung_klimtwifi
drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
5051
14309
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"

static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw);

/*
 * Program the RF6052 channel bandwidth: reg RF_CHNLBW bits 10-11 are
 * cleared for 40 MHz and set to 0x0400 for 20 MHz operation.
 */
void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	switch (bandwidth) {
	case HT_CHANNEL_WIDTH_20:
		rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
					     0xfffff3ff) | 0x0400);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
			      rtlphy->rfreg_chnlval[0]);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
					     0xfffff3ff));
		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
			      rtlphy->rfreg_chnlval[0]);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", bandwidth);
		break;
	}
}

/*
 * Write the CCK TX power for both RF paths.  Each tx_agc word packs the
 * per-rate power byte four times; during scan a fixed 0x3f3f3f3f is used
 * unless turbo-scan is off (eeprom_regulatory != 0).  Every byte is
 * clamped to RF6052_MAX_TX_PWR before being written to the AGC registers.
 */
void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
					u8 *ppowerlevel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u32 tx_agc[2] = {0, 0}, tmpval;
	bool turbo_scanoff = false;
	u8 idx1, idx2;
	u8 *ptr;

	if (rtlefuse->eeprom_regulatory != 0)
		turbo_scanoff = true;

	if (mac->act_scanning) {
		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;

		if (turbo_scanoff) {
			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
				tx_agc[idx1] = ppowerlevel[idx1] |
				    (ppowerlevel[idx1] << 8) |
				    (ppowerlevel[idx1] << 16) |
				    (ppowerlevel[idx1] << 24);
			}
		}
	} else {
		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
			tx_agc[idx1] = ppowerlevel[idx1] |
			    (ppowerlevel[idx1] << 8) |
			    (ppowerlevel[idx1] << 16) |
			    (ppowerlevel[idx1] << 24);
		}

		if (rtlefuse->eeprom_regulatory == 0) {
			/*
			 * NOTE(review): path A combines bytes 6/7 with an
			 * 8-bit shift, but path B combines bytes 14/15 with
			 * a 24-bit shift — looks asymmetric; confirm against
			 * vendor reference code before "fixing".
			 */
			tmpval =
			    (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
			    (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8);
			tx_agc[RF90_PATH_A] += tmpval;

			tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
			    (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24);
			tx_agc[RF90_PATH_B] += tmpval;
		}
	}

	/* Clamp each packed power byte to the hardware maximum */
	for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
		ptr = (u8 *) (&(tx_agc[idx1]));
		for (idx2 = 0; idx2 < 4; idx2++) {
			if (*ptr > RF6052_MAX_TX_PWR)
				*ptr = RF6052_MAX_TX_PWR;
			ptr++;
		}
	}

	tmpval = tx_agc[RF90_PATH_A] & 0xff;
	rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
		tmpval, RTXAGC_A_CCK1_MCS32);

	tmpval = tx_agc[RF90_PATH_A] >> 8;
	tmpval = tmpval & 0xff00ffff;

	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
		tmpval, RTXAGC_B_CCK11_A_CCK2_11);

	tmpval = tx_agc[RF90_PATH_B] >> 24;
	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
		tmpval, RTXAGC_B_CCK11_A_CCK2_11);

	tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
	rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
		tmpval, RTXAGC_B_CCK1_55_MCS32);
}

/*
 * Compute the per-path OFDM and MCS power-base words for @channel.
 * Each base byte is the power level plus the efuse legacy/HT20 diff,
 * replicated into all four bytes of the 32-bit word.
 */
static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
				      u8 *ppowerlevel, u8 channel,
				      u32 *ofdmbase, u32 *mcsbase)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u32 powerBase0, powerBase1;
	u8 legacy_pwrdiff, ht20_pwrdiff;
	u8 i, powerlevel[2];

	for (i = 0; i < 2; i++) {
		powerlevel[i] = ppowerlevel[i];
		legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
		powerBase0 = powerlevel[i] + legacy_pwrdiff;

		powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
		    (powerBase0 << 8) | powerBase0;
		*(ofdmbase + i) = powerBase0;
		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
			" [OFDM power base index rf(%c) = 0x%x]\n",
			i == 0 ? 'A' : 'B', *(ofdmbase + i));
	}

	for (i = 0; i < 2; i++) {
		/* HT20 gets an extra efuse power delta on top of the level */
		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
			ht20_pwrdiff =
			    rtlefuse->txpwr_ht20diff[i][channel - 1];
			powerlevel[i] += ht20_pwrdiff;
		}
		powerBase1 = powerlevel[i];
		powerBase1 = (powerBase1 << 24) |
		    (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;

		*(mcsbase + i) = powerBase1;

		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
			" [MCS power base index rf(%c) = 0x%x]\n",
			i == 0 ? 'A' : 'B', *(mcsbase + i));
	}
}

/*
 * Build the value to write into one OFDM/MCS power register, honoring
 * the efuse regulatory mode:
 *   0/default: power base + per-channel-group table offset
 *   1: table offset selected by channel group (20 MHz) or base only (40 MHz)
 *   2: power base only
 *   3: power base + customer limit clamped by the efuse pwrgroup tables
 * A further BT coexistence reduction is applied at the end.
 */
static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
						       u8 channel, u8 index,
						       u32 *powerBase0,
						       u32 *powerBase1,
						       u32 *p_outwriteval)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 i, chnlgroup = 0, pwr_diff_limit[4];
	u32 writeVal, customer_limit, rf;

	for (rf = 0; rf < 2; rf++) {
		switch (rtlefuse->eeprom_regulatory) {
		case 0:
			chnlgroup = 0;

			writeVal =
			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
			    (rf ? 8 : 0)]
			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, writeVal(%c) = 0x%x\n",
				rf == 0 ? 'A' : 'B', writeVal);
			break;
		case 1:
			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
				writeVal = ((index < 2) ? powerBase0[rf] :
					    powerBase1[rf]);

				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n",
					rf == 0 ? 'A' : 'B', writeVal);
			} else {
				/* Channels 1-3, 4-9 and 10+ each use their
				 * own offset group; a 4th group shifts the
				 * selection up by one. */
				if (rtlphy->pwrgroup_cnt == 1)
					chnlgroup = 0;
				if (rtlphy->pwrgroup_cnt >= 3) {
					if (channel <= 3)
						chnlgroup = 0;
					else if (channel >= 4 && channel <= 9)
						chnlgroup = 1;
					else if (channel > 9)
						chnlgroup = 2;
					if (rtlphy->pwrgroup_cnt == 4)
						chnlgroup++;
				}

				writeVal =
				    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
				    [index + (rf ? 8 : 0)] + ((index < 2) ?
							      powerBase0[rf] :
							      powerBase1[rf]);

				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
					rf == 0 ? 'A' : 'B', writeVal);
			}
			break;
		case 2:
			writeVal =
			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Better regulatory, writeVal(%c) = 0x%x\n",
				rf == 0 ? 'A' : 'B', writeVal);
			break;
		case 3:
			chnlgroup = 0;

			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 40MHz rf(%c) = 0x%x\n",
					rf == 0 ? 'A' : 'B',
					rtlefuse->pwrgroup_ht40[rf][channel -
								    1]);
			} else {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 20MHz rf(%c) = 0x%x\n",
					rf == 0 ? 'A' : 'B',
					rtlefuse->pwrgroup_ht20[rf][channel -
								    1]);
			}
			/* Clamp each per-byte table offset to the efuse
			 * pwrgroup limit for this channel/path */
			for (i = 0; i < 4; i++) {
				pwr_diff_limit[i] =
				    (u8) ((rtlphy->mcs_txpwrlevel_origoffset
					  [chnlgroup][index +
					  (rf ? 8 : 0)] & (0x7f << (i * 8))) >>
					  (i * 8));

				if (rtlphy->current_chan_bw ==
				    HT_CHANNEL_WIDTH_20_40) {
					if (pwr_diff_limit[i] >
					    rtlefuse->
					    pwrgroup_ht40[rf][channel - 1])
						pwr_diff_limit[i] =
						    rtlefuse->pwrgroup_ht40[rf]
						    [channel - 1];
				} else {
					if (pwr_diff_limit[i] >
					    rtlefuse->
					    pwrgroup_ht20[rf][channel - 1])
						pwr_diff_limit[i] =
						    rtlefuse->pwrgroup_ht20[rf]
						    [channel - 1];
				}
			}

			customer_limit = (pwr_diff_limit[3] << 24) |
			    (pwr_diff_limit[2] << 16) |
			    (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer's limit rf(%c) = 0x%x\n",
				rf == 0 ? 'A' : 'B', customer_limit);

			writeVal = customer_limit +
			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer, writeVal rf(%c)= 0x%x\n",
				rf == 0 ? 'A' : 'B', writeVal);
			break;
		default:
			chnlgroup = 0;
			writeVal =
			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
			    [index + (rf ? 8 : 0)]
			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, writeVal rf(%c) = 0x%x\n",
				rf == 0 ? 'A' : 'B', writeVal);
			break;
		}

		/* Reduce power per-byte when BT coexistence demands it */
		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
			writeVal = writeVal - 0x06060606;
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_BT2)
			writeVal = writeVal - 0x0c0c0c0c;
		*(p_outwriteval + rf) = writeVal;
	}
}

/*
 * Write one OFDM/MCS power value (per path) to the BB AGC registers,
 * clamping each packed byte to RF6052_MAX_TX_PWR.  For the highest MCS
 * register of the active antenna config, three derived bytes (each
 * reduced by 6) are also written to regs 0xc90/0xc98.
 */
static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
					 u8 index, u32 *pValue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u16 regoffset_a[6] = {
		RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
		RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
		RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
	};
	u16 regoffset_b[6] = {
		RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
		RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
		RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
	};
	u8 i, rf, pwr_val[4];
	u32 writeVal;
	u16 regoffset;

	for (rf = 0; rf < 2; rf++) {
		writeVal = pValue[rf];
		for (i = 0; i < 4; i++) {
			pwr_val[i] = (u8) ((writeVal & (0x7f <<
							(i * 8))) >> (i * 8));

			if (pwr_val[i] > RF6052_MAX_TX_PWR)
				pwr_val[i] = RF6052_MAX_TX_PWR;
		}
		writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
		    (pwr_val[1] << 8) | pwr_val[0];

		if (rf == 0)
			regoffset = regoffset_a[index];
		else
			regoffset = regoffset_b[index];
		rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);

		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
			"Set 0x%x = %08x\n", regoffset, writeVal);

		if (((get_rf_type(rtlphy) == RF_2T2R) &&
		     (regoffset == RTXAGC_A_MCS15_MCS12 ||
		      regoffset == RTXAGC_B_MCS15_MCS12)) ||
		    ((get_rf_type(rtlphy) != RF_2T2R) &&
		     (regoffset == RTXAGC_A_MCS07_MCS04 ||
		      regoffset == RTXAGC_B_MCS07_MCS04))) {

			writeVal = pwr_val[3];
			if (regoffset == RTXAGC_A_MCS15_MCS12 ||
			    regoffset == RTXAGC_A_MCS07_MCS04)
				regoffset = 0xc90;
			if (regoffset == RTXAGC_B_MCS15_MCS12 ||
			    regoffset == RTXAGC_B_MCS07_MCS04)
				regoffset = 0xc98;

			for (i = 0; i < 3; i++) {
				writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
				rtl_write_byte(rtlpriv, (u32) (regoffset + i),
					       (u8) writeVal);
			}
		}
	}
}

/* Set OFDM/MCS TX power: compute the power bases once, then build and
 * write all six AGC register groups. */
void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
					 u8 *ppowerlevel, u8 channel)
{
	u32 writeVal[2], powerBase0[2], powerBase1[2];
	u8 index;

	rtl92c_phy_get_power_base(hw, ppowerlevel,
				  channel, &powerBase0[0], &powerBase1[0]);

	for (index = 0; index < 6; index++) {
		_rtl92c_get_txpower_writeval_by_regulatory(hw,
							   channel, index,
							   &powerBase0[0],
							   &powerBase1[0],
							   &writeVal[0]);

		_rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
	}
}

/* Entry point: pick the RF path count from the RF type, then run the
 * per-path parameter-file configuration. */
bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	if (rtlphy->rf_type == RF_1T1R)
		rtlphy->num_total_rfpath = 1;
	else
		rtlphy->num_total_rfpath = 2;

	return _rtl92ce_phy_rf6052_config_parafile(hw);
}

/*
 * For each RF path: save the RF-environment BB setting, switch the path
 * into 3-wire programming mode, load the RF register table from the
 * header file, then restore the saved environment.  Returns false if
 * any path fails to configure.
 */
static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 u4_regvalue = 0;
	u8 rfpath;
	bool rtstatus = true;
	struct bb_reg_def *pphyreg;

	for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {

		pphyreg = &rtlphy->phyreg_def[rfpath];

		switch (rfpath) {
		case RF90_PATH_A:
		case RF90_PATH_C:
			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
						    BRFSI_RFENV);
			break;
		case RF90_PATH_B:
		case RF90_PATH_D:
			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
						    BRFSI_RFENV << 16);
			break;
		}

		rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
		udelay(1);

		rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
		udelay(1);

		rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
			      B3WIREADDREAALENGTH, 0x0);
		udelay(1);

		rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
		udelay(1);

		switch (rfpath) {
		case RF90_PATH_A:
			rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
						(enum radio_path)rfpath);
			break;
		case RF90_PATH_B:
			rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
						(enum radio_path)rfpath);
			break;
		case RF90_PATH_C:
			break;
		case RF90_PATH_D:
			break;
		}

		switch (rfpath) {
		case RF90_PATH_A:
		case RF90_PATH_C:
			rtl_set_bbreg(hw, pphyreg->rfintfs,
				      BRFSI_RFENV, u4_regvalue);
			break;
		case RF90_PATH_B:
		case RF90_PATH_D:
			rtl_set_bbreg(hw, pphyreg->rfintfs,
				      BRFSI_RFENV << 16, u4_regvalue);
			break;
		}

		if (!rtstatus) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 "Radio[%d] Fail!!\n", rfpath);
			return false;
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
	return rtstatus;
}
gpl-2.0
xhteam/kernel_imx
drivers/staging/comedi/drivers/addi-data/addi_amcc_S5920.c
8123
7553
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-------------------------------+---------------------------------------+ | Project : ADDI HEADER READ WRITER | Compiler : Visual C++ | | Module name : S5920.cpp | Version : 6.0 | +-------------------------------+---------------------------------------+ | Author : E. 
LIBS Date : 02/05/2002 | +-----------------------------------------------------------------------+ | Description : DLL with the S5920 PCI Controller functions | +-----------------------------------------------------------------------+ | UPDATE'S | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | +----------+-----------+------------------------------------------------+ | 28/08/02 | LIBS Eric | Add return codes each time a function of the | | | | Addi Library is called | +-----------------------------------------------------------------------+ | 31/07/03 | KRAUTH J. | Changes for the MSX-Box | +-----------------------------------------------------------------------+ */ #include "addi_amcc_S5920.h" /*+----------------------------------------------------------------------------+*/ /*| Function Name : int i_AddiHeaderRW_ReadEeprom |*/ /*| (int i_NbOfWordsToRead, |*/ /*| unsigned int dw_PCIBoardEepromAddress, |*/ /*| unsigned short w_EepromStartAddress, |*/ /*| unsigned short * pw_DataRead) |*/ /*+----------------------------------------------------------------------------+*/ /*| Task : Read word from the 5920 eeprom. |*/ /*+----------------------------------------------------------------------------+*/ /*| Input Parameters : int i_NbOfWordsToRead : Nbr. 
of word to read |*/ /*| unsigned int dw_PCIBoardEepromAddress : Address of the eeprom |*/ /*| unsigned short w_EepromStartAddress : Eeprom start address |*/ /*+----------------------------------------------------------------------------+*/ /*| Output Parameters : unsigned short * pw_DataRead : Read data |*/ /*+----------------------------------------------------------------------------+*/ /*| Return Value : - |*/ /*+----------------------------------------------------------------------------+*/ int i_AddiHeaderRW_ReadEeprom(int i_NbOfWordsToRead, unsigned int dw_PCIBoardEepromAddress, unsigned short w_EepromStartAddress, unsigned short *pw_DataRead) { unsigned int dw_eeprom_busy = 0; int i_Counter = 0; int i_WordCounter; int i; unsigned char pb_ReadByte[1]; unsigned char b_ReadLowByte = 0; unsigned char b_ReadHighByte = 0; unsigned char b_SelectedAddressLow = 0; unsigned char b_SelectedAddressHigh = 0; unsigned short w_ReadWord = 0; for (i_WordCounter = 0; i_WordCounter < i_NbOfWordsToRead; i_WordCounter++) { do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); for (i_Counter = 0; i_Counter < 2; i_Counter++) { b_SelectedAddressLow = (w_EepromStartAddress + i_Counter) % 256; /* Read the low 8 bit part */ b_SelectedAddressHigh = (w_EepromStartAddress + i_Counter) / 256; /* Read the high 8 bit part */ /* Select the load low address mode */ outb(NVCMD_LOAD_LOW, dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 3); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); /* Load the low address */ outb(b_SelectedAddressLow, dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 2); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); 
/* Select the load high address mode */ outb(NVCMD_LOAD_HIGH, dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 3); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); /* Load the high address */ outb(b_SelectedAddressHigh, dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 2); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); /* Select the READ mode */ outb(NVCMD_BEGIN_READ, dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 3); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); /* Read data into the EEPROM */ *pb_ReadByte = inb(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR + 2); /* Wait on busy */ do { dw_eeprom_busy = inl(dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR); dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY; } while (dw_eeprom_busy == EEPROM_BUSY); /* Select the upper address part */ if (i_Counter == 0) b_ReadLowByte = pb_ReadByte[0]; else b_ReadHighByte = pb_ReadByte[0]; /* Sleep */ msleep(1); } w_ReadWord = (b_ReadLowByte | (((unsigned short)b_ReadHighByte) * 256)); pw_DataRead[i_WordCounter] = w_ReadWord; w_EepromStartAddress += 2; /* to read the next word */ } /* for (...) i_NbOfWordsToRead */ return 0; }
gpl-2.0
ManhIT-CMB/Kernel-GalaxyJ
fs/jffs2/xattr_trusted.c
12731
1447
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size); } static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags); } static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1; if (list && retlen<=list_size) { strcpy(list, XATTR_TRUSTED_PREFIX); strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name); } return retlen; } const struct xattr_handler jffs2_trusted_xattr_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = jffs2_trusted_listxattr, .set = jffs2_trusted_setxattr, .get = jffs2_trusted_getxattr };
gpl-2.0
shinkumara/sprout_shinkumara_kernel
drivers/scsi/arm/msgqueue.c
15035
3884
/* * linux/drivers/acorn/scsi/msgqueue.c * * Copyright (C) 1997-1998 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * message queue handling */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/init.h> #include "msgqueue.h" /* * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) * Purpose : Allocate a message queue entry * Params : msgq - message queue to claim entry for * Returns : message queue entry or NULL. */ static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) { struct msgqueue_entry *mq; if ((mq = msgq->free) != NULL) msgq->free = mq->next; return mq; } /* * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) * Purpose : free a message queue entry * Params : msgq - message queue to free entry from * mq - message queue entry to free */ static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) { if (mq) { mq->next = msgq->free; msgq->free = mq; } } /* * Function: void msgqueue_initialise(MsgQueue_t *msgq) * Purpose : initialise a message queue * Params : msgq - queue to initialise */ void msgqueue_initialise(MsgQueue_t *msgq) { int i; msgq->qe = NULL; msgq->free = &msgq->entries[0]; for (i = 0; i < NR_MESSAGES; i++) msgq->entries[i].next = &msgq->entries[i + 1]; msgq->entries[NR_MESSAGES - 1].next = NULL; } /* * Function: void msgqueue_free(MsgQueue_t *msgq) * Purpose : free a queue * Params : msgq - queue to free */ void msgqueue_free(MsgQueue_t *msgq) { } /* * Function: int msgqueue_msglength(MsgQueue_t *msgq) * Purpose : calculate the total length of all messages on the message queue * Params : msgq - queue to examine * Returns : number of bytes of messages in queue */ int msgqueue_msglength(MsgQueue_t *msgq) { struct msgqueue_entry *mq = msgq->qe; int length = 0; for (mq = msgq->qe; mq; mq = mq->next) length += 
mq->msg.length; return length; } /* * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) * Purpose : return a message * Params : msgq - queue to obtain message from * : msgno - message number * Returns : pointer to message string, or NULL */ struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) { struct msgqueue_entry *mq; for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--); return mq ? &mq->msg : NULL; } /* * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) * Purpose : add a message onto a message queue * Params : msgq - queue to add message on * length - length of message * ... - message bytes * Returns : != 0 if successful */ int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) { struct msgqueue_entry *mq = mqe_alloc(msgq); va_list ap; if (mq) { struct msgqueue_entry **mqp; int i; va_start(ap, length); for (i = 0; i < length; i++) mq->msg.msg[i] = va_arg(ap, unsigned int); va_end(ap); mq->msg.length = length; mq->msg.fifo = 0; mq->next = NULL; mqp = &msgq->qe; while (*mqp) mqp = &(*mqp)->next; *mqp = mq; } return mq != NULL; } /* * Function: void msgqueue_flush(MsgQueue_t *msgq) * Purpose : flush all messages from message queue * Params : msgq - queue to flush */ void msgqueue_flush(MsgQueue_t *msgq) { struct msgqueue_entry *mq, *mqnext; for (mq = msgq->qe; mq; mq = mqnext) { mqnext = mq->next; mqe_free(msgq, mq); } msgq->qe = NULL; } EXPORT_SYMBOL(msgqueue_initialise); EXPORT_SYMBOL(msgqueue_free); EXPORT_SYMBOL(msgqueue_msglength); EXPORT_SYMBOL(msgqueue_getmsg); EXPORT_SYMBOL(msgqueue_addmsg); EXPORT_SYMBOL(msgqueue_flush); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("SCSI message queue handling"); MODULE_LICENSE("GPL");
gpl-2.0
BENETNATH/android_kernel_acer_A510
drivers/scsi/arm/msgqueue.c
15035
3884
/* * linux/drivers/acorn/scsi/msgqueue.c * * Copyright (C) 1997-1998 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * message queue handling */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/init.h> #include "msgqueue.h" /* * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) * Purpose : Allocate a message queue entry * Params : msgq - message queue to claim entry for * Returns : message queue entry or NULL. */ static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) { struct msgqueue_entry *mq; if ((mq = msgq->free) != NULL) msgq->free = mq->next; return mq; } /* * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) * Purpose : free a message queue entry * Params : msgq - message queue to free entry from * mq - message queue entry to free */ static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) { if (mq) { mq->next = msgq->free; msgq->free = mq; } } /* * Function: void msgqueue_initialise(MsgQueue_t *msgq) * Purpose : initialise a message queue * Params : msgq - queue to initialise */ void msgqueue_initialise(MsgQueue_t *msgq) { int i; msgq->qe = NULL; msgq->free = &msgq->entries[0]; for (i = 0; i < NR_MESSAGES; i++) msgq->entries[i].next = &msgq->entries[i + 1]; msgq->entries[NR_MESSAGES - 1].next = NULL; } /* * Function: void msgqueue_free(MsgQueue_t *msgq) * Purpose : free a queue * Params : msgq - queue to free */ void msgqueue_free(MsgQueue_t *msgq) { } /* * Function: int msgqueue_msglength(MsgQueue_t *msgq) * Purpose : calculate the total length of all messages on the message queue * Params : msgq - queue to examine * Returns : number of bytes of messages in queue */ int msgqueue_msglength(MsgQueue_t *msgq) { struct msgqueue_entry *mq = msgq->qe; int length = 0; for (mq = msgq->qe; mq; mq = mq->next) length += 
mq->msg.length; return length; } /* * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) * Purpose : return a message * Params : msgq - queue to obtain message from * : msgno - message number * Returns : pointer to message string, or NULL */ struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) { struct msgqueue_entry *mq; for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--); return mq ? &mq->msg : NULL; } /* * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) * Purpose : add a message onto a message queue * Params : msgq - queue to add message on * length - length of message * ... - message bytes * Returns : != 0 if successful */ int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) { struct msgqueue_entry *mq = mqe_alloc(msgq); va_list ap; if (mq) { struct msgqueue_entry **mqp; int i; va_start(ap, length); for (i = 0; i < length; i++) mq->msg.msg[i] = va_arg(ap, unsigned int); va_end(ap); mq->msg.length = length; mq->msg.fifo = 0; mq->next = NULL; mqp = &msgq->qe; while (*mqp) mqp = &(*mqp)->next; *mqp = mq; } return mq != NULL; } /* * Function: void msgqueue_flush(MsgQueue_t *msgq) * Purpose : flush all messages from message queue * Params : msgq - queue to flush */ void msgqueue_flush(MsgQueue_t *msgq) { struct msgqueue_entry *mq, *mqnext; for (mq = msgq->qe; mq; mq = mqnext) { mqnext = mq->next; mqe_free(msgq, mq); } msgq->qe = NULL; } EXPORT_SYMBOL(msgqueue_initialise); EXPORT_SYMBOL(msgqueue_free); EXPORT_SYMBOL(msgqueue_msglength); EXPORT_SYMBOL(msgqueue_getmsg); EXPORT_SYMBOL(msgqueue_addmsg); EXPORT_SYMBOL(msgqueue_flush); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("SCSI message queue handling"); MODULE_LICENSE("GPL");
gpl-2.0
devcreations07/spirit_cancro
drivers/devfreq/governor_cpubw_hwmon.c
700
11395
/* * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "cpubw-hwmon: " fmt #include <linux/kernel.h> #include <asm/sizes.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/ktime.h> #include <linux/time.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/devfreq.h> #include "governor.h" #include <mach/msm-krait-l2-accessors.h> #define L2PMRESR2 0x412 #define L2PMCR 0x400 #define L2PMCNTENCLR 0x402 #define L2PMCNTENSET 0x403 #define L2PMINTENCLR 0x404 #define L2PMINTENSET 0x405 #define L2PMOVSR 0x406 #define L2PMOVSSET 0x407 #define L2PMnEVCNTCR(n) (0x420 + n * 0x10) #define L2PMnEVCNTR(n) (0x421 + n * 0x10) #define L2PMnEVCNTSR(n) (0x422 + n * 0x10) #define L2PMnEVFILTER(n) (0x423 + n * 0x10) #define L2PMnEVTYPER(n) (0x424 + n * 0x10) #define show_attr(name) \ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", name); \ } #define store_attr(name, _min, _max) \ static ssize_t store_##name(struct device *dev, \ struct device_attribute *attr, const char *buf, \ size_t count) \ { \ int ret; \ unsigned int val; \ ret = sscanf(buf, "%u", &val); \ if (ret != 1) \ return -EINVAL; \ val = max(val, _min); \ val = min(val, _max); \ name = val; \ return count; \ } #define gov_attr(__attr, min, max) \ show_attr(__attr) \ store_attr(__attr, 
min, max) \ static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr) static int l2pm_irq; static unsigned int bytes_per_beat; static unsigned int tolerance_percent = 10; static unsigned int guard_band_mbps = 100; static unsigned int decay_rate = 90; static unsigned int io_percent = 16; static unsigned int bw_step = 190; #define MIN_MS 10U #define MAX_MS 500U static unsigned int sample_ms = 50; static u32 prev_r_start_val; static u32 prev_w_start_val; static unsigned long prev_ab; static ktime_t prev_ts; #define RD_MON 0 #define WR_MON 1 static void mon_init(void) { /* Set up counters 0/1 to count write/read beats */ set_l2_indirect_reg(L2PMRESR2, 0x8B0B0000); set_l2_indirect_reg(L2PMnEVCNTCR(RD_MON), 0x0); set_l2_indirect_reg(L2PMnEVCNTCR(WR_MON), 0x0); set_l2_indirect_reg(L2PMnEVCNTR(RD_MON), 0xFFFFFFFF); set_l2_indirect_reg(L2PMnEVCNTR(WR_MON), 0xFFFFFFFF); set_l2_indirect_reg(L2PMnEVFILTER(RD_MON), 0xF003F); set_l2_indirect_reg(L2PMnEVFILTER(WR_MON), 0xF003F); set_l2_indirect_reg(L2PMnEVTYPER(RD_MON), 0xA); set_l2_indirect_reg(L2PMnEVTYPER(WR_MON), 0xB); } static void global_mon_enable(bool en) { u32 regval; /* Global counter enable */ regval = get_l2_indirect_reg(L2PMCR); if (en) regval |= BIT(0); else regval &= ~BIT(0); set_l2_indirect_reg(L2PMCR, regval); } static void mon_enable(int n) { /* Clear previous overflow state for event counter n */ set_l2_indirect_reg(L2PMOVSR, BIT(n)); /* Enable event counter n */ set_l2_indirect_reg(L2PMCNTENSET, BIT(n)); } static void mon_disable(int n) { /* Disable event counter n */ set_l2_indirect_reg(L2PMCNTENCLR, BIT(n)); } static void mon_irq_enable(int n, bool en) { if (en) set_l2_indirect_reg(L2PMINTENSET, BIT(n)); else set_l2_indirect_reg(L2PMINTENCLR, BIT(n)); } /* Returns start counter value to be used with mon_get_mbps() */ static u32 mon_set_limit_mbyte(int n, unsigned int mbytes) { u32 regval, beats; beats = mult_frac(mbytes, SZ_1M, bytes_per_beat); regval = 0xFFFFFFFF - beats; 
set_l2_indirect_reg(L2PMnEVCNTR(n), regval); pr_debug("EV%d MB: %d, start val: %x\n", n, mbytes, regval); return regval; } long mon_get_count(int n, u32 start_val) { u32 overflow, count; count = get_l2_indirect_reg(L2PMnEVCNTR(n)); overflow = get_l2_indirect_reg(L2PMOVSR); pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count); if (overflow & BIT(n)) return 0xFFFFFFFF - start_val + count; else return count - start_val; } /* Returns MBps of read/writes for the sampling window. */ unsigned int beats_to_mbps(long long beats, unsigned int us) { beats *= USEC_PER_SEC; beats *= bytes_per_beat; do_div(beats, us); beats = DIV_ROUND_UP_ULL(beats, SZ_1M); return beats; } static int to_limit(int mbps) { mbps *= (100 + tolerance_percent) * sample_ms; mbps /= 100; mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC); return mbps; } unsigned long measure_bw_and_set_irq(void) { long r_mbps, w_mbps, mbps; ktime_t ts; unsigned int us; /* * Since we are stopping the counters, we don't want this short work * to be interrupted by other tasks and cause the measurements to be * wrong. Not blocking interrupts to avoid affecting interrupt * latency and since they should be short anyway because they run in * atomic context. 
*/ preempt_disable(); ts = ktime_get(); us = ktime_to_us(ktime_sub(ts, prev_ts)); if (!us) us = 1; mon_disable(RD_MON); mon_disable(WR_MON); r_mbps = mon_get_count(RD_MON, prev_r_start_val); r_mbps = beats_to_mbps(r_mbps, us); w_mbps = mon_get_count(WR_MON, prev_w_start_val); w_mbps = beats_to_mbps(w_mbps, us); prev_r_start_val = mon_set_limit_mbyte(RD_MON, to_limit(r_mbps)); prev_w_start_val = mon_set_limit_mbyte(WR_MON, to_limit(w_mbps)); prev_ts = ts; mon_enable(RD_MON); mon_enable(WR_MON); preempt_enable(); mbps = r_mbps + w_mbps; pr_debug("R/W/BW/us = %ld/%ld/%ld/%d\n", r_mbps, w_mbps, mbps, us); return mbps; } static void compute_bw(int mbps, unsigned long *freq, unsigned long *ab) { int new_bw; mbps += guard_band_mbps; if (mbps > prev_ab) { new_bw = mbps; } else { new_bw = mbps * decay_rate + prev_ab * (100 - decay_rate); new_bw /= 100; } prev_ab = new_bw; *ab = roundup(new_bw, bw_step); *freq = (new_bw * 100) / io_percent; } #define TOO_SOON_US (1 * USEC_PER_MSEC) static irqreturn_t mon_intr_handler(int irq, void *dev) { struct devfreq *df = dev; ktime_t ts; unsigned int us; u32 regval; int ret; regval = get_l2_indirect_reg(L2PMOVSR); pr_debug("Got interrupt: %x\n", regval); devfreq_monitor_stop(df); /* * Don't recalc bandwidth if the interrupt comes right after a * previous bandwidth calculation. This is done for two reasons: * * 1. Sampling the BW during a very short duration can result in a * very inaccurate measurement due to very short bursts. * 2. This can only happen if the limit was hit very close to the end * of the previous sample period. Which means the current BW * estimate is not very off and doesn't need to be readjusted. 
*/ ts = ktime_get(); us = ktime_to_us(ktime_sub(ts, prev_ts)); if (us > TOO_SOON_US) { mutex_lock(&df->lock); ret = update_devfreq(df); if (ret) pr_err("Unable to update freq on IRQ!\n"); mutex_unlock(&df->lock); } devfreq_monitor_start(df); return IRQ_HANDLED; } static int start_monitoring(struct devfreq *df) { int ret, mbyte; ret = request_threaded_irq(l2pm_irq, NULL, mon_intr_handler, IRQF_ONESHOT | IRQF_SHARED, "cpubw_hwmon", df); if (ret) { pr_err("Unable to register interrupt handler\n"); return ret; } mon_init(); mon_disable(RD_MON); mon_disable(WR_MON); mbyte = (df->previous_freq * io_percent) / (2 * 100); prev_r_start_val = mon_set_limit_mbyte(RD_MON, mbyte); prev_w_start_val = mon_set_limit_mbyte(WR_MON, mbyte); prev_ts = ktime_get(); prev_ab = 0; mon_irq_enable(RD_MON, true); mon_irq_enable(WR_MON, true); mon_enable(RD_MON); mon_enable(WR_MON); global_mon_enable(true); return 0; } static void stop_monitoring(struct devfreq *df) { global_mon_enable(false); mon_disable(RD_MON); mon_disable(WR_MON); mon_irq_enable(RD_MON, false); mon_irq_enable(WR_MON, false); disable_irq(l2pm_irq); free_irq(l2pm_irq, df); } static int devfreq_cpubw_hwmon_get_freq(struct devfreq *df, unsigned long *freq, u32 *flag) { unsigned long mbps; mbps = measure_bw_and_set_irq(); compute_bw(mbps, freq, df->data); return 0; } gov_attr(tolerance_percent, 0U, 30U); gov_attr(guard_band_mbps, 0U, 2000U); gov_attr(decay_rate, 0U, 100U); gov_attr(io_percent, 1U, 100U); gov_attr(bw_step, 50U, 1000U); static struct attribute *dev_attr[] = { &dev_attr_tolerance_percent.attr, &dev_attr_guard_band_mbps.attr, &dev_attr_decay_rate.attr, &dev_attr_io_percent.attr, &dev_attr_bw_step.attr, NULL, }; static struct attribute_group dev_attr_group = { .name = "cpubw_hwmon", .attrs = dev_attr, }; static int devfreq_cpubw_hwmon_ev_handler(struct devfreq *df, unsigned int event, void *data) { int ret; switch (event) { case DEVFREQ_GOV_START: ret = start_monitoring(df); if (ret) return ret; ret = 
sysfs_create_group(&df->dev.kobj, &dev_attr_group); if (ret) return ret; sample_ms = df->profile->polling_ms; sample_ms = max(MIN_MS, sample_ms); sample_ms = min(MAX_MS, sample_ms); df->profile->polling_ms = sample_ms; devfreq_monitor_start(df); pr_debug("Enabled CPU BW HW monitor governor\n"); break; case DEVFREQ_GOV_STOP: sysfs_remove_group(&df->dev.kobj, &dev_attr_group); devfreq_monitor_stop(df); *(unsigned long *)df->data = 0; stop_monitoring(df); pr_debug("Disabled CPU BW HW monitor governor\n"); break; case DEVFREQ_GOV_INTERVAL: sample_ms = *(unsigned int *)data; sample_ms = max(MIN_MS, sample_ms); sample_ms = min(MAX_MS, sample_ms); devfreq_interval_update(df, &sample_ms); break; } return 0; } static struct devfreq_governor devfreq_cpubw_hwmon = { .name = "cpubw_hwmon", .get_target_freq = devfreq_cpubw_hwmon_get_freq, .event_handler = devfreq_cpubw_hwmon_ev_handler, }; static int cpubw_hwmon_driver_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; int ret; l2pm_irq = platform_get_irq(pdev, 0); if (l2pm_irq < 0) { pr_err("Unable to get IRQ number\n"); return l2pm_irq; } ret = of_property_read_u32(dev->of_node, "qcom,bytes-per-beat", &bytes_per_beat); if (ret) { pr_err("Unable to read bytes per beat\n"); return ret; } ret = devfreq_add_governor(&devfreq_cpubw_hwmon); if (ret) { pr_err("devfreq governor registration failed\n"); return ret; } return 0; } static struct of_device_id match_table[] = { { .compatible = "qcom,kraitbw-l2pm" }, {} }; static struct platform_driver cpubw_hwmon_driver = { .probe = cpubw_hwmon_driver_probe, .driver = { .name = "kraitbw-l2pm", .of_match_table = match_table, .owner = THIS_MODULE, }, }; static int __init cpubw_hwmon_init(void) { return platform_driver_register(&cpubw_hwmon_driver); } module_init(cpubw_hwmon_init); static void __exit cpubw_hwmon_exit(void) { platform_driver_unregister(&cpubw_hwmon_driver); } module_exit(cpubw_hwmon_exit); MODULE_DESCRIPTION("HW monitor based CPU DDR bandwidth voting 
driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
goulderb/sch-i405_kernel
sound/soc/omap/omap-mcbsp.c
700
24513
/* * omap-mcbsp.c -- OMAP ALSA SoC DAI driver using McBSP port * * Copyright (C) 2008 Nokia Corporation * * Contact: Jarkko Nikula <jhnikula@gmail.com> * Peter Ujfalusi <peter.ujfalusi@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <plat/control.h> #include <plat/dma.h> #include <plat/mcbsp.h> #include "omap-mcbsp.h" #include "omap-pcm.h" #define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_8000_96000) #define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \ xhandler_get, xhandler_put) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = omap_mcbsp_st_info_volsw, \ .get = xhandler_get, .put = xhandler_put, \ .private_value = (unsigned long) &(struct soc_mixer_control) \ {.min = xmin, .max = xmax} } struct omap_mcbsp_data { unsigned int bus_id; struct omap_mcbsp_reg_cfg regs; unsigned int fmt; /* * Flags indicating is the bus already activated and configured by * another substream */ int active; int configured; unsigned int in_freq; int clk_div; }; #define to_mcbsp(priv) container_of((priv), struct omap_mcbsp_data, bus_id) static struct omap_mcbsp_data mcbsp_data[NUM_LINKS]; /* * Stream DMA parameters. 
DMA request line and port address are set runtime * since they are different between OMAP1 and later OMAPs */ static struct omap_pcm_dma_data omap_mcbsp_dai_dma_params[NUM_LINKS][2]; #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) static const int omap1_dma_reqs[][2] = { { OMAP_DMA_MCBSP1_TX, OMAP_DMA_MCBSP1_RX }, { OMAP_DMA_MCBSP2_TX, OMAP_DMA_MCBSP2_RX }, { OMAP_DMA_MCBSP3_TX, OMAP_DMA_MCBSP3_RX }, }; static const unsigned long omap1_mcbsp_port[][2] = { { OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1, OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 }, { OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1, OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 }, { OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DXR1, OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DRR1 }, }; #else static const int omap1_dma_reqs[][2] = {}; static const unsigned long omap1_mcbsp_port[][2] = {}; #endif #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) static const int omap24xx_dma_reqs[][2] = { { OMAP24XX_DMA_MCBSP1_TX, OMAP24XX_DMA_MCBSP1_RX }, { OMAP24XX_DMA_MCBSP2_TX, OMAP24XX_DMA_MCBSP2_RX }, #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) { OMAP24XX_DMA_MCBSP3_TX, OMAP24XX_DMA_MCBSP3_RX }, { OMAP24XX_DMA_MCBSP4_TX, OMAP24XX_DMA_MCBSP4_RX }, { OMAP24XX_DMA_MCBSP5_TX, OMAP24XX_DMA_MCBSP5_RX }, #endif }; #else static const int omap24xx_dma_reqs[][2] = {}; #endif #if defined(CONFIG_ARCH_OMAP2420) static const unsigned long omap2420_mcbsp_port[][2] = { { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1, OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 }, { OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1, OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 }, }; #else static const unsigned long omap2420_mcbsp_port[][2] = {}; #endif #if defined(CONFIG_ARCH_OMAP2430) static const unsigned long omap2430_mcbsp_port[][2] = { { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR, OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR }, { OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR, OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR }, { 
OMAP2430_MCBSP3_BASE + OMAP_MCBSP_REG_DXR, OMAP2430_MCBSP3_BASE + OMAP_MCBSP_REG_DRR }, { OMAP2430_MCBSP4_BASE + OMAP_MCBSP_REG_DXR, OMAP2430_MCBSP4_BASE + OMAP_MCBSP_REG_DRR }, { OMAP2430_MCBSP5_BASE + OMAP_MCBSP_REG_DXR, OMAP2430_MCBSP5_BASE + OMAP_MCBSP_REG_DRR }, }; #else static const unsigned long omap2430_mcbsp_port[][2] = {}; #endif #if defined(CONFIG_ARCH_OMAP3) static const unsigned long omap34xx_mcbsp_port[][2] = { { OMAP34XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR, OMAP34XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR }, { OMAP34XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR, OMAP34XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR }, { OMAP34XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR, OMAP34XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR }, { OMAP34XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR, OMAP34XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR }, { OMAP34XX_MCBSP5_BASE + OMAP_MCBSP_REG_DXR, OMAP34XX_MCBSP5_BASE + OMAP_MCBSP_REG_DRR }, }; #else static const unsigned long omap34xx_mcbsp_port[][2] = {}; #endif static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); int dma_op_mode = omap_mcbsp_get_dma_op_mode(mcbsp_data->bus_id); int samples; /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) samples = snd_pcm_lib_period_bytes(substream) >> 1; else samples = 1; /* Configure McBSP internal buffer usage */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) omap_mcbsp_set_tx_threshold(mcbsp_data->bus_id, samples - 1); else omap_mcbsp_set_rx_threshold(mcbsp_data->bus_id, samples - 1); } static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); int bus_id = mcbsp_data->bus_id; int err = 0; 
if (!cpu_dai->active) err = omap_mcbsp_request(bus_id); if (cpu_is_omap343x()) { int dma_op_mode = omap_mcbsp_get_dma_op_mode(bus_id); int max_period; /* * McBSP2 in OMAP3 has 1024 * 32-bit internal audio buffer. * Set constraint for minimum buffer size to the same than FIFO * size in order to avoid underruns in playback startup because * HW is keeping the DMA request active until FIFO is filled. */ if (bus_id == 1) snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 4096, UINT_MAX); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) max_period = omap_mcbsp_get_max_tx_threshold(bus_id); else max_period = omap_mcbsp_get_max_rx_threshold(bus_id); max_period++; max_period <<= 1; if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32, max_period); } return err; } static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); if (!cpu_dai->active) { omap_mcbsp_free(mcbsp_data->bus_id); mcbsp_data->configured = 0; } } static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: mcbsp_data->active++; omap_mcbsp_start(mcbsp_data->bus_id, play, !play); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: omap_mcbsp_stop(mcbsp_data->bus_id, play, !play); mcbsp_data->active--; break; default: err = -EINVAL; } 
return err; } static snd_pcm_sframes_t omap_mcbsp_dai_delay( struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); u16 fifo_use; snd_pcm_sframes_t delay; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) fifo_use = omap_mcbsp_get_tx_delay(mcbsp_data->bus_id); else fifo_use = omap_mcbsp_get_rx_delay(mcbsp_data->bus_id); /* * Divide the used locations with the channel count to get the * FIFO usage in samples (don't care about partial samples in the * buffer). */ delay = fifo_use / substream->runtime->channels; return delay; } static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id; int wlen, channels, wpf, sync_mode = OMAP_DMA_SYNC_ELEMENT; unsigned long port; unsigned int format, div, framesize, master; if (cpu_class_is_omap1()) { dma = omap1_dma_reqs[bus_id][substream->stream]; port = omap1_mcbsp_port[bus_id][substream->stream]; } else if (cpu_is_omap2420()) { dma = omap24xx_dma_reqs[bus_id][substream->stream]; port = omap2420_mcbsp_port[bus_id][substream->stream]; } else if (cpu_is_omap2430()) { dma = omap24xx_dma_reqs[bus_id][substream->stream]; port = omap2430_mcbsp_port[bus_id][substream->stream]; } else if (cpu_is_omap343x()) { dma = omap24xx_dma_reqs[bus_id][substream->stream]; port = omap34xx_mcbsp_port[bus_id][substream->stream]; omap_mcbsp_dai_dma_params[id][substream->stream].set_threshold = omap_mcbsp_set_threshold; /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ if (omap_mcbsp_get_dma_op_mode(bus_id) == 
MCBSP_DMA_MODE_THRESHOLD) sync_mode = OMAP_DMA_SYNC_FRAME; } else { return -ENODEV; } omap_mcbsp_dai_dma_params[id][substream->stream].name = substream->stream ? "Audio Capture" : "Audio Playback"; omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma; omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port; omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: omap_mcbsp_dai_dma_params[id][substream->stream].data_type = OMAP_DMA_DATA_TYPE_S16; break; case SNDRV_PCM_FORMAT_S32_LE: omap_mcbsp_dai_dma_params[id][substream->stream].data_type = OMAP_DMA_DATA_TYPE_S32; break; default: return -EINVAL; } snd_soc_dai_set_dma_data(cpu_dai, substream, &omap_mcbsp_dai_dma_params[id][substream->stream]); if (mcbsp_data->configured) { /* McBSP already configured by another stream */ return 0; } format = mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK; wpf = channels = params_channels(params); if (channels == 2 && (format == SND_SOC_DAIFMT_I2S || format == SND_SOC_DAIFMT_LEFT_J)) { /* Use dual-phase frames */ regs->rcr2 |= RPHASE; regs->xcr2 |= XPHASE; /* Set 1 word per (McBSP) frame for phase1 and phase2 */ wpf--; regs->rcr2 |= RFRLEN2(wpf - 1); regs->xcr2 |= XFRLEN2(wpf - 1); } regs->rcr1 |= RFRLEN1(wpf - 1); regs->xcr1 |= XFRLEN1(wpf - 1); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: /* Set word lengths */ wlen = 16; regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_16); regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_16); regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_16); regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_16); break; case SNDRV_PCM_FORMAT_S32_LE: /* Set word lengths */ wlen = 32; regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_32); regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_32); regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_32); regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_32); break; default: /* Unsupported PCM format */ return -EINVAL; } /* In McBSP master modes, FRAME (i.e. 
sample rate) is generated * by _counting_ BCLKs. Calculate frame size in BCLKs */ master = mcbsp_data->fmt & SND_SOC_DAIFMT_MASTER_MASK; if (master == SND_SOC_DAIFMT_CBS_CFS) { div = mcbsp_data->clk_div ? mcbsp_data->clk_div : 1; framesize = (mcbsp_data->in_freq / div) / params_rate(params); if (framesize < wlen * channels) { printk(KERN_ERR "%s: not enough bandwidth for desired rate and " "channels\n", __func__); return -EINVAL; } } else framesize = wlen * channels; /* Set FS period and length in terms of bit clock periods */ switch (format) { case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_LEFT_J: regs->srgr2 |= FPER(framesize - 1); regs->srgr1 |= FWID((framesize >> 1) - 1); break; case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: regs->srgr2 |= FPER(framesize - 1); regs->srgr1 |= FWID(0); break; } omap_mcbsp_config(bus_id, &mcbsp_data->regs); mcbsp_data->configured = 1; return 0; } /* * This must be called before _set_clkdiv and _set_sysclk since McBSP register * cache is initialized here */ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; unsigned int temp_fmt = fmt; if (mcbsp_data->configured) return 0; mcbsp_data->fmt = fmt; memset(regs, 0, sizeof(*regs)); /* Generic McBSP register settings */ regs->spcr2 |= XINTM(3) | FREE; regs->spcr1 |= RINTM(3); /* RFIG and XFIG are not defined in 34xx */ if (!cpu_is_omap34xx()) { regs->rcr2 |= RFIG; regs->xcr2 |= XFIG; } if (cpu_is_omap2430() || cpu_is_omap34xx()) { regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE; regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* 1-bit data delay */ regs->rcr2 |= RDATDLY(1); regs->xcr2 |= XDATDLY(1); break; case SND_SOC_DAIFMT_LEFT_J: /* 0-bit data delay */ regs->rcr2 |= RDATDLY(0); regs->xcr2 |= XDATDLY(0); regs->spcr1 |= RJUST(2); /* Invert FS polarity 
configuration */ temp_fmt ^= SND_SOC_DAIFMT_NB_IF; break; case SND_SOC_DAIFMT_DSP_A: /* 1-bit data delay */ regs->rcr2 |= RDATDLY(1); regs->xcr2 |= XDATDLY(1); /* Invert FS polarity configuration */ temp_fmt ^= SND_SOC_DAIFMT_NB_IF; break; case SND_SOC_DAIFMT_DSP_B: /* 0-bit data delay */ regs->rcr2 |= RDATDLY(0); regs->xcr2 |= XDATDLY(0); /* Invert FS polarity configuration */ temp_fmt ^= SND_SOC_DAIFMT_NB_IF; break; default: /* Unsupported data format */ return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: /* McBSP master. Set FS and bit clocks as outputs */ regs->pcr0 |= FSXM | FSRM | CLKXM | CLKRM; /* Sample rate generator drives the FS */ regs->srgr2 |= FSGM; break; case SND_SOC_DAIFMT_CBM_CFM: /* McBSP slave */ break; default: /* Unsupported master/slave configuration */ return -EINVAL; } /* Set bit clock (CLKX/CLKR) and FS polarities */ switch (temp_fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* * Normal BCLK + FS. * FS active low. TX data driven on falling edge of bit clock * and RX data sampled on rising edge of bit clock. 
*/ regs->pcr0 |= FSXP | FSRP | CLKXP | CLKRP; break; case SND_SOC_DAIFMT_NB_IF: regs->pcr0 |= CLKXP | CLKRP; break; case SND_SOC_DAIFMT_IB_NF: regs->pcr0 |= FSXP | FSRP; break; case SND_SOC_DAIFMT_IB_IF: break; default: return -EINVAL; } return 0; } static int omap_mcbsp_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; if (div_id != OMAP_MCBSP_CLKGDV) return -ENODEV; mcbsp_data->clk_div = div; regs->srgr1 |= CLKGDV(div - 1); return 0; } static int omap_mcbsp_dai_set_clks_src(struct omap_mcbsp_data *mcbsp_data, int clk_id) { int sel_bit; u16 reg, reg_devconf1 = OMAP243X_CONTROL_DEVCONF1; if (cpu_class_is_omap1()) { /* OMAP1's can use only external source clock */ if (unlikely(clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK)) return -EINVAL; else return 0; } if (cpu_is_omap2420() && mcbsp_data->bus_id > 1) return -EINVAL; if (cpu_is_omap343x()) reg_devconf1 = OMAP343X_CONTROL_DEVCONF1; switch (mcbsp_data->bus_id) { case 0: reg = OMAP2_CONTROL_DEVCONF0; sel_bit = 2; break; case 1: reg = OMAP2_CONTROL_DEVCONF0; sel_bit = 6; break; case 2: reg = reg_devconf1; sel_bit = 0; break; case 3: reg = reg_devconf1; sel_bit = 2; break; case 4: reg = reg_devconf1; sel_bit = 4; break; default: return -EINVAL; } if (clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK) omap_ctrl_writel(omap_ctrl_readl(reg) & ~(1 << sel_bit), reg); else omap_ctrl_writel(omap_ctrl_readl(reg) | (1 << sel_bit), reg); return 0; } static int omap_mcbsp_dai_set_rcvr_src(struct omap_mcbsp_data *mcbsp_data, int clk_id) { int sel_bit, set = 0; u16 reg = OMAP2_CONTROL_DEVCONF0; if (cpu_class_is_omap1()) return -EINVAL; /* TODO: Can this be implemented for OMAP1? 
*/ if (mcbsp_data->bus_id != 0) return -EINVAL; switch (clk_id) { case OMAP_MCBSP_CLKR_SRC_CLKX: set = 1; case OMAP_MCBSP_CLKR_SRC_CLKR: sel_bit = 3; break; case OMAP_MCBSP_FSR_SRC_FSX: set = 1; case OMAP_MCBSP_FSR_SRC_FSR: sel_bit = 4; break; default: return -EINVAL; } if (set) omap_ctrl_writel(omap_ctrl_readl(reg) | (1 << sel_bit), reg); else omap_ctrl_writel(omap_ctrl_readl(reg) & ~(1 << sel_bit), reg); return 0; } static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; int err = 0; mcbsp_data->in_freq = freq; switch (clk_id) { case OMAP_MCBSP_SYSCLK_CLK: regs->srgr2 |= CLKSM; break; case OMAP_MCBSP_SYSCLK_CLKS_FCLK: case OMAP_MCBSP_SYSCLK_CLKS_EXT: err = omap_mcbsp_dai_set_clks_src(mcbsp_data, clk_id); break; case OMAP_MCBSP_SYSCLK_CLKX_EXT: regs->srgr2 |= CLKSM; case OMAP_MCBSP_SYSCLK_CLKR_EXT: regs->pcr0 |= SCLKME; break; case OMAP_MCBSP_CLKR_SRC_CLKR: case OMAP_MCBSP_CLKR_SRC_CLKX: case OMAP_MCBSP_FSR_SRC_FSR: case OMAP_MCBSP_FSR_SRC_FSX: err = omap_mcbsp_dai_set_rcvr_src(mcbsp_data, clk_id); break; default: err = -ENODEV; } return err; } static struct snd_soc_dai_ops omap_mcbsp_dai_ops = { .startup = omap_mcbsp_dai_startup, .shutdown = omap_mcbsp_dai_shutdown, .trigger = omap_mcbsp_dai_trigger, .delay = omap_mcbsp_dai_delay, .hw_params = omap_mcbsp_dai_hw_params, .set_fmt = omap_mcbsp_dai_set_dai_fmt, .set_clkdiv = omap_mcbsp_dai_set_clkdiv, .set_sysclk = omap_mcbsp_dai_set_dai_sysclk, }; #define OMAP_MCBSP_DAI_BUILDER(link_id) \ { \ .name = "omap-mcbsp-dai-"#link_id, \ .id = (link_id), \ .playback = { \ .channels_min = 1, \ .channels_max = 16, \ .rates = OMAP_MCBSP_RATES, \ .formats = SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S32_LE, \ }, \ .capture = { \ .channels_min = 1, \ .channels_max = 16, \ .rates = OMAP_MCBSP_RATES, \ .formats = SNDRV_PCM_FMTBIT_S16_LE | \ 
SNDRV_PCM_FMTBIT_S32_LE, \ }, \ .ops = &omap_mcbsp_dai_ops, \ .private_data = &mcbsp_data[(link_id)].bus_id, \ } struct snd_soc_dai omap_mcbsp_dai[] = { OMAP_MCBSP_DAI_BUILDER(0), OMAP_MCBSP_DAI_BUILDER(1), #if NUM_LINKS >= 3 OMAP_MCBSP_DAI_BUILDER(2), #endif #if NUM_LINKS == 5 OMAP_MCBSP_DAI_BUILDER(3), OMAP_MCBSP_DAI_BUILDER(4), #endif }; EXPORT_SYMBOL_GPL(omap_mcbsp_dai); int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int max = mc->max; int min = mc->min; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = min; uinfo->value.integer.max = max; return 0; } #define OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(id, channel) \ static int \ omap_mcbsp##id##_set_st_ch##channel##_volume(struct snd_kcontrol *kc, \ struct snd_ctl_elem_value *uc) \ { \ struct soc_mixer_control *mc = \ (struct soc_mixer_control *)kc->private_value; \ int max = mc->max; \ int min = mc->min; \ int val = uc->value.integer.value[0]; \ \ if (val < min || val > max) \ return -EINVAL; \ \ /* OMAP McBSP implementation uses index values 0..4 */ \ return omap_st_set_chgain((id)-1, channel, val); \ } #define OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(id, channel) \ static int \ omap_mcbsp##id##_get_st_ch##channel##_volume(struct snd_kcontrol *kc, \ struct snd_ctl_elem_value *uc) \ { \ s16 chgain; \ \ if (omap_st_get_chgain((id)-1, channel, &chgain)) \ return -EAGAIN; \ \ uc->value.integer.value[0] = chgain; \ return 0; \ } OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(2, 0) OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(2, 1) OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(3, 0) OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(3, 1) OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(2, 0) OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(2, 1) OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(3, 0) OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(3, 1) static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc 
= (struct soc_mixer_control *)kcontrol->private_value; u8 value = ucontrol->value.integer.value[0]; if (value == omap_st_is_enabled(mc->reg)) return 0; if (value) omap_st_enable(mc->reg); else omap_st_disable(mc->reg); return 1; } static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; ucontrol->value.integer.value[0] = omap_st_is_enabled(mc->reg); return 0; } static const struct snd_kcontrol_new omap_mcbsp2_st_controls[] = { SOC_SINGLE_EXT("McBSP2 Sidetone Switch", 1, 0, 1, 0, omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode), OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 0 Volume", -32768, 32767, omap_mcbsp2_get_st_ch0_volume, omap_mcbsp2_set_st_ch0_volume), OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 1 Volume", -32768, 32767, omap_mcbsp2_get_st_ch1_volume, omap_mcbsp2_set_st_ch1_volume), }; static const struct snd_kcontrol_new omap_mcbsp3_st_controls[] = { SOC_SINGLE_EXT("McBSP3 Sidetone Switch", 2, 0, 1, 0, omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode), OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 0 Volume", -32768, 32767, omap_mcbsp3_get_st_ch0_volume, omap_mcbsp3_set_st_ch0_volume), OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 1 Volume", -32768, 32767, omap_mcbsp3_get_st_ch1_volume, omap_mcbsp3_set_st_ch1_volume), }; int omap_mcbsp_st_add_controls(struct snd_soc_codec *codec, int mcbsp_id) { if (!cpu_is_omap34xx()) return -ENODEV; switch (mcbsp_id) { case 1: /* McBSP 2 */ return snd_soc_add_controls(codec, omap_mcbsp2_st_controls, ARRAY_SIZE(omap_mcbsp2_st_controls)); case 2: /* McBSP 3 */ return snd_soc_add_controls(codec, omap_mcbsp3_st_controls, ARRAY_SIZE(omap_mcbsp3_st_controls)); default: break; } return -EINVAL; } EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls); static int __init snd_omap_mcbsp_init(void) { return snd_soc_register_dais(omap_mcbsp_dai, ARRAY_SIZE(omap_mcbsp_dai)); } 
module_init(snd_omap_mcbsp_init); static void __exit snd_omap_mcbsp_exit(void) { snd_soc_unregister_dais(omap_mcbsp_dai, ARRAY_SIZE(omap_mcbsp_dai)); } module_exit(snd_omap_mcbsp_exit); MODULE_AUTHOR("Jarkko Nikula <jhnikula@gmail.com>"); MODULE_DESCRIPTION("OMAP I2S SoC Interface"); MODULE_LICENSE("GPL");
gpl-2.0
mythos234/cmkernel_zeroltexx
drivers/scsi/libsas/sas_ata.c
1980
24188
/* * Support for SATA devices on Serial Attached SCSI (SAS) controllers * * Copyright (C) 2006 IBM Corporation * * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/async.h> #include <linux/export.h> #include <scsi/sas_ata.h> #include "sas_internal.h" #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" #include "../scsi_transport_api.h" #include <scsi/scsi_eh.h> static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) { /* Cheesy attempt to translate SAS errors into ATA. Hah! */ /* transport error */ if (ts->resp == SAS_TASK_UNDELIVERED) return AC_ERR_ATA_BUS; /* ts->resp == SAS_TASK_COMPLETE */ /* task delivered, what happened afterwards? */ switch (ts->stat) { case SAS_DEV_NO_RESPONSE: return AC_ERR_TIMEOUT; case SAS_INTERRUPTED: case SAS_PHY_DOWN: case SAS_NAK_R_ERR: return AC_ERR_ATA_BUS; case SAS_DATA_UNDERRUN: /* * Some programs that use the taskfile interface * (smartctl in particular) can cause underrun * problems. Ignore these errors, perhaps at our * peril. 
*/ return 0; case SAS_DATA_OVERRUN: case SAS_QUEUE_FULL: case SAS_DEVICE_UNKNOWN: case SAS_SG_ERR: return AC_ERR_INVALID; case SAS_OPEN_TO: case SAS_OPEN_REJECT: SAS_DPRINTK("%s: Saw error %d. What to do?\n", __func__, ts->stat); return AC_ERR_OTHER; case SAM_STAT_CHECK_CONDITION: case SAS_ABORTED_TASK: return AC_ERR_DEV; case SAS_PROTO_RESPONSE: /* This means the ending_fis has the error * value; return 0 here to collect it */ return 0; default: return 0; } } static void sas_ata_task_done(struct sas_task *task) { struct ata_queued_cmd *qc = task->uldd_task; struct domain_device *dev = task->dev; struct task_status_struct *stat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf; struct sas_ha_struct *sas_ha = dev->port->ha; enum ata_completion_errors ac; unsigned long flags; struct ata_link *link; struct ata_port *ap; spin_lock_irqsave(&dev->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &sas_ha->state)) task = NULL; else if (qc && qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, NULL); spin_unlock_irqrestore(&dev->done_lock, flags); /* check if libsas-eh got to the task before us */ if (unlikely(!task)) return; if (!qc) goto qc_already_gone; ap = qc->ap; link = &ap->link; spin_lock_irqsave(ap->lock, flags); /* check if we lost the race with libata/sas_ata_post_internal() */ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) { spin_unlock_irqrestore(ap->lock, flags); if (qc->scsicmd) goto qc_already_gone; else { /* if eh is not involved and the port is frozen then the * ata internal abort process has taken responsibility * for this sas_task */ return; } } if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || ((stat->stat == SAM_STAT_CHECK_CONDITION && dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE); if (!link->sactive) { qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]); } else { link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]); if 
(unlikely(link->eh_info.err_mask)) qc->flags |= ATA_QCFLAG_FAILED; } } else { ac = sas_to_ata_err(stat); if (ac) { SAS_DPRINTK("%s: SAS error %x\n", __func__, stat->stat); /* We saw a SAS error. Send a vague error. */ if (!link->sactive) { qc->err_mask = ac; } else { link->eh_info.err_mask |= AC_ERR_DEV; qc->flags |= ATA_QCFLAG_FAILED; } dev->sata_dev.fis[3] = 0x04; /* status err */ dev->sata_dev.fis[2] = ATA_ERR; } } qc->lldd_task = NULL; ata_qc_complete(qc); spin_unlock_irqrestore(ap->lock, flags); qc_already_gone: list_del_init(&task->list); sas_free_task(task); } static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) { unsigned long flags; struct sas_task *task; struct scatterlist *sg; int ret = AC_ERR_SYSTEM; unsigned int si, xfer = 0; struct ata_port *ap = qc->ap; struct domain_device *dev = ap->private_data; struct sas_ha_struct *sas_ha = dev->port->ha; struct Scsi_Host *host = sas_ha->core.shost; struct sas_internal *i = to_sas_internal(host->transportt); /* TODO: audit callers to ensure they are ready for qc_issue to * unconditionally re-enable interrupts */ local_irq_save(flags); spin_unlock(ap->lock); /* If the device fell off, no sense in issuing commands */ if (test_bit(SAS_DEV_GONE, &dev->state)) goto out; task = sas_alloc_task(GFP_ATOMIC); if (!task) goto out; task->dev = dev; task->task_proto = SAS_PROTOCOL_STP; task->task_done = sas_ata_task_done; if (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ) { /* Need to zero out the tag libata assigned us */ qc->tf.nsect = 0; } ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); task->uldd_task = qc; if (ata_is_atapi(qc->tf.protocol)) { memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg->length; task->total_xfer_len = xfer; task->num_scatter = si; } task->data_dir = qc->dma_dir; task->scatter = qc->sg; 
task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; qc->lldd_task = task; switch (qc->tf.protocol) { case ATA_PROT_NCQ: task->ata_task.use_ncq = 1; /* fall through */ case ATAPI_PROT_DMA: case ATA_PROT_DMA: task->ata_task.dma_xfer = 1; break; } if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, task); if (sas_ha->lldd_max_execute_num < 2) ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); else ret = sas_queue_up(task); /* Examine */ if (ret) { SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, NULL); sas_free_task(task); ret = AC_ERR_SYSTEM; } out: spin_lock(ap->lock); local_irq_restore(flags); return ret; } static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) { struct domain_device *dev = qc->ap->private_data; ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf); return true; } static struct sas_internal *dev_to_sas_internal(struct domain_device *dev) { return to_sas_internal(dev->port->ha->core.shost->transportt); } static void sas_get_ata_command_set(struct domain_device *dev); int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) { if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) dev->tproto |= SAS_SATA_DEV; if (phy->attached_dev_type == SAS_SATA_PENDING) dev->dev_type = SAS_SATA_PENDING; else { int res; dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { SAS_DPRINTK("report phy sata to %016llx:0x%x returned " "0x%x\n", SAS_ADDR(dev->parent->sas_addr), phy->phy_id, res); return res; } memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis, sizeof(struct dev_to_host_fis)); /* TODO switch to ata_dev_classify() */ sas_get_ata_command_set(dev); } return 0; } static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) { int res; /* we weren't pending, so successfully end the reset sequence now */ if (dev->dev_type != 
SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the * lldd so it can pick up new parameters? */ res = sas_get_ata_info(dev, phy); if (res) return 0; /* retry */ else return 1; } static int smp_ata_check_ready(struct ata_link *link) { int res; struct ata_port *ap = link->ap; struct domain_device *dev = ap->private_data; struct domain_device *ex_dev = dev->parent; struct sas_phy *phy = sas_get_local_phy(dev); struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number]; res = sas_ex_phy_discover(ex_dev, phy->number); sas_put_local_phy(phy); /* break the wait early if the expander is unreachable, * otherwise keep polling */ if (res == -ECOMM) return res; if (res != SMP_RESP_FUNC_ACC) return 0; switch (ex_phy->attached_dev_type) { case SAS_SATA_PENDING: return 0; case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: return -ENODEV; } } static int local_ata_check_ready(struct ata_link *link) { struct ata_port *ap = link->ap; struct domain_device *dev = ap->private_data; struct sas_internal *i = dev_to_sas_internal(dev); if (i->dft->lldd_ata_check_ready) return i->dft->lldd_ata_check_ready(dev); else { /* lldd's that don't implement 'ready' checking get the * old default behavior of not coordinating reset * recovery with libata */ return 1; } } static int sas_ata_printk(const char *level, const struct domain_device *ddev, const char *fmt, ...) 
{ struct ata_port *ap = ddev->sata_dev.ap; struct device *dev = &ddev->rphy->dev; struct va_format vaf; va_list args; int r; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; r = printk("%ssas: ata%u: %s: %pV", level, ap->print_id, dev_name(dev), &vaf); va_end(args); return r; } static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, unsigned long deadline) { int ret = 0, res; struct sas_phy *phy; struct ata_port *ap = link->ap; int (*check_ready)(struct ata_link *link); struct domain_device *dev = ap->private_data; struct sas_internal *i = dev_to_sas_internal(dev); res = i->dft->lldd_I_T_nexus_reset(dev); if (res == -ENODEV) return res; if (res != TMF_RESP_FUNC_COMPLETE) sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n"); phy = sas_get_local_phy(dev); if (scsi_is_sas_phy_local(phy)) check_ready = local_ata_check_ready; else check_ready = smp_ata_check_ready; sas_put_local_phy(phy); ret = ata_wait_after_reset(link, deadline, check_ready); if (ret && ret != -EAGAIN) sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret); /* XXX: if the class changes during the reset the upper layer * should be informed, if the device has gone away we assume * libsas will eventually delete it */ switch (dev->sata_dev.command_set) { case ATA_COMMAND_SET: *class = ATA_DEV_ATA; break; case ATAPI_COMMAND_SET: *class = ATA_DEV_ATAPI; break; } ap->cbl = ATA_CBL_SATA; return ret; } /* * notify the lldd to forget the sas_task for this internal ata command * that bypasses scsi-eh */ static void sas_ata_internal_abort(struct sas_task *task) { struct sas_internal *si = dev_to_sas_internal(task->dev); unsigned long flags; int res; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED || task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("%s: Task %p already finished.\n", __func__, task); goto out; } task->task_state_flags |= 
SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); res = si->dft->lldd_abort_task(task); spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE || res == TMF_RESP_FUNC_COMPLETE) { spin_unlock_irqrestore(&task->task_state_lock, flags); goto out; } /* XXX we are not prepared to deal with ->lldd_abort_task() * failures. TODO: lldds need to unconditionally forget about * aborted ata tasks, otherwise we (likely) leak the sas task * here */ SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); return; out: list_del_init(&task->list); sas_free_task(task); } static void sas_ata_post_internal(struct ata_queued_cmd *qc) { if (qc->flags & ATA_QCFLAG_FAILED) qc->err_mask |= AC_ERR_OTHER; if (qc->err_mask) { /* * Find the sas_task and kill it. By this point, libata * has decided to kill the qc and has frozen the port. * In this state sas_ata_task_done() will no longer free * the sas_task, so we need to notify the lldd (via * ->lldd_abort_task) that the task is dead and free it * ourselves. 
*/ struct sas_task *task = qc->lldd_task; qc->lldd_task = NULL; if (!task) return; task->uldd_task = NULL; sas_ata_internal_abort(task); } } static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev) { struct domain_device *dev = ap->private_data; struct sas_internal *i = dev_to_sas_internal(dev); if (i->dft->lldd_ata_set_dmamode) i->dft->lldd_ata_set_dmamode(dev); } static void sas_ata_sched_eh(struct ata_port *ap) { struct domain_device *dev = ap->private_data; struct sas_ha_struct *ha = dev->port->ha; unsigned long flags; spin_lock_irqsave(&ha->lock, flags); if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state)) ha->eh_active++; ata_std_sched_eh(ap); spin_unlock_irqrestore(&ha->lock, flags); } void sas_ata_end_eh(struct ata_port *ap) { struct domain_device *dev = ap->private_data; struct sas_ha_struct *ha = dev->port->ha; unsigned long flags; spin_lock_irqsave(&ha->lock, flags); if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state)) ha->eh_active--; spin_unlock_irqrestore(&ha->lock, flags); } static struct ata_port_operations sas_sata_ops = { .prereset = ata_std_prereset, .hardreset = sas_ata_hard_reset, .postreset = ata_std_postreset, .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, .port_start = ata_sas_port_start, .port_stop = ata_sas_port_stop, .set_dmamode = sas_ata_set_dmamode, .sched_eh = sas_ata_sched_eh, .end_eh = sas_ata_end_eh, }; static struct ata_port_info sata_port_info = { .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &sas_sata_ops }; int sas_ata_init(struct domain_device *found_dev) { struct sas_ha_struct *ha = found_dev->port->ha; struct Scsi_Host *shost = ha->core.shost; struct ata_port *ap; int rc; ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops); 
ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host, &sata_port_info, shost); if (!ap) { SAS_DPRINTK("ata_sas_port_alloc failed.\n"); return -ENODEV; } ap->private_data = found_dev; ap->cbl = ATA_CBL_SATA; ap->scsi_host = shost; rc = ata_sas_port_init(ap); if (rc) { ata_sas_port_destroy(ap); return rc; } found_dev->sata_dev.ap = ap; return 0; } void sas_ata_task_abort(struct sas_task *task) { struct ata_queued_cmd *qc = task->uldd_task; struct completion *waiting; /* Bounce SCSI-initiated commands to the SCSI EH */ if (qc->scsicmd) { struct request_queue *q = qc->scsicmd->device->request_queue; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); spin_unlock_irqrestore(q->queue_lock, flags); return; } /* Internal command, fake a timeout and complete. */ qc->flags &= ~ATA_QCFLAG_ACTIVE; qc->flags |= ATA_QCFLAG_FAILED; qc->err_mask |= AC_ERR_TIMEOUT; waiting = qc->private_data; complete(waiting); } static void sas_get_ata_command_set(struct domain_device *dev) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ fis->lbal == 1 && fis->lbam == 0 && fis->lbah == 0 && fis->device == 0) || (fis->sector_count == 0 && /* CE-ATA (mATA) */ fis->lbal == 0 && fis->lbam == 0xCE && fis->lbah == 0xAA && (fis->device & ~0x10) == 0)) dev->sata_dev.command_set = ATA_COMMAND_SET; else if ((fis->interrupt_reason == 1 && /* ATAPI */ fis->lbal == 1 && fis->byte_count_low == 0x14 && fis->byte_count_high == 0xEB && (fis->device & ~0x10) == 0)) dev->sata_dev.command_set = ATAPI_COMMAND_SET; else if ((fis->sector_count == 1 && /* SEMB */ fis->lbal == 1 && fis->lbam == 0x3C && fis->lbah == 0xC3 && fis->device == 0) || (fis->interrupt_reason == 1 && /* SATA PM */ fis->lbal == 1 && fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0)) /* Treat it as a superset? 
*/ dev->sata_dev.command_set = ATAPI_COMMAND_SET; } void sas_probe_sata(struct asd_sas_port *port) { struct domain_device *dev, *n; mutex_lock(&port->ha->disco_mutex); list_for_each_entry(dev, &port->disco_list, disco_list_node) { if (!dev_is_sata(dev)) continue; ata_sas_async_probe(dev->sata_dev.ap); } mutex_unlock(&port->ha->disco_mutex); list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { if (!dev_is_sata(dev)) continue; sas_ata_wait_eh(dev); /* if libata could not bring the link up, don't surface * the device */ if (ata_dev_disabled(sas_to_ata_dev(dev))) sas_fail_probe(dev, __func__, -ENODEV); } } static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) { struct domain_device *dev, *n; bool retry = false; list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { int rc; if (!dev_is_sata(dev)) continue; sas_ata_wait_eh(dev); rc = dev->sata_dev.pm_result; if (rc == -EAGAIN) retry = true; else if (rc) { /* since we don't have a * ->port_{suspend|resume} routine in our * ata_port ops, and no entanglements with * acpi, suspend should just be mechanical trip * through eh, catch cases where these * assumptions are invalidated */ WARN_ONCE(1, "failed %s %s error: %d\n", func, dev_name(&dev->rphy->dev), rc); } /* if libata failed to power manage the device, tear it down */ if (ata_dev_disabled(sas_to_ata_dev(dev))) sas_fail_probe(dev, func, -ENODEV); } return retry; } void sas_suspend_sata(struct asd_sas_port *port) { struct domain_device *dev; retry: mutex_lock(&port->ha->disco_mutex); list_for_each_entry(dev, &port->dev_list, dev_list_node) { struct sata_device *sata; if (!dev_is_sata(dev)) continue; sata = &dev->sata_dev; if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND) continue; sata->pm_result = -EIO; ata_sas_port_async_suspend(sata->ap, &sata->pm_result); } mutex_unlock(&port->ha->disco_mutex); if (sas_ata_flush_pm_eh(port, __func__)) goto retry; } void sas_resume_sata(struct asd_sas_port *port) { struct 
domain_device *dev; retry: mutex_lock(&port->ha->disco_mutex); list_for_each_entry(dev, &port->dev_list, dev_list_node) { struct sata_device *sata; if (!dev_is_sata(dev)) continue; sata = &dev->sata_dev; if (sata->ap->pm_mesg.event == PM_EVENT_ON) continue; sata->pm_result = -EIO; ata_sas_port_async_resume(sata->ap, &sata->pm_result); } mutex_unlock(&port->ha->disco_mutex); if (sas_ata_flush_pm_eh(port, __func__)) goto retry; } /** * sas_discover_sata -- discover an STP/SATA domain device * @dev: pointer to struct domain_device of interest * * Devices directly attached to a HA port, have no parents. All other * devices do, and should have their "parent" pointer set appropriately * before calling this function. */ int sas_discover_sata(struct domain_device *dev) { int res; if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); sas_fill_in_rphy(dev, dev->rphy); res = sas_notify_lldd_dev_found(dev); if (res) return res; sas_discover_event(dev->port, DISCE_PROBE); return 0; } static void async_sas_ata_eh(void *data, async_cookie_t cookie) { struct domain_device *dev = data; struct ata_port *ap = dev->sata_dev.ap; struct sas_ha_struct *ha = dev->port->ha; sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n"); ata_scsi_port_error_handler(ha->core.shost, ap); sas_put_device(dev); } void sas_ata_strategy_handler(struct Scsi_Host *shost) { struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); ASYNC_DOMAIN_EXCLUSIVE(async); int i; /* it's ok to defer revalidation events during ata eh, these * disks are in one of three states: * 1/ present for initial domain discovery, and these * resets will cause bcn flutters * 2/ hot removed, we'll discover that after eh fails * 3/ hot added after initial discovery, lost the race, and need * to catch the next train. 
*/ sas_disable_revalidation(sas_ha); spin_lock_irq(&sas_ha->phy_port_lock); for (i = 0; i < sas_ha->num_phys; i++) { struct asd_sas_port *port = sas_ha->sas_port[i]; struct domain_device *dev; spin_lock(&port->dev_list_lock); list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (!dev_is_sata(dev)) continue; /* hold a reference over eh since we may be * racing with final remove once all commands * are completed */ kref_get(&dev->kref); async_schedule_domain(async_sas_ata_eh, dev, &async); } spin_unlock(&port->dev_list_lock); } spin_unlock_irq(&sas_ha->phy_port_lock); async_synchronize_full_domain(&async); sas_enable_revalidation(sas_ha); } void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, struct list_head *done_q) { struct scsi_cmnd *cmd, *n; struct domain_device *eh_dev; do { LIST_HEAD(sata_q); eh_dev = NULL; list_for_each_entry_safe(cmd, n, work_q, eh_entry) { struct domain_device *ddev = cmd_to_domain_dev(cmd); if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd)) continue; if (eh_dev && eh_dev != ddev) continue; eh_dev = ddev; list_move(&cmd->eh_entry, &sata_q); } if (!list_empty(&sata_q)) { struct ata_port *ap = eh_dev->sata_dev.ap; sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n"); ata_scsi_cmd_error_handler(shost, ap, &sata_q); /* * ata's error handler may leave the cmd on the list * so make sure they don't remain on a stack list * about to go out of scope. * * This looks strange, since the commands are * now part of no list, but the next error * action will be ata_port_error_handler() * which takes no list and sweeps them up * anyway from the ata tag array. 
*/ while (!list_empty(&sata_q)) list_del_init(sata_q.next); } } while (eh_dev); } void sas_ata_schedule_reset(struct domain_device *dev) { struct ata_eh_info *ehi; struct ata_port *ap; unsigned long flags; if (!dev_is_sata(dev)) return; ap = dev->sata_dev.ap; ehi = &ap->link.eh_info; spin_lock_irqsave(ap->lock, flags); ehi->err_mask |= AC_ERR_TIMEOUT; ehi->action |= ATA_EH_RESET; ata_port_schedule_eh(ap); spin_unlock_irqrestore(ap->lock, flags); } EXPORT_SYMBOL_GPL(sas_ata_schedule_reset); void sas_ata_wait_eh(struct domain_device *dev) { struct ata_port *ap; if (!dev_is_sata(dev)) return; ap = dev->sata_dev.ap; ata_port_wait_eh(ap); }
gpl-2.0
garwynn/SC02E_MA6_Kernel
drivers/xen/tmem.c
2236
6421
/* * Xen implementation for transcendent memory (tmem) * * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. * Author: Dan Magenheimer */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/cleancache.h> #include <xen/xen.h> #include <xen/interface/xen.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> #include <asm/xen/hypervisor.h> #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_NEW_PAGE 3 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_VERSION_SHIFT 24 struct tmem_pool_uuid { u64 uuid_lo; u64 uuid_hi; }; struct tmem_oid { u64 oid[3]; }; #define TMEM_POOL_PRIVATE_UUID { 0, 0 } /* flags for tmem_ops.new_pool */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 /* xen tmem foundation ops/hypercalls */ static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid, u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len) { struct tmem_op op; int rc = 0; op.cmd = tmem_cmd; op.pool_id = tmem_pool; op.u.gen.oid[0] = oid.oid[0]; op.u.gen.oid[1] = oid.oid[1]; op.u.gen.oid[2] = oid.oid[2]; op.u.gen.index = index; op.u.gen.tmem_offset = tmem_offset; op.u.gen.pfn_offset = pfn_offset; op.u.gen.len = len; set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn); rc = HYPERVISOR_tmem_op(&op); return rc; } static int xen_tmem_new_pool(struct tmem_pool_uuid uuid, u32 flags, unsigned long pagesize) { struct tmem_op op; int rc = 0, pageshift; for (pageshift = 0; pagesize != 1; pageshift++) pagesize >>= 1; flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT; flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT; op.cmd = TMEM_NEW_POOL; op.u.new.uuid[0] = uuid.uuid_lo; 
op.u.new.uuid[1] = uuid.uuid_hi; op.u.new.flags = flags; rc = HYPERVISOR_tmem_op(&op); return rc; } /* xen generic tmem ops */ static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) { unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, gmfn, 0, 0, 0); } static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) { unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, gmfn, 0, 0, 0); } static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) { return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index, 0, 0, 0, 0); } static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid) { return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); } static int xen_tmem_destroy_pool(u32 pool_id) { struct tmem_oid oid = { { 0 } }; return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0); } int tmem_enabled; static int __init enable_tmem(char *s) { tmem_enabled = 1; return 1; } __setup("tmem", enable_tmem); /* cleancache ops */ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key, pgoff_t index, struct page *page) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; unsigned long pfn = page_to_pfn(page); if (pool < 0) return; if (ind != index) return; mb(); /* ensure page is quiescent; tmem may address it with an alias */ (void)xen_tmem_put_page((u32)pool, oid, ind, pfn); } static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, pgoff_t index, struct page *page) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; unsigned long pfn = page_to_pfn(page); int ret; /* translate return values to linux semantics */ if (pool < 0) return -1; if (ind != index) return -1; ret = xen_tmem_get_page((u32)pool, oid, ind, pfn); if (ret == 1) return 0; else return -1; } static 
void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key, pgoff_t index) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; if (pool < 0) return; if (ind != index) return; (void)xen_tmem_flush_page((u32)pool, oid, ind); } static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key) { struct tmem_oid oid = *(struct tmem_oid *)&key; if (pool < 0) return; (void)xen_tmem_flush_object((u32)pool, oid); } static void tmem_cleancache_flush_fs(int pool) { if (pool < 0) return; (void)xen_tmem_destroy_pool((u32)pool); } static int tmem_cleancache_init_fs(size_t pagesize) { struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID; return xen_tmem_new_pool(uuid_private, 0, pagesize); } static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) { struct tmem_pool_uuid shared_uuid; shared_uuid.uuid_lo = *(u64 *)uuid; shared_uuid.uuid_hi = *(u64 *)(&uuid[8]); return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); } static int use_cleancache = 1; static int __init no_cleancache(char *s) { use_cleancache = 0; return 1; } __setup("nocleancache", no_cleancache); static struct cleancache_ops tmem_cleancache_ops = { .put_page = tmem_cleancache_put_page, .get_page = tmem_cleancache_get_page, .flush_page = tmem_cleancache_flush_page, .flush_inode = tmem_cleancache_flush_inode, .flush_fs = tmem_cleancache_flush_fs, .init_shared_fs = tmem_cleancache_init_shared_fs, .init_fs = tmem_cleancache_init_fs }; static int __init xen_tmem_init(void) { struct cleancache_ops old_ops; if (!xen_domain()) return 0; #ifdef CONFIG_CLEANCACHE BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); if (tmem_enabled && use_cleancache) { char *s = ""; old_ops = cleancache_register_ops(&tmem_cleancache_ops); if (old_ops.init_fs != NULL) s = " (WARNING: cleancache_ops overridden)"; printk(KERN_INFO "cleancache enabled, RAM provided by " "Xen Transcendent Memory%s\n", s); } #endif return 0; } module_init(xen_tmem_init)
gpl-2.0
DC07/spirit_sprout
sound/aoa/soundbus/i2sbus/core.c
2236
12428
/* * i2sbus driver * * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <asm/macio.h> #include <asm/dbdma.h> #include "../soundbus.h" #include "i2sbus.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); MODULE_DESCRIPTION("Apple Soundbus: I2S support"); static int force; module_param(force, int, 0444); MODULE_PARM_DESC(force, "Force loading i2sbus even when" " no layout-id property is present"); static struct of_device_id i2sbus_match[] = { { .name = "i2s" }, { } }; MODULE_DEVICE_TABLE(of, i2sbus_match); static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev, struct dbdma_command_mem *r, int numcmds) { /* one more for rounding, one for branch back, one for stop command */ r->size = (numcmds + 3) * sizeof(struct dbdma_cmd); /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ r->space = dma_alloc_coherent( &macio_get_pci_dev(i2sdev->macio)->dev, r->size, &r->bus_addr, GFP_KERNEL); if (!r->space) return -ENOMEM; memset(r->space, 0, r->size); r->cmds = (void*)DBDMA_ALIGN(r->space); r->bus_cmd_start = r->bus_addr + (dma_addr_t)((char*)r->cmds - (char*)r->space); return 0; } static void free_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev, struct dbdma_command_mem *r) { if (!r->space) return; dma_free_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, r->size, r->space, r->bus_addr); } static void i2sbus_release_dev(struct device *dev) { struct i2sbus_dev *i2sdev; int i; i2sdev = container_of(dev, struct i2sbus_dev, sound.ofdev.dev); if (i2sdev->intfregs) iounmap(i2sdev->intfregs); if (i2sdev->out.dbdma) iounmap(i2sdev->out.dbdma); if (i2sdev->in.dbdma) iounmap(i2sdev->in.dbdma); for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) if 
(i2sdev->allocated_resource[i]) release_and_free_resource(i2sdev->allocated_resource[i]); free_dbdma_descriptor_ring(i2sdev, &i2sdev->out.dbdma_ring); free_dbdma_descriptor_ring(i2sdev, &i2sdev->in.dbdma_ring); for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) free_irq(i2sdev->interrupts[i], i2sdev); i2sbus_control_remove_dev(i2sdev->control, i2sdev); mutex_destroy(&i2sdev->lock); kfree(i2sdev); } static irqreturn_t i2sbus_bus_intr(int irq, void *devid) { struct i2sbus_dev *dev = devid; u32 intreg; spin_lock(&dev->low_lock); intreg = in_le32(&dev->intfregs->intr_ctl); /* acknowledge interrupt reasons */ out_le32(&dev->intfregs->intr_ctl, intreg); spin_unlock(&dev->low_lock); return IRQ_HANDLED; } /* * XXX FIXME: We test the layout_id's here to get the proper way of * mapping in various registers, thanks to bugs in Apple device-trees. * We could instead key off the machine model and the name of the i2s * node (i2s-a). This we'll do when we move it all to macio_asic.c * and have that export items for each sub-node too. */ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index, int layout, struct resource *res) { struct device_node *parent; int pindex, rc = -ENXIO; const u32 *reg; /* Machines with layout 76 and 36 (K2 based) have a weird device * tree what we need to special case. * Normal machines just fetch the resource from the i2s-X node. * Darwin further divides normal machines into old and new layouts * with a subtely different code path but that doesn't seem necessary * in practice, they just bloated it. In addition, even on our K2 * case the i2s-modem node, if we ever want to handle it, uses the * normal layout */ if (layout != 76 && layout != 36) return of_address_to_resource(np, index, res); parent = of_get_parent(np); pindex = (index == aoa_resource_i2smmio) ? 
0 : 1; rc = of_address_to_resource(parent, pindex, res); if (rc) goto bail; reg = of_get_property(np, "reg", NULL); if (reg == NULL) { rc = -ENXIO; goto bail; } res->start += reg[index * 2]; res->end = res->start + reg[index * 2 + 1] - 1; bail: of_node_put(parent); return rc; } /* FIXME: look at device node refcounting */ static int i2sbus_add_dev(struct macio_dev *macio, struct i2sbus_control *control, struct device_node *np) { struct i2sbus_dev *dev; struct device_node *child = NULL, *sound = NULL; struct resource *r; int i, layout = 0, rlen, ok = force; static const char *rnames[] = { "i2sbus: %s (control)", "i2sbus: %s (tx)", "i2sbus: %s (rx)" }; static irq_handler_t ints[] = { i2sbus_bus_intr, i2sbus_tx_intr, i2sbus_rx_intr }; if (strlen(np->name) != 5) return 0; if (strncmp(np->name, "i2s-", 4)) return 0; dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL); if (!dev) return 0; i = 0; while ((child = of_get_next_child(np, child))) { if (strcmp(child->name, "sound") == 0) { i++; sound = child; } } if (i == 1) { const u32 *id = of_get_property(sound, "layout-id", NULL); if (id) { layout = *id; snprintf(dev->sound.modalias, 32, "sound-layout-%d", layout); ok = 1; } else { id = of_get_property(sound, "device-id", NULL); /* * We probably cannot handle all device-id machines, * so restrict to those we do handle for now. */ if (id && (*id == 22 || *id == 14 || *id == 35 || *id == 44)) { snprintf(dev->sound.modalias, 32, "aoa-device-id-%d", *id); ok = 1; layout = -1; } } } /* for the time being, until we can handle non-layout-id * things in some fabric, refuse to attach if there is no * layout-id property or we haven't been forced to attach. * When there are two i2s busses and only one has a layout-id, * then this depends on the order, but that isn't important * either as the second one in that case is just a modem. 
*/ if (!ok) { kfree(dev); return -ENODEV; } mutex_init(&dev->lock); spin_lock_init(&dev->low_lock); dev->sound.ofdev.archdata.dma_mask = macio->ofdev.archdata.dma_mask; dev->sound.ofdev.dev.of_node = np; dev->sound.ofdev.dev.dma_mask = &dev->sound.ofdev.archdata.dma_mask; dev->sound.ofdev.dev.parent = &macio->ofdev.dev; dev->sound.ofdev.dev.release = i2sbus_release_dev; dev->sound.attach_codec = i2sbus_attach_codec; dev->sound.detach_codec = i2sbus_detach_codec; dev->sound.pcmid = -1; dev->macio = macio; dev->control = control; dev->bus_number = np->name[4] - 'a'; INIT_LIST_HEAD(&dev->sound.codec_list); for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) { dev->interrupts[i] = -1; snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name); } for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) { int irq = irq_of_parse_and_map(np, i); if (request_irq(irq, ints[i], 0, dev->rnames[i], dev)) goto err; dev->interrupts[i] = irq; } /* Resource handling is problematic as some device-trees contain * useless crap (ugh ugh ugh). We work around that here by calling * specific functions for calculating the appropriate resources. * * This will all be moved to macio_asic.c at one point */ for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) { if (i2sbus_get_and_fixup_rsrc(np,i,layout,&dev->resources[i])) goto err; /* If only we could use our resource dev->resources[i]... * but request_resource doesn't know about parents and * contained resources... 
*/ dev->allocated_resource[i] = request_mem_region(dev->resources[i].start, resource_size(&dev->resources[i]), dev->rnames[i]); if (!dev->allocated_resource[i]) { printk(KERN_ERR "i2sbus: failed to claim resource %d!\n", i); goto err; } } r = &dev->resources[aoa_resource_i2smmio]; rlen = resource_size(r); if (rlen < sizeof(struct i2s_interface_regs)) goto err; dev->intfregs = ioremap(r->start, rlen); r = &dev->resources[aoa_resource_txdbdma]; rlen = resource_size(r); if (rlen < sizeof(struct dbdma_regs)) goto err; dev->out.dbdma = ioremap(r->start, rlen); r = &dev->resources[aoa_resource_rxdbdma]; rlen = resource_size(r); if (rlen < sizeof(struct dbdma_regs)) goto err; dev->in.dbdma = ioremap(r->start, rlen); if (!dev->intfregs || !dev->out.dbdma || !dev->in.dbdma) goto err; if (alloc_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring, MAX_DBDMA_COMMANDS)) goto err; if (alloc_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring, MAX_DBDMA_COMMANDS)) goto err; if (i2sbus_control_add_dev(dev->control, dev)) { printk(KERN_ERR "i2sbus: control layer didn't like bus\n"); goto err; } if (soundbus_add_one(&dev->sound)) { printk(KERN_DEBUG "i2sbus: device registration error!\n"); goto err; } /* enable this cell */ i2sbus_control_cell(dev->control, dev, 1); i2sbus_control_enable(dev->control, dev); i2sbus_control_clock(dev->control, dev, 1); return 1; err: for (i=0;i<3;i++) if (dev->interrupts[i] != -1) free_irq(dev->interrupts[i], dev); free_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring); free_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring); if (dev->intfregs) iounmap(dev->intfregs); if (dev->out.dbdma) iounmap(dev->out.dbdma); if (dev->in.dbdma) iounmap(dev->in.dbdma); for (i=0;i<3;i++) if (dev->allocated_resource[i]) release_and_free_resource(dev->allocated_resource[i]); mutex_destroy(&dev->lock); kfree(dev); return 0; } static int i2sbus_probe(struct macio_dev* dev, const struct of_device_id *match) { struct device_node *np = NULL; int got = 0, err; struct i2sbus_control 
*control = NULL; err = i2sbus_control_init(dev, &control); if (err) return err; if (!control) { printk(KERN_ERR "i2sbus_control_init API breakage\n"); return -ENODEV; } while ((np = of_get_next_child(dev->ofdev.dev.of_node, np))) { if (of_device_is_compatible(np, "i2sbus") || of_device_is_compatible(np, "i2s-modem")) { got += i2sbus_add_dev(dev, control, np); } } if (!got) { /* found none, clean up */ i2sbus_control_destroy(control); return -ENODEV; } dev_set_drvdata(&dev->ofdev.dev, control); return 0; } static int i2sbus_remove(struct macio_dev* dev) { struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev); struct i2sbus_dev *i2sdev, *tmp; list_for_each_entry_safe(i2sdev, tmp, &control->list, item) soundbus_remove_one(&i2sdev->sound); return 0; } #ifdef CONFIG_PM static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state) { struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev); struct codec_info_item *cii; struct i2sbus_dev* i2sdev; int err, ret = 0; list_for_each_entry(i2sdev, &control->list, item) { /* Notify Alsa */ if (i2sdev->sound.pcm) { /* Suspend PCM streams */ snd_pcm_suspend_all(i2sdev->sound.pcm); } /* Notify codecs */ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) { err = 0; if (cii->codec->suspend) err = cii->codec->suspend(cii, state); if (err) ret = err; } /* wait until streams are stopped */ i2sbus_wait_for_stop_both(i2sdev); } return ret; } static int i2sbus_resume(struct macio_dev* dev) { struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev); struct codec_info_item *cii; struct i2sbus_dev* i2sdev; int err, ret = 0; list_for_each_entry(i2sdev, &control->list, item) { /* reset i2s bus format etc. 
*/ i2sbus_pcm_prepare_both(i2sdev); /* Notify codecs so they can re-initialize */ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) { err = 0; if (cii->codec->resume) err = cii->codec->resume(cii); if (err) ret = err; } } return ret; } #endif /* CONFIG_PM */ static int i2sbus_shutdown(struct macio_dev* dev) { return 0; } static struct macio_driver i2sbus_drv = { .driver = { .name = "soundbus-i2s", .owner = THIS_MODULE, .of_match_table = i2sbus_match, }, .probe = i2sbus_probe, .remove = i2sbus_remove, #ifdef CONFIG_PM .suspend = i2sbus_suspend, .resume = i2sbus_resume, #endif .shutdown = i2sbus_shutdown, }; static int __init soundbus_i2sbus_init(void) { return macio_register_driver(&i2sbus_drv); } static void __exit soundbus_i2sbus_exit(void) { macio_unregister_driver(&i2sbus_drv); } module_init(soundbus_i2sbus_init); module_exit(soundbus_i2sbus_exit);
gpl-2.0
sktjdgns1189/android_kernel_samsung_kccat6
drivers/video/s1d13xxxfb.c
2236
29055
/* drivers/video/s1d13xxxfb.c * * (c) 2004 Simtec Electronics * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org> * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com> * * Driver for Epson S1D13xxx series framebuffer chips * * Adapted from * linux/drivers/video/skeletonfb.c * linux/drivers/video/epson1355fb.c * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson) * * TODO: - handle dual screen display (CRT and LCD at the same time). * - check_var(), mode change, etc. * - probably not SMP safe :) * - support all bitblt operations on all cards * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/fb.h> #include <linux/spinlock_types.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <asm/io.h> #include <video/s1d13xxxfb.h> #define PFX "s1d13xxxfb: " #define BLIT "s1d13xxxfb_bitblt: " /* * set this to enable debugging on general functions */ #if 0 #define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0) #else #define dbg(fmt, args...) do { } while (0) #endif /* * set this to enable debugging on 2D acceleration */ #if 0 #define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0) #else #define dbg_blit(fmt, args...) 
do { } while (0) #endif /* * we make sure only one bitblt operation is running */ static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock); /* * list of card production ids */ static const int s1d13xxxfb_prod_ids[] = { S1D13505_PROD_ID, S1D13506_PROD_ID, S1D13806_PROD_ID, }; /* * List of card strings */ static const char *s1d13xxxfb_prod_names[] = { "S1D13505", "S1D13506", "S1D13806", }; /* * here we define the default struct fb_fix_screeninfo */ static struct fb_fix_screeninfo s1d13xxxfb_fix = { .id = S1D_FBID, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 0, .ypanstep = 1, .ywrapstep = 0, .accel = FB_ACCEL_NONE, }; static inline u8 s1d13xxxfb_readreg(struct s1d13xxxfb_par *par, u16 regno) { #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT) || defined(CONFIG_PLAT_MAPPI3) regno=((regno & 1) ? (regno & ~1L) : (regno + 1)); #endif return readb(par->regs + regno); } static inline void s1d13xxxfb_writereg(struct s1d13xxxfb_par *par, u16 regno, u8 value) { #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT) || defined(CONFIG_PLAT_MAPPI3) regno=((regno & 1) ? 
(regno & ~1L) : (regno + 1)); #endif writeb(value, par->regs + regno); } static inline void s1d13xxxfb_runinit(struct s1d13xxxfb_par *par, const struct s1d13xxxfb_regval *initregs, const unsigned int size) { int i; for (i = 0; i < size; i++) { if ((initregs[i].addr == S1DREG_DELAYOFF) || (initregs[i].addr == S1DREG_DELAYON)) mdelay((int)initregs[i].value); else { s1d13xxxfb_writereg(par, initregs[i].addr, initregs[i].value); } } /* make sure the hardware can cope with us */ mdelay(1); } static inline void lcd_enable(struct s1d13xxxfb_par *par, int enable) { u8 mode = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE); if (enable) mode |= 0x01; else mode &= ~0x01; s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); } static inline void crt_enable(struct s1d13xxxfb_par *par, int enable) { u8 mode = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE); if (enable) mode |= 0x02; else mode &= ~0x02; s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); } /************************************************************* framebuffer control functions *************************************************************/ static inline void s1d13xxxfb_setup_pseudocolour(struct fb_info *info) { info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->var.red.length = 4; info->var.green.length = 4; info->var.blue.length = 4; } static inline void s1d13xxxfb_setup_truecolour(struct fb_info *info) { info->fix.visual = FB_VISUAL_TRUECOLOR; info->var.bits_per_pixel = 16; info->var.red.length = 5; info->var.red.offset = 11; info->var.green.length = 6; info->var.green.offset = 5; info->var.blue.length = 5; info->var.blue.offset = 0; } /** * s1d13xxxfb_set_par - Alters the hardware state. * @info: frame buffer structure * * Using the fb_var_screeninfo in fb_info we set the depth of the * framebuffer. This function alters the par AND the * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in * fb_info since we are using that data. 
This means we depend on the * data in var inside fb_info to be supported by the hardware. * xxxfb_check_var is always called before xxxfb_set_par to ensure this. * * XXX TODO: write proper s1d13xxxfb_check_var(), without which that * function is quite useless. */ static int s1d13xxxfb_set_par(struct fb_info *info) { struct s1d13xxxfb_par *s1dfb = info->par; unsigned int val; dbg("s1d13xxxfb_set_par: bpp=%d\n", info->var.bits_per_pixel); if ((s1dfb->display & 0x01)) /* LCD */ val = s1d13xxxfb_readreg(s1dfb, S1DREG_LCD_DISP_MODE); /* read colour control */ else /* CRT */ val = s1d13xxxfb_readreg(s1dfb, S1DREG_CRT_DISP_MODE); /* read colour control */ val &= ~0x07; switch (info->var.bits_per_pixel) { case 4: dbg("pseudo colour 4\n"); s1d13xxxfb_setup_pseudocolour(info); val |= 2; break; case 8: dbg("pseudo colour 8\n"); s1d13xxxfb_setup_pseudocolour(info); val |= 3; break; case 16: dbg("true colour\n"); s1d13xxxfb_setup_truecolour(info); val |= 5; break; default: dbg("bpp not supported!\n"); return -EINVAL; } dbg("writing %02x to display mode register\n", val); if ((s1dfb->display & 0x01)) /* LCD */ s1d13xxxfb_writereg(s1dfb, S1DREG_LCD_DISP_MODE, val); else /* CRT */ s1d13xxxfb_writereg(s1dfb, S1DREG_CRT_DISP_MODE, val); info->fix.line_length = info->var.xres * info->var.bits_per_pixel; info->fix.line_length /= 8; dbg("setting line_length to %d\n", info->fix.line_length); dbg("done setup\n"); return 0; } /** * s1d13xxxfb_setcolreg - sets a color register. * @regno: Which register in the CLUT we are programming * @red: The red value which can be up to 16 bits wide * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure * * Returns negative errno on error, or zero on success. 
*/ static int s1d13xxxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct s1d13xxxfb_par *s1dfb = info->par; unsigned int pseudo_val; if (regno >= S1D_PALETTE_SIZE) return -EINVAL; dbg("s1d13xxxfb_setcolreg: %d: rgb=%d,%d,%d, tr=%d\n", regno, red, green, blue, transp); if (info->var.grayscale) red = green = blue = (19595*red + 38470*green + 7471*blue) >> 16; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: if (regno >= 16) return -EINVAL; /* deal with creating pseudo-palette entries */ pseudo_val = (red >> 11) << info->var.red.offset; pseudo_val |= (green >> 10) << info->var.green.offset; pseudo_val |= (blue >> 11) << info->var.blue.offset; dbg("s1d13xxxfb_setcolreg: pseudo %d, val %08x\n", regno, pseudo_val); #if defined(CONFIG_PLAT_MAPPI) ((u32 *)info->pseudo_palette)[regno] = cpu_to_le16(pseudo_val); #else ((u32 *)info->pseudo_palette)[regno] = pseudo_val; #endif break; case FB_VISUAL_PSEUDOCOLOR: s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_ADDR, regno); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, red); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, green); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, blue); break; default: return -ENOSYS; } dbg("s1d13xxxfb_setcolreg: done\n"); return 0; } /** * s1d13xxxfb_blank - blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer * * Blank the screen if blank_mode != 0, else unblank. Return 0 if * blanking succeeded, != 0 if un-/blanking failed due to e.g. a * video mode which doesn't support it. Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown * * Returns negative errno on error, or zero on success. 
*/ static int s1d13xxxfb_blank(int blank_mode, struct fb_info *info) { struct s1d13xxxfb_par *par = info->par; dbg("s1d13xxxfb_blank: blank=%d, info=%p\n", blank_mode, info); switch (blank_mode) { case FB_BLANK_UNBLANK: case FB_BLANK_NORMAL: if ((par->display & 0x01) != 0) lcd_enable(par, 1); if ((par->display & 0x02) != 0) crt_enable(par, 1); break; case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: break; case FB_BLANK_POWERDOWN: lcd_enable(par, 0); crt_enable(par, 0); break; default: return -EINVAL; } /* let fbcon do a soft blank for us */ return ((blank_mode == FB_BLANK_NORMAL) ? 1 : 0); } /** * s1d13xxxfb_pan_display - Pans the display. * @var: frame buffer variable screen structure * @info: frame buffer structure that represents a single frame buffer * * Pan (or wrap, depending on the `vmode' field) the display using the * `yoffset' field of the `var' structure (`xoffset' not yet supported). * If the values don't fit, return -EINVAL. * * Returns negative errno on error, or zero on success. */ static int s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct s1d13xxxfb_par *par = info->par; u32 start; if (var->xoffset != 0) /* not yet ... 
*/ return -EINVAL; if (var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; start = (info->fix.line_length >> 1) * var->yoffset; if ((par->display & 0x01)) { /* LCD */ s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START0, (start & 0xff)); s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START1, ((start >> 8) & 0xff)); s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START2, ((start >> 16) & 0x0f)); } else { /* CRT */ s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START0, (start & 0xff)); s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START1, ((start >> 8) & 0xff)); s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START2, ((start >> 16) & 0x0f)); } return 0; } /************************************************************ functions to handle bitblt acceleration ************************************************************/ /** * bltbit_wait_bitclear - waits for change in register value * @info : frambuffer structure * @bit : value currently in register * @timeout : ... * * waits until value changes FROM bit * */ static u8 bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout) { while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) { udelay(10); if (!--timeout) { dbg_blit("wait_bitclear timeout\n"); break; } } return timeout; } /* * s1d13xxxfb_bitblt_copyarea - accelerated copyarea function * @info : framebuffer structure * @area : fb_copyarea structure * * supports (atleast) S1D13506 * */ static void s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area) { u32 dst, src; u32 stride; u16 reverse = 0; u16 sx = area->sx, sy = area->sy; u16 dx = area->dx, dy = area->dy; u16 width = area->width, height = area->height; u16 bpp; spin_lock(&s1d13xxxfb_bitblt_lock); /* bytes per xres line */ bpp = (info->var.bits_per_pixel >> 3); stride = bpp * info->var.xres; /* reverse, calculate the last pixel in rectangle */ if ((dy > sy) || ((dy == sy) && (dx >= sx))) { dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1))); src = (((sy + height - 1) * stride) 
			 + (bpp * (sx + width - 1)));
		reverse = 1;
		/* not reverse, calculate the first pixel in rectangle */
	} else {
		/* (y * xres) + (bpp * x) */
		dst = (dy * stride) + (bpp * dx);
		src = (sy * stride) + (bpp * sx);
	}

	/* set source address (24-bit value split across three 8-bit registers) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff));
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff);

	/* set destination address (same three-register split) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff));
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff);

	/* program height and width (the engine takes size minus one) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8));

	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8));

	/* negative direction ROP (overlapping move, copy backwards) */
	if (reverse == 1) {
		dbg_blit("(copyarea) negative rop\n");
		s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03);
	} else /* positive direction ROP */ {
		s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02);
		dbg_blit("(copyarea) positive rop\n");
	}

	/* set for rectangle mode and not linear */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);

	/* setup the bpp 1 = 16bpp, 0 = 8bpp */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1));

	/* set words per xres (stride is in bytes, registers take 16-bit words) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9));

	dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy);
	dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy);
	dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1);
	dbg_blit("(copyarea) stride=%d\n", stride);
	dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1),
		(stride >> 1) & 0xff, stride >> 9);

	/*
	 * NOTE(review): colour-compare/expansion register is written before
	 * kicking the engine; exact semantics of 0x0c come from the chip
	 * datasheet and are not visible here — confirm against the S1D13506 docs.
	 */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c);

	/* initialize the engine */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);

	/* wait to complete */
	bltbit_wait_bitclear(info, 0x80, 8000);

	spin_unlock(&s1d13xxxfb_bitblt_lock);
}

/**
 *
 * s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
 * @info : framebuffer structure
 * @rect : fb_fillrect structure
 *
 * Fills @rect with a solid colour using the chip's BitBLT engine.
 * Serialised against other blits via s1d13xxxfb_bitblt_lock and busy-waits
 * for completion before returning.
 *
 * supports (at least 13506)
 *
 **/
static void
s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect)
{
	u32 screen_stride, dest;
	u32 fg;
	/* bytes per pixel (8bpp -> 1, 16bpp -> 2) */
	u16 bpp = (info->var.bits_per_pixel >> 3);

	/* grab spinlock */
	spin_lock(&s1d13xxxfb_bitblt_lock);

	/* bytes per x width */
	screen_stride = (bpp * info->var.xres);

	/* bytes to starting point */
	dest = ((rect->dy * screen_stride) + (bpp * rect->dx));

	dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n"
		"(solidfill) : rect_width=%d, rect_height=%d\n",
		rect->dx, rect->dy, screen_stride, dest,
		rect->width - 1, rect->height - 1);

	dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n",
		info->var.xres, info->var.yres,
		info->var.bits_per_pixel);
	dbg_blit("(solidfill) : rop=%d\n", rect->rop);

	/* We split the destination into the three registers */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff));
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff));
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff));

	/* give information regarding rectangle width */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8));

	/* give information regarding rectangle height */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8));

	/* truecolor/directcolor visuals index into the software palette */
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		fg = ((u32 *)info->pseudo_palette)[rect->color];
		dbg_blit("(solidfill) truecolor/directcolor\n");
		dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg);
	} else {
		fg = rect->color;
		dbg_blit("(solidfill) color = %d\n", rect->color);
	}

	/* set foreground color */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff));
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff);

	/* set rectangular region of memory (rectangle and not linear) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);

	/* set operation mode SOLID_FILL */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL);

	/* set bits per pixel (1 = 16bpp, 0 = 8bpp) */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4));

	/* set the memory offset for the bblt in word sizes */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff);
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9));

	/* and away we go.... */
	s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);

	/* wait until it's done */
	bltbit_wait_bitclear(info, 0x80, 8000);

	/* let others play */
	spin_unlock(&s1d13xxxfb_bitblt_lock);
}

/* framebuffer information structures */
static struct fb_ops s1d13xxxfb_fbops = {
	.owner		= THIS_MODULE,
	.fb_set_par	= s1d13xxxfb_set_par,
	.fb_setcolreg	= s1d13xxxfb_setcolreg,
	.fb_blank	= s1d13xxxfb_blank,
	.fb_pan_display	= s1d13xxxfb_pan_display,

	/* gets replaced at chip detection time (see probe: S1D13506 installs
	   the accelerated bitblt_solidfill/bitblt_copyarea handlers) */
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};

/* Panel data width lookup, indexed [is_tft][(panel_reg >> 4) & 3];
   -1 marks an invalid encoding. */
static int s1d13xxxfb_width_tab[2][4] = {
	{4, 8, 16, -1},
	{9, 12, 18, -1},
};

/**
 * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
 *	hardware setup.
 * @info: frame buffer structure
 *
 * We setup the framebuffer structures according to the current
 * hardware setup. On some machines, the BIOS will have filled
 * the chip registers with such info, on others, these values will
 * have been written in some init procedure. In any case, the
 * software values needs to match the hardware ones.
This is what
 * this function ensures.
 *
 * Note: some of the hardcoded values here might need some love to
 * work on various chips, and might need to no longer be hardcoded.
 */
static void s1d13xxxfb_fetch_hw_state(struct fb_info *info)
{
	struct fb_var_screeninfo *var = &info->var;
	struct fb_fix_screeninfo *fix = &info->fix;
	struct s1d13xxxfb_par *par = info->par;
	u8 panel, display;
	u16 offset;
	u32 xres, yres;
	u32 xres_virtual, yres_virtual;
	int bpp, lcd_bpp;
	int is_color, is_dual, is_tft;
	int lcd_enabled, crt_enabled;

	fix->type = FB_TYPE_PACKED_PIXELS;

	/* general info: bit0 = LCD enabled, bit1 = CRT enabled */
	par->display = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE);
	crt_enabled = (par->display & 0x02) != 0;
	lcd_enabled = (par->display & 0x01) != 0;

	if (lcd_enabled && crt_enabled)
		printk(KERN_WARNING PFX "Warning: LCD and CRT detected, using LCD\n");

	if (lcd_enabled)
		display = s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_MODE);
	else	/* CRT */
		display = s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_MODE);

	/* low 3 bits of the display-mode register encode the depth */
	bpp = display & 0x07;

	switch (bpp) {
	case 2:	/* 4 bpp */
	case 3:	/* 8 bpp */
		var->bits_per_pixel = 8;
		var->red.offset = var->green.offset = var->blue.offset = 0;
		var->red.length = var->green.length = var->blue.length = 8;
		break;
	case 5:	/* 16 bpp */
		s1d13xxxfb_setup_truecolour(info);
		break;
	default:
		dbg("bpp: %i\n", bpp);
	}
	fb_alloc_cmap(&info->cmap, 256, 0);

	/* LCD info */
	panel = s1d13xxxfb_readreg(par, S1DREG_PANEL_TYPE);
	is_color = (panel & 0x04) != 0;
	is_dual = (panel & 0x02) != 0;
	is_tft = (panel & 0x01) != 0;
	lcd_bpp = s1d13xxxfb_width_tab[is_tft][(panel >> 4) & 3];

	if (lcd_enabled) {
		/* horizontal width register holds (chars - 1), 8 pixels each */
		xres = (s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_HWIDTH) + 1) * 8;
		/* vertical height is a 10-bit value split over two registers */
		yres = (s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_VHEIGHT0) +
			((s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_VHEIGHT1) & 0x03) << 8) + 1);

		/* line offset in 16-bit words, 11-bit value over two registers */
		offset = (s1d13xxxfb_readreg(par, S1DREG_LCD_MEM_OFF0) +
			((s1d13xxxfb_readreg(par, S1DREG_LCD_MEM_OFF1) & 0x7) << 8));
	} else { /* crt */
		xres = (s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_HWIDTH) + 1) * 8;
		yres = (s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_VHEIGHT0) +
			((s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_VHEIGHT1) & 0x03) << 8) + 1);

		offset = (s1d13xxxfb_readreg(par, S1DREG_CRT_MEM_OFF0) +
			((s1d13xxxfb_readreg(par, S1DREG_CRT_MEM_OFF1) & 0x7) << 8));
	}

	/* offset is in words (2 bytes), hence *16 bits and line_length = offset*2 */
	xres_virtual = offset * 16 / var->bits_per_pixel;
	yres_virtual = fix->smem_len / (offset * 2);

	var->xres = xres;
	var->yres = yres;
	var->xres_virtual = xres_virtual;
	var->yres_virtual = yres_virtual;
	var->xoffset = var->yoffset = 0;

	fix->line_length = offset * 2;

	var->grayscale = !is_color;

	var->activate = FB_ACTIVATE_NOW;

	dbg(PFX "bpp=%d, lcd_bpp=%d, "
		"crt_enabled=%d, lcd_enabled=%d\n",
		var->bits_per_pixel, lcd_bpp, crt_enabled, lcd_enabled);
	dbg(PFX "xres=%d, yres=%d, vxres=%d, vyres=%d "
		"is_color=%d, is_dual=%d, is_tft=%d\n",
		xres, yres, xres_virtual, yres_virtual, is_color, is_dual, is_tft);
}

/*
 * s1d13xxxfb_remove - tear down the device.
 * Disables video output, puts the chip into powersave, unmaps the register
 * and framebuffer windows, and releases both memory regions.  Also used as
 * the error-unwind path by the probe routine (all steps are guarded so a
 * partially-initialised device unwinds safely).
 */
static int s1d13xxxfb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);
	struct s1d13xxxfb_par *par = NULL;

	if (info) {
		par = info->par;
		if (par && par->regs) {
			/* disable output & enable powersave */
			s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, 0x00);
			s1d13xxxfb_writereg(par, S1DREG_PS_CNF, 0x11);
			iounmap(par->regs);
		}

		fb_dealloc_cmap(&info->cmap);

		if (info->screen_base)
			iounmap(info->screen_base);

		framebuffer_release(info);
	}

	/* resource[0] is VRAM, resource[1] is registers (see probe) */
	release_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start +1);
	release_mem_region(pdev->resource[1].start,
			pdev->resource[1].end - pdev->resource[1].start +1);
	return 0;
}

/*
 * s1d13xxxfb_probe - detect and initialise the chip.
 * Claims both memory resources, maps registers and VRAM, identifies the
 * chip by its revision-code register, and registers the framebuffer.
 * On any failure it jumps to "bail", which calls s1d13xxxfb_remove() to
 * unwind whatever was set up.
 */
static int s1d13xxxfb_probe(struct platform_device *pdev)
{
	struct s1d13xxxfb_par *default_par;
	struct fb_info *info;
	struct s1d13xxxfb_pdata *pdata = NULL;
	int ret = 0;
	int i;
	u8 revision, prod_id;

	dbg("probe called: device is %p\n", pdev);

	printk(KERN_INFO "Epson S1D13XXX FB Driver\n");

	/* enable platform-dependent hardware glue, if any */
	if (pdev->dev.platform_data)
		pdata = pdev->dev.platform_data;
	if (pdata && pdata->platform_init_video)
		pdata->platform_init_video();

	if (pdev->num_resources != 2) {
		dev_err(&pdev->dev, "invalid num_resources: %i\n",
		       pdev->num_resources);
		ret = -ENODEV;
		goto bail;
	}

	/* resource[0] is VRAM, resource[1] is registers */
	if (pdev->resource[0].flags != IORESOURCE_MEM ||
			pdev->resource[1].flags != IORESOURCE_MEM) {
		dev_err(&pdev->dev, "invalid resource type\n");
		ret = -ENODEV;
		goto bail;
	}

	if (!request_mem_region(pdev->resource[0].start,
		pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) {
		dev_dbg(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto bail;
	}

	if (!request_mem_region(pdev->resource[1].start,
		pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) {
		dev_dbg(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto bail;
	}

	/* extra u32[256] tail is the pseudo_palette storage */
	info = framebuffer_alloc(sizeof(struct s1d13xxxfb_par) + sizeof(u32) * 256, &pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto bail;
	}

	platform_set_drvdata(pdev, info);
	default_par = info->par;
	default_par->regs = ioremap_nocache(pdev->resource[1].start,
		pdev->resource[1].end - pdev->resource[1].start +1);
	if (!default_par->regs) {
		printk(KERN_ERR PFX "unable to map registers\n");
		ret = -ENOMEM;
		goto bail;
	}
	info->pseudo_palette = default_par->pseudo_palette;

	info->screen_base = ioremap_nocache(pdev->resource[0].start,
		pdev->resource[0].end - pdev->resource[0].start +1);

	if (!info->screen_base) {
		printk(KERN_ERR PFX "unable to map framebuffer\n");
		ret = -ENOMEM;
		goto bail;
	}

	/* production id is top 6 bits */
	prod_id = s1d13xxxfb_readreg(default_par, S1DREG_REV_CODE) >> 2;
	/* revision id is lower 2 bits */
	revision = s1d13xxxfb_readreg(default_par, S1DREG_REV_CODE) & 0x3;

	ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(s1d13xxxfb_prod_ids); i++) {
		if (prod_id == s1d13xxxfb_prod_ids[i]) {
			/* looks like we got it in our list */
			default_par->prod_id = prod_id;
			default_par->revision = revision;
			ret = 0;
			break;
		}
	}

	if (!ret) {
		printk(KERN_INFO PFX "chip production id %i = %s\n",
			prod_id, s1d13xxxfb_prod_names[i]);
		printk(KERN_INFO PFX "chip revision %i\n", revision);
	} else {
		printk(KERN_INFO PFX
			"unknown chip production id %i, revision %i\n",
			prod_id, revision);
		printk(KERN_INFO PFX "please contact maintainer\n");
		goto bail;
	}

	info->fix = s1d13xxxfb_fix;
	info->fix.mmio_start = pdev->resource[1].start;
	info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
	info->fix.smem_start = pdev->resource[0].start;
	info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
	       default_par->regs, info->fix.smem_len / 1024, info->screen_base);

	info->par = default_par;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
	info->fbops = &s1d13xxxfb_fbops;

	switch(prod_id) {
	case S1D13506_PROD_ID:	/* activate acceleration */
		s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
		s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
			FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
		break;
	default:
		break;
	}

	/* perform "manual" chip initialization, if needed */
	if (pdata && pdata->initregs)
		s1d13xxxfb_runinit(info->par, pdata->initregs, pdata->initregssize);

	s1d13xxxfb_fetch_hw_state(info);

	if (register_framebuffer(info) < 0) {
		ret = -EINVAL;
		goto bail;
	}

	printk(KERN_INFO "fb%d: %s frame buffer device\n",
	       info->node, info->fix.id);

	return 0;

bail:
	/* full unwind; remove() tolerates a partially-initialised device */
	s1d13xxxfb_remove(pdev);
	return ret;

}

#ifdef CONFIG_PM
/*
 * s1d13xxxfb_suspend - power down for suspend.
 * Disables LCD/CRT output, saves the register file to kernel memory
 * (screen-contents save is compiled out, see "#if 0"), then puts the chip
 * into powersave mode.
 */
static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct s1d13xxxfb_par *s1dfb = info->par;
	struct s1d13xxxfb_pdata *pdata = NULL;

	/* disable display */
	lcd_enable(s1dfb, 0);
	crt_enable(s1dfb, 0);

	if (dev->dev.platform_data)
		pdata = dev->dev.platform_data;

#if 0
	/* dead code: saving the whole framebuffer is disabled */
	if (!s1dfb->disp_save)
		s1dfb->disp_save = kmalloc(info->fix.smem_len, GFP_KERNEL);

	if (!s1dfb->disp_save) {
		printk(KERN_ERR PFX "no memory to save screen");
		return -ENOMEM;
	}

	memcpy_fromio(s1dfb->disp_save, info->screen_base, info->fix.smem_len);
#else
	s1dfb->disp_save = NULL;
#endif

	/* buffer is reused across suspend cycles once allocated */
	if (!s1dfb->regs_save)
		s1dfb->regs_save = kmalloc(info->fix.mmio_len, GFP_KERNEL);

	if (!s1dfb->regs_save) {
		printk(KERN_ERR PFX "no memory to save registers");
		return -ENOMEM;
	}

	/* backup all registers */
	memcpy_fromio(s1dfb->regs_save, s1dfb->regs, info->fix.mmio_len);

	/* now activate power save mode */
	s1d13xxxfb_writereg(s1dfb, S1DREG_PS_CNF, 0x11);

	if (pdata && pdata->platform_suspend_video)
		return pdata->platform_suspend_video();
	else
		return 0;
}

/*
 * s1d13xxxfb_resume - counterpart of suspend.
 * Wakes the chip, busy-waits for SDRAM readiness, restores the saved
 * register file (and screen contents, if any were saved), then re-enables
 * whichever outputs were active per the cached display mode.
 */
static int s1d13xxxfb_resume(struct platform_device *dev)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct s1d13xxxfb_par *s1dfb = info->par;
	struct s1d13xxxfb_pdata *pdata = NULL;

	/* awaken the chip */
	s1d13xxxfb_writereg(s1dfb, S1DREG_PS_CNF, 0x10);

	/* do not let go until SDRAM "wakes up" */
	while ((s1d13xxxfb_readreg(s1dfb, S1DREG_PS_STATUS) & 0x01))
		udelay(10);

	if (dev->dev.platform_data)
		pdata = dev->dev.platform_data;

	if (s1dfb->regs_save) {
		/* will write RO regs, *should* get away with it :) */
		memcpy_toio(s1dfb->regs, s1dfb->regs_save, info->fix.mmio_len);
		kfree(s1dfb->regs_save);
	}

	if (s1dfb->disp_save) {
		memcpy_toio(info->screen_base, s1dfb->disp_save,
				info->fix.smem_len);
		kfree(s1dfb->disp_save);	/* XXX kmalloc()'d when? */
	}

	/* re-enable the outputs that were active before suspend */
	if ((s1dfb->display & 0x01) != 0)
		lcd_enable(s1dfb, 1);
	if ((s1dfb->display & 0x02) != 0)
		crt_enable(s1dfb, 1);

	if (pdata && pdata->platform_resume_video)
		return pdata->platform_resume_video();
	else
		return 0;
}
#endif /* CONFIG_PM */

static struct platform_driver s1d13xxxfb_driver = {
	.probe		= s1d13xxxfb_probe,
	.remove		= s1d13xxxfb_remove,
#ifdef CONFIG_PM
	.suspend	= s1d13xxxfb_suspend,
	.resume		= s1d13xxxfb_resume,
#endif
	.driver		= {
		.name	= S1D_DEVICENAME,
	},
};

static int __init s1d13xxxfb_init(void)
{
#ifndef MODULE
	/* honour video= command-line disabling when built in */
	if (fb_get_options("s1d13xxxfb", NULL))
		return -ENODEV;
#endif

	return platform_driver_register(&s1d13xxxfb_driver);
}

static void __exit s1d13xxxfb_exit(void)
{
	platform_driver_unregister(&s1d13xxxfb_driver);
}

module_init(s1d13xxxfb_init);
module_exit(s1d13xxxfb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Framebuffer driver for S1D13xxx devices");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, Thibaut VARENE <varenet@parisc-linux.org>");
gpl-2.0
Motorhead1991/android_kernel_samsung_geim
arch/ia64/xen/hypervisor.c
4540
2837
/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/privop.h>

#include "irq_xen.h"

/* On ia64 the shared_info page lives at the fixed XSI_BASE address. */
struct shared_info *HYPERVISOR_shared_info __read_mostly =
	(struct shared_info *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Per-CPU pointer into shared_info->vcpu_info[], set by xen_vcpu_setup(). */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

struct start_info *xen_start_info;
EXPORT_SYMBOL(xen_start_info);

EXPORT_SYMBOL(xen_domain_type);

EXPORT_SYMBOL(__hypercall);

/* Stolen from arch/x86/xen/enlighten.c */

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs. We assume it is to start with, and then set it to zero on
 * the first failure. This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if the all are.
 *
 * 0: not available, 1: available
 */

/*
 * Point @cpu's xen_vcpu pointer at its slot in the shared_info page.
 * (No hypercall-based placement here, unlike the x86 version.)
 */
static void __init xen_vcpu_setup(int cpu)
{
	/*
	 * WARNING:
	 * before changing MAX_VIRT_CPUS,
	 * check that shared_info fits on a page
	 */
	BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}

/* Initialise xen_vcpu for every possible CPU at boot. */
void __init xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);
}

/* Per-CPU bringup hook: wire up the Xen SMP interrupt handlers. */
void __cpuinit xen_cpu_init(void)
{
	xen_smp_intr_init();
}

/**************************************************************************
 * opt feature
 */
void
xen_ia64_enable_opt_feature(void)
{
	/* Enable region 7 identity map optimizations in Xen */
	struct xen_ia64_opt_feature optf;

	optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
	optf.on = XEN_IA64_OPTF_ON;
	optf.pgprot = pgprot_val(PAGE_KERNEL);
	optf.key = 0;	/* No key on linux. */
	HYPERVISOR_opt_feature(&optf);
}
gpl-2.0
jamiethemorris/sense-dna-kernel
drivers/net/ethernet/cirrus/cs89x0.c
4796
59779
/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
 * driver for linux.
 */

/*
	Written 1996 by Russell Nelson, with reference to skeleton.c
	written 1993-1994 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	The author may be reached at nelson@crynwr.com, Crynwr
	Software, 521 Pleasant Valley Rd., Potsdam, NY 13676

  Changelog:

  Mike Cruse        : mcruse@cti-ltd.com
                    : Changes for Linux 2.0 compatibility.
                    : Added dev_id parameter in net_interrupt(),
                    : request_irq() and free_irq(). Just NULL for now.

  Mike Cruse        : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
                    : in net_open() and net_close() so kerneld would know
                    : that the module is in use and wouldn't eject the
                    : driver prematurely.

  Mike Cruse        : Rewrote init_module() and cleanup_module using 8390.c
                    : as an example. Disabled autoprobing in init_module(),
                    : not a good thing to do to other devices while Linux
                    : is running from all accounts.

  Russ Nelson       : Jul 13 1998. Added RxOnly DMA support.

  Melody Lee        : Aug 10 1999. Changes for Linux 2.2.5 compatibility.
                    : email: ethernet@crystal.cirrus.com

  Alan Cox          : Removed 1.2 support, added 2.1 extra counters.

  Andrew Morton     : Kernel 2.3.48
                    : Handle kmalloc() failures
                    : Other resource allocation fixes
                    : Add SMP locks
                    : Integrate Russ Nelson's ALLOW_DMA functionality back in.
                    : If ALLOW_DMA is true, make DMA runtime selectable
                    : Folded in changes from Cirrus (Melody Lee
                    : <klee@crystal.cirrus.com>)
                    : Don't call netif_wake_queue() in net_send_packet()
                    : Fixed an out-of-mem bug in dma_rx()
                    : Updated Documentation/networking/cs89x0.txt

  Andrew Morton     : Kernel 2.3.99-pre1
                    : Use skb_reserve to longword align IP header (two places)
                    : Remove a delay loop from dma_rx()
                    : Replace '100' with HZ
                    : Clean up a couple of skb API abuses
                    : Added 'cs89x0_dma=N' kernel boot option
                    : Correctly initialise lp->lock in non-module compile

  Andrew Morton     : Kernel 2.3.99-pre4-1
                    : MOD_INC/DEC race fix (see
                    : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)

  Andrew Morton     : Kernel 2.4.0-test7-pre2
                    : Enhanced EEPROM support to cover more devices,
                    : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
                    : (Jason Gunthorpe <jgg@ualberta.ca>)

  Andrew Morton     : Kernel 2.4.0-test11-pre4
                    : Use dev->name in request_*() (Andrey Panin)
                    : Fix an error-path memleak in init_module()
                    : Preserve return value from request_irq()
                    : Fix type of `media' module parm (Keith Owens)
                    : Use SET_MODULE_OWNER()
                    : Tidied up strange request_irq() abuse in net_open().

  Andrew Morton     : Kernel 2.4.3-pre1
                    : Request correct number of pages for DMA (Hugh Dickens)
                    : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
                    : because unregister_netdev() calls get_stats.
                    : Make `version[]' __initdata
                    : Uninlined the read/write reg/word functions.

  Oskar Schirmer    : oskar@scara.com
                    : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)

  Deepak Saxena     : dsaxena@plexity.net
                    : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support

  Dmitry Pervushin  : dpervushin@ru.mvista.com
                    : PNX010X platform support

  Deepak Saxena     : dsaxena@plexity.net
                    : Intel IXDP2351 platform support

  Dmitry Pervushin  : dpervushin@ru.mvista.com
                    : PNX010X platform support

  Domenico Andreoli : cavokz@gmail.com
                    : QQ2440 platform support

*/

/*
 * Set this to zero to disable DMA code
 *
 * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
 * module options so we don't break any startup scripts.
 */
#ifndef CONFIG_ISA_DMA_API
#define ALLOW_DMA	0
#else
#define ALLOW_DMA	1
#endif

/*
 * Set this to zero to remove all the debug statements via
 * dead code elimination
 */
#define DEBUGGING	1

/*
  Sources:

	Crynwr packet driver epktisa.

	Crystal Semiconductor data sheets.

*/

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <linux/atomic.h>

#if ALLOW_DMA
#include <asm/dma.h>
#endif

#include "cs89x0.h"

static char version[] __initdata =
"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n";

#define DRV_NAME "cs89x0"

/* First, a few definitions that the brave might change.
   A zero-terminated list of I/O addresses to be probed. Some special flags..
   Addr & 1 = Read back the address port, look for signature and reset
   the page window before probing
   Addr & 3 = Reset the page window and probe
   The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
   but it is possible that a Cirrus board could be plugged into the ISA
   slots. */
/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
   them to system IRQ numbers. This mapping is card specific and is set to
   the configuration of the Cirrus Eval board for this chip. */
#if defined(CONFIG_MACH_IXDP2351)
#define CS89x0_NONISA_IRQ
static unsigned int netcard_portlist[] __used __initdata =
	{IXDP2351_VIRT_CS8900_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
#elif defined(CONFIG_ARCH_IXDP2X01)
#define CS89x0_NONISA_IRQ
static unsigned int netcard_portlist[] __used __initdata =
	{IXDP2X01_CS8900_VIRT_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
#else
#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata =
	{ 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
static unsigned int cs8900_irq_map[] = {10,11,12,5};
#endif
#endif

#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
#else
#define net_debug 0	/* gcc will remove all the debug code for us */
#endif

/* The number of low I/O ports used by the ethercard. */
#define NETCARD_IO_EXTENT	16

/* we allow the user to override various values normally set in the EEPROM */
#define FORCE_RJ45	0x0001	/* pick one of these three */
#define FORCE_AUI	0x0002
#define FORCE_BNC	0x0004

#define FORCE_AUTO	0x0010	/* pick one of these three */
#define FORCE_HALF	0x0020
#define FORCE_FULL	0x0030

/* Information that need to be kept for each board. */
struct net_local {
	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
	char chip_revision;	/* revision letter of the chip ('A'...) */
	int send_cmd;		/* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
	int auto_neg_cnf;	/* auto-negotiation word from EEPROM */
	int adapter_cnf;	/* adapter configuration from EEPROM */
	int isa_config;		/* ISA configuration from EEPROM */
	int irq_map;		/* IRQ map from EEPROM */
	int rx_mode;		/* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */
	int curr_rx_cfg;	/* a copy of PP_RxCFG */
	int linectl;		/* either 0 or LOW_RX_SQUELCH, depending on configuration. */
	int send_underrun;	/* keep track of how many underruns in a row we get */
	int force;		/* force various values; see FORCE* above. */
	spinlock_t lock;
#if ALLOW_DMA
	int use_dma;		/* Flag: we're using dma */
	int dma;		/* DMA channel */
	int dmasize;		/* 16 or 64 */
	unsigned char *dma_buff;	/* points to the beginning of the buffer */
	unsigned char *end_dma_buff;	/* points to the end of the buffer */
	unsigned char *rx_dma_ptr;	/* points to the next packet */
#endif
#ifdef CONFIG_CS89x0_PLATFORM
	void __iomem *virt_addr;/* Virtual address for accessing the CS89x0. */
	unsigned long phys_addr;/* Physical address for accessing the CS89x0. */
	unsigned long size;	/* Length of CS89x0 memory region. */
#endif
};

/* Index to functions, as function prototypes.
 */
static int cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular);
static int net_open(struct net_device *dev);
static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev);
static void net_timeout(struct net_device *dev);
static void net_rx(struct net_device *dev);
static int net_close(struct net_device *dev);
static struct net_device_stats *net_get_stats(struct net_device *dev);
static void reset_chip(struct net_device *dev);
static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
static int get_eeprom_cksum(int off, int len, int *buffer);
static int set_mac_address(struct net_device *dev, void *addr);
static void count_rx_errors(int status, struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void net_poll_controller(struct net_device *dev);
#endif
#if ALLOW_DMA
static void get_dma_channel(struct net_device *dev);
static void release_dma_buff(struct net_local *lp);
#endif

/* Example routines you must write ;->. */
#define tx_done(dev) 1

/*
 * Permit 'cs89x0_dma=N' in the kernel boot environment
 */
#if !defined(MODULE) && (ALLOW_DMA != 0)
static int g_cs89x0_dma;

static int __init dma_fn(char *str)
{
	g_cs89x0_dma = simple_strtol(str,NULL,0);
	return 1;
}

__setup("cs89x0_dma=", dma_fn);
#endif	/* !defined(MODULE) && (ALLOW_DMA != 0) */

#ifndef MODULE
static int g_cs89x0_media__force;

/* Parse 'cs89x0_media=rj45|aui|bnc' from the kernel command line. */
static int __init media_fn(char *str)
{
	if (!strcmp(str, "rj45"))
		g_cs89x0_media__force = FORCE_RJ45;
	else if (!strcmp(str, "aui"))
		g_cs89x0_media__force = FORCE_AUI;
	else if (!strcmp(str, "bnc"))
		g_cs89x0_media__force = FORCE_BNC;
	return 1;
}

__setup("cs89x0_media=", media_fn);

#ifndef CONFIG_CS89x0_PLATFORM
/* Check for a network adaptor of this type, and return '0' iff one exists.
   If dev->base_addr == 0, probe all likely locations.
   If dev->base_addr == 1, always return failure.
   If dev->base_addr == 2, allocate space for the device and return success
   (detachable devices only).
   Return 0 on success.
   */

struct net_device * __init cs89x0_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	unsigned *port;
	int err = 0;
	int irq;
	int io;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);
	io = dev->base_addr;
	irq = dev->irq;

	if (net_debug)
		printk("cs89x0:cs89x0_probe(0x%x)\n", io);

	if (io > 0x1ff)	{	/* Check a single specified location. */
		err = cs89x0_probe1(dev, io, 0);
	} else if (io != 0) {	/* Don't probe at all. */
		err = -ENXIO;
	} else {
		/* walk the default port list; probe1 may clobber dev->irq,
		   so restore the boot-setup value after each failed try */
		for (port = netcard_portlist; *port; port++) {
			if (cs89x0_probe1(dev, *port, 0) == 0)
				break;
			dev->irq = irq;
		}
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
	return ERR_PTR(err);
}
#endif
#endif

/* 16-bit chip access: IXDP platforms use raw MMIO (with the port number
   scaled for a 16-bit bus), the default build uses ISA port I/O. */
#if defined(CONFIG_MACH_IXDP2351)
static u16
readword(unsigned long base_addr, int portno)
{
	return __raw_readw(base_addr + (portno << 1));
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	__raw_writew(value, base_addr + (portno << 1));
}
#elif defined(CONFIG_ARCH_IXDP2X01)
static u16
readword(unsigned long base_addr, int portno)
{
	return __raw_readl(base_addr + (portno << 1));
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	__raw_writel(value, base_addr + (portno << 1));
}
#else
static u16
readword(unsigned long base_addr, int portno)
{
	return inw(base_addr + portno);
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	outw(value, base_addr + portno);
}
#endif

/* Read @length 16-bit words from one port into @buf, little-endian. */
static void readwords(unsigned long base_addr, int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = readword(base_addr, portno);
		*buf8++ = (u8)tmp16;
		*buf8++ = (u8)(tmp16 >> 8);
	} while (--length);
}

/* Write @length 16-bit words from @buf to one port, little-endian. */
static void writewords(unsigned long base_addr,
		int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = *buf8++;
		tmp16 |= (*buf8++) << 8;
		writeword(base_addr, portno, tmp16);
	} while (--length);
}

/* Indirect PacketPage register access: select via ADD_PORT, data via DATA_PORT. */
static u16
readreg(struct net_device *dev, u16 regno)
{
	writeword(dev->base_addr, ADD_PORT, regno);
	return readword(dev->base_addr, DATA_PORT);
}

static void
writereg(struct net_device *dev, u16 regno, u16 value)
{
	writeword(dev->base_addr, ADD_PORT, regno);
	writeword(dev->base_addr, DATA_PORT, value);
}

static int __init
wait_eeprom_ready(struct net_device *dev)
{
	int timeout = jiffies;
	/* check to see if the EEPROM is ready, a timeout is used -
	   just in case EEPROM is ready when SI_BUSY in the
	   PP_SelfST is clear */
	/* NOTE(review): raw jiffies arithmetic; kernel convention prefers
	   time_after() to be safe across jiffies wraparound — confirm. */
	while(readreg(dev, PP_SelfST) & SI_BUSY)
		if (jiffies - timeout >= 40)
			return -1;
	return 0;
}

/* Read @len words of EEPROM starting at @off into @buffer; -1 on timeout. */
static int __init
get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
{
	int i;

	if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len);
	for (i = 0; i < len; i++) {
		if (wait_eeprom_ready(dev) < 0) return -1;
		/* Now send the EEPROM read command and EEPROM location to read */
		writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
		if (wait_eeprom_ready(dev) < 0) return -1;
		buffer[i] = readreg(dev, PP_EEData);
		if (net_debug > 3) printk("%04x ", buffer[i]);
	}
	if (net_debug > 3) printk("\n");
	return 0;
}

/* Validate the EEPROM block: 16-bit sum over @buffer must be zero.
   (@off is unused here; kept for a uniform signature with get_eeprom_data.) */
static int __init
get_eeprom_cksum(int off, int len, int *buffer)
{
	int i, cksum;

	cksum = 0;
	for (i = 0; i < len; i++)
		cksum += buffer[i];
	cksum &= 0xffff;
	if (cksum == 0)
		return 0;
	return -1;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void net_poll_controller(struct net_device *dev)
{
	/* Run the normal interrupt handler synchronously with the IRQ masked. */
	disable_irq(dev->irq);
	net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops net_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_tx_timeout		= net_timeout,
	.ndo_start_xmit 	= net_send_packet,
	.ndo_get_stats		= net_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address 	= set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= net_poll_controller,
#endif
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/* This is the real probe routine.  Linux has a history of friendly device
   probes on the ISA bus.  A good device probes avoids doing writes, and
   verifies that the correct device exists and functions.
   Return 0 on success. */

static int __init
cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	static unsigned version_printed;
	int i;
	int tmp;
	unsigned rev_type = 0;
	int eeprom_buff[CHKSUM_LEN];
	int retval;

	/* Initialize the device structure. */
	if (!modular) {
		memset(lp, 0, sizeof(*lp));
		spin_lock_init(&lp->lock);
#ifndef MODULE
#if ALLOW_DMA
		if (g_cs89x0_dma) {
			lp->use_dma = 1;
			lp->dma = g_cs89x0_dma;
			lp->dmasize = 16;	/* Could make this an option... */
		}
#endif
		lp->force = g_cs89x0_media__force;
#endif
	}

	/* Grab the region so we can find another board if autoIRQ fails. */
	/* WTF is going on here? */
	if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
		printk(KERN_ERR "%s: request_region(0x%lx, 0x%x) failed\n",
				DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
		retval = -EBUSY;
		goto out1;
	}

	/* if they give us an odd I/O address, then do ONE write to
	   the address port, to get it back to address zero, where we
	   expect to find the EISA signature word. An IO with a base of 0x3
	   will skip the test for the ADD_PORT. */
	if (ioaddr & 1) {
		if (net_debug > 1)
			printk(KERN_INFO "%s: odd ioaddr 0x%lx\n", dev->name, ioaddr);
	        if ((ioaddr & 2) != 2)
	        	if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
				printk(KERN_ERR "%s: bad signature 0x%x\n",
					dev->name, readword(ioaddr & ~3, ADD_PORT));
		        	retval = -ENODEV;
				goto out2;
			}
	}

	ioaddr &= ~3;
	printk(KERN_DEBUG "PP_addr at %lx[%x]: 0x%x\n",
			ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
	writeword(ioaddr, ADD_PORT, PP_ChipID);

	tmp = readword(ioaddr, DATA_PORT);
	if (tmp != CHIP_EISA_ID_SIG) {
		printk(KERN_DEBUG "%s: incorrect signature at %lx[%x]: 0x%x!="
			CHIP_EISA_ID_SIG_STR "\n",
			dev->name, ioaddr, DATA_PORT, tmp);
  		retval = -ENODEV;
  		goto out2;
	}

	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;

	/* get the chip type */
	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type &~ REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	/* Check the chip type and revision in order to set the correct send command
	CS8920 revision C and CS8900 revision F can use the faster send. */
	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	if (net_debug  &&  version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
	       dev->name,
	       lp->chip_type==CS8900?'0':'2',
	       lp->chip_type==CS8920M?"M":"",
	       lp->chip_revision,
	       dev->base_addr);

	reset_chip(dev);

	/* Here we read the current configuration of the chip. If there
	   is no Extended EEPROM then the idea is to not disturb the chip
	   configuration, it should have been correctly setup by automatic
	   EEPROM read on reset. So, if the chip says it read the EEPROM
	   the driver will always do *something* instead of complain that
	   adapter_cnf is 0. */

	if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
	      (EEPROM_OK|EEPROM_PRESENT)) {
		/* Load the MAC. */
		for (i=0; i < ETH_ALEN/2; i++) {
			unsigned int Addr;
			Addr = readreg(dev, PP_IA+i*2);
			dev->dev_addr[i*2] = Addr & 0xFF;
			dev->dev_addr[i*2+1] = Addr >> 8;
		}

		/* Load the Adapter Configuration.
		   Note:  Barring any more specific information from some
		   other source (ie EEPROM+Schematics), we would not know
		   how to operate a 10Base2 interface on the AUI port.
		   However, since we  do read the status of HCB1 and use
		   settings that always result in calls to control_dc_dc(dev,0)
		   a BNC interface should work if the enable pin
		   (dc/dc converter) is on HCB1. It will be called AUI
		   however. */

		lp->adapter_cnf = 0;
		i = readreg(dev, PP_LineCTL);
		/* Preserve the setting of the HCB1 pin. */
		if ((i & (HCB1 | HCB1_ENBL)) ==  (HCB1 | HCB1_ENBL))
			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
		/* Save the sqelch bit */
		if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
		/* Check if the card is in 10Base-t only mode */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
			lp->adapter_cnf |=  A_CNF_10B_T | A_CNF_MEDIA_10B_T;
		/* Check if the card is in AUI only mode */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_MEDIA_AUI;
		/* Check if the card is in Auto mode. */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_10B_T |
			A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;

		if (net_debug > 1)
			printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
					dev->name, i, lp->adapter_cnf);

		/* IRQ. Other chips already probe, see below. */
		if (lp->chip_type == CS8900)
			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;

		printk( "[Cirrus EEPROM] ");
	}

	printk("\n");

	/* First check to see if an EEPROM is attached. */

	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
		printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
	else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
		printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
	} else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
		/* Check if the chip was able to read its own configuration starting
		   at 0 in the EEPROM*/
		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
		    (EEPROM_OK|EEPROM_PRESENT))
			printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");

	} else {
		/* This reads an extended EEPROM that is not documented
		   in the CS8900 datasheet. */

		/* get transmission control word  but keep the autonegotiation bits */
		if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
		/* Store adapter configuration */
		if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
		/* Store ISA configuration */
		lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
		dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;

		/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
		/* store the initial memory base address */
		for (i = 0; i < ETH_ALEN/2; i++) {
			dev->dev_addr[i*2] = eeprom_buff[i];
			dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
		}
		if (net_debug > 1)
			printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
				dev->name, lp->adapter_cnf);
	}

	/* allow them to force multiple transceivers.  If they force multiple, autosense */
	{
		int count = 0;
		if (lp->force & FORCE_RJ45)	{lp->adapter_cnf |= A_CNF_10B_T; count++; }
		if (lp->force & FORCE_AUI) 	{lp->adapter_cnf |= A_CNF_AUI; count++; }
		if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_10B_2; count++; }
		if (count > 1)			{lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
		else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
		else if (lp->force & FORCE_AUI)	{lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
		else if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
	}

	if (net_debug > 1)
		printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
			dev->name, lp->force, lp->adapter_cnf);

	/* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */

	/* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */

	/* FIXME: we don't set the Ethernet address on the command line.  Use
	   ifconfig IFACE hw ether AABBCCDDEEFF */

	printk(KERN_INFO "cs89x0 media %s%s%s",
	       (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
	       (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
	       (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");

	lp->irq_map = 0xffff;

	/* If this is a CS8900 then no pnp soft */
	if (lp->chip_type != CS8900 &&
	    /* Check if the ISA IRQ has been set  */
		(i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
		 (i != 0 && i < CS8920_NO_INTS))) {
		if (!dev->irq)
			dev->irq = i;
	} else {
		i = lp->isa_config & INT_NO_MASK;
#ifndef CONFIG_CS89x0_PLATFORM
		if (lp->chip_type == CS8900) {
#ifdef CS89x0_NONISA_IRQ
		        i = cs8900_irq_map[0];
#else
			/* Translate the IRQ using the IRQ mapping table. */
			if (i >= ARRAY_SIZE(cs8900_irq_map))
				printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
			else
				i = cs8900_irq_map[i];

			lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
		} else {
			int irq_map_buff[IRQ_MAP_LEN/2];

			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
					    IRQ_MAP_LEN/2,
					    irq_map_buff) >= 0) {
				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
					lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
			}
#endif
		}
#endif
		if (!dev->irq)
			dev->irq = i;
	}

	printk(" IRQ %d", dev->irq);

#if ALLOW_DMA
	if (lp->use_dma) {
		get_dma_channel(dev);
		printk(", DMA %d", dev->dma);
	}
	else
#endif
	{
		printk(", programmed I/O");
	}

	/* print the ethernet address. */
	printk(", MAC %pM", dev->dev_addr);

	dev->netdev_ops	= &net_ops;
	dev->watchdog_timeo = HZ;

	printk("\n");
	if (net_debug)
		printk("cs89x0_probe1() successful\n");

	retval = register_netdev(dev);
	if (retval)
		goto out3;
	return 0;
out3:
	/* deselect back to the chip-ID register before releasing the region */
	writeword(dev->base_addr, ADD_PORT, PP_ChipID);
out2:
	release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
out1:
	return retval;
}

/*********************************
 * This page contains DMA routines
**********************************/

#if ALLOW_DMA

/* true when both pointers fall inside the same 128K ISA DMA page */
#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)

/* Choose dev->dma from the forced lp->dma value or from the ISA config word.
 * CS8900 encodes its DMA channel relative to 5; channels outside 5..7 disable
 * DMA entirely by clearing ANY_ISA_DMA in lp->isa_config. */
static void
get_dma_channel(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->dma) {
		dev->dma = lp->dma;
		lp->isa_config |= ISA_RxDMA;
	} else {
		if ((lp->isa_config & ANY_ISA_DMA) == 0)
			return;
		dev->dma = lp->isa_config & DMA_NO_MASK;
		if (lp->chip_type == CS8900)
			dev->dma += 5;
		if (dev->dma < 5 || dev->dma > 7) {
			lp->isa_config &= ~ANY_ISA_DMA;
			return;
		}
	}
}

/* Program the chip's ISA DMA-channel register (CS8900 is 5-relative). */
static void
write_dma(struct net_device *dev, int chip_type, int dma)
{
	struct net_local *lp = netdev_priv(dev);
	if ((lp->isa_config & ANY_ISA_DMA) == 0)
		return;
	if (chip_type == CS8900) {
		writereg(dev, PP_CS8900_ISADMA, dma-5);
	} else {
		writereg(dev, PP_CS8920_ISADMA, dma);
	}
}

/* Fold the DMA receive mode (RX_DMA_ONLY or AUTO_RX_DMA) into lp->curr_rx_cfg. */
static void
set_dma_cfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma) {
		if ((lp->isa_config & ANY_ISA_DMA) == 0) {
			if (net_debug > 3)
				printk("set_dma_cfg(): no DMA\n");
			return;
		}
		if (lp->isa_config & ISA_RxDMA) {
			lp->curr_rx_cfg |= RX_DMA_ONLY;
			if (net_debug > 3)
				printk("set_dma_cfg(): RX_DMA_ONLY\n");
		} else {
			lp->curr_rx_cfg |= AUTO_RX_DMA;	/* not that we support it... */
			if (net_debug > 3)
				printk("set_dma_cfg(): AUTO_RX_DMA\n");
		}
	}
}

/* Return the PP_BufCFG bit to enable RX DMA, or 0 when DMA is unavailable. */
static int
dma_bufcfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	if (lp->use_dma)
		return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0;
	else
		return 0;
}

/* Compose the PP_BusCTL bits needed for the configured DMA mode. */
static int
dma_busctl(struct net_device *dev)
{
	int retval = 0;
	struct net_local *lp = netdev_priv(dev);
	if (lp->use_dma) {
		if (lp->isa_config & ANY_ISA_DMA)
			retval |= RESET_RX_DMA; /* Reset the DMA pointer */
		if (lp->isa_config & DMA_BURST)
			retval |= DMA_BURST_MODE; /* Does ISA config specify DMA burst ? */
		if (lp->dmasize == 64)
			retval |= RX_DMA_SIZE_64K; /* did they ask for 64K? */
		retval |= MEMORY_ON;	/* we need memory enabled to use DMA. */
	}
	return retval;
}

/* Pull one received frame out of the circular DMA buffer and hand it to the
 * stack.  The frame header in the buffer is [status, length] as two
 * little-endian 16-bit words; frames are padded to 4-byte boundaries and the
 * buffer wraps at lp->end_dma_buff. */
static void
dma_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;
	unsigned char *bp = lp->rx_dma_ptr;

	status = bp[0] + (bp[1]<<8);
	length = bp[2] + (bp[3]<<8);
	bp += 4;
	if (net_debug > 5) {
		printk(	"%s: receiving DMA packet at %lx, status %x, length %x\n",
			dev->name, (unsigned long)bp, status, length);
	}

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		goto skip_this_frame;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		if (net_debug)	/* I don't think we want to do this to a stressed system */
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;

		/* AKPM: advance bp to the next frame */
skip_this_frame:
		bp += (length + 3) & ~3;
		if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
		lp->rx_dma_ptr = bp;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	if (bp + length > lp->end_dma_buff) {
		/* frame wraps: copy the tail of the ring, then the head */
		int semi_cnt = lp->end_dma_buff - bp;
		memcpy(skb_put(skb,semi_cnt), bp, semi_cnt);
		memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff,
		       length - semi_cnt);
	} else {
		memcpy(skb_put(skb,length), bp, length);
	}
	bp += (length + 3) & ~3;
	if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
	lp->rx_dma_ptr = bp;

	if (net_debug > 3) {
		printk(	"%s: received %d byte DMA packet of type %x\n",
			dev->name, length,
			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
	}
        skb->protocol=eth_type_trans(skb,dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}

#endif	/* ALLOW_DMA */

/* Soft-reset the chip via POWER_ON_RESET, then wait (bounded) for INIT_DONE.
 * On CS8920 the PnP IRQ/memory registers must be reprogrammed after reset.
 * NOTE(review): the INIT_DONE wait compares raw jiffies with an int — same
 * wrap caveat as wait_eeprom_ready(). */
static void __init
reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
#if !defined(CS89x0_NONISA_IRQ)
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
#endif /* CS89x0_NONISA_IRQ */
	int reset_start_time;

	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);

	/* wait 30 ms */
	msleep(30);

#if !defined(CS89x0_NONISA_IRQ)
	if (lp->chip_type != CS8900) {
		/* Hardware problem requires PNP registers to be reconfigured after a reset */
		writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
		outb(dev->irq, ioaddr + DATA_PORT);
		outb(0,      ioaddr + DATA_PORT + 1);

		writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
		outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
		outb((dev->mem_start >> 8) & 0xff,   ioaddr + DATA_PORT + 1);
	}
#endif /* CS89x0_NONISA_IRQ */

	/* Wait until the chip is reset */
	reset_start_time = jiffies;
	while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
		;
#endif /* !CONFIG_MACH_MX31ADS */
}

/* Drive the DC/DC converter enable via the HCB1 general-purpose pin,
 * honouring the board's configured polarity, then busy-wait ~500ms for the
 * converter to settle. */
static void
control_dc_dc(struct net_device *dev, int on_not_off)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned int selfcontrol;
	int timenow = jiffies;
	/* control the DC to DC convertor in the SelfControl register.
	   Note: This is hooked up to a general purpose pin, might not
	   always be a DC to DC convertor. */

	selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
	if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
		selfcontrol |= HCB1;
	else
		selfcontrol &= ~HCB1;
	writereg(dev, PP_SelfCTL, selfcontrol);

	/* Wait for the DC/DC converter to power up - 500ms */
	while (jiffies - timenow < HZ)
		;
}

/* media-detection result codes */
#define DETECTED_NONE  0
#define DETECTED_RJ45H 1
#define DETECTED_RJ45F 2
#define DETECTED_AUI   3
#define DETECTED_BNC   4

/* Probe the 10Base-T (RJ-45) port: check link pulses, then resolve duplex
 * (forced on CS8900, auto-negotiated on CS8920).  Returns DETECTED_RJ45H,
 * DETECTED_RJ45F or DETECTED_NONE. */
static int
detect_tp(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int timenow = jiffies;
	int fdx;

	if (net_debug > 1) printk("%s: Attempting TP\n", dev->name);

        /* If connected to another full duplex capable 10-Base-T card the link pulses
           seem to be lost when the auto detect bit in the LineCTL is set.
           To overcome this the auto detect bit will be cleared whilst testing the
           10-Base-T interface.  This would not be necessary for the sparrow chip but
           is simpler to do it anyway. */
	writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY);
	control_dc_dc(dev, 0);

        /* Delay for the hardware to work out if the TP cable is present - 150ms */
	for (timenow = jiffies; jiffies - timenow < 15; )
                ;
	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
		return DETECTED_NONE;

	if (lp->chip_type == CS8900) {
                switch (lp->force & 0xf0) {
#if 0
                case FORCE_AUTO:
			printk("%s: cs8900 doesn't autonegotiate\n",dev->name);
                        return DETECTED_NONE;
#endif
		/* CS8900 doesn't support AUTO, change to HALF*/
                case FORCE_AUTO:
			lp->force &= ~FORCE_AUTO;
                        lp->force |= FORCE_HALF;
			break;
		case FORCE_HALF:
			break;
                case FORCE_FULL:
			writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900);
			break;
                }
		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
	} else {
		switch (lp->force & 0xf0) {
		case FORCE_AUTO:
			lp->auto_neg_cnf = AUTO_NEG_ENABLE;
			break;
		case FORCE_HALF:
			lp->auto_neg_cnf = 0;
			break;
		case FORCE_FULL:
			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
			break;
                }

		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);

		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
			printk(KERN_INFO "%s: negotiating duplex...\n",dev->name);
			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
				if (jiffies - timenow > 4000) {
					printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n");
					break;
				}
			}
		}
		fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
	}
	if (fdx)
		return DETECTED_RJ45F;
	else
		return DETECTED_RJ45H;
}

/* send a test packet - return true if carrier bits are ok */
/* Transmits a minimal 802.2 TEST frame addressed to ourselves and checks the
 * TX event register for a clean completion.  Used by AUI/BNC detection. */
static int
send_test_pkt(struct net_device *dev)
{
	char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
				 0, 46, /* A 46 in network order */
				 0, 0, /* DSAP=0 & SSAP=0 fields */
				 0xf3, 0 /* Control (Test Req + P bit set) */ };
	long timenow = jiffies;

	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);

	memcpy(test_packet,            dev->dev_addr, ETH_ALEN);
	memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);

        writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
        writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);

	/* Test to see if the chip has allocated memory for the packet */
	while (jiffies - timenow < 5)
		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
			break;
	if (jiffies - timenow >= 5)
		return 0;	/* this shouldn't happen */

	/* Write the contents of the packet */
	writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);

	if (net_debug > 1) printk("Sending test packet ");
	/* wait a couple of jiffies for packet to be received */
	for (timenow = jiffies; jiffies - timenow < 3; )
                ;
        if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
                if (net_debug > 1) printk("succeeded\n");
                return 1;
        }
	if (net_debug > 1) printk("failed\n");
	return 0;
}

/* Probe the AUI port by forcing AUI mode and sending a test packet. */
static int
detect_aui(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name);
	control_dc_dc(dev, 0);

	writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_AUI;
	else
		return DETECTED_NONE;
}

/* Probe the BNC (10Base-2) port: like AUI but with the DC/DC converter on. */
static int
detect_bnc(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name);
	control_dc_dc(dev, 1);

	writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_BNC;
	else
		return DETECTED_NONE;
}

/* Program the chip's interrupt-select register.  The CS8900 takes an IRQ pin
 * index looked up (reverse) in cs8900_irq_map; the CS8920 takes the IRQ
 * number directly. */
static void
write_irq(struct net_device *dev, int chip_type, int irq)
{
	int i;

	if (chip_type == CS8900) {
#ifndef CONFIG_CS89x0_PLATFORM
		/* Search the mapping table for the corresponding IRQ pin. */
		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
			if (cs8900_irq_map[i] == irq)
				break;
		/* Not found */
		if (i == ARRAY_SIZE(cs8900_irq_map))
			i = 3;
#else
		/* INTRQ0 pin is used for interrupt generation. */
		i = 0;
#endif
		writereg(dev, PP_CS8900_ISAINT, i);
	} else {
		writereg(dev, PP_CS8920_ISAINT, irq);
	}
}

/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.
   This routine should set everything up anew at each open, even
   registers that "should" only need to be set once at boot, so that
   there is non-reboot way to recover if something goes wrong.
   */

/* AKPM: do we need to do any locking here? */

/* ndo_open: acquire IRQ (auto-probing one from lp->irq_map if unset), set up
 * optional ISA DMA, program the MAC address, detect/select the media, then
 * enable RX/TX and interrupts.  Returns 0 on success or a negative errno. */
static int
net_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int result = 0;
	int i;
	int ret;

	if (dev->irq < 2) {
		/* Allow interrupts to be generated by the chip */
/* Cirrus' release had this: */
#if 0
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
#endif
/* And 2.3.47 had this: */
		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);

		/* try each candidate IRQ in the map until request_irq succeeds */
		for (i = 2; i < CS8920_NO_INTS; i++) {
			if ((1 << i) & lp->irq_map) {
				if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
					dev->irq = i;
					write_irq(dev, lp->chip_type, i);
					/* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
					break;
				}
			}
		}

		if (i >= CS8920_NO_INTS) {
			writereg(dev, PP_BusCTL, 0);	/* disable interrupts. */
			printk(KERN_ERR "cs89x0: can't get an interrupt\n");
			ret = -EAGAIN;
			goto bad_out;
		}
	}
	else
	{
#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
		if (((1 << dev->irq) & lp->irq_map) == 0) {
			printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
			       dev->name, dev->irq, lp->irq_map);
			ret = -EAGAIN;
			goto bad_out;
		}
#endif
/* FIXME: Cirrus' release had this: */
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
/* And 2.3.47 had this: */
#if 0
		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
#endif
		write_irq(dev, lp->chip_type, dev->irq);
		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
		if (ret) {
			printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
			goto bad_out;
		}
	}

#if ALLOW_DMA
	if (lp->use_dma) {
		if (lp->isa_config & ANY_ISA_DMA) {
			unsigned long flags;
			lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
							get_order(lp->dmasize * 1024));

			if (!lp->dma_buff) {
				printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
				goto release_irq;
			}
			if (net_debug > 1) {
				printk(	"%s: dma %lx %lx\n",
					dev->name,
					(unsigned long)lp->dma_buff,
					(unsigned long)isa_virt_to_bus(lp->dma_buff));
			}
			/* the buffer must be DMA-addressable and not cross a
			   128K ISA DMA page boundary */
			if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS ||
			    !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) {
				printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
				goto release_irq;
			}
			memset(lp->dma_buff, 0, lp->dmasize * 1024);	/* Why? */
			if (request_dma(dev->dma, dev->name)) {
				printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
				goto release_irq;
			}
			write_dma(dev, lp->chip_type, dev->dma);
			lp->rx_dma_ptr = lp->dma_buff;
			lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
			spin_lock_irqsave(&lp->lock, flags);
			disable_dma(dev->dma);
			clear_dma_ff(dev->dma);
			set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
			set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
			set_dma_count(dev->dma, lp->dmasize*1024);
			enable_dma(dev->dma);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}
#endif	/* ALLOW_DMA */

	/* set the Ethernet address */
	for (i=0; i < ETH_ALEN/2; i++)
		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));

	/* while we're testing the interface, leave interrupts disabled */
	writereg(dev, PP_BusCTL, MEMORY_ON);

	/* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
                lp->linectl = LOW_RX_SQUELCH;
	else
                lp->linectl = 0;

        /* check to make sure that they have the "right" hardware available */
	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break;
	case A_CNF_MEDIA_AUI:   result = lp->adapter_cnf & A_CNF_AUI; break;
	case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
        default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
        }
        if (!result) {
                printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
release_dma:
#if ALLOW_DMA
		free_dma(dev->dma);
release_irq:
		release_dma_buff(lp);
#endif
                writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
                free_irq(dev->irq, dev);
		ret = -EAGAIN;
		goto bad_out;
	}

        /* set the hardware to the configured choice */
	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T:
                result = detect_tp(dev);
                if (result==DETECTED_NONE) {
                        printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name);
                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
                                result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
                }
		break;
	case A_CNF_MEDIA_AUI:
                result = detect_aui(dev);
                if (result==DETECTED_NONE) {
                        printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name);
                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
                                result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
                }
		break;
	case A_CNF_MEDIA_10B_2:
                result = detect_bnc(dev);
                if (result==DETECTED_NONE) {
                        printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name);
                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
                                result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
                }
		break;
	case A_CNF_MEDIA_AUTO:
		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
		if (lp->adapter_cnf & A_CNF_10B_T)
			if ((result = detect_tp(dev)) != DETECTED_NONE)
				break;
		if (lp->adapter_cnf & A_CNF_AUI)
			if ((result = detect_aui(dev)) != DETECTED_NONE)
				break;
		if (lp->adapter_cnf & A_CNF_10B_2)
			if ((result = detect_bnc(dev)) != DETECTED_NONE)
				break;
		printk(KERN_ERR "%s: no media detected\n", dev->name);
		goto release_dma;
	}
	switch(result) {
	case DETECTED_NONE:
		printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
		goto release_dma;
	case DETECTED_RJ45H:
		printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_RJ45F:
		printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_AUI:
		printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name);
		break;
	case DETECTED_BNC:
		printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name);
		break;
	}

	/* Turn on both receive and transmit operations */
	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);

	/* Receive only error free packets addressed to this card */
	lp->rx_mode = 0;
	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);

	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;

	if (lp->isa_config & STREAM_TRANSFER)
		lp->curr_rx_cfg |= RX_STREAM_ENBL;
#if ALLOW_DMA
	set_dma_cfg(dev);
#endif
	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);

	writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
		TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);

	writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
#if ALLOW_DMA
		dma_bufcfg(dev) |
#endif
		TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);

	/* now that we've got our act together, enable everything */
	writereg(dev, PP_BusCTL, ENABLE_IRQ
		 | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */
#if ALLOW_DMA
		 | dma_busctl(dev)
#endif
                 );
        netif_start_queue(dev);
	if (net_debug > 1)
		printk("cs89x0: net_open() succeeded\n");
	return 0;
bad_out:
	return ret;
}

/* ndo_tx_timeout: the stack thinks we're wedged; just log and wake the queue.
 * (tx_done is a stub that always returns 1 — see its definition.) */
static void net_timeout(struct net_device *dev)
{
	/* If we get here, some higher level has decided we are broken.
	   There should really be a "kick me" function call instead. */
	if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
		   tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}

/* ndo_start_xmit: hand one skb to the chip.  The queue is stopped until the
 * TX-complete interrupt re-wakes it; TX_CMD/TX_LEN and the frame upload must
 * happen under lp->lock because the chip can start sending mid-upload. */
static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	if (net_debug > 3) {
		printk("%s: sent %d byte packet of type %x\n",
			dev->name, skb->len,
			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
	}

	/* keep the upload from being interrupted, since we
                  ask the chip to start transmitting before the
                  whole packet has been completely uploaded. */

	spin_lock_irqsave(&lp->lock, flags);
	netif_stop_queue(dev);

	/* initiate a transmit sequence */
	writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
	writeword(dev->base_addr, TX_LEN_PORT, skb->len);

	/* Test to see if the chip has allocated memory for the packet */
	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
		/*
		 * Gasp!  It hasn't.  But that shouldn't happen since
		 * we're waiting for TxOk, so return 1 and requeue this packet.
		 */

		spin_unlock_irqrestore(&lp->lock, flags);
		if (net_debug) printk("cs89x0: Tx buffer not free!\n");
		return NETDEV_TX_BUSY;
	}
	/* Write the contents of the packet */
	writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
	spin_unlock_irqrestore(&lp->lock, flags);
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb (skb);

	/*
	 * We DO NOT call netif_wake_queue() here.
	 * We also DO NOT call netif_start_queue().
	 *
	 * Either of these would cause another bottom half run through
	 * net_send_packet() before this packet has fully gone out.  That causes
	 * us to hit the "Gasp!" above and the send is rescheduled.  it runs like
	 * a dog.  We just return and wait for the Tx completion interrupt handler
	 * to restart the netdevice layer
	 */

	return NETDEV_TX_OK;
}

/* The typical workload of the driver:
   Handle the network interface interrupts. */

static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int ioaddr, status;
 	int handled = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* we MUST read all the events out of the ISQ, otherwise we'll never
           get interrupted again.  As a consequence, we can't have any limit
           on the number of times we loop in the interrupt handler.  The
           hardware guarantees that eventually we'll run out of events.  Of
           course, if you're on a slow machine, and packets are arriving
           faster than you can read them off, you're screwed.  Hasta la
           vista, baby!  */
	while ((status = readword(dev->base_addr, ISQ_PORT))) {
		if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
		handled = 1;
		switch(status & ISQ_EVENT_MASK) {
		case ISQ_RECEIVER_EVENT:
			/* Got a packet(s). */
			net_rx(dev);
			break;
		case ISQ_TRANSMITTER_EVENT:
			dev->stats.tx_packets++;
			netif_wake_queue(dev);	/* Inform upper layers. */
			if ((status & (	TX_OK |
					TX_LOST_CRS |
					TX_SQE_ERROR |
					TX_LATE_COL |
					TX_16_COL)) != TX_OK) {
				if ((status & TX_OK) == 0)
					dev->stats.tx_errors++;
				if (status & TX_LOST_CRS)
					dev->stats.tx_carrier_errors++;
				if (status & TX_SQE_ERROR)
					dev->stats.tx_heartbeat_errors++;
				if (status & TX_LATE_COL)
					dev->stats.tx_window_errors++;
				if (status & TX_16_COL)
					dev->stats.tx_aborted_errors++;
			}
			break;
		case ISQ_BUFFER_EVENT:
			if (status & READY_FOR_TX) {
				/* we tried to transmit a packet earlier,
                                   but inexplicably ran out of buffers.
                                   That shouldn't happen since we only ever
                                   load one packet.  Shrug.  Do the right
                                   thing anyway. */
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
			if (status & TX_UNDERRUN) {
				if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
                                lp->send_underrun++;
                                if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
                                else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
				/* transmit cycle is done, although
				   frame wasn't transmitted - this
				   avoids having to wait for the upper
				   layers to timeout on us, in the
				   event of a tx underrun */
				netif_wake_queue(dev);	/* Inform upper layers. */
                        }
#if ALLOW_DMA
			if (lp->use_dma && (status & RX_DMA)) {
				int count = readreg(dev, PP_DmaFrameCnt);
				while(count) {
					if (net_debug > 5)
						printk("%s: receiving %d DMA frames\n", dev->name, count);
					if (net_debug > 2 && count >1)
						printk("%s: receiving %d DMA frames\n", dev->name, count);
					dma_rx(dev);
					if (--count == 0)
						count = readreg(dev, PP_DmaFrameCnt);
					if (net_debug > 2 && count > 0)
						printk("%s: continuing with %d DMA frames\n", dev->name, count);
				}
			}
#endif
			break;
		case ISQ_RX_MISS_EVENT:
			dev->stats.rx_missed_errors += (status >> 6);
			break;
		case ISQ_TX_COL_EVENT:
			dev->stats.collisions += (status >> 6);
			break;
		}
	}
	return IRQ_RETVAL(handled);
}

/* Translate an RX status word into the appropriate dev->stats error counters. */
static void
count_rx_errors(int status, struct net_device *dev)
{
	dev->stats.rx_errors++;
	if (status & RX_RUNT)
		dev->stats.rx_length_errors++;
	if (status & RX_EXTRA_DATA)
		dev->stats.rx_length_errors++;
	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
		/* per str 172 */
		dev->stats.rx_crc_errors++;
	if (status & RX_DRIBBLE)
		dev->stats.rx_frame_errors++;
}

/* We have a good packet(s), get it/them out of the buffers. */
/* Programmed-I/O receive: read the status/length words then the frame body
 * from RX_FRAME_PORT into a fresh skb and pass it up the stack. */
static void
net_rx(struct net_device *dev)
{
	struct sk_buff *skb;
	int status, length;

	int ioaddr = dev->base_addr;
	status = readword(ioaddr, RX_FRAME_PORT);
	length = readword(ioaddr, RX_FRAME_PORT);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		return;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
#if 0		/* Again, this seems a cruel thing to do */
		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
#endif
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
	if (length & 1)
		skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);

	if (net_debug > 3) {
		printk(	"%s: received %d byte packet of type %x\n",
			dev->name, length,
			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
	}

        skb->protocol=eth_type_trans(skb,dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}

#if ALLOW_DMA
/* Free the ISA DMA ring buffer allocated in net_open(), if any. */
static void release_dma_buff(struct net_local *lp)
{
	if (lp->dma_buff) {
		free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
		lp->dma_buff = NULL;
	}
}
#endif

/* The inverse routine to net_open(). */
/* ndo_stop: quiesce the chip (zero all config registers) and release the
 * IRQ and, when in use, the DMA channel and buffer. */
static int
net_close(struct net_device *dev)
{
#if ALLOW_DMA
	struct net_local *lp = netdev_priv(dev);
#endif

	netif_stop_queue(dev);

	writereg(dev, PP_RxCFG, 0);
	writereg(dev, PP_TxCFG, 0);
	writereg(dev, PP_BufCFG, 0);
	writereg(dev, PP_BusCTL, 0);

	free_irq(dev->irq, dev);

#if ALLOW_DMA
	if (lp->use_dma && lp->dma) {
		free_dma(dev->dma);
		release_dma_buff(lp);
	}
#endif

	/* Update the statistics here. */
	return 0;
}

/* Get the current statistics.	This may be called with the card open or
   closed. */
static struct net_device_stats *
net_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Update the statistics from the device registers.
	   These counter registers clear on read. */
	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
	spin_unlock_irqrestore(&lp->lock, flags);

	return &dev->stats;
}

/* ndo_set_rx_mode: map IFF_PROMISC / IFF_ALLMULTI / multicast membership to
 * the chip's RX-accept and RX-config registers.  In promiscuous mode errored
 * packets are accepted too, so their interrupts are enabled as well. */
static void set_multicast_list(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	if(dev->flags&IFF_PROMISC)
	{
		lp->rx_mode = RX_ALL_ACCEPT;
	}
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
	{
		/* The multicast-accept list is initialized to accept-all, and we
		   rely on higher-level filtering for now. */
		lp->rx_mode = RX_MULTCAST_ACCEPT;
	}
	else
		lp->rx_mode = 0;

	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);

	/* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
	writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
	     (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
	spin_unlock_irqrestore(&lp->lock, flags);
}

/* ndo_set_mac_address: refuse while the interface is up, otherwise copy the
 * new address into dev->dev_addr and program it into the chip's IA registers. */
static int set_mac_address(struct net_device *dev, void *p)
{
	int i;
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (net_debug)
		printk("%s: Setting MAC address to %pM.\n",
		       dev->name, dev->dev_addr);

	/* set the Ethernet address */
	for (i=0; i < ETH_ALEN/2; i++)
		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));

	return 0;
}

#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)

static struct net_device *dev_cs89x0;

/*
 * Support the 'debug' module parm even if we're compiled for non-debug to
 * avoid breaking someone's startup scripts
 */

static int io;
static int irq;
static int debug;
static char media[8];
static int duplex=-1;

static int use_dma;			/* These generate unused var warnings if ALLOW_DMA = 0 */
static int dma;
static int dmasize=16;			/* or 64 */

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(debug, int, 0);
module_param_string(media, media, sizeof(media), 0);
module_param(duplex, int, 0);
/* ISA DMA parameters; descriptions below degrade to "(ignored)" when the
 * driver is built with ALLOW_DMA = 0. */
module_param(dma , int, 0);
module_param(dmasize , int, 0);
module_param(use_dma , int, 0);
MODULE_PARM_DESC(io, "cs89x0 I/O base address");
MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
#if DEBUGGING
MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
#else
MODULE_PARM_DESC(debug, "(ignored)");
#endif
MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
/* No other value than -1 for duplex seems to be currently interpreted */
MODULE_PARM_DESC(duplex, "(ignored)");
#if ALLOW_DMA
MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0");
MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)");
#else
MODULE_PARM_DESC(dma , "(ignored)");
MODULE_PARM_DESC(dmasize , "(ignored)");
MODULE_PARM_DESC(use_dma , "(ignored)");
#endif

MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
MODULE_LICENSE("GPL");

/*
 * media=t - specify media type or media=2 or media=aui or medai=auto
 * duplex=0 - specify forced half/full/autonegotiate duplex
 * debug=# - debug level
 * Default Chip Configuration:
 * DMA Burst = enabled
 * IOCHRDY Enabled = enabled
 * UseSA = enabled
 * CS8900 defaults to half-duplex if not specified on command-line
 * CS8920 defaults to autoneg if not specified on command-line
 * Use reset defaults for other config parameters
 * Assumptions:
 * media type specified is supported (circuitry is present)
 * if memory address is > 1MB, then required mem decode hw is present
 * if 10B-2, then agent other than driver will enable DC/DC converter
 * (hw or software util)
 */

/*
 * Module entry point for the legacy ISA build: allocate the net_device,
 * copy the io/irq/dma module parameters into it, translate the "media"
 * string into adapter configuration bits, validate the parameters and
 * run the common probe at the given I/O base.
 *
 * Autoprobing is deliberately refused (io must be supplied) and io
 * values at or below 0x1ff are rejected as not a valid ISA I/O base.
 * Returns 0 on success or a negative errno; the net_device is freed on
 * every failure path via the single "out" label.
 */
int __init init_module(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	struct net_local *lp;
	int ret = 0;
#if DEBUGGING
	net_debug = debug;
#else
	debug = 0;
#endif
	if (!dev)
		return -ENOMEM;
	dev->irq = irq;
	dev->base_addr = io;
	lp = netdev_priv(dev);
#if ALLOW_DMA
	if (use_dma) {
		lp->use_dma = use_dma;
		lp->dma = dma;
		lp->dmasize = dmasize;
	}
#endif
	spin_lock_init(&lp->lock);

	/* boy, they'd better get these right */
	if (!strcmp(media, "rj45"))
		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
	else if (!strcmp(media, "aui"))
		lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
	else if (!strcmp(media, "bnc"))
		lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
	else
		/* default: RJ-45 twisted pair */
		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;

	if (duplex==-1)
		lp->auto_neg_cnf = AUTO_NEG_ENABLE;

	if (io == 0) {
		printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n");
		printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n");
		ret = -EPERM;
		goto out;
	} else if (io <= 0x1ff) {
		ret = -ENXIO;
		goto out;
	}
#if ALLOW_DMA
	if (use_dma && dmasize != 16 && dmasize != 64) {
		printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize);
		ret = -EPERM;
		goto out;
	}
#endif
	ret = cs89x0_probe1(dev, io, 1);
	if (ret)
		goto out;

	dev_cs89x0 = dev;
	return 0;
out:
	free_netdev(dev);
	return ret;
}

/*
 * Module exit: unregister the interface, reselect the chip ID register
 * via ADD_PORT (leaving the chip in a known state), then release the
 * I/O region and free the net_device.
 */
void __exit cleanup_module(void)
{
	unregister_netdev(dev_cs89x0);
	writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
	release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
	free_netdev(dev_cs89x0);
}
#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */

#ifdef CONFIG_CS89x0_PLATFORM
/*
 * Platform-bus probe: take the memory resource and IRQ from the platform
 * device, reserve and ioremap the register window, then run the common
 * probe on the mapped address.  Resources are unwound in reverse order
 * through the unmap/release/free labels on failure.
 */
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	struct net_local *lp;
	struct resource *mem_res;
	int err;

	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->irq = platform_get_irq(pdev, 0);
	if (mem_res == NULL || dev->irq <= 0) {
		dev_warn(&dev->dev, "memory/interrupt resource missing.\n");
		err = -ENXIO;
		goto free;
	}

	lp->phys_addr = mem_res->start;
	lp->size = resource_size(mem_res);
	if (!request_mem_region(lp->phys_addr, lp->size, DRV_NAME)) {
		dev_warn(&dev->dev, "request_mem_region() failed.\n");
		err = -EBUSY;
		goto free;
	}

	lp->virt_addr = ioremap(lp->phys_addr, lp->size);
	if (!lp->virt_addr) {
		dev_warn(&dev->dev, "ioremap() failed.\n");
		err = -ENOMEM;
		goto release;
	}

	err = cs89x0_probe1(dev, (unsigned long)lp->virt_addr, 0);
	if (err) {
		dev_warn(&dev->dev, "no cs8900 or cs8920 detected.\n");
		goto unmap;
	}

	platform_set_drvdata(pdev, dev);
	return 0;

unmap:
	iounmap(lp->virt_addr);
release:
	release_mem_region(lp->phys_addr, lp->size);
free:
	free_netdev(dev);
	return err;
}

/* Platform-bus remove: tear down in the reverse order of probe. */
static int cs89x0_platform_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct net_local *lp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(lp->virt_addr);
	release_mem_region(lp->phys_addr, lp->size);
	free_netdev(dev);
	return 0;
}

/* .probe is intentionally absent: platform_driver_probe() below passes
 * cs89x0_platform_probe directly so the __init probe can be discarded. */
static struct platform_driver cs89x0_driver = {
	.driver	= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
	.remove	= cs89x0_platform_remove,
};

static int __init cs89x0_init(void)
{
	return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
}
module_init(cs89x0_init);

static void __exit cs89x0_cleanup(void)
{
	platform_driver_unregister(&cs89x0_driver);
}
module_exit(cs89x0_cleanup);
#endif /* CONFIG_CS89x0_PLATFORM */

/*
 * Local variables:
 * version-control: t
 * kept-new-versions: 5
 * c-indent-level: 8
 * tab-width: 8
 * End:
 *
 */
gpl-2.0
lssjbrolli/android_kernel_samsung_klimtlte
drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
5052
3791
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
*
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"

/*
 * rtl92cu_dm_dynamic_txpower - pick a TX high-power level from link RSSI
 * @hw: mac80211 hardware context
 *
 * Selects the smoothed PWDB (signal strength) source appropriate to the
 * interface mode, maps it onto a dynamic TX power level using the
 * near-field thresholds (the gaps between thresholds provide hysteresis),
 * and rewrites the channel TX power only when the level actually changed
 * since the previous invocation.
 */
void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undecorated_smoothed_pwdb;

	/* Feature switched off entirely: leave hardware untouched. */
	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	/* Temporarily masked by the DM flag: force the normal level. */
	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Not associated and no per-entry PWDB collected yet: reset both
	 * the current and the remembered level, nothing to compute. */
	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected to any\n");
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Choose the PWDB source: ad-hoc uses the weakest peer entry,
	 * station mode uses the default-port value, and the fallback
	 * (linked state below MAC80211_LINKED) uses the entry minimum. */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		} else {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		}
	} else {
		undecorated_smoothed_pwdb =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 undecorated_smoothed_pwdb);
	}

	/* Map PWDB onto a level.  NOTE(review): the middle branch logs
	 * "TxPwr=0x10" yet assigns the same TXHIGHPWRLEVEL_LEVEL1 as the
	 * strongest-signal branch — confirm against vendor code whether a
	 * distinct level was intended here. */
	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undecorated_smoothed_pwdb <
		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undecorated_smoothed_pwdb >=
		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undecorated_smoothed_pwdb <
		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	/* Only touch the hardware when the level actually changed. */
	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
	}
	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
gpl-2.0
omnirom/android_kernel_oppo_find5
arch/x86/kernel/x8664_ksyms_64.c
7868
1436
/* Exports for assembly files. All C exports should go in the respective C files. */ #include <linux/module.h> #include <linux/smp.h> #include <net/checksum.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER /* mcount is defined in assembly */ EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); EXPORT_SYMBOL(__get_user_8); EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic_string); EXPORT_SYMBOL(copy_user_generic_unrolled); EXPORT_SYMBOL(__copy_user_nocache); EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_to_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(csum_partial); /* * Export string functions. We normally rely on gcc builtin for most of these, * but gcc sometimes decides not to inline them. */ #undef memcpy #undef memset #undef memmove extern void *memset(void *, int, __kernel_size_t); extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(empty_zero_page); #ifndef CONFIG_PARAVIRT EXPORT_SYMBOL(native_load_gs_index); #endif
gpl-2.0
art1p/android_kernel_lge_omap4-common
drivers/staging/rtl8192u/r8190_rtl8256.c
9404
10482
/*
 This is part of the rtl8192 driver
 released under the GPL (See file COPYING for details).

 This files contains programming code for the rtl8256
 radio frontend.

 *Many* thanks to Realtek Corp. for their great support!
*/

#include "r8192U.h"
#include "r8192U_hw.h"
#include "r819xU_phyreg.h"
#include "r819xU_phy.h"
#include "r8190_rtl8256.h"

/*--------------------------------------------------------------------------
 * Overview:    set RF band width (20M or 40M)
 * Input:       struct net_device*	dev
 *		WIRELESS_BANDWIDTH_E	Bandwidth	//20M or 40M
 * Output:      NONE
 * Return:      NONE
 * Note:	8226 support both 20M  and 40 MHz
 *---------------------------------------------------------------------------*/
void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth)	//20M or 40M
{
	u8	eRFPath;
	struct r8192_priv *priv = ieee80211_priv(dev);

	/* Walk every RF path; illegal paths are skipped. */
	//for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
	for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++)
	{
		if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
			continue;

		switch(Bandwidth)
		{
			case HT_CHANNEL_WIDTH_20:
				/* register values below come from vendor tables;
				 * the exact write order matters to the RF chip */
				if(priv->card_8192_version == VERSION_819xU_A || priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later!
				{
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x100); //phy para:1ba
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3d7);
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x021);

					//cosa add for sd3's request 01/23/2008
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab);
				}
				else
				{
					RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
				}
				break;
			case HT_CHANNEL_WIDTH_20_40:
				if(priv->card_8192_version == VERSION_819xU_A ||priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later!
				{
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x300); //phy para:3ba
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3df);
					rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x0a1);

					//cosa add for sd3's request 01/23/2008
					if(priv->chan == 3 || priv->chan == 9) //I need to set priv->chan whenever current channel changes
						rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x59b);
					else
						rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab);
				}
				else
				{
					RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
				}
				break;
			default:
				RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth );
				break;
		}
	}
	return;
}

/*--------------------------------------------------------------------------
 * Overview:    Interface to config 8256
 * Input:       struct net_device*	dev
 * Output:      NONE
 * Return:      NONE
 *---------------------------------------------------------------------------*/
void PHY_RF8256_Config(struct net_device* dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	// Initialize general global value
	//
	// TODO: Extend RF_PATH_C and RF_PATH_D in the future
	priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
	// Config BB and RF
	phy_RF8256_Config_ParaFile(dev);
	return;
}

/*--------------------------------------------------------------------------
 * Overview:    Interface to config 8256
 * Input:       struct net_device*	dev
 * Output:      NONE
 * Return:      NONE
 *
 * For each legal RF path: save the RFENV control, switch the 3-wire
 * interface into the mode the 8256 expects, load the per-path register
 * table (retrying up to ConstRetryTimes until a probe register reads the
 * expected value), then restore RFENV.  Any failure jumps to the common
 * failure label.
 *---------------------------------------------------------------------------*/
void phy_RF8256_Config_ParaFile(struct net_device* dev)
{
	u32	u4RegValue = 0;
	//static s1Byte				szRadioAFile[] = RTL819X_PHY_RADIO_A;
	//static s1Byte				szRadioBFile[] = RTL819X_PHY_RADIO_B;
	//static s1Byte				szRadioCFile[] = RTL819X_PHY_RADIO_C;
	//static s1Byte				szRadioDFile[] = RTL819X_PHY_RADIO_D;
	u8	eRFPath;
	BB_REGISTER_DEFINITION_T	*pPhyReg;
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32	RegOffSetToBeCheck = 0x3;	/* probe register used to verify the load */
	u32	RegValueToBeCheck = 0x7f1;	/* expected value of that register */
	u32	RF3_Final_Value = 0;
	u8	ConstRetryTimes = 5, RetryTimes = 5;
	u8	ret = 0;

	//3//-----------------------------------------------------------------
	//3// <2> Initialize RF
	//3//-----------------------------------------------------------------
	for(eRFPath = (RF90_RADIO_PATH_E)RF90_PATH_A; eRFPath <priv->NumTotalRFPath; eRFPath++)
	{
		if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
			continue;

		pPhyReg = &priv->PHYRegDef[eRFPath];

		// Joseph test for shorten RF config
		//	pHalData->RfReg0Value[eRFPath] =  rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, rGlobalCtrl, bMaskDWord);

		/*----Store original RFENV control type----*/
		/* A/C use the low half of rfintfs, B/D the high half. */
		switch(eRFPath)
		{
		case RF90_PATH_A:
		case RF90_PATH_C:
			u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV);
			break;
		case RF90_PATH_B :
		case RF90_PATH_D:
			u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16);
			break;
		}

		/*----Set RF_ENV enable----*/
		rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);

		/*----Set RF_ENV output high----*/
		rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);

		/* Set bit number of Address and Data for RF register */
		rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0);	// Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258
		rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0);	// Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ???

		rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf);

		/*----Check RF block (for FPGA platform only)----*/
		// TODO: this function should be removed on ASIC , Emily 2007.2.2
		if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (RF90_RADIO_PATH_E)eRFPath))
		{
			RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check Radio[%d] Fail!!\n", eRFPath);
			goto phy_RF8256_Config_ParaFile_Fail;
		}

		RetryTimes = ConstRetryTimes;
		RF3_Final_Value = 0;
		/*----Initialize RF fom connfiguration file----*/
		/* Load the header-file register table, then read back the probe
		 * register; retry until it matches or retries are exhausted. */
		switch(eRFPath)
		{
		case RF90_PATH_A:
			while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
			{
				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath);
				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
				RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
				RetryTimes--;
			}
			break;
		case RF90_PATH_B:
			while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
			{
				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath);
				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
				RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
				RetryTimes--;
			}
			break;
		case RF90_PATH_C:
			while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
			{
				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath);
				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
				RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
				RetryTimes--;
			}
			break;
		case RF90_PATH_D:
			while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
			{
				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath);
				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
				RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
				RetryTimes--;
			}
			break;
		}

		/*----Restore RFENV control type----*/;
		switch(eRFPath)
		{
		case RF90_PATH_A:
		case RF90_PATH_C:
			rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
			break;
		case RF90_PATH_B :
		case RF90_PATH_D:
			rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
			break;
		}

		/* NOTE(review): ret holds only the LAST retry's return value for
		 * this path; earlier failed attempts that later succeeded do not
		 * reach this check — confirm that is the intended semantics. */
		if(ret){
			RT_TRACE(COMP_ERR, "phy_RF8256_Config_ParaFile():Radio[%d] Fail!!", eRFPath);
			goto phy_RF8256_Config_ParaFile_Fail;
		}
	}

	RT_TRACE(COMP_PHY, "PHY Initialization Success\n") ;
	return ;

phy_RF8256_Config_ParaFile_Fail:
	RT_TRACE(COMP_ERR, "PHY Initialization failed\n") ;
	return ;
}

/* Program the CCK TX AGC from a base power level, applying the Netcore
 * customer override or the CCK power-enhancement offset during the
 * dynamic-low-power (scan) state; the index is clamped to the 0x24 max. */
void PHY_SetRF8256CCKTxPower(struct net_device*	dev, u8	powerlevel)
{
	u32	TxAGC=0;
	struct r8192_priv *priv = ieee80211_priv(dev);
	//modified by vivi, 20080109
	TxAGC = powerlevel;

	if(priv->bDynamicTxLowPower == TRUE ) //cosa 05/22/2008 for scan
	{
		if(priv->CustomerID == RT_CID_819x_Netcore)
			TxAGC = 0x22;
		else
			TxAGC += priv->CckPwEnl;
	}

	if(TxAGC > 0x24)
		TxAGC = 0x24;
	rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
}

/* Program the six OFDM/MCS TX AGC registers (0xe00..0xe1c): build a
 * per-register value from the calibration offsets plus the legacy-OFDM
 * base (first two registers) or the MCS base (remaining four), clamp each
 * byte to the 0x24 max, remember the index-3 word for TX power tracking,
 * and substitute a fixed low-power word while dynamic high power is on. */
void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	//Joseph TxPower for 8192 testing
	u32	writeVal, powerBase0, powerBase1, writeVal_tmp;
	u8	index = 0;
	u16	RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
	u8	byte0, byte1, byte2, byte3;

	powerBase0 = powerlevel + priv->TxPowerDiff;	//OFDM rates
	powerBase0 = (powerBase0<<24) | (powerBase0<<16) |(powerBase0<<8) |powerBase0;
	powerBase1 = powerlevel;			//MCS rates
	powerBase1 = (powerBase1<<24) | (powerBase1<<16) |(powerBase1<<8) |powerBase1;

	for(index=0; index<6; index++)
	{
		writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index<2)?powerBase0:powerBase1);
		byte0 = (u8)(writeVal & 0x7f);
		byte1 = (u8)((writeVal & 0x7f00)>>8);
		byte2 = (u8)((writeVal & 0x7f0000)>>16);
		byte3 = (u8)((writeVal & 0x7f000000)>>24);

		if(byte0 > 0x24)	// Max power index = 0x24
			byte0 = 0x24;
		if(byte1 > 0x24)
			byte1 = 0x24;
		if(byte2 > 0x24)
			byte2 = 0x24;
		if(byte3 > 0x24)
			byte3 = 0x24;

		//for tx power track
		if(index == 3)
		{
			writeVal_tmp = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0;
			priv->Pwr_Track = writeVal_tmp;
		}

		if(priv->bDynamicTxHighPower == TRUE)	//Add by Jacken 2008/03/06
		{
			// Emily, 20080613. Set low tx power for both MCS and legacy OFDM
			writeVal = 0x03030303;
		}
		else
		{
			writeVal = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0;
		}
		rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
	}
	return;
}
gpl-2.0
StarKissed/starkissed-kernel-trlte
net/ipv4/route.c
189
65981
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * ROUTE - implementation of the IP router. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Linus Torvalds, <Linus.Torvalds@helsinki.fi> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Fixes: * Alan Cox : Verify area fixes. * Alan Cox : cli() protects routing changes * Rui Oliveira : ICMP routing table updates * (rco@di.uminho.pt) Routing table insertion and update * Linus Torvalds : Rewrote bits to be sensible * Alan Cox : Added BSD route gw semantics * Alan Cox : Super /proc >4K * Alan Cox : MTU in route table * Alan Cox : MSS actually. Also added the window * clamper. * Sam Lantinga : Fixed route matching in rt_del() * Alan Cox : Routing cache support. * Alan Cox : Removed compatibility cruft. * Alan Cox : RTF_REJECT support. * Alan Cox : TCP irtt support. * Jonathan Naylor : Added Metric support. * Miquel van Smoorenburg : BSD API fixes. * Miquel van Smoorenburg : Metrics. * Alan Cox : Use __u32 properly * Alan Cox : Aligned routing errors more closely with BSD * our system is still very different. * Alan Cox : Faster /proc handling * Alexey Kuznetsov : Massive rework to support tree based routing, * routing caches and better behaviour. * * Olaf Erb : irtt wasn't being copied right. * Bjorn Ekwall : Kerneld route support. * Alan Cox : Multicast fixed (I hope) * Pavel Krauz : Limited broadcast fixed * Mike McLagan : Routing by source * Alexey Kuznetsov : End of old history. Split to fib.c and * route.c and rewritten from scratch. * Andi Kleen : Load-limit warning messages. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Vitaly E. Lavrov : Race condition in ip_route_input_slow. * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow. * Vladimir V. 
Ivanov : IP rule info (flowid) is really useful. * Marc Boucher : routing by fwmark * Robert Olsson : Added rt_cache statistics * Arnaldo C. Melo : Convert proc stuff to seq_file * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes. * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect * Ilia Sotnikov : Removed TOS from hash calculations * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/module.h> #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/pkt_sched.h> #include <linux/mroute.h> #include <linux/netfilter_ipv4.h> #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/times.h> #include <linux/slab.h> #include <net/dst.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> #include <net/route.h> #include <net/inetpeer.h> #include <net/sock.h> #include <net/ip_fib.h> #include <net/arp.h> #include <net/tcp.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/netevent.h> #include <net/rtnetlink.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #include <linux/kmemleak.h> #endif #include <net/secure_seq.h> #define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) #define IP_MAX_MTU 0xFFF0 #define RT_GC_TIMEOUT (300*HZ) static int ip_rt_max_size; static int ip_rt_redirect_number __read_mostly = 9; static int ip_rt_redirect_load __read_mostly = HZ / 
50; static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); static int ip_rt_error_cost __read_mostly = HZ; static int ip_rt_error_burst __read_mostly = 5 * HZ; static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; /* * Interface to generic destination cache. */ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ipv4_default_advmss(const struct dst_entry *dst); static unsigned int ipv4_mtu(const struct dst_entry *dst); static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu); static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static void ipv4_dst_destroy(struct dst_entry *dst); static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) { } static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) { WARN_ON(1); return NULL; } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .mtu = ipv4_mtu, .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .ifdown = ipv4_dst_ifdown, .negative_advice = ipv4_negative_advice, .link_failure = ipv4_link_failure, .update_pmtu = ip_rt_update_pmtu, .redirect = ip_do_redirect, .local_out = __ip_local_out, .neigh_lookup = ipv4_neigh_lookup, }; #define ECN_OR_COST(class) TC_PRIO_##class const __u8 ip_tos2prio[16] = { TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_INTERACTIVE, 
ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos) return NULL; return SEQ_START_TOKEN; } static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return NULL; } static void rt_cache_seq_stop(struct seq_file *seq, void *v) { } static int rt_cache_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" "HHUptod\tSpecDst"); return 0; } static const struct seq_operations rt_cache_seq_ops = { .start = rt_cache_seq_start, .next = rt_cache_seq_next, .stop = rt_cache_seq_stop, .show = rt_cache_seq_show, }; static int rt_cache_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cache_seq_ops); } static const struct file_operations rt_cache_seq_fops = { .owner = THIS_MODULE, .open = rt_cache_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) { int cpu; if (*pos == 0) return SEQ_START_TOKEN; for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; } static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int cpu; for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; } static void rt_cpu_seq_stop(struct seq_file *seq, void *v) { } static int rt_cpu_seq_show(struct seq_file *seq, void *v) { struct 
rt_cache_stat *st = v; if (v == SEQ_START_TOKEN) { seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; } seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", dst_entries_get_slow(&ipv4_dst_ops), st->in_hit, st->in_slow_tot, st->in_slow_mc, st->in_no_route, st->in_brd, st->in_martian_dst, st->in_martian_src, st->out_hit, st->out_slow_tot, st->out_slow_mc, st->gc_total, st->gc_ignored, st->gc_goal_miss, st->gc_dst_overflow, st->in_hlist_search, st->out_hlist_search ); return 0; } static const struct seq_operations rt_cpu_seq_ops = { .start = rt_cpu_seq_start, .next = rt_cpu_seq_next, .stop = rt_cpu_seq_stop, .show = rt_cpu_seq_show, }; static int rt_cpu_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cpu_seq_ops); } static const struct file_operations rt_cpu_seq_fops = { .owner = THIS_MODULE, .open = rt_cpu_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_IP_ROUTE_CLASSID static int rt_acct_proc_show(struct seq_file *m, void *v) { struct ip_rt_acct *dst, *src; unsigned int i, j; dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL); if (!dst) return -ENOMEM; for_each_possible_cpu(i) { src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); for (j = 0; j < 256; j++) { dst[j].o_bytes += src[j].o_bytes; dst[j].o_packets += src[j].o_packets; dst[j].i_bytes += src[j].i_bytes; dst[j].i_packets += src[j].i_packets; } } seq_write(m, dst, 256 * sizeof(struct ip_rt_acct)); kfree(dst); return 0; } static int rt_acct_proc_open(struct inode *inode, struct file *file) { return single_open(file, rt_acct_proc_show, NULL); } static const struct file_operations rt_acct_proc_fops = { .owner = THIS_MODULE, .open = rt_acct_proc_open, .read = seq_read, .llseek = seq_lseek, .release 
= single_release, }; #endif static int __net_init ip_rt_do_proc_init(struct net *net) { struct proc_dir_entry *pde; pde = proc_create("rt_cache", S_IRUGO, net->proc_net, &rt_cache_seq_fops); if (!pde) goto err1; pde = proc_create("rt_cache", S_IRUGO, net->proc_net_stat, &rt_cpu_seq_fops); if (!pde) goto err2; #ifdef CONFIG_IP_ROUTE_CLASSID pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops); if (!pde) goto err3; #endif return 0; #ifdef CONFIG_IP_ROUTE_CLASSID err3: remove_proc_entry("rt_cache", net->proc_net_stat); #endif err2: remove_proc_entry("rt_cache", net->proc_net); err1: return -ENOMEM; } static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); #ifdef CONFIG_IP_ROUTE_CLASSID remove_proc_entry("rt_acct", net->proc_net); #endif } static struct pernet_operations ip_rt_proc_ops __net_initdata = { .init = ip_rt_do_proc_init, .exit = ip_rt_do_proc_exit, }; static int __init ip_rt_proc_init(void) { return register_pernet_subsys(&ip_rt_proc_ops); } #else static inline int ip_rt_proc_init(void) { return 0; } #endif /* CONFIG_PROC_FS */ static inline bool rt_is_expired(const struct rtable *rth) { return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); } void rt_cache_flush(struct net *net) { rt_genid_bump(net); } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct net_device *dev = dst->dev; const __be32 *pkey = daddr; const struct rtable *rt; struct neighbour *n; rt = (const struct rtable *) dst; if (rt->rt_gateway) pkey = (const __be32 *) &rt->rt_gateway; else if (skb) pkey = &ip_hdr(skb)->daddr; n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); if (n) return n; return neigh_create(&arp_tbl, pkey, dev); } /* * Peer allocation may fail only in serious out-of-memory conditions. However * we still can generate some output. 
* Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
/* Last-resort IP ID generator, used only when no inet_peer entry could be
 * allocated for the destination.  Keeps one global counter, mixed with the
 * destination address through secure_ip_id(), under its own spinlock.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	/* Fold the destination into the previous value so different
	 * destinations do not share one strictly increasing sequence.
	 */
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

/* Choose the IP identification field for @iph.  The normal path uses the
 * per-destination inet_peer counter (inet_getid); if peer allocation fails
 * we fall back to the global generator above.  @more is handed through to
 * inet_getid().
 */
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct net *net = dev_net(dst->dev);
	struct inet_peer *peer;

	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
	if (peer) {
		iph->id = htons(inet_getid(peer, more));
		inet_putpeer(peer);
		return;
	}

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

/* Initialize flow key @fl4 from IP header fields.  When a socket is
 * supplied, its bound device, mark, TOS and protocol override the values
 * derived from the packet.
 */
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		/* raw sockets with IP_HDRINCL route as IPPROTO_RAW */
		prot = inet->hdrincl ?
IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0);
}

/* Build a flow key from a received packet, optionally refined by @sk
 * (see __build_flow_key for which socket fields take precedence).
 */
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}

/* Build a flow key purely from connected-socket state.  If a strict/loose
 * source-route option is set, route towards its first hop (faddr) rather
 * than the final destination.
 */
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0);
	rcu_read_unlock();
}

/* Prefer the packet as key source when one is available, else the socket. */
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

/* Free a route after the current RCU grace period. */
static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

/* Serializes writers of the per-nexthop exception hash chains. */
static DEFINE_SPINLOCK(fnhe_lock);

/* Pick the exception with the oldest fnhe_stamp on this bucket's chain so
 * it can be recycled, dropping any cached route bound to it.  Called with
 * fnhe_lock held (see update_or_create_fnhe).
 */
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;
	struct rtable *orig;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	orig = rcu_dereference(oldest->fnhe_rth);
	if (orig) {
		RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
		rt_free(orig);
	}
	return oldest;
}

/* Hash a destination address into one of FNHE_HASH_SIZE buckets. */
static inline u32 fnhe_hashfun(__be32 daddr)
{
	u32 hval;

	hval = (__force u32) daddr;
	hval ^= (hval >> 11) ^ (hval >> 22);

	return hval & (FNHE_HASH_SIZE - 1);
}

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr,
__be32 gw, u32 pmtu, unsigned long expires) { struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; int depth; u32 hval = fnhe_hashfun(daddr); spin_lock_bh(&fnhe_lock); hash = nh->nh_exceptions; if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; nh->nh_exceptions = hash; } hash += hval; depth = 0; for (fnhe = rcu_dereference(hash->chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (fnhe->fnhe_daddr == daddr) break; depth++; } if (fnhe) { if (gw) fnhe->fnhe_gw = gw; if (pmtu) { fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_expires = expires; } } else { if (depth > FNHE_RECLAIM_DEPTH) fnhe = fnhe_oldest(hash); else { fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); if (!fnhe) goto out_unlock; fnhe->fnhe_next = hash->chain; rcu_assign_pointer(hash->chain, fnhe); } fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_expires = expires; } fnhe->fnhe_stamp = jiffies; out_unlock: spin_unlock_bh(&fnhe_lock); return; } static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, bool kill_route) { __be32 new_gw = icmp_hdr(skb)->un.gateway; __be32 old_gw = ip_hdr(skb)->saddr; struct net_device *dev = skb->dev; struct in_device *in_dev; struct fib_result res; struct neighbour *n; struct net *net; switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: break; default: return; } if (rt->rt_gateway != old_gw) return; in_dev = __in_dev_get_rcu(dev); if (!in_dev) return; net = dev_net(dev); if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) || ipv4_is_zeronet(new_gw)) goto reject_redirect; if (!IN_DEV_SHARED_MEDIA(in_dev)) { if (!inet_addr_onlink(in_dev, new_gw, old_gw)) goto reject_redirect; if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) goto reject_redirect; } else { if (inet_addr_type(net, new_gw) != RTN_UNICAST) goto 
reject_redirect; } n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); if (n) { if (!(n->nud_state & NUD_VALID)) { neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, new_gw, 0, 0); } if (kill_route) rt->dst.obsolete = DST_OBSOLETE_KILL; call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n); } neigh_release(n); } return; reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev)) { const struct iphdr *iph = (const struct iphdr *) skb->data; __be32 daddr = iph->daddr; __be32 saddr = iph->saddr; net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n" " Advised path = %pI4 -> %pI4\n", &old_gw, dev->name, &new_gw, &saddr, &daddr); } #endif ; } static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; struct flowi4 fl4; const struct iphdr *iph = (const struct iphdr *) skb->data; int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; rt = (struct rtable *) dst; __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0); __ip_do_redirect(rt, skb, &fl4, true); } static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) { struct rtable *rt = (struct rtable *)dst; struct dst_entry *ret = dst; if (rt) { if (dst->obsolete > 0) { ip_rt_put(rt); ret = NULL; } else if ((rt->rt_flags & RTCF_REDIRECTED) || rt->dst.expires) { ip_rt_put(rt); ret = NULL; } } return ret; } /* * Algorithm: * 1. The first ip_rt_redirect_number redirects are sent * with exponential backoff, then we stop sending them at all, * assuming that the host ignores our redirects. * 2. If we did not see packets requiring redirects * during ip_rt_redirect_silence, we assume that the host * forgot redirected route and start to send redirects again. * * This algorithm is much cheaper and more intelligent than dumb load limiting * in icmp.c. * * NOTE. 
Do not forget to inhibit load limiting for redirects (redundant) * and "frag. need" (breaks PMTU discovery) in icmp.c. */ void ip_rt_send_redirect(struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct in_device *in_dev; struct inet_peer *peer; struct net *net; int log_martians; rcu_read_lock(); in_dev = __in_dev_get_rcu(rt->dst.dev); if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { rcu_read_unlock(); return; } log_martians = IN_DEV_LOG_MARTIANS(in_dev); rcu_read_unlock(); net = dev_net(rt->dst.dev); peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); if (!peer) { icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt_nexthop(rt, ip_hdr(skb)->daddr)); return; } /* No redirected packets during ip_rt_redirect_silence; * reset the algorithm. */ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) peer->rate_tokens = 0; /* Too many ignored redirects; do not send anything * set dst.rate_last to the last seen redirected packet. */ if (peer->rate_tokens >= ip_rt_redirect_number) { peer->rate_last = jiffies; goto out_put_peer; } /* Check for load limit; set rate_last to the latest sent * redirect. 
*/ if (peer->rate_tokens == 0 || time_after(jiffies, (peer->rate_last + (ip_rt_redirect_load << peer->rate_tokens)))) { __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); peer->rate_last = jiffies; ++peer->rate_tokens; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && peer->rate_tokens == ip_rt_redirect_number) net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", &ip_hdr(skb)->saddr, inet_iif(skb), &ip_hdr(skb)->daddr, &gw); #endif } out_put_peer: inet_putpeer(peer); } static int ip_error(struct sk_buff *skb) { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); struct rtable *rt = skb_rtable(skb); struct inet_peer *peer; unsigned long now; struct net *net; bool send; int code; net = dev_net(rt->dst.dev); if (!IN_DEV_FORWARD(in_dev)) { switch (rt->dst.error) { case EHOSTUNREACH: IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS); break; case ENETUNREACH: IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); break; } goto out; } switch (rt->dst.error) { case EINVAL: default: goto out; case EHOSTUNREACH: code = ICMP_HOST_UNREACH; break; case ENETUNREACH: code = ICMP_NET_UNREACH; IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); break; case EACCES: code = ICMP_PKT_FILTERED; break; } peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); send = true; if (peer) { now = jiffies; peer->rate_tokens += now - peer->rate_last; if (peer->rate_tokens > ip_rt_error_burst) peer->rate_tokens = ip_rt_error_burst; peer->rate_last = now; if (peer->rate_tokens >= ip_rt_error_cost) peer->rate_tokens -= ip_rt_error_cost; else send = false; inet_putpeer(peer); } if (send) icmp_send(skb, ICMP_DEST_UNREACH, code, 0); out: kfree_skb(skb); return 0; } static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; struct fib_result res; if (dst_metric_locked(dst, RTAX_MTU)) return; if (dst->dev->mtu < mtu) return; if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; if 
(!rt->rt_pmtu) { dst->obsolete = DST_OBSOLETE_KILL; } else { rt->rt_pmtu = mtu; dst->expires = max(1UL, jiffies + ip_rt_mtu_expires); } rcu_read_lock(); if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, jiffies + ip_rt_mtu_expires); } rcu_read_unlock(); } static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { struct rtable *rt = (struct rtable *) dst; struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); __ip_rt_update_pmtu(rt, &fl4, mtu); } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, u32 mark, u8 protocol, int flow_flags) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(&fl4, NULL, iph, oif, RT_TOS(iph->tos), protocol, mark, flow_flags); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); ip_rt_put(rt); } } EXPORT_SYMBOL_GPL(ipv4_update_pmtu); static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); rt = __ip_route_output_key(sock_net(sk), &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); ip_rt_put(rt); } } void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; struct dst_entry *dst; bool new = false; bh_lock_sock(sk); rt = (struct rtable *) __sk_dst_get(sk); if (sock_owned_by_user(sk) || !rt) { __ipv4_sk_update_pmtu(skb, sk, mtu); goto out; } __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); if (!__sk_dst_check(sk, 0)) { rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) goto out; new = true; } __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); dst = 
dst_check(&rt->dst, 0);
	if (!dst) {
		/* The dst we just updated was invalidated; route again. */
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		__sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

/* Apply an ICMP redirect for the flow described by @skb's inner IP header
 * plus the explicit oif/mark/protocol, without binding a route to any
 * socket (kill_route == false).
 */
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

/* As ipv4_redirect(), but the flow key is derived from @sk's state. */
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a
	 * route, this is indicated by setting obsolete to
	 * DST_OBSOLETE_KILL.
	 */
	if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
		return NULL;
	return dst;
}

/* dst_ops link_failure hook: report host-unreachable to the sender and
 * expire the attached route immediately.
 */
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

/* Catch-all input/output handler for routes that should never be used;
 * logs the flow, drops the packet and warns.
 */
static int ip_rt_bug(struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ?
skb->dev->name : "?"); kfree_skb(skb); WARN_ON(1); return 0; } /* We do not cache source address of outgoing interface, because it is used only by IP RR, TS and SRR options, so that it out of fast path. BTW remember: "addr" is allowed to be not aligned in IP options! */ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) { __be32 src; if (rt_is_output_route(rt)) src = ip_hdr(skb)->saddr; else { struct fib_result res; struct flowi4 fl4; struct iphdr *iph; iph = ip_hdr(skb); memset(&fl4, 0, sizeof(fl4)); fl4.daddr = iph->daddr; fl4.saddr = iph->saddr; fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = rt->dst.dev->ifindex; fl4.flowi4_iif = skb->dev->ifindex; fl4.flowi4_mark = skb->mark; rcu_read_lock(); if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); else src = inet_select_addr(rt->dst.dev, rt_nexthop(rt, iph->daddr), RT_SCOPE_UNIVERSE); rcu_read_unlock(); } memcpy(addr, &src, 4); } #ifdef CONFIG_IP_ROUTE_CLASSID static void set_class_tag(struct rtable *rt, u32 tag) { if (!(rt->dst.tclassid & 0xFFFF)) rt->dst.tclassid |= tag & 0xFFFF; if (!(rt->dst.tclassid & 0xFFFF0000)) rt->dst.tclassid |= tag & 0xFFFF0000; } #endif static unsigned int ipv4_default_advmss(const struct dst_entry *dst) { unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS); if (advmss == 0) { advmss = max_t(unsigned int, dst->dev->mtu - 40, ip_rt_min_advmss); if (advmss > 65535 - 40) advmss = 65535 - 40; } return advmss; } static unsigned int ipv4_mtu(const struct dst_entry *dst) { const struct rtable *rt = (const struct rtable *) dst; unsigned int mtu = rt->rt_pmtu; if (!mtu || time_after_eq(jiffies, rt->dst.expires)) mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) return mtu; mtu = dst->dev->mtu; if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { if (rt->rt_uses_gateway && mtu > 576) mtu = 576; } if (mtu > IP_MAX_MTU) mtu = IP_MAX_MTU; return mtu; } static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 
daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	/* Walk the hash chain for an exact destination match. */
	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}

/* Bind cached route @rt to exception entry @fnhe: copy the exception's
 * PMTU/gateway state into the route and install the route as the entry's
 * cached rtable (freeing any previous one).  Returns true if the bind
 * happened (daddr matched), false otherwise.
 */
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);

		/* A genid bump invalidated the old binding; reset the
		 * learned gateway/PMTU state along with it.
		 */
		if (orig && rt_is_expired(orig)) {
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
		}
		if (fnhe->fnhe_pmtu) {
			unsigned long expires = fnhe->fnhe_expires;
			unsigned long diff = expires - jiffies;

			if (time_before(jiffies, expires)) {
				rt->rt_pmtu = fnhe->fnhe_pmtu;
				dst_set_expires(&rt->dst, diff);
			}
		}
		if (fnhe->fnhe_gw) {
			rt->rt_flags |= RTCF_REDIRECTED;
			rt->rt_gateway = fnhe->fnhe_gw;
			rt->rt_uses_gateway = 1;
		} else if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		rcu_assign_pointer(fnhe->fnhe_rth, rt);
		if (orig)
			rt_free(orig);

		fnhe->fnhe_stamp = jiffies;
		ret = true;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

/* Publish @rt as the nexthop's cached route (input slot, or this CPU's
 * output slot).  cmpxchg detects a concurrent writer; on success the
 * displaced route is freed, on contention we return false and the caller
 * marks the route uncacheable.
 */
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}

/* Global list of routes not stored in any nexthop cache slot, so device
 * teardown can still find and retarget them (see rt_flush_dev).
 */
static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);

static void rt_add_uncached_list(struct rtable *rt)
{
	spin_lock_bh(&rt_uncached_lock);
	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
	spin_unlock_bh(&rt_uncached_lock);
}

/* dst_ops destructor: unlink the route from the uncached list if present. */
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;

	if (!list_empty(&rt->rt_uncached)) {
		spin_lock_bh(&rt_uncached_lock);
		list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock); } } void rt_flush_dev(struct net_device *dev) { if (!list_empty(&rt_uncached_list)) { struct net *net = dev_net(dev); struct rtable *rt; spin_lock_bh(&rt_uncached_lock); list_for_each_entry(rt, &rt_uncached_list, rt_uncached) { if (rt->dst.dev != dev) continue; rt->dst.dev = net->loopback_dev; dev_hold(rt->dst.dev); dev_put(dev); } spin_unlock_bh(&rt_uncached_lock); } } static bool rt_cache_valid(const struct rtable *rt) { return rt && rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && !rt_is_expired(rt); } static void rt_set_nexthop(struct rtable *rt, __be32 daddr, const struct fib_result *res, struct fib_nh_exception *fnhe, struct fib_info *fi, u16 type, u32 itag) { bool cached = false; if (fi) { struct fib_nh *nh = &FIB_RES_NH(*res); if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } dst_init_metrics(&rt->dst, fi->fib_metrics, true); #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif if (unlikely(fnhe)) cached = rt_bind_exception(rt, fnhe, daddr); else if (!(rt->dst.flags & DST_NOCACHE)) cached = rt_cache_route(nh, rt); if (unlikely(!cached)) { /* Routes we intend to cache in nexthop exception or * FIB nexthop have the DST_NOCACHE bit clear. * However, if we are unsuccessful at storing this * route into the cache we really need to set it. */ rt->dst.flags |= DST_NOCACHE; if (!rt->rt_gateway) rt->rt_gateway = daddr; rt_add_uncached_list(rt); } } else rt_add_uncached_list(rt); #ifdef CONFIG_IP_ROUTE_CLASSID #ifdef CONFIG_IP_MULTIPLE_TABLES set_class_tag(rt, res->tclassid); #endif set_class_tag(rt, itag); #endif } static struct rtable *rt_dst_alloc(struct net_device *dev, bool nopolicy, bool noxfrm, bool will_cache) { return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) | (nopolicy ? DST_NOPOLICY : 0) | (noxfrm ? 
DST_NOXFRM : 0)); } /* called in rcu_read_lock() section */ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, int our) { struct rtable *rth; struct in_device *in_dev = __in_dev_get_rcu(dev); u32 itag = 0; int err; /* Primary sanity checks. */ if (in_dev == NULL) return -EINVAL; if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || skb->protocol != htons(ETH_P_IP)) goto e_inval; if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) if (ipv4_is_loopback(saddr)) goto e_inval; if (ipv4_is_zeronet(saddr)) { if (!ipv4_is_local_multicast(daddr)) goto e_inval; } else { err = fib_validate_source(skb, saddr, 0, tos, 0, dev, in_dev, &itag); if (err < 0) goto e_err; } rth = rt_dst_alloc(dev_net(dev)->loopback_dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); if (!rth) goto e_nobufs; #ifdef CONFIG_IP_ROUTE_CLASSID rth->dst.tclassid = itag; #endif rth->dst.output = ip_rt_bug; rth->rt_genid = rt_genid(dev_net(dev)); rth->rt_flags = RTCF_MULTICAST; rth->rt_type = RTN_MULTICAST; rth->rt_is_input= 1; rth->rt_iif = 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); if (our) { rth->dst.input= ip_local_deliver; rth->rt_flags |= RTCF_LOCAL; } #ifdef CONFIG_IP_MROUTE if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) rth->dst.input = ip_mr_input; #endif RT_CACHE_STAT_INC(in_slow_mc); skb_dst_set(skb, &rth->dst); return 0; e_nobufs: return -ENOBUFS; e_inval: return -EINVAL; e_err: return err; } static void ip_handle_martian_source(struct net_device *dev, struct in_device *in_dev, struct sk_buff *skb, __be32 daddr, __be32 saddr) { RT_CACHE_STAT_INC(in_martian_src); #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) { /* * RFC1812 recommendation, if source is martian, * the only hint is MAC header. 
*/ pr_warn("martian source %pI4 from %pI4, on dev %s\n", &daddr, &saddr, dev->name); if (dev->hard_header_len && skb_mac_header_was_set(skb)) { print_hex_dump(KERN_WARNING, "ll header: ", DUMP_PREFIX_OFFSET, 16, 1, skb_mac_header(skb), dev->hard_header_len, true); } } #endif } /* called in rcu_read_lock() section */ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { struct rtable *rth; int err; struct in_device *out_dev; unsigned int flags = 0; bool do_cache; u32 itag; /* get a working reference to the output device */ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); return -EINVAL; } err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), in_dev->dev, in_dev, &itag); if (err < 0) { ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); goto cleanup; } do_cache = res->fi && !itag; if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && (IN_DEV_SHARED_MEDIA(out_dev) || inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { flags |= RTCF_DOREDIRECT; do_cache = false; } if (skb->protocol != htons(ETH_P_IP)) { /* Not IP (i.e. ARP). Do not create route, if it is * invalid for proxy arp. DNAT routes are always valid. * * Proxy arp feature have been extended to allow, ARP * replies back to the same interface, to support * Private VLAN switch technologies. See arp.c. 
*/ if (out_dev == in_dev && IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) { err = -EINVAL; goto cleanup; } } if (do_cache) { rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); goto out; } } rth = rt_dst_alloc(out_dev->dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache); if (!rth) { err = -ENOBUFS; goto cleanup; } rth->rt_genid = rt_genid(dev_net(rth->dst.dev)); rth->rt_flags = flags; rth->rt_type = res->type; rth->rt_is_input = 1; rth->rt_iif = 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); rth->dst.input = ip_forward; rth->dst.output = ip_output; rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag); skb_dst_set(skb, &rth->dst); out: err = 0; cleanup: return err; } static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, const struct flowi4 *fl4, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) fib_select_multipath(res); #endif /* create a routing cache entry */ return __mkroute_input(skb, res, in_dev, daddr, saddr, tos); } /* * NOTE. We drop all the packets that has local source * addresses, because every properly looped back packet * must have correct destination already attached by output routine. * * Such approach solves two big problems: * 1. Not simplex devices are handled properly. * 2. IP spoofing attempts are filtered with 100% of guarantee. * called with rcu_read_lock() */ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev) { struct fib_result res; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flowi4 fl4; unsigned int flags = 0; u32 itag = 0; struct rtable *rth; int err = -EINVAL; struct net *net = dev_net(dev); bool do_cache; /* IP on this device is disabled. 
*/ if (!in_dev) goto out; /* Check for the most weird martians, which can be not detected by fib_lookup. */ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) goto martian_source; res.fi = NULL; if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) goto brd_input; /* Accept zero addresses only to limited broadcast; * I even do not know to fix it or not. Waiting for complains :-) */ if (ipv4_is_zeronet(saddr)) goto martian_source; if (ipv4_is_zeronet(daddr)) goto martian_destination; /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), * and call it once if daddr or/and saddr are loopback addresses */ if (ipv4_is_loopback(daddr)) { if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_destination; } else if (ipv4_is_loopback(saddr)) { if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_source; } /* * Now we are ready to route packet. */ fl4.flowi4_oif = 0; fl4.flowi4_iif = dev->ifindex; fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res); if (err != 0) goto no_route; RT_CACHE_STAT_INC(in_slow_tot); if (res.type == RTN_BROADCAST) goto brd_input; if (res.type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, LOOPBACK_IFINDEX, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; goto local_input; } if (!IN_DEV_FORWARD(in_dev)) goto no_route; if (res.type != RTN_UNICAST) goto martian_destination; err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos); out: return err; brd_input: if (skb->protocol != htons(ETH_P_IP)) goto e_inval; if (!ipv4_is_zeronet(saddr)) { err = fib_validate_source(skb, saddr, 0, tos, 0, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; } flags |= RTCF_BROADCAST; res.type = RTN_BROADCAST; RT_CACHE_STAT_INC(in_brd); local_input: do_cache = false; if (res.fi) { if (!itag) { rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input); if (rt_cache_valid(rth)) { 
/*
 * NOTE(review): this chunk begins inside the input-route slow path
 * (presumably ip_route_input_slow() — it is called by name below); its
 * opening lines are outside this view.  Only comments and formatting
 * were changed in this span; every code token is unchanged.
 */
				/* Cached input route still valid: attach it
				 * without taking a dst refcount (noref). */
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	/* No usable cached route: build a fresh local-delivery rtable. */
	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.input= ip_local_deliver;
	/* Locally destined traffic must never be forwarded back out. */
	rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif

	rth->rt_genid = rt_genid(net);
	rth->rt_flags	= flags|RTCF_LOCAL;
	rth->rt_type	= res.type;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (res.type == RTN_UNREACHABLE) {
		/* Deliver an ICMP error instead of the packet. */
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	if (do_cache)
		rt_cache_route(&FIB_RES_NH(res), rth);
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

/*
 * ip_route_input_noref - input route lookup entry point (no dst refcount)
 * @skb:  packet being routed
 * @daddr/@saddr: destination/source addresses from the IP header
 * @tos:  IP type-of-service byte
 * @dev:  device the packet arrived on
 *
 * Holds rcu_read_lock() across the whole lookup.  Multicast destinations
 * are recognised here (see the comment below) and handed to
 * ip_route_input_mc(); everything else goes through ip_route_input_slow().
 * Returns 0 on success or a negative errno.
 */
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	int res;

	rcu_read_lock();

	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As result the host on multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   comparing with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				/* NOTE(review): this inner "res" shadows the
				 * function-scope "res" above; harmless but
				 * easy to misread. */
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_noref);

/*
 * Build (or fetch from the per-nexthop cache) the output rtable for an
 * already-resolved fib_result.  May return a cached exception route or
 * per-cpu output route when it is still valid.
 *
 * called with rcu_read_lock()
 */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	/* A loopback source may only leave via a loopback device unless
	 * route_localnet is enabled on the outgoing device. */
	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		/* Prefer a PMTU/redirect exception entry over the
		 * per-cpu cached output route. */
		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe)
			prth = &fnhe->fnhe_rth;
		else {
			if (unlikely(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	rth->rt_genid = rt_genid(dev_net(dev_out));
	rth->rt_flags	= flags;
	rth->rt_type	= type;
	rth->rt_is_input = 0;
	rth->rt_iif	= orig_oif ? : 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL)
		rth->dst.input = ip_local_deliver;
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

	return rth;
}

/*
 * Major route resolver routine.
 */

struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	__u8 tos = RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	int orig_oif;

	res.tclassid	= 0;
	res.fi		= NULL;
	res.table	= NULL;

	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	/* RTO_ONLINK in the tos forces link scope (expression continues on
	 * the next chunk line). */
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
/* NOTE(review): the expression below completes the flowi4_scope
 * assignment begun at the end of the previous chunk line, inside
 * __ip_route_output_key(). */
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		/* A caller-supplied source address must be unicast. */
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		/* NOTE(review): upstream route.c guards this block with
		 * "if (!fl4->saddr)" — as written here, a caller-supplied
		 * source address would be overwritten and an empty one left
		 * unset.  Verify against the tree this chunk was taken from;
		 * code tokens left unchanged. */
		if (fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		/* No destination at all: route to ourselves via loopback. */
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK


			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);

/* Blackhole dst is never valid: force callers to re-lookup. */
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

/* Report the raw MTU metric, falling back to the device MTU
 * (the "?:" default continues on the next chunk line). */
static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ?
/* NOTE(review): the leading expression completes ipv4_blackhole_mtu()'s
 * "return mtu ?:" (GNU ?: extension) begun on the previous chunk line. */
: dst->dev->mtu;
}

/* Blackhole routes ignore PMTU updates ... */
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

/* ... and redirects ... */
static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

/* ... and never allocate writable metrics. */
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.check			=	ipv4_blackhole_dst_check,
	.mtu			=	ipv4_blackhole_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.redirect		=	ipv4_rt_blackhole_redirect,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};

/*
 * Clone @dst_orig into a "blackhole" route that discards everything sent
 * through it (input/output are dst_discard) while preserving the original
 * route's attributes.  Always releases @dst_orig; returns the new dst or
 * ERR_PTR(-ENOMEM).
 */
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

/*
 * Resolve an output route for @flp4 and, when a protocol is given, run the
 * result through xfrm_lookup() so IPsec policy can transform/replace it.
 */
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

/*
 * Fill one RTM_NEWROUTE netlink message describing the route attached to
 * @skb (via skb_rtable()).  Returns the nlmsg_end() result on success or
 * -EMSGSIZE when the message does not fit.
 */
static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	/* Convert the absolute expiry (jiffies) into a remaining delta. */
	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if
/* NOTE(review): the condition below completes the "if" begun on the
 * previous chunk line (still inside rt_fill_info()): a cached PMTU with a
 * pending expiry overrides the MTU metric reported to userspace. */
(rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * RTM_GETROUTE handler: perform a one-off route lookup on behalf of
 * userspace.  With RTA_IIF present an *input* lookup is simulated on a
 * dummy skb; otherwise a normal output lookup is done, and the result is
 * rendered with rt_fill_info() and unicast back to the requester.
 */
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

/* Route dumps are not supported for cloned cache entries: report empty. */
int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
{
	return skb->len;
}

/* Multicast device config changed: drop everything cached for the netns. */
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;

/* /proc/sys/net/ipv4/route/flush: write-only trigger for a cache flush
 * of the owning netns (stored in ->extra1 by sysctl_route_net_init). */
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		rt_cache_flush((struct net *)__ctl->extra1);
		return 0;
	}

	return -EINVAL;
}

static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

/* Per-netns registration of the flush sysctl (body on next chunk line). */
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		/* Non-initial netns gets its own writable copy of the table. */
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	/* Stash the owning netns for ipv4_sysctl_rtcache_flush(). */
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

/* Undo sysctl_route_net_init(): unregister and free the per-netns copy. */
static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

/* Reset the route generation counter and seed the per-netns address genid. */
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->rt_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

/* Allocate the per-netns inetpeer base. */
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

/* Tear down the per-netns inetpeer base. */
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

/*
 * Boot-time initialisation of the IPv4 routing subsystem: dst cache
 * kmem_cache, dst entry counters, devinet/fib init, proc files, xfrm
 * hooks, the RTM_GETROUTE handler and all pernet subsystems.
 * Failures here are fatal (panic) since networking cannot proceed.
 */
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Effectively disable dst-entry GC pressure limits. */
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
gpl-2.0
gpandcb/pkernel
drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
189
74217
/* * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program5 is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> #include "pinctrl-uniphier.h" static const struct pinctrl_pin_desc uniphier_pro4_pins[] = { UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE, 0, UNIPHIER_PIN_DRV_1BIT, 0, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE, 1, UNIPHIER_PIN_DRV_1BIT, 1, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE, 2, UNIPHIER_PIN_DRV_1BIT, 2, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE, 3, UNIPHIER_PIN_DRV_1BIT, 3, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE, 4, UNIPHIER_PIN_DRV_1BIT, 4, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE, 5, UNIPHIER_PIN_DRV_1BIT, 5, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE, 6, UNIPHIER_PIN_DRV_1BIT, 6, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE, 7, UNIPHIER_PIN_DRV_1BIT, 7, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE, 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE, 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE, 10, 
UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE, 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE, 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE, 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE, 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE, 15, UNIPHIER_PIN_DRV_1BIT, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE, 16, UNIPHIER_PIN_DRV_1BIT, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE, 17, UNIPHIER_PIN_DRV_1BIT, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE, 18, UNIPHIER_PIN_DRV_1BIT, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE, 19, UNIPHIER_PIN_DRV_1BIT, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE, 20, UNIPHIER_PIN_DRV_1BIT, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE, 21, UNIPHIER_PIN_DRV_1BIT, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE, 22, UNIPHIER_PIN_DRV_1BIT, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE, 23, UNIPHIER_PIN_DRV_1BIT, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE, 24, UNIPHIER_PIN_DRV_1BIT, 24, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE, 25, UNIPHIER_PIN_DRV_1BIT, 25, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, 26, UNIPHIER_PIN_DRV_1BIT, 26, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, 27, UNIPHIER_PIN_DRV_1BIT, 27, 
UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE, 28, UNIPHIER_PIN_DRV_1BIT, 28, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE, 29, UNIPHIER_PIN_DRV_1BIT, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE, 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE, 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE, 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE, 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE, 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE, 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE, 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE, 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE, 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_NONE, 39, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE, 2, UNIPHIER_PIN_DRV_2BIT, 40, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE, 3, UNIPHIER_PIN_DRV_2BIT, 41, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE, 4, UNIPHIER_PIN_DRV_2BIT, 42, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE, 5, UNIPHIER_PIN_DRV_2BIT, 43, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE, 6, UNIPHIER_PIN_DRV_2BIT, 44, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(45, "NFD5", 
UNIPHIER_PIN_IECTRL_NONE, 7, UNIPHIER_PIN_DRV_2BIT, 45, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE, 8, UNIPHIER_PIN_DRV_2BIT, 46, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE, 9, UNIPHIER_PIN_DRV_2BIT, 47, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE, 48, UNIPHIER_PIN_DRV_1BIT, 48, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, 49, UNIPHIER_PIN_DRV_1BIT, 49, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, 50, UNIPHIER_PIN_DRV_1BIT, 50, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, 0, UNIPHIER_PIN_DRV_2BIT, 51, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, 52, UNIPHIER_PIN_DRV_1BIT, 52, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, 1, UNIPHIER_PIN_DRV_2BIT, 53, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE, 54, UNIPHIER_PIN_DRV_1BIT, 54, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_NONE, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3, -1, UNIPHIER_PIN_DRV_FIXED4, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5, -1, UNIPHIER_PIN_DRV_FIXED4, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, 
UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE, 57, UNIPHIER_PIN_DRV_1BIT, 57, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE, 58, UNIPHIER_PIN_DRV_1BIT, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE, 59, UNIPHIER_PIN_DRV_1BIT, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE, 60, UNIPHIER_PIN_DRV_1BIT, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE, 61, UNIPHIER_PIN_DRV_1BIT, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE, 62, UNIPHIER_PIN_DRV_1BIT, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE, 63, UNIPHIER_PIN_DRV_1BIT, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE, 64, UNIPHIER_PIN_DRV_1BIT, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE, 65, UNIPHIER_PIN_DRV_1BIT, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE, 66, UNIPHIER_PIN_DRV_1BIT, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE, 67, UNIPHIER_PIN_DRV_1BIT, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE, 68, UNIPHIER_PIN_DRV_1BIT, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE, 69, UNIPHIER_PIN_DRV_1BIT, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE, 70, UNIPHIER_PIN_DRV_1BIT, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE, 71, UNIPHIER_PIN_DRV_1BIT, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE, 72, UNIPHIER_PIN_DRV_1BIT, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE, 73, UNIPHIER_PIN_DRV_1BIT, 
73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE, 74, UNIPHIER_PIN_DRV_1BIT, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE, 75, UNIPHIER_PIN_DRV_1BIT, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE, 76, UNIPHIER_PIN_DRV_1BIT, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE, 77, UNIPHIER_PIN_DRV_1BIT, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE, 78, UNIPHIER_PIN_DRV_1BIT, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE, 79, UNIPHIER_PIN_DRV_1BIT, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE, 80, UNIPHIER_PIN_DRV_1BIT, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE, 81, UNIPHIER_PIN_DRV_1BIT, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE, 82, UNIPHIER_PIN_DRV_1BIT, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE, 83, UNIPHIER_PIN_DRV_1BIT, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE, 84, UNIPHIER_PIN_DRV_1BIT, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE, 85, UNIPHIER_PIN_DRV_1BIT, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE, 86, UNIPHIER_PIN_DRV_1BIT, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE, 87, UNIPHIER_PIN_DRV_1BIT, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE, 88, UNIPHIER_PIN_DRV_1BIT, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE, 89, UNIPHIER_PIN_DRV_1BIT, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE, 90, 
UNIPHIER_PIN_DRV_1BIT, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE, 91, UNIPHIER_PIN_DRV_1BIT, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE, 92, UNIPHIER_PIN_DRV_1BIT, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE, 93, UNIPHIER_PIN_DRV_1BIT, 93, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE, 94, UNIPHIER_PIN_DRV_1BIT, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE, 95, UNIPHIER_PIN_DRV_1BIT, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE, 96, UNIPHIER_PIN_DRV_1BIT, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE, 97, UNIPHIER_PIN_DRV_1BIT, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE, 98, UNIPHIER_PIN_DRV_1BIT, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE, 99, UNIPHIER_PIN_DRV_1BIT, 99, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE, 100, UNIPHIER_PIN_DRV_1BIT, 100, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE, 101, UNIPHIER_PIN_DRV_1BIT, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE, 102, UNIPHIER_PIN_DRV_1BIT, 102, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE, 103, UNIPHIER_PIN_DRV_1BIT, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE, 104, UNIPHIER_PIN_DRV_1BIT, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE, 105, UNIPHIER_PIN_DRV_1BIT, 105, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE, 106, UNIPHIER_PIN_DRV_1BIT, 106, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", 
UNIPHIER_PIN_IECTRL_NONE, 107, UNIPHIER_PIN_DRV_1BIT, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE, 108, UNIPHIER_PIN_DRV_1BIT, 108, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE, 109, UNIPHIER_PIN_DRV_1BIT, 109, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE, 110, UNIPHIER_PIN_DRV_1BIT, 110, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE, 111, UNIPHIER_PIN_DRV_1BIT, 111, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE, 112, UNIPHIER_PIN_DRV_1BIT, 112, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE, 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE, 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE, 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE, 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE, 117, UNIPHIER_PIN_DRV_1BIT, 117, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE, 118, UNIPHIER_PIN_DRV_1BIT, 118, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE, 119, UNIPHIER_PIN_DRV_1BIT, 119, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE, 120, UNIPHIER_PIN_DRV_1BIT, 120, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE, 121, UNIPHIER_PIN_DRV_1BIT, 121, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE, 122, UNIPHIER_PIN_DRV_1BIT, 122, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE, 123, UNIPHIER_PIN_DRV_1BIT, 123, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(130, "TXD1", 
UNIPHIER_PIN_IECTRL_NONE, 124, UNIPHIER_PIN_DRV_1BIT, 124, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE, 125, UNIPHIER_PIN_DRV_1BIT, 125, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE, 126, UNIPHIER_PIN_DRV_1BIT, 126, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE, 127, UNIPHIER_PIN_DRV_1BIT, 127, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE, 128, UNIPHIER_PIN_DRV_1BIT, 128, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE, 129, UNIPHIER_PIN_DRV_1BIT, 129, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE, 130, UNIPHIER_PIN_DRV_1BIT, 130, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE, 131, UNIPHIER_PIN_DRV_1BIT, 131, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE, 132, UNIPHIER_PIN_DRV_1BIT, 132, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE, 133, UNIPHIER_PIN_DRV_1BIT, 133, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE, 134, UNIPHIER_PIN_DRV_1BIT, 134, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE, 135, UNIPHIER_PIN_DRV_1BIT, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(147, "SDA2", 
UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE, 12, UNIPHIER_PIN_DRV_2BIT, 136, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE, 13, UNIPHIER_PIN_DRV_2BIT, 137, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE, 14, UNIPHIER_PIN_DRV_2BIT, 138, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE, 15, UNIPHIER_PIN_DRV_2BIT, 139, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE, 11, UNIPHIER_PIN_DRV_2BIT, 141, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE, 10, UNIPHIER_PIN_DRV_2BIT, 140, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE, 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE, 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE, 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE, 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6, 146, UNIPHIER_PIN_DRV_1BIT, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6, 147, UNIPHIER_PIN_DRV_1BIT, 147, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6, 148, UNIPHIER_PIN_DRV_1BIT, 148, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6, 149, UNIPHIER_PIN_DRV_1BIT, 149, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6, 150, UNIPHIER_PIN_DRV_1BIT, 150, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6, 151, UNIPHIER_PIN_DRV_1BIT, 151, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE, 152, UNIPHIER_PIN_DRV_1BIT, 152, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6, 153, UNIPHIER_PIN_DRV_1BIT, 153, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6, 154, UNIPHIER_PIN_DRV_1BIT, 154, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6, 155, UNIPHIER_PIN_DRV_1BIT, 155, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6, 156, UNIPHIER_PIN_DRV_1BIT, 156, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6, 157, UNIPHIER_PIN_DRV_1BIT, 157, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6, 158, UNIPHIER_PIN_DRV_1BIT, 158, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6, 159, UNIPHIER_PIN_DRV_1BIT, 159, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6, 160, UNIPHIER_PIN_DRV_1BIT, 160, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6, 161, UNIPHIER_PIN_DRV_1BIT, 161, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(176, "MDC", 6, 162, UNIPHIER_PIN_DRV_1BIT, 162, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(177, "MDIO", 6, 163, UNIPHIER_PIN_DRV_1BIT, 163, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6, 164, UNIPHIER_PIN_DRV_1BIT, 164, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6, 165, UNIPHIER_PIN_DRV_1BIT, 165, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, 166, UNIPHIER_PIN_DRV_1BIT, 166, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, 167, UNIPHIER_PIN_DRV_1BIT, 167, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, 168, UNIPHIER_PIN_DRV_1BIT, 168, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, 169, UNIPHIER_PIN_DRV_1BIT, 169, 
UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, 170, UNIPHIER_PIN_DRV_1BIT, 170, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, 171, UNIPHIER_PIN_DRV_1BIT, 171, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE, 172, UNIPHIER_PIN_DRV_1BIT, 172, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE, 173, UNIPHIER_PIN_DRV_1BIT, 173, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE, 174, UNIPHIER_PIN_DRV_1BIT, 174, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE, 175, UNIPHIER_PIN_DRV_1BIT, 175, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE, 176, UNIPHIER_PIN_DRV_1BIT, 176, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE, 177, UNIPHIER_PIN_DRV_1BIT, 177, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE, 178, UNIPHIER_PIN_DRV_1BIT, 178, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE, 179, UNIPHIER_PIN_DRV_1BIT, 179, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE, 180, UNIPHIER_PIN_DRV_1BIT, 180, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE, 181, UNIPHIER_PIN_DRV_1BIT, 181, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE, 182, UNIPHIER_PIN_DRV_1BIT, 182, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE, 183, UNIPHIER_PIN_DRV_1BIT, 183, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE, 184, UNIPHIER_PIN_DRV_1BIT, 184, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE, 185, UNIPHIER_PIN_DRV_1BIT, 185, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(200, "LINKDT7", 
UNIPHIER_PIN_IECTRL_NONE, 186, UNIPHIER_PIN_DRV_1BIT, 186, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE, 187, UNIPHIER_PIN_DRV_1BIT, 187, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE, 188, UNIPHIER_PIN_DRV_1BIT, 188, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE, 189, UNIPHIER_PIN_DRV_1BIT, 189, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE, 190, UNIPHIER_PIN_DRV_1BIT, 190, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE, 191, UNIPHIER_PIN_DRV_1BIT, 191, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_NONE, 192, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE, 193, UNIPHIER_PIN_DRV_1BIT, 193, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE, 194, UNIPHIER_PIN_DRV_1BIT, 194, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE, 195, UNIPHIER_PIN_DRV_1BIT, 195, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE, 196, UNIPHIER_PIN_DRV_1BIT, 196, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE, 197, UNIPHIER_PIN_DRV_1BIT, 197, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE, 198, UNIPHIER_PIN_DRV_1BIT, 198, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE, 199, UNIPHIER_PIN_DRV_1BIT, 199, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE, 200, UNIPHIER_PIN_DRV_1BIT, 200, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE, 201, UNIPHIER_PIN_DRV_1BIT, 201, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE, 202, UNIPHIER_PIN_DRV_1BIT, 202, UNIPHIER_PIN_PULL_DOWN), 
UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE, 203, UNIPHIER_PIN_DRV_1BIT, 203, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE, 204, UNIPHIER_PIN_DRV_1BIT, 204, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE, 205, UNIPHIER_PIN_DRV_1BIT, 205, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE, 206, UNIPHIER_PIN_DRV_1BIT, 206, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE, 207, UNIPHIER_PIN_DRV_1BIT, 207, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE, 208, UNIPHIER_PIN_DRV_1BIT, 208, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE, 209, UNIPHIER_PIN_DRV_1BIT, 209, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE, 210, UNIPHIER_PIN_DRV_1BIT, 210, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE, 211, UNIPHIER_PIN_DRV_1BIT, 211, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE, 212, UNIPHIER_PIN_DRV_1BIT, 212, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE, 213, UNIPHIER_PIN_DRV_1BIT, 213, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE, 214, UNIPHIER_PIN_DRV_1BIT, 214, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE, 215, UNIPHIER_PIN_DRV_1BIT, 215, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE, 216, UNIPHIER_PIN_DRV_1BIT, 216, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE, 217, UNIPHIER_PIN_DRV_1BIT, 217, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE, 218, UNIPHIER_PIN_DRV_1BIT, 218, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE, 219, 
UNIPHIER_PIN_DRV_1BIT, 219, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE, 220, UNIPHIER_PIN_DRV_1BIT, 220, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE, 221, UNIPHIER_PIN_DRV_1BIT, 221, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE, 222, UNIPHIER_PIN_DRV_1BIT, 222, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE, 223, UNIPHIER_PIN_DRV_1BIT, 223, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE, 224, UNIPHIER_PIN_DRV_1BIT, 224, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE, 225, UNIPHIER_PIN_DRV_1BIT, 225, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE, 226, UNIPHIER_PIN_DRV_1BIT, 226, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE, 227, UNIPHIER_PIN_DRV_1BIT, 227, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE, 228, UNIPHIER_PIN_DRV_1BIT, 228, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE, 229, UNIPHIER_PIN_DRV_1BIT, 229, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE, 230, UNIPHIER_PIN_DRV_1BIT, 230, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE, 231, UNIPHIER_PIN_DRV_1BIT, 231, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE, 232, UNIPHIER_PIN_DRV_1BIT, 232, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE, 233, UNIPHIER_PIN_DRV_1BIT, 233, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE, 234, UNIPHIER_PIN_DRV_1BIT, 234, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE, 235, UNIPHIER_PIN_DRV_1BIT, 235, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(250, "VO1Y11", 
UNIPHIER_PIN_IECTRL_NONE, 236, UNIPHIER_PIN_DRV_1BIT, 236, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE, 237, UNIPHIER_PIN_DRV_1BIT, 237, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE, 238, UNIPHIER_PIN_DRV_1BIT, 238, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE, 239, UNIPHIER_PIN_DRV_1BIT, 239, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE, 240, UNIPHIER_PIN_DRV_1BIT, 240, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE, 241, UNIPHIER_PIN_DRV_1BIT, 241, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE, 242, UNIPHIER_PIN_DRV_1BIT, 242, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE, 243, UNIPHIER_PIN_DRV_1BIT, 243, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE, 244, UNIPHIER_PIN_DRV_1BIT, 244, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE, 245, UNIPHIER_PIN_DRV_1BIT, 245, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE, 246, UNIPHIER_PIN_DRV_1BIT, 246, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE, 247, UNIPHIER_PIN_DRV_1BIT, 247, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE, 248, UNIPHIER_PIN_DRV_1BIT, 248, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE, 249, UNIPHIER_PIN_DRV_1BIT, 249, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE, 250, UNIPHIER_PIN_DRV_1BIT, 250, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE, 251, UNIPHIER_PIN_DRV_1BIT, 251, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE, 252, UNIPHIER_PIN_DRV_1BIT, 252, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE, 253, UNIPHIER_PIN_DRV_1BIT, 253, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE, 254, UNIPHIER_PIN_DRV_1BIT, 254, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE, 255, UNIPHIER_PIN_DRV_1BIT, 255, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE, 256, UNIPHIER_PIN_DRV_1BIT, 256, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE, 257, UNIPHIER_PIN_DRV_1BIT, 257, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE, 258, UNIPHIER_PIN_DRV_1BIT, 258, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE, 259, UNIPHIER_PIN_DRV_1BIT, 259, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE, 260, UNIPHIER_PIN_DRV_1BIT, 260, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE, 261, UNIPHIER_PIN_DRV_1BIT, 261, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE, 262, UNIPHIER_PIN_DRV_1BIT, 262, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE, 263, UNIPHIER_PIN_DRV_1BIT, 263, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE, 264, UNIPHIER_PIN_DRV_1BIT, 264, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE, 265, UNIPHIER_PIN_DRV_1BIT, 265, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE, 266, UNIPHIER_PIN_DRV_1BIT, 266, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE, 267, UNIPHIER_PIN_DRV_1BIT, 267, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE, 268, UNIPHIER_PIN_DRV_1BIT, 268, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(283, "AO1BCK", 
UNIPHIER_PIN_IECTRL_NONE, 269, UNIPHIER_PIN_DRV_1BIT, 269, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE, 270, UNIPHIER_PIN_DRV_1BIT, 270, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE, 271, UNIPHIER_PIN_DRV_1BIT, 271, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE, 272, UNIPHIER_PIN_DRV_1BIT, 272, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE, 273, UNIPHIER_PIN_DRV_1BIT, 273, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE, 274, UNIPHIER_PIN_DRV_1BIT, 274, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE, 275, UNIPHIER_PIN_DRV_1BIT, 275, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE, 276, UNIPHIER_PIN_DRV_1BIT, 276, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE, 277, UNIPHIER_PIN_DRV_1BIT, 277, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, 278, UNIPHIER_PIN_DRV_1BIT, 278, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, 279, UNIPHIER_PIN_DRV_1BIT, 279, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, 280, UNIPHIER_PIN_DRV_1BIT, 280, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE, 281, UNIPHIER_PIN_DRV_1BIT, 281, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE, 282, UNIPHIER_PIN_DRV_1BIT, 282, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED5, -1, 
UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE, 284, UNIPHIER_PIN_DRV_1BIT, 284, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE, 285, UNIPHIER_PIN_DRV_1BIT, 285, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE, 286, UNIPHIER_PIN_DRV_1BIT, 286, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE, 287, UNIPHIER_PIN_DRV_1BIT, 287, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE, 288, UNIPHIER_PIN_DRV_1BIT, 288, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE, 289, UNIPHIER_PIN_DRV_1BIT, 289, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE, 290, UNIPHIER_PIN_DRV_1BIT, 290, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE, 291, UNIPHIER_PIN_DRV_1BIT, 291, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE, 292, UNIPHIER_PIN_DRV_1BIT, 292, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE, 293, UNIPHIER_PIN_DRV_1BIT, 293, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE, 294, UNIPHIER_PIN_DRV_1BIT, 294, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE, 295, UNIPHIER_PIN_DRV_1BIT, 295, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE, 296, UNIPHIER_PIN_DRV_1BIT, 296, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE, 297, UNIPHIER_PIN_DRV_1BIT, 297, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE, 298, UNIPHIER_PIN_DRV_1BIT, 298, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE, 299, UNIPHIER_PIN_DRV_1BIT, 299, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(316, "PORT20", 
UNIPHIER_PIN_IECTRL_NONE, 300, UNIPHIER_PIN_DRV_1BIT, 300, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE, 301,
			     UNIPHIER_PIN_DRV_1BIT, 301, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE, 302,
			     UNIPHIER_PIN_DRV_1BIT, 302, UNIPHIER_PIN_PULL_UP),
	/* SD card slot 1 pins (all pulled up, like the SD0* pins above) */
	UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE, 303,
			     UNIPHIER_PIN_DRV_1BIT, 303, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE, 304,
			     UNIPHIER_PIN_DRV_1BIT, 304, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE, 305,
			     UNIPHIER_PIN_DRV_1BIT, 305, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE, 306,
			     UNIPHIER_PIN_DRV_1BIT, 306, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE, 307,
			     UNIPHIER_PIN_DRV_1BIT, 307, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE, 308,
			     UNIPHIER_PIN_DRV_1BIT, 308, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE, 309,
			     UNIPHIER_PIN_DRV_1BIT, 309, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE, 310,
			     UNIPHIER_PIN_DRV_1BIT, 310, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE, 311,
			     UNIPHIER_PIN_DRV_1BIT, 311, UNIPHIER_PIN_PULL_UP),
	/* NOTE(review): -1 with UNIPHIER_PIN_DRV_NONE presumably means this
	 * pin has no drive-strength control register bit — confirm against
	 * the UNIPHIER_PINCTRL_PIN macro definition. */
	UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE, -1,
			     UNIPHIER_PIN_DRV_NONE, 312, UNIPHIER_PIN_PULL_DOWN),
};

/*
 * Pin groups: each <name>_pins[] lists the pins that make up one function
 * group, and the parallel <name>_muxvals[] holds the mux value applied to
 * the pin at the same index.  The two arrays of every pair below have the
 * same number of elements.
 */
static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
static const int emmc_dat8_muxvals[] = {1, 1, 1, 1};
static const unsigned ether_mii_pins[] = {160, 161, 162, 163, 164, 165,
					  166, 167, 168, 169, 170, 171,
					  172, 173, 174, 175, 176, 177,
					  178, 179};
static const int ether_mii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned ether_rgmii_pins[] = {160, 161, 162, 163, 164, 165,
					    167, 168, 169, 170, 171, 172,
					    176, 177, 178, 179};
static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					  0, 0, 0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {160, 161, 162, 165, 168, 169,
					   172, 173, 176, 177, 178, 179};
static const int ether_rmii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					 0};
static const unsigned ether_rmiib_pins[] = {161, 162, 165, 167, 168, 169,
					    172, 173, 176, 177, 178, 179};
static const int ether_rmiib_muxvals[] = {0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
					  0, 0};
static const unsigned i2c0_pins[] = {142, 143};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {144, 145};
static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {146, 147};
static const int i2c2_muxvals[] = {0, 0};
static const unsigned i2c3_pins[] = {148, 149};
static const int i2c3_muxvals[] = {0, 0};
static const unsigned i2c6_pins[] = {308, 309};
static const int i2c6_muxvals[] = {6, 6};
static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48,
				     49, 50, 51, 52, 53, 54};
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				   0, 0};
static const unsigned nand_cs1_pins[] = {131, 132};
static const int nand_cs1_muxvals[] = {1, 1};
static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157,
				   158};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
				    327};
static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned system_bus_pins[] = {25, 26, 27, 28, 29, 30, 31, 32,
					   33, 34, 35, 36, 37, 38};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					 0, 0, 0};
static const unsigned system_bus_cs0_pins[] = {318};
static const int system_bus_cs0_muxvals[] = {5};
static const unsigned system_bus_cs1_pins[] = {24};
static const int system_bus_cs1_muxvals[] = {0};
static const unsigned system_bus_cs2_pins[] = {315};
static const int system_bus_cs2_muxvals[] = {5};
static const unsigned system_bus_cs3_pins[] = {313};
static const int system_bus_cs3_muxvals[] = {5};
static const unsigned system_bus_cs4_pins[] = {305};
static const int system_bus_cs4_muxvals[] = {5};
static const unsigned system_bus_cs5_pins[] = {303};
static const int system_bus_cs5_muxvals[] = {6};
static const unsigned system_bus_cs6_pins[] = {307};
static const int system_bus_cs6_muxvals[] = {6};
static const unsigned system_bus_cs7_pins[] = {312};
static const int system_bus_cs7_muxvals[] = {6};
static const unsigned uart0_pins[] = {127, 128};
static const int uart0_muxvals[] = {0, 0};
static const unsigned uart1_pins[] = {129, 130};
static const int uart1_muxvals[] = {0, 0};
static const unsigned uart2_pins[] = {131, 132};
static const int uart2_muxvals[] = {0, 0};
static const unsigned uart3_pins[] = {88, 89};
static const int uart3_muxvals[] = {2, 2};
static const unsigned usb0_pins[] = {180, 181};
static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {182, 183};
static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {184, 185};
static const int usb2_muxvals[] = {0, 0};
static const unsigned usb3_pins[] = {186, 187};
static const int usb3_muxvals[] = {0, 0};
/* Port range 0: pin numbers in PORTxy order, eight ports per row */
static const unsigned port_range0_pins[] = {
	300, 301, 302, 303, 304, 305, 306, 307,		/* PORT0x */
	308, 309, 310, 311, 312, 313, 314, 315,		/* PORT1x */
	316, 317, 318, 16, 17, 18, 19, 20,		/* PORT2x */
	21, 22, 23, 4, 93, 94, 95, 63,			/* PORT3x */
	123, 122, 124, 125, 126, 141, 202, 203,		/* PORT4x */
	204, 226, 227, 290, 291, 233, 280, 281,		/* PORT5x */
	8, 7, 10, 29, 30, 48, 49, 50,			/* PORT6x */
	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT7x */
	54, 51, 52, 53, 127, 128, 129, 130,		/* PORT8x */
	131, 132, 57, 60, 134, 133, 135, 136,		/* PORT9x */
	138, 137, 140, 139, 64, 65, 66, 67,		/* PORT10x */
	107, 106, 105, 104, 113, 112, 111, 110,		/* PORT11x */
	68, 69, 70, 71, 72, 73, 74, 75,	/* PORT12x 
*/ 76, 77, 78, 79, 80, 81, 82, 83, /* PORT13x */ 84, 85, 86, 87, 88, 89, 90, 91, /* PORT14x */ }; static const int port_range0_muxvals[] = { 7, 7, 7, 7, 7, 7, 7, 7, /* PORT0x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT1x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT2x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT3x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT4x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT5x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT6x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT7x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT8x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT9x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT10x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT11x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT12x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT13x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT14x */ }; static const unsigned port_range1_pins[] = { 13, 14, 15, /* PORT175-177 */ 157, 158, 156, 154, 150, 151, 152, 153, /* PORT18x */ 326, 327, 325, 323, 319, 320, 321, 322, /* PORT19x */ 160, 161, 162, 163, 164, 165, 166, 167, /* PORT20x */ 168, 169, 170, 171, 172, 173, 174, 175, /* PORT21x */ 180, 181, 182, 183, 184, 185, 187, 188, /* PORT22x */ 193, 194, 195, 196, 197, 198, 199, 200, /* PORT23x */ 191, 192, 215, 216, 217, 218, 219, 220, /* PORT24x */ 222, 223, 224, 225, 228, 229, 230, 231, /* PORT25x */ 282, 283, 284, 285, 286, 287, 288, 289, /* PORT26x */ 292, 293, 294, 295, 296, 236, 237, 238, /* PORT27x */ 275, 276, 277, 278, 239, 240, 249, 250, /* PORT28x */ 251, 252, 261, 262, 263, 264, 273, 274, /* PORT29x */ 31, 32, 33, 34, 35, 36, 37, 38, /* PORT30x */ }; static const int port_range1_muxvals[] = { 7, 7, 7, /* PORT175-177 */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT18x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT19x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT20x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT21x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT22x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT23x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT24x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT25x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT26x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT27x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT28x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT29x 
*/ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT30x */ }; static const unsigned xirq_pins[] = { 11, 9, 12, 96, 97, 98, 108, 114, /* XIRQ0-7 */ 234, 186, 99, 100, 101, 102, 184, 301, /* XIRQ8-15 */ 302, 303, 304, 305, 306, /* XIRQ16-20 */ }; static const int xirq_muxvals[] = { 7, 7, 7, 7, 7, 7, 7, 7, /* XIRQ0-7 */ 7, 7, 7, 7, 7, 7, 2, 2, /* XIRQ8-15 */ 2, 2, 2, 2, 2, /* XIRQ16-20 */ }; static const unsigned xirq_alternatives_pins[] = { 184, 310, 316, }; static const int xirq_alternatives_muxvals[] = { 2, 2, 2, }; static const struct uniphier_pinctrl_group uniphier_pro4_groups[] = { UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), UNIPHIER_PINCTRL_GROUP(ether_mii), UNIPHIER_PINCTRL_GROUP(ether_rgmii), UNIPHIER_PINCTRL_GROUP(ether_rmii), UNIPHIER_PINCTRL_GROUP(ether_rmiib), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), UNIPHIER_PINCTRL_GROUP(i2c3), UNIPHIER_PINCTRL_GROUP(i2c6), UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), UNIPHIER_PINCTRL_GROUP(sd1), UNIPHIER_PINCTRL_GROUP(system_bus), UNIPHIER_PINCTRL_GROUP(system_bus_cs0), UNIPHIER_PINCTRL_GROUP(system_bus_cs1), UNIPHIER_PINCTRL_GROUP(system_bus_cs2), UNIPHIER_PINCTRL_GROUP(system_bus_cs3), UNIPHIER_PINCTRL_GROUP(system_bus_cs4), UNIPHIER_PINCTRL_GROUP(system_bus_cs5), UNIPHIER_PINCTRL_GROUP(system_bus_cs6), UNIPHIER_PINCTRL_GROUP(system_bus_cs7), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart1), UNIPHIER_PINCTRL_GROUP(uart2), UNIPHIER_PINCTRL_GROUP(uart3), UNIPHIER_PINCTRL_GROUP(usb0), UNIPHIER_PINCTRL_GROUP(usb1), UNIPHIER_PINCTRL_GROUP(usb2), UNIPHIER_PINCTRL_GROUP(usb3), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives), UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5), UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18), UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21), UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22), UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23), UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24), UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25), UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26), UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27), UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28), UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29), UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30), UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31), UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32), UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33), UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34), UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35), UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36), UNIPHIER_PINCTRL_GROUP_SINGLE(port45, 
port_range0, 37), UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38), UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39), UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40), UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41), UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42), UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43), UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44), UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45), UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46), UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47), UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48), UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49), UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50), UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51), UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52), UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53), UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54), UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55), UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56), UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57), UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58), UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59), UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60), UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61), UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62), UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63), UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64), UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65), UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66), UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67), UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68), UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69), UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70), UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71), UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73), UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74), UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75), UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76), UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77), UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78), UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79), UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80), UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81), UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82), UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83), UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84), UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85), UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86), UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87), UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88), UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89), UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90), UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91), UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92), UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93), UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94), UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95), UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96), UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97), UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98), UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99), UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100), UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101), UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102), UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103), UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104), UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105), UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106), UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108), UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109), UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110), UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111), UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112), UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113), UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114), UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115), UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116), UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117), UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118), UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119), UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5), UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18), UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21), UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23), UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24), UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25), UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26), UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27), UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28), UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29), UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30), UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31), UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32), UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33), UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34), UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35), UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36), UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37), UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38), UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39), UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40), UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41), UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42), UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43), UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44), UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45), UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46), UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47), UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48), UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49), UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50), UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51), UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52), UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53), UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54), UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55), UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56), UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58), UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59), UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60), UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61), UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62), UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63), UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64), UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65), UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66), UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67), UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68), UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69), UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70), UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71), UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72), UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73), UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74), UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75), UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76), UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77), UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78), UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79), UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80), UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81), UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82), UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83), UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84), UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85), UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86), UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87), UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88), UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89), UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90), UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91), UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93), UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94), UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95), UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96), UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97), UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98), UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99), UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100), UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101), UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102), UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103), UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104), UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105), UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 2), 
};

/* Group lists selectable by each pinmux function below. */
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
static const char * const ether_mii_groups[] = {"ether_mii"};
static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
/*
 * Fix: the RMII function must reference the RMII pin groups.  The original
 * listed "ether_rgmii" and "ether_rgmiib", but "ether_rgmiib" is not a
 * defined group (only "ether_rmii" and "ether_rmiib" exist in the groups
 * table), so selecting the ether_rmii function could never mux the RMII pins.
 */
static const char * const ether_rmii_groups[] = {"ether_rmii", "ether_rmiib"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
static const char * const sd1_groups[] = {"sd1"};
static const char * const system_bus_groups[] = {"system_bus",
						 "system_bus_cs0",
						 "system_bus_cs1",
						 "system_bus_cs2",
						 "system_bus_cs3",
						 "system_bus_cs4",
						 "system_bus_cs5",
						 "system_bus_cs6",
						 "system_bus_cs7"};
static const char * const uart0_groups[] = {"uart0"};
static const char * const uart1_groups[] = {"uart1"};
static const char * const uart2_groups[] = {"uart2"};
static const char * const uart3_groups[] = {"uart3"};
static const char * const usb0_groups[] = {"usb0"};
static const char * const usb1_groups[] = {"usb1"};
static const char * const usb2_groups[] = {"usb2"};
static const char * const usb3_groups[] = {"usb3"};
/* One single-pin GPIO group per port; port150-174 do not exist. */
static const char * const port_groups[] = {
	"port00", "port01", "port02", "port03",
	"port04", "port05", "port06", "port07",
	"port10", "port11", "port12", "port13",
	"port14", "port15", "port16", "port17",
	"port20", "port21", "port22", "port23",
	"port24", "port25", "port26", "port27",
	"port30", "port31", "port32", "port33",
	"port34", "port35", "port36", "port37",
	"port40", "port41", "port42", "port43",
	"port44", "port45", "port46", "port47",
	"port50", "port51", "port52", "port53",
	"port54", "port55", "port56", "port57",
	"port60", "port61", "port62", "port63",
	"port64", "port65", "port66", "port67",
	"port70", "port71", "port72", "port73",
	"port74", "port75",
"port76", "port77", "port80", "port81", "port82", "port83", "port84", "port85", "port86", "port87", "port90", "port91", "port92", "port93", "port94", "port95", "port96", "port97", "port100", "port101", "port102", "port103", "port104", "port105", "port106", "port107", "port110", "port111", "port112", "port113", "port114", "port115", "port116", "port117", "port120", "port121", "port122", "port123", "port124", "port125", "port126", "port127", "port130", "port131", "port132", "port133", "port134", "port135", "port136", "port137", "port140", "port141", "port142", "port143", "port144", "port145", "port146", "port147", /* port150-174 missing */ /* none */ "port175", "port176", "port177", "port180", "port181", "port182", "port183", "port184", "port185", "port186", "port187", "port190", "port191", "port192", "port193", "port194", "port195", "port196", "port197", "port200", "port201", "port202", "port203", "port204", "port205", "port206", "port207", "port210", "port211", "port212", "port213", "port214", "port215", "port216", "port217", "port220", "port221", "port222", "port223", "port224", "port225", "port226", "port227", "port230", "port231", "port232", "port233", "port234", "port235", "port236", "port237", "port240", "port241", "port242", "port243", "port244", "port245", "port246", "port247", "port250", "port251", "port252", "port253", "port254", "port255", "port256", "port257", "port260", "port261", "port262", "port263", "port264", "port265", "port266", "port267", "port270", "port271", "port272", "port273", "port274", "port275", "port276", "port277", "port280", "port281", "port282", "port283", "port284", "port285", "port286", "port287", "port290", "port291", "port292", "port293", "port294", "port295", "port296", "port297", "port300", "port301", "port302", "port303", "port304", "port305", "port306", "port307", }; static const char * const xirq_groups[] = { "xirq0", "xirq1", "xirq2", "xirq3", "xirq4", "xirq5", "xirq6", "xirq7", "xirq8", "xirq9", "xirq10", "xirq11", 
"xirq12", "xirq13", "xirq14", "xirq15",
	"xirq16", "xirq17", "xirq18", "xirq19",
	"xirq20",
	"xirq14b", "xirq17b", "xirq18b",
};

/*
 * Pinmux functions: each entry pairs a function name with the
 * <name>_groups[] list defined above (via token pasting in the macro).
 */
static const struct uniphier_pinmux_function uniphier_pro4_functions[] = {
	UNIPHIER_PINMUX_FUNCTION(emmc),
	UNIPHIER_PINMUX_FUNCTION(ether_mii),
	UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
	UNIPHIER_PINMUX_FUNCTION(ether_rmii),
	UNIPHIER_PINMUX_FUNCTION(i2c0),
	UNIPHIER_PINMUX_FUNCTION(i2c1),
	UNIPHIER_PINMUX_FUNCTION(i2c2),
	UNIPHIER_PINMUX_FUNCTION(i2c3),
	UNIPHIER_PINMUX_FUNCTION(i2c6),
	UNIPHIER_PINMUX_FUNCTION(nand),
	UNIPHIER_PINMUX_FUNCTION(sd),
	UNIPHIER_PINMUX_FUNCTION(sd1),
	UNIPHIER_PINMUX_FUNCTION(system_bus),
	UNIPHIER_PINMUX_FUNCTION(uart0),
	UNIPHIER_PINMUX_FUNCTION(uart1),
	UNIPHIER_PINMUX_FUNCTION(uart2),
	UNIPHIER_PINMUX_FUNCTION(uart3),
	UNIPHIER_PINMUX_FUNCTION(usb0),
	UNIPHIER_PINMUX_FUNCTION(usb1),
	UNIPHIER_PINMUX_FUNCTION(usb2),
	UNIPHIER_PINMUX_FUNCTION(usb3),
	UNIPHIER_PINMUX_FUNCTION(port),
	UNIPHIER_PINMUX_FUNCTION(xirq),
};

/*
 * SoC-specific data handed to the common UniPhier pinctrl core: the full
 * pin table, the group table, the function table, and the capability flag
 * selecting the debug-mux register layout used by this SoC.
 */
static struct uniphier_pinctrl_socdata uniphier_pro4_pindata = {
	.pins = uniphier_pro4_pins,
	.npins = ARRAY_SIZE(uniphier_pro4_pins),
	.groups = uniphier_pro4_groups,
	.groups_count = ARRAY_SIZE(uniphier_pro4_groups),
	.functions = uniphier_pro4_functions,
	.functions_count = ARRAY_SIZE(uniphier_pro4_functions),
	.caps = UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE,
};

/* Probe: delegate to the shared UniPhier pinctrl core with Pro4 data. */
static int uniphier_pro4_pinctrl_probe(struct platform_device *pdev)
{
	return uniphier_pinctrl_probe(pdev, &uniphier_pro4_pindata);
}

/* Both the current and the legacy (ph1-*) compatible strings are accepted. */
static const struct of_device_id uniphier_pro4_pinctrl_match[] = {
	{ .compatible = "socionext,uniphier-pro4-pinctrl" },
	{ .compatible = "socionext,ph1-pro4-pinctrl" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_pro4_pinctrl_match);

static struct platform_driver uniphier_pro4_pinctrl_driver = {
	.probe = uniphier_pro4_pinctrl_probe,
	.driver = {
		.name = "uniphier-pro4-pinctrl",
		.of_match_table = uniphier_pro4_pinctrl_match,
	},
};
module_platform_driver(uniphier_pro4_pinctrl_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
MODULE_LICENSE("GPL");
gpl-2.0
Evervolv/android_kernel_lge_mako
arch/arm/kernel/smp.c
189
15123
/* * linux/arch/arm/kernel/smp.c * * Copyright (C) 2002 ARM Limited, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/profile.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/seq_file.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/clockchips.h> #include <linux/completion.h> #include <linux/atomic.h> #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/exception.h> #include <asm/idmap.h> #include <asm/topology.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> #include <asm/localtimer.h> #include <asm/smp_plat.h> /* * as from 2.5, kernels no longer have an init_tasks structure * so we need some other way of telling a new secondary core * where to place its SVC stack */ struct secondary_data secondary_data; enum ipi_msg_type { IPI_CPU_START = 1, IPI_TIMER = 2, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, IPI_CPU_BACKTRACE, }; static DECLARE_COMPLETION(cpu_running); int __cpuinit __cpu_up(unsigned int cpu) { struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); struct task_struct *idle = ci->idle; int ret; /* * Spawn a new process manually, if not already done. 
* Grab a pointer to its task struct so we can mess with it */ if (!idle) { idle = fork_idle(cpu); if (IS_ERR(idle)) { printk(KERN_ERR "CPU%u: fork() failed\n", cpu); return PTR_ERR(idle); } ci->idle = idle; } else { /* * Since this idle thread is being re-used, call * init_idle() to reinitialize the thread structure. */ init_idle(idle, cpu); } /* * We need to tell the secondary core where to find * its stack and the page tables. */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); /* * Now bring the CPU into our world. */ ret = boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it * to come online or time out. */ wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000)); if (!cpu_online(cpu)) { pr_crit("CPU%u: failed to come online\n", cpu); ret = -EIO; } } else { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); } secondary_data.stack = NULL; secondary_data.pgdir = 0; return ret; } #ifdef CONFIG_HOTPLUG_CPU static void percpu_timer_stop(void); /* * __cpu_disable runs on the processor to be shutdown. */ int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); struct task_struct *p; int ret; ret = platform_cpu_disable(cpu); if (ret) return ret; /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); /* * OK - migrate IRQs away from this CPU */ migrate_irqs(); /* * Stop the local timer for this CPU. */ percpu_timer_stop(); /* * Flush user cache and TLB mappings, and then remove this CPU * from the vm mask set of all processes. 
*/ flush_cache_all(); local_flush_tlb_all(); read_lock(&tasklist_lock); for_each_process(p) { if (p->mm) cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); } read_unlock(&tasklist_lock); return 0; } static DECLARE_COMPLETION(cpu_died); /* * called on the thread which is asking for a CPU to be shutdown - * waits until shutdown has completed, or it is timed out. */ void __cpu_die(unsigned int cpu) { if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { pr_err("CPU%u: cpu didn't die\n", cpu); return; } pr_debug("CPU%u: shutdown\n", cpu); if (!platform_cpu_kill(cpu)) printk("CPU%u: unable to kill\n", cpu); } /* * Called from the idle thread for the CPU which has been shutdown. * * Note that we disable IRQs here, but do not re-enable them * before returning to the caller. This is also the behaviour * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ void __ref cpu_die(void) { unsigned int cpu = smp_processor_id(); idle_task_exit(); local_irq_disable(); mb(); /* Tell __cpu_die() that this CPU is now safe to dispose of */ RCU_NONIDLE(complete(&cpu_died)); /* * actual CPU shutdown procedure is at least platform (if not * CPU) specific. */ platform_cpu_die(cpu); /* * Do not return to the idle loop - jump back to the secondary * cpu initialisation. There's some initialisation which needs * to be repeated to undo the effects of taking the CPU offline. */ __asm__("mov sp, %0\n" " mov fp, #0\n" " b secondary_start_kernel" : : "r" (task_stack_page(current) + THREAD_SIZE - 8)); } #endif /* CONFIG_HOTPLUG_CPU */ /* * Called by both boot and secondaries to move global data into * per-processor storage. */ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) { struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); cpu_info->loops_per_jiffy = loops_per_jiffy; store_cpu_topology(cpuid); } /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. 
*/ asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_switch_mm(mm->pgd, mm); enter_lazy_tlb(mm, current); local_flush_tlb_all(); pr_debug("CPU%u: Booted secondary processor\n", cpu); cpu_init(); preempt_disable(); trace_hardirqs_off(); /* * Give the platform a chance to do its own initialisation. */ platform_secondary_init(cpu); notify_cpu_starting(cpu); calibrate_delay(); smp_store_cpu_info(cpu); /* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue - which happens after __cpu_up returns. */ set_cpu_online(cpu, true); complete(&cpu_running); /* * Setup the percpu timer for this CPU. */ percpu_timer_setup(); local_irq_enable(); local_fiq_enable(); /* * OK, it's off to the idle thread for us */ cpu_idle(); } void __init smp_cpus_done(unsigned int max_cpus) { int cpu; unsigned long bogosum = 0; for_each_online_cpu(cpu) bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; printk(KERN_INFO "SMP: Total of %d processors activated " "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), bogosum / (500000/HZ), (bogosum / (5000/HZ)) % 100); } void __init smp_prepare_boot_cpu(void) { unsigned int cpu = smp_processor_id(); per_cpu(cpu_data, cpu).idle = current; } void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = num_possible_cpus(); init_cpu_topology(); smp_store_cpu_info(smp_processor_id()); /* * are we trying to boot more cores than exist? */ if (max_cpus > ncores) max_cpus = ncores; if (ncores > 1 && max_cpus) { /* * Enable the local timer or broadcast device for the * boot CPU, but only if we have more than one CPU. 
*/ percpu_timer_setup(); /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should * re-initialize the map in platform_smp_prepare_cpus() if * present != possible (e.g. physical hotplug). */ init_cpu_present(cpu_possible_mask); /* * Initialise the SCU if there are more than one CPU * and let them know where to start. */ platform_smp_prepare_cpus(max_cpus); } } static void (*smp_cross_call)(const struct cpumask *, unsigned int); void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) { smp_cross_call = fn; } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_cross_call(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); } static const char *ipi_types[NR_IPI] = { #define S(x,s) [x - IPI_CPU_START] = s S(IPI_CPU_START, "CPU start interrupts"), S(IPI_TIMER, "Timer broadcast interrupts"), S(IPI_RESCHEDULE, "Rescheduling interrupts"), S(IPI_CALL_FUNC, "Function call interrupts"), S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), S(IPI_CPU_STOP, "CPU stop interrupts"), S(IPI_CPU_BACKTRACE, "CPU backtrace"), }; void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_present_cpu(cpu) seq_printf(p, "%10u ", __get_irq_stat(cpu, ipi_irqs[i])); seq_printf(p, " %s\n", ipi_types[i]); } } u64 smp_irq_stat_cpu(unsigned int cpu) { u64 sum = 0; int i; for (i = 0; i < NR_IPI; i++) sum += __get_irq_stat(cpu, ipi_irqs[i]); return sum; } /* * Timer (local or broadcast) support */ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); static void ipi_timer(void) { struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); evt->event_handler(evt); } #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static void smp_timer_broadcast(const struct cpumask *mask) { 
smp_cross_call(mask, IPI_TIMER); } #else #define smp_timer_broadcast NULL #endif static void broadcast_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { } static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) { evt->name = "dummy_timer"; evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY; evt->rating = 400; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; clockevents_register_device(evt); } static struct local_timer_ops *lt_ops; #ifdef CONFIG_LOCAL_TIMERS int local_timer_register(struct local_timer_ops *ops) { if (lt_ops) return -EBUSY; lt_ops = ops; return 0; } #endif void __cpuinit percpu_timer_setup(void) { unsigned int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); evt->broadcast = smp_timer_broadcast; if (!lt_ops || lt_ops->setup(evt)) broadcast_timer_setup(evt); } #ifdef CONFIG_HOTPLUG_CPU /* * The generic clock events code purposely does not stop the local timer * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it * manually here. 
*/ static void percpu_timer_stop(void) { unsigned int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); if (lt_ops) lt_ops->stop(evt); } #endif static DEFINE_RAW_SPINLOCK(stop_lock); /* * ipi_cpu_stop - handle IPI from smp_send_stop() */ static void ipi_cpu_stop(unsigned int cpu) { if (system_state == SYSTEM_BOOTING || system_state == SYSTEM_RUNNING) { raw_spin_lock(&stop_lock); printk(KERN_CRIT "CPU%u: stopping\n", cpu); dump_stack(); raw_spin_unlock(&stop_lock); } set_cpu_active(cpu, false); local_fiq_disable(); local_irq_disable(); while (1) cpu_relax(); } static cpumask_t backtrace_mask; static DEFINE_RAW_SPINLOCK(backtrace_lock); /* "in progress" flag of arch_trigger_all_cpu_backtrace */ static unsigned long backtrace_flag; void smp_send_all_cpu_backtrace(void) { unsigned int this_cpu = smp_processor_id(); int i; if (test_and_set_bit(0, &backtrace_flag)) /* * If there is already a trigger_all_cpu_backtrace() in progress * (backtrace_flag == 1), don't output double cpu dump infos. 
*/ return; cpumask_copy(&backtrace_mask, cpu_online_mask); cpu_clear(this_cpu, backtrace_mask); pr_info("Backtrace for cpu %d (current):\n", this_cpu); dump_stack(); pr_info("\nsending IPI to all other CPUs:\n"); if (!cpus_empty(backtrace_mask)) smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE); /* Wait for up to 10 seconds for all other CPUs to do the backtrace */ for (i = 0; i < 10 * 1000; i++) { if (cpumask_empty(&backtrace_mask)) break; mdelay(1); } clear_bit(0, &backtrace_flag); smp_mb__after_clear_bit(); } /* * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace() */ static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs) { if (cpu_isset(cpu, backtrace_mask)) { raw_spin_lock(&backtrace_lock); pr_warning("IPI backtrace for cpu %d\n", cpu); show_regs(regs); raw_spin_unlock(&backtrace_lock); cpu_clear(cpu, backtrace_mask); } } /* * Main handler for inter-processor interrupts */ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) { handle_IPI(ipinr, regs); } void handle_IPI(int ipinr, struct pt_regs *regs) { unsigned int cpu = smp_processor_id(); struct pt_regs *old_regs = set_irq_regs(regs); if (ipinr >= IPI_CPU_START && ipinr < IPI_CPU_START + NR_IPI) __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_CPU_START]); switch (ipinr) { case IPI_CPU_START: /* Wake up from WFI/WFE using SGI */ break; case IPI_TIMER: irq_enter(); ipi_timer(); irq_exit(); break; case IPI_RESCHEDULE: scheduler_ipi(); break; case IPI_CALL_FUNC: irq_enter(); generic_smp_call_function_interrupt(); irq_exit(); break; case IPI_CALL_FUNC_SINGLE: irq_enter(); generic_smp_call_function_single_interrupt(); irq_exit(); break; case IPI_CPU_STOP: irq_enter(); ipi_cpu_stop(cpu); irq_exit(); break; case IPI_CPU_BACKTRACE: ipi_cpu_backtrace(cpu, regs); break; default: printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; } set_irq_regs(old_regs); } void smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } #ifdef 
CONFIG_HOTPLUG_CPU static void smp_kill_cpus(cpumask_t *mask) { unsigned int cpu; for_each_cpu(cpu, mask) platform_cpu_kill(cpu); } #else static void smp_kill_cpus(cpumask_t *mask) { } #endif void smp_send_stop(void) { unsigned long timeout; struct cpumask mask; cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); if (!cpumask_empty(&mask)) smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; while (num_active_cpus() > 1 && timeout--) udelay(1); if (num_active_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); smp_kill_cpus(&mask); } /* * not supported here */ int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; }
gpl-2.0
hayashikejinan/active_Th_kernel
Documentation/vm/page-types.c
445
20684
/* * page-types: Tool for querying page flags * * Copyright (C) 2009 Intel corporation * * Authors: Wu Fengguang <fengguang.wu@intel.com> * * Released under the General Public License (GPL). */ #define _LARGEFILE64_SOURCE #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <stdarg.h> #include <string.h> #include <getopt.h> #include <limits.h> #include <assert.h> #include <sys/types.h> #include <sys/errno.h> #include <sys/fcntl.h> /* * pagemap kernel ABI bits */ #define PM_ENTRY_BYTES sizeof(uint64_t) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) #define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) /* * kernel page flags */ #define KPF_BYTES 8 #define PROC_KPAGEFLAGS "/proc/kpageflags" /* copied from kpageflags_read() */ #define KPF_LOCKED 0 #define KPF_ERROR 1 #define KPF_REFERENCED 2 #define KPF_UPTODATE 3 #define KPF_DIRTY 4 #define KPF_LRU 5 #define KPF_ACTIVE 6 #define KPF_SLAB 7 #define KPF_WRITEBACK 8 #define KPF_RECLAIM 9 #define KPF_BUDDY 10 /* [11-20] new additions in 2.6.31 */ #define KPF_MMAP 11 #define KPF_ANON 12 #define KPF_SWAPCACHE 13 #define KPF_SWAPBACKED 14 #define KPF_COMPOUND_HEAD 15 #define KPF_COMPOUND_TAIL 16 #define KPF_HUGE 17 #define KPF_UNEVICTABLE 18 #define KPF_HWPOISON 19 #define KPF_NOPAGE 20 #define KPF_KSM 21 /* [32-] kernel hacking assistances */ #define KPF_RESERVED 32 #define KPF_MLOCKED 33 #define KPF_MAPPEDTODISK 34 #define KPF_PRIVATE 35 #define KPF_PRIVATE_2 36 #define KPF_OWNER_PRIVATE 
37 #define KPF_ARCH 38 #define KPF_UNCACHED 39 /* [48-] take some arbitrary free slots for expanding overloaded flags * not part of kernel API */ #define KPF_READAHEAD 48 #define KPF_SLOB_FREE 49 #define KPF_SLUB_FROZEN 50 #define KPF_SLUB_DEBUG 51 #define KPF_ALL_BITS ((uint64_t)~0ULL) #define KPF_HACKERS_BITS (0xffffULL << 32) #define KPF_OVERLOADED_BITS (0xffffULL << 48) #define BIT(name) (1ULL << KPF_##name) #define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL)) static char *page_flag_names[] = { [KPF_LOCKED] = "L:locked", [KPF_ERROR] = "E:error", [KPF_REFERENCED] = "R:referenced", [KPF_UPTODATE] = "U:uptodate", [KPF_DIRTY] = "D:dirty", [KPF_LRU] = "l:lru", [KPF_ACTIVE] = "A:active", [KPF_SLAB] = "S:slab", [KPF_WRITEBACK] = "W:writeback", [KPF_RECLAIM] = "I:reclaim", [KPF_BUDDY] = "B:buddy", [KPF_MMAP] = "M:mmap", [KPF_ANON] = "a:anonymous", [KPF_SWAPCACHE] = "s:swapcache", [KPF_SWAPBACKED] = "b:swapbacked", [KPF_COMPOUND_HEAD] = "H:compound_head", [KPF_COMPOUND_TAIL] = "T:compound_tail", [KPF_HUGE] = "G:huge", [KPF_UNEVICTABLE] = "u:unevictable", [KPF_HWPOISON] = "X:hwpoison", [KPF_NOPAGE] = "n:nopage", [KPF_KSM] = "x:ksm", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", [KPF_MAPPEDTODISK] = "d:mappedtodisk", [KPF_PRIVATE] = "P:private", [KPF_PRIVATE_2] = "p:private_2", [KPF_OWNER_PRIVATE] = "O:owner_private", [KPF_ARCH] = "h:arch", [KPF_UNCACHED] = "c:uncached", [KPF_READAHEAD] = "I:readahead", [KPF_SLOB_FREE] = "P:slob_free", [KPF_SLUB_FROZEN] = "A:slub_frozen", [KPF_SLUB_DEBUG] = "E:slub_debug", }; /* * data structures */ static int opt_raw; /* for kernel developers */ static int opt_list; /* list pages (in ranges) */ static int opt_no_summary; /* don't show summary */ static pid_t opt_pid; /* process to walk */ #define MAX_ADDR_RANGES 1024 static int nr_addr_ranges; static unsigned long opt_offset[MAX_ADDR_RANGES]; static unsigned long opt_size[MAX_ADDR_RANGES]; #define MAX_VMAS 10240 static int nr_vmas; static unsigned long 
pg_start[MAX_VMAS]; static unsigned long pg_end[MAX_VMAS]; #define MAX_BIT_FILTERS 64 static int nr_bit_filters; static uint64_t opt_mask[MAX_BIT_FILTERS]; static uint64_t opt_bits[MAX_BIT_FILTERS]; static int page_size; static int pagemap_fd; static int kpageflags_fd; static int opt_hwpoison; static int opt_unpoison; static char *hwpoison_debug_fs = "/debug/hwpoison"; static int hwpoison_inject_fd; static int hwpoison_forget_fd; #define HASH_SHIFT 13 #define HASH_SIZE (1 << HASH_SHIFT) #define HASH_MASK (HASH_SIZE - 1) #define HASH_KEY(flags) (flags & HASH_MASK) static unsigned long total_pages; static unsigned long nr_pages[HASH_SIZE]; static uint64_t page_flags[HASH_SIZE]; /* * helper functions */ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1 : __max2; }) static unsigned long pages2mb(unsigned long pages) { return (pages * page_size) >> 20; } static void fatal(const char *x, ...) 
{ va_list ap; va_start(ap, x); vfprintf(stderr, x, ap); va_end(ap); exit(EXIT_FAILURE); } static int checked_open(const char *pathname, int flags) { int fd = open(pathname, flags); if (fd < 0) { perror(pathname); exit(EXIT_FAILURE); } return fd; } /* * pagemap/kpageflags routines */ static unsigned long do_u64_read(int fd, char *name, uint64_t *buf, unsigned long index, unsigned long count) { long bytes; if (index > ULONG_MAX / 8) fatal("index overflow: %lu\n", index); if (lseek(fd, index * 8, SEEK_SET) < 0) { perror(name); exit(EXIT_FAILURE); } bytes = read(fd, buf, count * 8); if (bytes < 0) { perror(name); exit(EXIT_FAILURE); } if (bytes % 8) fatal("partial read: %lu bytes\n", bytes); return bytes / 8; } static unsigned long kpageflags_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); } static unsigned long pagemap_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); } static unsigned long pagemap_pfn(uint64_t val) { unsigned long pfn; if (val & PM_PRESENT) pfn = PM_PFRAME(val); else pfn = 0; return pfn; } /* * page flag names */ static char *page_flag_name(uint64_t flags) { static char buf[65]; int present; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { present = (flags >> i) & 1; if (!page_flag_names[i]) { if (present) fatal("unkown flag bit %d\n", i); continue; } buf[j++] = present ? 
page_flag_names[i][0] : '_'; } return buf; } static char *page_flag_longname(uint64_t flags) { static char buf[1024]; int i, n; for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if ((flags >> i) & 1) n += snprintf(buf + n, sizeof(buf) - n, "%s,", page_flag_names[i] + 2); } if (n) n--; buf[n] = '\0'; return buf; } /* * page list and summary */ static void show_page_range(unsigned long voffset, unsigned long offset, uint64_t flags) { static uint64_t flags0; static unsigned long voff; static unsigned long index; static unsigned long count; if (flags == flags0 && offset == index + count && (!opt_pid || voffset == voff + count)) { count++; return; } if (count) { if (opt_pid) printf("%lx\t", voff); printf("%lx\t%lx\t%s\n", index, count, page_flag_name(flags0)); } flags0 = flags; index = offset; voff = voffset; count = 1; } static void show_page(unsigned long voffset, unsigned long offset, uint64_t flags) { if (opt_pid) printf("%lx\t", voffset); printf("%lx\t%s\n", offset, page_flag_name(flags)); } static void show_summary(void) { int i; printf(" flags\tpage-count MB" " symbolic-flags\t\t\tlong-symbolic-flags\n"); for (i = 0; i < ARRAY_SIZE(nr_pages); i++) { if (nr_pages[i]) printf("0x%016llx\t%10lu %8lu %s\t%s\n", (unsigned long long)page_flags[i], nr_pages[i], pages2mb(nr_pages[i]), page_flag_name(page_flags[i]), page_flag_longname(page_flags[i])); } printf(" total\t%10lu %8lu\n", total_pages, pages2mb(total_pages)); } /* * page flag filters */ static int bit_mask_ok(uint64_t flags) { int i; for (i = 0; i < nr_bit_filters; i++) { if (opt_bits[i] == KPF_ALL_BITS) { if ((flags & opt_mask[i]) == 0) return 0; } else { if ((flags & opt_mask[i]) != opt_bits[i]) return 0; } } return 1; } static uint64_t expand_overloaded_flags(uint64_t flags) { /* SLOB/SLUB overload several page flags */ if (flags & BIT(SLAB)) { if (flags & BIT(PRIVATE)) flags ^= BIT(PRIVATE) | BIT(SLOB_FREE); if (flags & BIT(ACTIVE)) flags ^= BIT(ACTIVE) | 
BIT(SLUB_FROZEN); if (flags & BIT(ERROR)) flags ^= BIT(ERROR) | BIT(SLUB_DEBUG); } /* PG_reclaim is overloaded as PG_readahead in the read path */ if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM)) flags ^= BIT(RECLAIM) | BIT(READAHEAD); return flags; } static uint64_t well_known_flags(uint64_t flags) { /* hide flags intended only for kernel hacker */ flags &= ~KPF_HACKERS_BITS; /* hide non-hugeTLB compound pages */ if ((flags & BITS_COMPOUND) && !(flags & BIT(HUGE))) flags &= ~BITS_COMPOUND; return flags; } static uint64_t kpageflags_flags(uint64_t flags) { flags = expand_overloaded_flags(flags); if (!opt_raw) flags = well_known_flags(flags); return flags; } /* * page actions */ static void prepare_hwpoison_fd(void) { char buf[100]; if (opt_hwpoison && !hwpoison_inject_fd) { sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs); hwpoison_inject_fd = checked_open(buf, O_WRONLY); } if (opt_unpoison && !hwpoison_forget_fd) { sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); hwpoison_forget_fd = checked_open(buf, O_WRONLY); } } static int hwpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_inject_fd, buf, len); if (len < 0) { perror("hwpoison inject"); return len; } return 0; } static int unpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_forget_fd, buf, len); if (len < 0) { perror("hwpoison forget"); return len; } return 0; } /* * page frame walker */ static int hash_slot(uint64_t flags) { int k = HASH_KEY(flags); int i; /* Explicitly reserve slot 0 for flags 0: the following logic * cannot distinguish an unoccupied slot from slot (flags==0). 
*/ if (flags == 0) return 0; /* search through the remaining (HASH_SIZE-1) slots */ for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) { if (!k || k >= ARRAY_SIZE(page_flags)) k = 1; if (page_flags[k] == 0) { page_flags[k] = flags; return k; } if (page_flags[k] == flags) return k; } fatal("hash table full: bump up HASH_SHIFT?\n"); exit(EXIT_FAILURE); } static void add_page(unsigned long voffset, unsigned long offset, uint64_t flags) { flags = kpageflags_flags(flags); if (!bit_mask_ok(flags)) return; if (opt_hwpoison) hwpoison_page(offset); if (opt_unpoison) unpoison_page(offset); if (opt_list == 1) show_page_range(voffset, offset, flags); else if (opt_list == 2) show_page(voffset, offset, flags); nr_pages[hash_slot(flags)]++; total_pages++; } #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ static void walk_pfn(unsigned long voffset, unsigned long index, unsigned long count) { uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; unsigned long pages; unsigned long i; while (count) { batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); pages = kpageflags_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) add_page(voffset + i, index + i, buf[i]); index += pages; count -= pages; } } #define PAGEMAP_BATCH (64 << 10) static void walk_vma(unsigned long index, unsigned long count) { uint64_t buf[PAGEMAP_BATCH]; unsigned long batch; unsigned long pages; unsigned long pfn; unsigned long i; while (count) { batch = min_t(unsigned long, count, PAGEMAP_BATCH); pages = pagemap_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) { pfn = pagemap_pfn(buf[i]); if (pfn) walk_pfn(index + i, pfn, 1); } index += pages; count -= pages; } } static void walk_task(unsigned long index, unsigned long count) { const unsigned long end = index + count; unsigned long start; int i = 0; while (index < end) { while (pg_end[i] <= index) if (++i >= nr_vmas) return; if (pg_start[i] >= end) return; start = max_t(unsigned long, pg_start[i], index); 
index = min_t(unsigned long, pg_end[i], end); assert(start < index); walk_vma(start, index - start); } } static void add_addr_range(unsigned long offset, unsigned long size) { if (nr_addr_ranges >= MAX_ADDR_RANGES) fatal("too many addr ranges\n"); opt_offset[nr_addr_ranges] = offset; opt_size[nr_addr_ranges] = min_t(unsigned long, size, ULONG_MAX-offset); nr_addr_ranges++; } static void walk_addr_ranges(void) { int i; kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); if (!nr_addr_ranges) add_addr_range(0, ULONG_MAX); for (i = 0; i < nr_addr_ranges; i++) if (!opt_pid) walk_pfn(0, opt_offset[i], opt_size[i]); else walk_task(opt_offset[i], opt_size[i]); close(kpageflags_fd); } /* * user interface */ static const char *page_flag_type(uint64_t flag) { if (flag & KPF_HACKERS_BITS) return "(r)"; if (flag & KPF_OVERLOADED_BITS) return "(o)"; return " "; } static void usage(void) { int i, j; printf( "page-types [options]\n" " -r|--raw Raw mode, for kernel developers\n" " -a|--addr addr-spec Walk a range of pages\n" " -b|--bits bits-spec Walk pages with specified bits\n" " -p|--pid pid Walk process address space\n" #if 0 /* planned features */ " -f|--file filename Walk file address space\n" #endif " -l|--list Show page details in ranges\n" " -L|--list-each Show page details one by one\n" " -N|--no-summary Don't show summay info\n" " -X|--hwpoison hwpoison pages\n" " -x|--unpoison unpoison pages\n" " -h|--help Show this usage message\n" "addr-spec:\n" " N one page at offset N (unit: pages)\n" " N+M pages range from N to N+M-1\n" " N,M pages range from N to M-1\n" " N, pages range from N to end\n" " ,M pages range from 0 to M-1\n" "bits-spec:\n" " bit1,bit2 (flags & (bit1|bit2)) != 0\n" " bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" " bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" " =bit1,bit2 flags == (bit1|bit2)\n" "bit-names:\n" ); for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; printf("%16s%s", page_flag_names[i] + 2, 
page_flag_type(1ULL << i)); if (++j > 3) { j = 0; putchar('\n'); } } printf("\n " "(r) raw mode bits (o) overloaded bits\n"); } static unsigned long long parse_number(const char *str) { unsigned long long n; n = strtoll(str, NULL, 0); if (n == 0 && str[0] != '0') fatal("invalid name or number: %s\n", str); return n; } static void parse_pid(const char *str) { FILE *file; char buf[5000]; opt_pid = parse_number(str); sprintf(buf, "/proc/%d/pagemap", opt_pid); pagemap_fd = checked_open(buf, O_RDONLY); sprintf(buf, "/proc/%d/maps", opt_pid); file = fopen(buf, "r"); if (!file) { perror(buf); exit(EXIT_FAILURE); } while (fgets(buf, sizeof(buf), file) != NULL) { unsigned long vm_start; unsigned long vm_end; unsigned long long pgoff; int major, minor; char r, w, x, s; unsigned long ino; int n; n = sscanf(buf, "%lx-%lx %c%c%c%c %llx %x:%x %lu", &vm_start, &vm_end, &r, &w, &x, &s, &pgoff, &major, &minor, &ino); if (n < 10) { fprintf(stderr, "unexpected line: %s\n", buf); continue; } pg_start[nr_vmas] = vm_start / page_size; pg_end[nr_vmas] = vm_end / page_size; if (++nr_vmas >= MAX_VMAS) { fprintf(stderr, "too many VMAs\n"); break; } } fclose(file); } static void parse_file(const char *name) { } static void parse_addr_range(const char *optarg) { unsigned long offset; unsigned long size; char *p; p = strchr(optarg, ','); if (!p) p = strchr(optarg, '+'); if (p == optarg) { offset = 0; size = parse_number(p + 1); } else if (p) { offset = parse_number(optarg); if (p[1] == '\0') size = ULONG_MAX; else { size = parse_number(p + 1); if (*p == ',') { if (size < offset) fatal("invalid range: %lu,%lu\n", offset, size); size -= offset; } } } else { offset = parse_number(optarg); size = 1; } add_addr_range(offset, size); } static void add_bits_filter(uint64_t mask, uint64_t bits) { if (nr_bit_filters >= MAX_BIT_FILTERS) fatal("too much bit filters\n"); opt_mask[nr_bit_filters] = mask; opt_bits[nr_bit_filters] = bits; nr_bit_filters++; } static uint64_t parse_flag_name(const char *str, 
int len) { int i; if (!*str || !len) return 0; if (len <= 8 && !strncmp(str, "compound", len)) return BITS_COMPOUND; for (i = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if (!strncmp(str, page_flag_names[i] + 2, len)) return 1ULL << i; } return parse_number(str); } static uint64_t parse_flag_names(const char *str, int all) { const char *p = str; uint64_t flags = 0; while (1) { if (*p == ',' || *p == '=' || *p == '\0') { if ((*str != '~') || (*str == '~' && all && *++str)) flags |= parse_flag_name(str, p - str); if (*p != ',') break; str = p + 1; } p++; } return flags; } static void parse_bits_mask(const char *optarg) { uint64_t mask; uint64_t bits; const char *p; p = strchr(optarg, '='); if (p == optarg) { mask = KPF_ALL_BITS; bits = parse_flag_names(p + 1, 0); } else if (p) { mask = parse_flag_names(optarg, 0); bits = parse_flag_names(p + 1, 0); } else if (strchr(optarg, '~')) { mask = parse_flag_names(optarg, 1); bits = parse_flag_names(optarg, 0); } else { mask = parse_flag_names(optarg, 0); bits = KPF_ALL_BITS; } add_bits_filter(mask, bits); } static struct option opts[] = { { "raw" , 0, NULL, 'r' }, { "pid" , 1, NULL, 'p' }, { "file" , 1, NULL, 'f' }, { "addr" , 1, NULL, 'a' }, { "bits" , 1, NULL, 'b' }, { "list" , 0, NULL, 'l' }, { "list-each" , 0, NULL, 'L' }, { "no-summary", 0, NULL, 'N' }, { "hwpoison" , 0, NULL, 'X' }, { "unpoison" , 0, NULL, 'x' }, { "help" , 0, NULL, 'h' }, { NULL , 0, NULL, 0 } }; int main(int argc, char *argv[]) { int c; page_size = getpagesize(); while ((c = getopt_long(argc, argv, "rp:f:a:b:lLNXxh", opts, NULL)) != -1) { switch (c) { case 'r': opt_raw = 1; break; case 'p': parse_pid(optarg); break; case 'f': parse_file(optarg); break; case 'a': parse_addr_range(optarg); break; case 'b': parse_bits_mask(optarg); break; case 'l': opt_list = 1; break; case 'L': opt_list = 2; break; case 'N': opt_no_summary = 1; break; case 'X': opt_hwpoison = 1; prepare_hwpoison_fd(); break; case 'x': opt_unpoison = 1; 
prepare_hwpoison_fd(); break; case 'h': usage(); exit(0); default: usage(); exit(1); } } if (opt_list && opt_pid) printf("voffset\t"); if (opt_list == 1) printf("offset\tlen\tflags\n"); if (opt_list == 2) printf("offset\tflags\n"); walk_addr_ranges(); if (opt_list == 1) show_page_range(0, 0, 0); /* drain the buffer */ if (opt_no_summary) return 0; if (opt_list) printf("\n\n"); show_summary(); return 0; }
gpl-2.0
alpinelinux/linux-stable-grsec
drivers/media/platform/davinci/vpbe_venc.c
957
18150
/* * Copyright (C) 2010 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/mux.h> #include <linux/platform_data/i2c-davinci.h> #include <linux/io.h> #include <media/davinci/vpbe_types.h> #include <media/davinci/vpbe_venc.h> #include <media/davinci/vpss.h> #include <media/v4l2-device.h> #include "vpbe_venc_regs.h" #define MODULE_NAME "davinci-vpbe-venc" static struct platform_device_id vpbe_venc_devtype[] = { { .name = DM644X_VPBE_VENC_SUBDEV_NAME, .driver_data = VPBE_VERSION_1, }, { .name = DM365_VPBE_VENC_SUBDEV_NAME, .driver_data = VPBE_VERSION_2, }, { .name = DM355_VPBE_VENC_SUBDEV_NAME, .driver_data = VPBE_VERSION_3, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype); static int debug = 2; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-2"); struct venc_state { struct v4l2_subdev sd; struct venc_callback *callback; struct venc_platform_data *pdata; struct device *pdev; u32 output; v4l2_std_id std; spinlock_t lock; void __iomem *venc_base; void __iomem *vdaccfg_reg; enum vpbe_version venc_type; }; static inline 
struct venc_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct venc_state, sd); } static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset) { struct venc_state *venc = to_state(sd); return readl(venc->venc_base + offset); } static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val) { struct venc_state *venc = to_state(sd); writel(val, (venc->venc_base + offset)); return val; } static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset, u32 val, u32 mask) { u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask); venc_write(sd, offset, new_val); return new_val; } static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val) { struct venc_state *venc = to_state(sd); writel(val, venc->vdaccfg_reg); val = readl(venc->vdaccfg_reg); return val; } #define VDAC_COMPONENT 0x543 #define VDAC_S_VIDEO 0x210 /* This function sets the dac of the VPBE for various outputs */ static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index) { switch (out_index) { case 0: v4l2_dbg(debug, 1, sd, "Setting output to Composite\n"); venc_write(sd, VENC_DACSEL, 0); break; case 1: v4l2_dbg(debug, 1, sd, "Setting output to Component\n"); venc_write(sd, VENC_DACSEL, VDAC_COMPONENT); break; case 2: v4l2_dbg(debug, 1, sd, "Setting output to S-video\n"); venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO); break; default: return -EINVAL; } return 0; } static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable) { struct venc_state *venc = to_state(sd); v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n"); if (benable) { venc_write(sd, VENC_VMOD, 0); venc_write(sd, VENC_CVBS, 0); venc_write(sd, VENC_LCDOUT, 0); venc_write(sd, VENC_HSPLS, 0); venc_write(sd, VENC_HSTART, 0); venc_write(sd, VENC_HVALID, 0); venc_write(sd, VENC_HINT, 0); venc_write(sd, VENC_VSPLS, 0); venc_write(sd, VENC_VSTART, 0); venc_write(sd, VENC_VVALID, 0); venc_write(sd, VENC_VINT, 0); venc_write(sd, VENC_YCCCTL, 0); venc_write(sd, VENC_DACSEL, 0); } else { 
venc_write(sd, VENC_VMOD, 0); /* disable VCLK output pin enable */ venc_write(sd, VENC_VIDCTL, 0x141); /* Disable output sync pins */ venc_write(sd, VENC_SYNCCTL, 0); /* Disable DCLOCK */ venc_write(sd, VENC_DCLKCTL, 0); venc_write(sd, VENC_DRGBX1, 0x0000057C); /* Disable LCD output control (accepting default polarity) */ venc_write(sd, VENC_LCDOUT, 0); if (venc->venc_type != VPBE_VERSION_3) venc_write(sd, VENC_CMPNT, 0x100); venc_write(sd, VENC_HSPLS, 0); venc_write(sd, VENC_HINT, 0); venc_write(sd, VENC_HSTART, 0); venc_write(sd, VENC_HVALID, 0); venc_write(sd, VENC_VSPLS, 0); venc_write(sd, VENC_VINT, 0); venc_write(sd, VENC_VSTART, 0); venc_write(sd, VENC_VVALID, 0); venc_write(sd, VENC_HSDLY, 0); venc_write(sd, VENC_VSDLY, 0); venc_write(sd, VENC_YCCCTL, 0); venc_write(sd, VENC_VSTARTA, 0); /* Set OSD clock and OSD Sync Adavance registers */ venc_write(sd, VENC_OSDCLK0, 1); venc_write(sd, VENC_OSDCLK1, 2); } } static void venc_enable_vpss_clock(int venc_type, enum vpbe_enc_timings_type type, unsigned int pclock) { if (venc_type == VPBE_VERSION_1) return; if (venc_type == VPBE_VERSION_2 && (type == VPBE_ENC_STD || (type == VPBE_ENC_DV_TIMINGS && pclock <= 27000000))) { vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1); vpss_enable_clock(VPSS_VPBE_CLOCK, 1); return; } if (venc_type == VPBE_VERSION_3 && type == VPBE_ENC_STD) vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 0); } #define VDAC_CONFIG_SD_V3 0x0E21A6B6 #define VDAC_CONFIG_SD_V2 0x081141CF /* * setting NTSC mode */ static int venc_set_ntsc(struct v4l2_subdev *sd) { u32 val; struct venc_state *venc = to_state(sd); struct venc_platform_data *pdata = venc->pdata; v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n"); /* Setup clock at VPSS & VENC for SD */ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1); if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_525_60); venc_enabledigitaloutput(sd, 0); if (venc->venc_type == VPBE_VERSION_3) { venc_write(sd, 
VENC_CLKCTL, 0x01); venc_write(sd, VENC_VIDCTL, 0); val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3); } else if (venc->venc_type == VPBE_VERSION_2) { venc_write(sd, VENC_CLKCTL, 0x01); venc_write(sd, VENC_VIDCTL, 0); vdaccfg_write(sd, VDAC_CONFIG_SD_V2); } else { /* to set VENC CLK DIV to 1 - final clock is 54 MHz */ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1); /* Set REC656 Mode */ venc_write(sd, VENC_YCCCTL, 0x1); venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ); venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS); } venc_write(sd, VENC_VMOD, 0); venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD); venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_write(sd, VENC_DACTST, 0x0); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); return 0; } /* * setting PAL mode */ static int venc_set_pal(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); v4l2_dbg(debug, 2, sd, "venc_set_pal\n"); /* Setup clock at VPSS & VENC for SD */ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1); if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_625_50); venc_enabledigitaloutput(sd, 0); if (venc->venc_type == VPBE_VERSION_3) { venc_write(sd, VENC_CLKCTL, 0x1); venc_write(sd, VENC_VIDCTL, 0); vdaccfg_write(sd, VDAC_CONFIG_SD_V3); } else if (venc->venc_type == VPBE_VERSION_2) { venc_write(sd, VENC_CLKCTL, 0x1); venc_write(sd, VENC_VIDCTL, 0); vdaccfg_write(sd, VDAC_CONFIG_SD_V2); } else { /* to set VENC CLK DIV to 1 - final clock is 54 MHz */ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1); /* Set REC656 Mode */ venc_write(sd, VENC_YCCCTL, 0x1); } venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT, VENC_SYNCCTL_OVD); venc_write(sd, VENC_VMOD, 0); venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD); 
venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_write(sd, VENC_DACTST, 0x0); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); return 0; } #define VDAC_CONFIG_HD_V2 0x081141EF /* * venc_set_480p59_94 * * This function configures the video encoder to EDTV(525p) component setting. */ static int venc_set_480p59_94(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); struct venc_platform_data *pdata = venc->pdata; v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n"); if (venc->venc_type != VPBE_VERSION_1 && venc->venc_type != VPBE_VERSION_2) return -EINVAL; /* Setup clock at VPSS & VENC for SD */ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000); venc_enabledigitaloutput(sd, 0); if (venc->venc_type == VPBE_VERSION_2) vdaccfg_write(sd, VDAC_CONFIG_HD_V2); venc_write(sd, VENC_OSDCLK0, 0); venc_write(sd, VENC_OSDCLK1, 1); if (venc->venc_type == VPBE_VERSION_1) { venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ, VENC_VDPRO_DAFRQ); venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS, VENC_VDPRO_DAUPS); } venc_write(sd, VENC_VMOD, 0); venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD); venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 << VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); return 0; } /* * venc_set_625p * * This function configures the video encoder to HDTV(625p) component setting */ static int venc_set_576p50(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); struct venc_platform_data *pdata = venc->pdata; v4l2_dbg(debug, 2, sd, "venc_set_576p50\n"); if (venc->venc_type != VPBE_VERSION_1 && venc->venc_type != VPBE_VERSION_2) return -EINVAL; /* Setup clock at VPSS & VENC for SD */ if 
(pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000); venc_enabledigitaloutput(sd, 0); if (venc->venc_type == VPBE_VERSION_2) vdaccfg_write(sd, VDAC_CONFIG_HD_V2); venc_write(sd, VENC_OSDCLK0, 0); venc_write(sd, VENC_OSDCLK1, 1); if (venc->venc_type == VPBE_VERSION_1) { venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ, VENC_VDPRO_DAFRQ); venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS, VENC_VDPRO_DAUPS); } venc_write(sd, VENC_VMOD, 0); venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD); venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 << VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); return 0; } /* * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only */ static int venc_set_720p60_internal(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); struct venc_platform_data *pdata = venc->pdata; if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000); venc_enabledigitaloutput(sd, 0); venc_write(sd, VENC_OSDCLK0, 0); venc_write(sd, VENC_OSDCLK1, 1); venc_write(sd, VENC_VMOD, 0); /* DM365 component HD mode */ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD); venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); venc_write(sd, VENC_XHINTVL, 0); return 0; } /* * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only */ static int venc_set_1080i30_internal(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); struct venc_platform_data *pdata = venc->pdata; if 
(pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0) return -EINVAL; venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000); venc_enabledigitaloutput(sd, 0); venc_write(sd, VENC_OSDCLK0, 0); venc_write(sd, VENC_OSDCLK1, 1); venc_write(sd, VENC_VMOD, 0); /* DM365 component HD mode */ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT), VENC_VMOD_VIE); venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD); venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT), VENC_VMOD_TVTYP); venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC); venc_write(sd, VENC_XHINTVL, 0); return 0; } static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm) { v4l2_dbg(debug, 1, sd, "venc_s_std_output\n"); if (norm & V4L2_STD_525_60) return venc_set_ntsc(sd); else if (norm & V4L2_STD_625_50) return venc_set_pal(sd); return -EINVAL; } static int venc_s_dv_timings(struct v4l2_subdev *sd, struct v4l2_dv_timings *dv_timings) { struct venc_state *venc = to_state(sd); u32 height = dv_timings->bt.height; int ret; v4l2_dbg(debug, 1, sd, "venc_s_dv_timings\n"); if (height == 576) return venc_set_576p50(sd); else if (height == 480) return venc_set_480p59_94(sd); else if ((height == 720) && (venc->venc_type == VPBE_VERSION_2)) { /* TBD setup internal 720p mode here */ ret = venc_set_720p60_internal(sd); /* for DM365 VPBE, there is DAC inside */ vdaccfg_write(sd, VDAC_CONFIG_HD_V2); return ret; } else if ((height == 1080) && (venc->venc_type == VPBE_VERSION_2)) { /* TBD setup internal 1080i mode here */ ret = venc_set_1080i30_internal(sd); /* for DM365 VPBE, there is DAC inside */ vdaccfg_write(sd, VDAC_CONFIG_HD_V2); return ret; } return -EINVAL; } static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct venc_state *venc = to_state(sd); int ret; v4l2_dbg(debug, 1, sd, "venc_s_routing\n"); ret = venc_set_dac(sd, output); if (!ret) venc->output = output; return ret; } static long venc_ioctl(struct v4l2_subdev *sd, 
unsigned int cmd, void *arg) { u32 val; switch (cmd) { case VENC_GET_FLD: val = venc_read(sd, VENC_VSTAT); *((int *)arg) = ((val & VENC_VSTAT_FIDST) == VENC_VSTAT_FIDST); break; default: v4l2_err(sd, "Wrong IOCTL cmd\n"); break; } return 0; } static const struct v4l2_subdev_core_ops venc_core_ops = { .ioctl = venc_ioctl, }; static const struct v4l2_subdev_video_ops venc_video_ops = { .s_routing = venc_s_routing, .s_std_output = venc_s_std_output, .s_dv_timings = venc_s_dv_timings, }; static const struct v4l2_subdev_ops venc_ops = { .core = &venc_core_ops, .video = &venc_video_ops, }; static int venc_initialize(struct v4l2_subdev *sd) { struct venc_state *venc = to_state(sd); int ret; /* Set default to output to composite and std to NTSC */ venc->output = 0; venc->std = V4L2_STD_525_60; ret = venc_s_routing(sd, 0, venc->output, 0); if (ret < 0) { v4l2_err(sd, "Error setting output during init\n"); return -EINVAL; } ret = venc_s_std_output(sd, venc->std); if (ret < 0) { v4l2_err(sd, "Error setting std during init\n"); return -EINVAL; } return ret; } static int venc_device_get(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); struct venc_state **venc = data; if (strstr(pdev->name, "vpbe-venc") != NULL) *venc = platform_get_drvdata(pdev); return 0; } struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev, const char *venc_name) { struct venc_state *venc; int err; err = bus_for_each_dev(&platform_bus_type, NULL, &venc, venc_device_get); if (venc == NULL) return NULL; v4l2_subdev_init(&venc->sd, &venc_ops); strcpy(venc->sd.name, venc_name); if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) { v4l2_err(v4l2_dev, "vpbe unable to register venc sub device\n"); return NULL; } if (venc_initialize(&venc->sd)) { v4l2_err(v4l2_dev, "vpbe venc initialization failed\n"); return NULL; } return &venc->sd; } EXPORT_SYMBOL(venc_sub_dev_init); static int venc_probe(struct platform_device *pdev) { const struct platform_device_id 
*pdev_id; struct venc_state *venc; struct resource *res; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "No platform data for VENC sub device"); return -EINVAL; } pdev_id = platform_get_device_id(pdev); if (!pdev_id) return -EINVAL; venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL); if (venc == NULL) return -ENOMEM; venc->venc_type = pdev_id->driver_data; venc->pdev = &pdev->dev; venc->pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); venc->venc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(venc->venc_base)) return PTR_ERR(venc->venc_base); if (venc->venc_type != VPBE_VERSION_1) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(venc->vdaccfg_reg)) return PTR_ERR(venc->vdaccfg_reg); } spin_lock_init(&venc->lock); platform_set_drvdata(pdev, venc); dev_notice(venc->pdev, "VENC sub device probe success\n"); return 0; } static int venc_remove(struct platform_device *pdev) { return 0; } static struct platform_driver venc_driver = { .probe = venc_probe, .remove = venc_remove, .driver = { .name = MODULE_NAME, .owner = THIS_MODULE, }, .id_table = vpbe_venc_devtype }; module_platform_driver(venc_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VPBE VENC Driver"); MODULE_AUTHOR("Texas Instruments");
gpl-2.0
kevleyski/o5_raspberrypi_kernel
kernel/utsname_sysctl.c
1725
3084
/* * Copyright (C) 2007 * * Author: Eric Biederman <ebiederm@xmision.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/export.h> #include <linux/uts.h> #include <linux/utsname.h> #include <linux/sysctl.h> #include <linux/wait.h> #ifdef CONFIG_PROC_SYSCTL static void *get_uts(struct ctl_table *table, int write) { char *which = table->data; struct uts_namespace *uts_ns; uts_ns = current->nsproxy->uts_ns; which = (which - (char *)&init_uts_ns) + (char *)uts_ns; if (!write) down_read(&uts_sem); else down_write(&uts_sem); return which; } static void put_uts(struct ctl_table *table, int write, void *which) { if (!write) up_read(&uts_sem); else up_write(&uts_sem); } /* * Special case of dostring for the UTS structure. This has locks * to observe. Should this be in kernel/sys.c ???? */ static int proc_do_uts_string(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table uts_table; int r; memcpy(&uts_table, table, sizeof(uts_table)); uts_table.data = get_uts(table, write); r = proc_dostring(&uts_table, write, buffer, lenp, ppos); put_uts(table, write, uts_table.data); if (write) proc_sys_poll_notify(table->poll); return r; } #else #define proc_do_uts_string NULL #endif static DEFINE_CTL_TABLE_POLL(hostname_poll); static DEFINE_CTL_TABLE_POLL(domainname_poll); static struct ctl_table uts_kern_table[] = { { .procname = "ostype", .data = init_uts_ns.name.sysname, .maxlen = sizeof(init_uts_ns.name.sysname), .mode = 0444, .proc_handler = proc_do_uts_string, }, { .procname = "osrelease", .data = init_uts_ns.name.release, .maxlen = sizeof(init_uts_ns.name.release), .mode = 0444, .proc_handler = proc_do_uts_string, }, { .procname = "version", .data = init_uts_ns.name.version, .maxlen = sizeof(init_uts_ns.name.version), .mode = 0444, .proc_handler = 
proc_do_uts_string, }, { .procname = "hostname", .data = init_uts_ns.name.nodename, .maxlen = sizeof(init_uts_ns.name.nodename), .mode = 0644, .proc_handler = proc_do_uts_string, .poll = &hostname_poll, }, { .procname = "domainname", .data = init_uts_ns.name.domainname, .maxlen = sizeof(init_uts_ns.name.domainname), .mode = 0644, .proc_handler = proc_do_uts_string, .poll = &domainname_poll, }, {} }; static struct ctl_table uts_root_table[] = { { .procname = "kernel", .mode = 0555, .child = uts_kern_table, }, {} }; #ifdef CONFIG_PROC_SYSCTL /* * Notify userspace about a change in a certain entry of uts_kern_table, * identified by the parameter proc. */ void uts_proc_notify(enum uts_proc proc) { struct ctl_table *table = &uts_kern_table[proc]; proc_sys_poll_notify(table->poll); } #endif static int __init utsname_sysctl_init(void) { register_sysctl_table(uts_root_table); return 0; } device_initcall(utsname_sysctl_init);
gpl-2.0
eyeballer/tf300t-10.4.2.9-kernel
drivers/acpi/acpica/exfldio.c
2493
29272
/****************************************************************************** * * Module Name: exfldio - Aml Field I/O * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "acevents.h" #include "acdispat.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exfldio") /* Local prototypes */ static acpi_status acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset, u64 *value, u32 read_write); static u8 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); static acpi_status acpi_ex_setup_region(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset); /******************************************************************************* * * FUNCTION: acpi_ex_setup_region * * PARAMETERS: obj_desc - Field to be read or written * field_datum_byte_offset - Byte offset of this datum within the * parent field * * RETURN: Status * * DESCRIPTION: Common processing for acpi_ex_extract_from_field and * acpi_ex_insert_into_field. Initialize the Region if necessary and * validate the request. 
* ******************************************************************************/ static acpi_status acpi_ex_setup_region(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset) { acpi_status status = AE_OK; union acpi_operand_object *rgn_desc; ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset); rgn_desc = obj_desc->common_field.region_obj; /* We must have a valid region */ if (rgn_desc->common.type != ACPI_TYPE_REGION) { ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)", rgn_desc->common.type, acpi_ut_get_object_type_name(rgn_desc))); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* * If the Region Address and Length have not been previously evaluated, * evaluate them now and save the results. */ if (!(rgn_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_region_arguments(rgn_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* Exit if Address/Length have been disallowed by the host OS */ if (rgn_desc->common.flags & AOPOBJ_INVALID) { return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS); } /* * Exit now for SMBus or IPMI address space, it has a non-linear * address space and the request cannot be directly validated */ if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS || rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) { /* SMBus or IPMI has a non-linear address space */ return_ACPI_STATUS(AE_OK); } #ifdef ACPI_UNDER_DEVELOPMENT /* * If the Field access is any_acc, we can now compute the optimal * access (because we know know the length of the parent region) */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } #endif /* * Validate the request. The entire request from the byte offset for a * length of one field datum (access width) must fit within the region. 
* (Region length is specified in bytes) */ if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset + field_datum_byte_offset + obj_desc->common_field.access_byte_width)) { if (acpi_gbl_enable_interpreter_slack) { /* * Slack mode only: We will go ahead and allow access to this * field if it is within the region length rounded up to the next * access width boundary. acpi_size cast for 64-bit compile. */ if (ACPI_ROUND_UP(rgn_desc->region.length, obj_desc->common_field. access_byte_width) >= ((acpi_size) obj_desc->common_field. base_byte_offset + obj_desc->common_field.access_byte_width + field_datum_byte_offset)) { return_ACPI_STATUS(AE_OK); } } if (rgn_desc->region.length < obj_desc->common_field.access_byte_width) { /* * This is the case where the access_type (acc_word, etc.) is wider * than the region itself. For example, a region of length one * byte, and a field with Dword access specified. */ ACPI_ERROR((AE_INFO, "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)", acpi_ut_get_node_name(obj_desc-> common_field.node), obj_desc->common_field.access_byte_width, acpi_ut_get_node_name(rgn_desc->region. 
node), rgn_desc->region.length)); } /* * Offset rounded up to next multiple of field width * exceeds region length, indicate an error */ ACPI_ERROR((AE_INFO, "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)", acpi_ut_get_node_name(obj_desc->common_field.node), obj_desc->common_field.base_byte_offset, field_datum_byte_offset, obj_desc->common_field.access_byte_width, acpi_ut_get_node_name(rgn_desc->region.node), rgn_desc->region.length)); return_ACPI_STATUS(AE_AML_REGION_LIMIT); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_access_region * * PARAMETERS: obj_desc - Field to be read * field_datum_byte_offset - Byte offset of this datum within the * parent field * Value - Where to store value (must at least * 64 bits) * Function - Read or Write flag plus other region- * dependent flags * * RETURN: Status * * DESCRIPTION: Read or Write a single field datum to an Operation Region. 
* ******************************************************************************/ acpi_status acpi_ex_access_region(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset, u64 *value, u32 function) { acpi_status status; union acpi_operand_object *rgn_desc; u32 region_offset; ACPI_FUNCTION_TRACE(ex_access_region); /* * Ensure that the region operands are fully evaluated and verify * the validity of the request */ status = acpi_ex_setup_region(obj_desc, field_datum_byte_offset); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * The physical address of this field datum is: * * 1) The base of the region, plus * 2) The base offset of the field, plus * 3) The current offset into the field */ rgn_desc = obj_desc->common_field.region_obj; region_offset = obj_desc->common_field.base_byte_offset + field_datum_byte_offset; if ((function & ACPI_IO_MASK) == ACPI_READ) { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[READ]")); } else { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[WRITE]")); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n", acpi_ut_get_region_name(rgn_desc->region. space_id), rgn_desc->region.space_id, obj_desc->common_field.access_byte_width, obj_desc->common_field.base_byte_offset, field_datum_byte_offset, ACPI_CAST_PTR(void, (rgn_desc-> region. address + region_offset)))); /* Invoke the appropriate address_space/op_region handler */ status = acpi_ev_address_space_dispatch(rgn_desc, function, region_offset, ACPI_MUL_8(obj_desc->common_field. access_byte_width), value); if (ACPI_FAILURE(status)) { if (status == AE_NOT_IMPLEMENTED) { ACPI_ERROR((AE_INFO, "Region %s (ID=%u) not implemented", acpi_ut_get_region_name(rgn_desc->region. space_id), rgn_desc->region.space_id)); } else if (status == AE_NOT_EXIST) { ACPI_ERROR((AE_INFO, "Region %s (ID=%u) has no handler", acpi_ut_get_region_name(rgn_desc->region. 
space_id), rgn_desc->region.space_id)); } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_register_overflow * * PARAMETERS: obj_desc - Register(Field) to be written * Value - Value to be stored * * RETURN: TRUE if value overflows the field, FALSE otherwise * * DESCRIPTION: Check if a value is out of range of the field being written. * Used to check if the values written to Index and Bank registers * are out of range. Normally, the value is simply truncated * to fit the field, but this case is most likely a serious * coding error in the ASL. * ******************************************************************************/ static u8 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) { if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { /* * The field is large enough to hold the maximum integer, so we can * never overflow it. */ return (FALSE); } if (value >= ((u64) 1 << obj_desc->common_field.bit_length)) { /* * The Value is larger than the maximum value that can fit into * the register. */ return (TRUE); } /* The Value will fit into the field with no truncation */ return (FALSE); } /******************************************************************************* * * FUNCTION: acpi_ex_field_datum_io * * PARAMETERS: obj_desc - Field to be read * field_datum_byte_offset - Byte offset of this datum within the * parent field * Value - Where to store value (must be 64 bits) * read_write - Read or Write flag * * RETURN: Status * * DESCRIPTION: Read or Write a single datum of a field. 
The field_type is * demultiplexed here to handle the different types of fields * (buffer_field, region_field, index_field, bank_field) * ******************************************************************************/ static acpi_status acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset, u64 *value, u32 read_write) { acpi_status status; u64 local_value; ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset); if (read_write == ACPI_READ) { if (!value) { local_value = 0; /* To support reads without saving return value */ value = &local_value; } /* Clear the entire return buffer first, [Very Important!] */ *value = 0; } /* * The four types of fields are: * * buffer_field - Read/write from/to a Buffer * region_field - Read/write from/to a Operation Region. * bank_field - Write to a Bank Register, then read/write from/to an * operation_region * index_field - Write to an Index Register, then read/write from/to a * Data Register */ switch (obj_desc->common.type) { case ACPI_TYPE_BUFFER_FIELD: /* * If the buffer_field arguments have not been previously evaluated, * evaluate them now and save the results. */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_field_arguments(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } if (read_write == ACPI_READ) { /* * Copy the data from the source buffer. * Length is the field width in bytes. */ ACPI_MEMCPY(value, (obj_desc->buffer_field.buffer_obj)->buffer. pointer + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, obj_desc->common_field.access_byte_width); } else { /* * Copy the data to the target buffer. * Length is the field width in bytes. */ ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer. 
pointer + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, value, obj_desc->common_field.access_byte_width); } status = AE_OK; break; case ACPI_TYPE_LOCAL_BANK_FIELD: /* * Ensure that the bank_value is not beyond the capacity of * the register */ if (acpi_ex_register_overflow(obj_desc->bank_field.bank_obj, (u64) obj_desc->bank_field. value)) { return_ACPI_STATUS(AE_AML_REGISTER_LIMIT); } /* * For bank_fields, we must write the bank_value to the bank_register * (itself a region_field) before we can access the data. */ status = acpi_ex_insert_into_field(obj_desc->bank_field.bank_obj, &obj_desc->bank_field.value, sizeof(obj_desc->bank_field. value)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Now that the Bank has been selected, fall through to the * region_field case and write the datum to the Operation Region */ /*lint -fallthrough */ case ACPI_TYPE_LOCAL_REGION_FIELD: /* * For simple region_fields, we just directly access the owning * Operation Region. */ status = acpi_ex_access_region(obj_desc, field_datum_byte_offset, value, read_write); break; case ACPI_TYPE_LOCAL_INDEX_FIELD: /* * Ensure that the index_value is not beyond the capacity of * the register */ if (acpi_ex_register_overflow(obj_desc->index_field.index_obj, (u64) obj_desc->index_field. value)) { return_ACPI_STATUS(AE_AML_REGISTER_LIMIT); } /* Write the index value to the index_register (itself a region_field) */ field_datum_byte_offset += obj_desc->index_field.value; ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Write to Index Register: Value %8.8X\n", field_datum_byte_offset)); status = acpi_ex_insert_into_field(obj_desc->index_field.index_obj, &field_datum_byte_offset, sizeof(field_datum_byte_offset)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (read_write == ACPI_READ) { /* Read the datum from the data_register */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Read from Data Register\n")); status = acpi_ex_extract_from_field(obj_desc->index_field. 
data_obj, value, sizeof(u64)); } else { /* Write the datum to the data_register */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Write to Data Register: Value %8.8X%8.8X\n", ACPI_FORMAT_UINT64(*value))); status = acpi_ex_insert_into_field(obj_desc->index_field. data_obj, value, sizeof(u64)); } break; default: ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u", obj_desc->common.type)); status = AE_AML_INTERNAL; break; } if (ACPI_SUCCESS(status)) { if (read_write == ACPI_READ) { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Value Read %8.8X%8.8X, Width %u\n", ACPI_FORMAT_UINT64(*value), obj_desc->common_field. access_byte_width)); } else { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Value Written %8.8X%8.8X, Width %u\n", ACPI_FORMAT_UINT64(*value), obj_desc->common_field. access_byte_width)); } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_write_with_update_rule * * PARAMETERS: obj_desc - Field to be written * Mask - bitmask within field datum * field_value - Value to write * field_datum_byte_offset - Offset of datum within field * * RETURN: Status * * DESCRIPTION: Apply the field update rule to a field write * ******************************************************************************/ acpi_status acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc, u64 mask, u64 field_value, u32 field_datum_byte_offset) { acpi_status status = AE_OK; u64 merged_value; u64 current_value; ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask); /* Start with the new bits */ merged_value = field_value; /* If the mask is all ones, we don't need to worry about the update rule */ if (mask != ACPI_UINT64_MAX) { /* Decode the update rule */ switch (obj_desc->common_field. field_flags & AML_FIELD_UPDATE_RULE_MASK) { case AML_FIELD_UPDATE_PRESERVE: /* * Check if update rule needs to be applied (not if mask is all * ones) The left shift drops the bits we want to ignore. 
*/ if ((~mask << (ACPI_MUL_8(sizeof(mask)) - ACPI_MUL_8(obj_desc->common_field. access_byte_width))) != 0) { /* * Read the current contents of the byte/word/dword containing * the field, and merge with the new field value. */ status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, &current_value, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } merged_value |= (current_value & ~mask); } break; case AML_FIELD_UPDATE_WRITE_AS_ONES: /* Set positions outside the field to all ones */ merged_value |= ~mask; break; case AML_FIELD_UPDATE_WRITE_AS_ZEROS: /* Set positions outside the field to all zeros */ merged_value &= mask; break; default: ACPI_ERROR((AE_INFO, "Unknown UpdateRule value: 0x%X", (obj_desc->common_field. field_flags & AML_FIELD_UPDATE_RULE_MASK))); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n", ACPI_FORMAT_UINT64(mask), field_datum_byte_offset, obj_desc->common_field.access_byte_width, ACPI_FORMAT_UINT64(field_value), ACPI_FORMAT_UINT64(merged_value))); /* Write the merged value */ status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, &merged_value, ACPI_WRITE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_extract_from_field * * PARAMETERS: obj_desc - Field to be read * Buffer - Where to store the field data * buffer_length - Length of Buffer * * RETURN: Status * * DESCRIPTION: Retrieve the current value of the given field * ******************************************************************************/ acpi_status acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, void *buffer, u32 buffer_length) { acpi_status status; u64 raw_datum; u64 merged_datum; u32 field_offset = 0; u32 buffer_offset = 0; u32 buffer_tail_bits; u32 datum_count; u32 field_datum_count; u32 access_bit_width; u32 i; 
ACPI_FUNCTION_TRACE(ex_extract_from_field); /* Validate target buffer and clear it */ if (buffer_length < ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { ACPI_ERROR((AE_INFO, "Field size %u (bits) is too large for buffer (%u)", obj_desc->common_field.bit_length, buffer_length)); return_ACPI_STATUS(AE_BUFFER_OVERFLOW); } ACPI_MEMSET(buffer, 0, buffer_length); access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width); /* Handle the simple case here */ if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); return_ACPI_STATUS(status); } /* TBD: Move to common setup code */ /* Field algorithm is limited to sizeof(u64), truncate if needed */ if (obj_desc->common_field.access_byte_width > sizeof(u64)) { obj_desc->common_field.access_byte_width = sizeof(u64); access_bit_width = sizeof(u64) * 8; } /* Compute the number of datums (access width data items) */ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, access_bit_width); field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length + obj_desc->common_field. start_field_bit_offset, access_bit_width); /* Priming read from the field */ status = acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } merged_datum = raw_datum >> obj_desc->common_field.start_field_bit_offset; /* Read the rest of the field */ for (i = 1; i < field_datum_count; i++) { /* Get next input datum from the field */ field_offset += obj_desc->common_field.access_byte_width; status = acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Merge with previous datum if necessary. * * Note: Before the shift, check if the shift value will be larger than * the integer size. If so, there is no need to perform the operation. 
* This avoids the differences in behavior between different compilers * concerning shift values larger than the target data width. */ if (access_bit_width - obj_desc->common_field.start_field_bit_offset < ACPI_INTEGER_BIT_SIZE) { merged_datum |= raw_datum << (access_bit_width - obj_desc->common_field. start_field_bit_offset); } if (i == datum_count) { break; } /* Write merged datum to target buffer */ ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); buffer_offset += obj_desc->common_field.access_byte_width; merged_datum = raw_datum >> obj_desc->common_field.start_field_bit_offset; } /* Mask off any extra bits in the last datum */ buffer_tail_bits = obj_desc->common_field.bit_length % access_bit_width; if (buffer_tail_bits) { merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits); } /* Write the last datum to the buffer */ ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_insert_into_field * * PARAMETERS: obj_desc - Field to be written * Buffer - Data to be written * buffer_length - Length of Buffer * * RETURN: Status * * DESCRIPTION: Store the Buffer contents into the given field * ******************************************************************************/ acpi_status acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, void *buffer, u32 buffer_length) { void *new_buffer; acpi_status status; u64 mask; u64 width_mask; u64 merged_datum; u64 raw_datum = 0; u32 field_offset = 0; u32 buffer_offset = 0; u32 buffer_tail_bits; u32 datum_count; u32 field_datum_count; u32 access_bit_width; u32 required_length; u32 i; ACPI_FUNCTION_TRACE(ex_insert_into_field); /* Validate input buffer */ new_buffer = NULL; required_length = 
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); /* * We must have a buffer that is at least as long as the field * we are writing to. This is because individual fields are * indivisible and partial writes are not supported -- as per * the ACPI specification. */ if (buffer_length < required_length) { /* We need to create a new buffer */ new_buffer = ACPI_ALLOCATE_ZEROED(required_length); if (!new_buffer) { return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the original data to the new buffer, starting * at Byte zero. All unused (upper) bytes of the * buffer will be 0. */ ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); buffer = new_buffer; buffer_length = required_length; } /* TBD: Move to common setup code */ /* Algo is limited to sizeof(u64), so cut the access_byte_width */ if (obj_desc->common_field.access_byte_width > sizeof(u64)) { obj_desc->common_field.access_byte_width = sizeof(u64); } access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width); /* * Create the bitmasks used for bit insertion. * Note: This if/else is used to bypass compiler differences with the * shift operator */ if (access_bit_width == ACPI_INTEGER_BIT_SIZE) { width_mask = ACPI_UINT64_MAX; } else { width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width); } mask = width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); /* Compute the number of datums (access width data items) */ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, access_bit_width); field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length + obj_desc->common_field. 
start_field_bit_offset, access_bit_width); /* Get initial Datum from the input buffer */ ACPI_MEMCPY(&raw_datum, buffer, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); merged_datum = raw_datum << obj_desc->common_field.start_field_bit_offset; /* Write the entire field */ for (i = 1; i < field_datum_count; i++) { /* Write merged datum to the target field */ merged_datum &= mask; status = acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, field_offset); if (ACPI_FAILURE(status)) { goto exit; } field_offset += obj_desc->common_field.access_byte_width; /* * Start new output datum by merging with previous input datum * if necessary. * * Note: Before the shift, check if the shift value will be larger than * the integer size. If so, there is no need to perform the operation. * This avoids the differences in behavior between different compilers * concerning shift values larger than the target data width. */ if ((access_bit_width - obj_desc->common_field.start_field_bit_offset) < ACPI_INTEGER_BIT_SIZE) { merged_datum = raw_datum >> (access_bit_width - obj_desc->common_field. 
start_field_bit_offset); } else { merged_datum = 0; } mask = width_mask; if (i == datum_count) { break; } /* Get the next input datum from the buffer */ buffer_offset += obj_desc->common_field.access_byte_width; ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); merged_datum |= raw_datum << obj_desc->common_field.start_field_bit_offset; } /* Mask off any extra bits in the last datum */ buffer_tail_bits = (obj_desc->common_field.bit_length + obj_desc->common_field.start_field_bit_offset) % access_bit_width; if (buffer_tail_bits) { mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits); } /* Write the last datum to the field */ merged_datum &= mask; status = acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, field_offset); exit: /* Free temporary buffer if we used one */ if (new_buffer) { ACPI_FREE(new_buffer); } return_ACPI_STATUS(status); }
gpl-2.0
crimeofheart/n7000_tw_jb_kernel
drivers/media/video/adv7170.c
3261
9861
/* * adv7170 - adv7170, adv7171 video encoder driver version 0.0.1 * * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Based on adv7176 driver by: * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net> * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * - some corrections for Pinnacle Systems Inc. DC10plus card. * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (1/1/2003) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver"); MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct adv7170 { struct v4l2_subdev sd; unsigned char reg[128]; v4l2_std_id norm; int input; }; static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd) { return container_of(sd, struct adv7170, sd); } static char *inputs[] = { "pass_through", "play_back" }; /* ----------------------------------------------------------------------- */ static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); encoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int adv7170_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); int ret = -1; u8 reg; /* the adv7170 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = encoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, 
block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = adv7170_write(sd, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } /* ----------------------------------------------------------------------- */ #define TR0MODE 0x4c #define TR0RST 0x80 #define TR1CAPT 0x00 #define TR1PLAY 0x00 static const unsigned char init_NTSC[] = { 0x00, 0x10, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0x16, /* Fsc0 */ 0x0a, 0x7c, /* Fsc1 */ 0x0b, 0xf0, /* Fsc2 */ 0x0c, 0x21, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static const unsigned char init_PAL[] = { 0x00, 0x71, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0xcb, /* Fsc0 */ 0x0a, 0x8a, /* Fsc1 */ 0x0b, 0x09, /* Fsc2 */ 0x0c, 0x2a, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 
1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct adv7170 *encoder = to_adv7170(sd); v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else if (std & V4L2_STD_PAL) { adv7170_write_block(sd, init_PAL, sizeof(init_PAL)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else { v4l2_dbg(1, debug, sd, "illegal norm: %llx\n", (unsigned long long)std); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std); encoder->norm = std; return 0; } static int adv7170_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct adv7170 *encoder = to_adv7170(sd); /* RJ: input = 0: input is from decoder input = 1: input is from ZR36060 input = 2: color bar */ v4l2_dbg(1, debug, sd, "set input from %s\n", input == 0 ? 
"decoder" : "ZR36060"); switch (input) { case 0: adv7170_write(sd, 0x01, 0x20); adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */ adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; case 1: adv7170_write(sd, 0x01, 0x00); adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */ adv7170_write(sd, 0x02, 0x08); adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; default: v4l2_dbg(1, debug, sd, "illegal input: %d\n", input); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]); encoder->input = input; return 0; } static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7170, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops adv7170_core_ops = { .g_chip_ident = adv7170_g_chip_ident, }; static const struct v4l2_subdev_video_ops adv7170_video_ops = { .s_std_output = adv7170_s_std_output, .s_routing = adv7170_s_routing, }; static const struct v4l2_subdev_ops adv7170_ops = { .core = &adv7170_core_ops, .video = &adv7170_video_ops, }; /* ----------------------------------------------------------------------- */ static int adv7170_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adv7170 *encoder; struct v4l2_subdev *sd; int i; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &adv7170_ops); encoder->norm = V4L2_STD_NTSC; encoder->input = 0; i = 
adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (i >= 0) { i = adv7170_write(sd, 0x07, TR0MODE | TR0RST); i = adv7170_write(sd, 0x07, TR0MODE); i = adv7170_read(sd, 0x12); v4l2_dbg(1, debug, sd, "revision %d\n", i & 1); } if (i < 0) v4l2_dbg(1, debug, sd, "init error 0x%x\n", i); return 0; } static int adv7170_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_adv7170(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7170_id[] = { { "adv7170", 0 }, { "adv7171", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adv7170_id); static struct i2c_driver adv7170_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7170", }, .probe = adv7170_probe, .remove = adv7170_remove, .id_table = adv7170_id, }; static __init int init_adv7170(void) { return i2c_add_driver(&adv7170_driver); } static __exit void exit_adv7170(void) { i2c_del_driver(&adv7170_driver); } module_init(init_adv7170); module_exit(exit_adv7170);
gpl-2.0
GeyerA/android_kernel_samsung_tuna
drivers/media/video/adv7170.c
3261
9861
/* * adv7170 - adv7170, adv7171 video encoder driver version 0.0.1 * * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Based on adv7176 driver by: * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net> * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * - some corrections for Pinnacle Systems Inc. DC10plus card. * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (1/1/2003) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver"); MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct adv7170 { struct v4l2_subdev sd; unsigned char reg[128]; v4l2_std_id norm; int input; }; static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd) { return container_of(sd, struct adv7170, sd); } static char *inputs[] = { "pass_through", "play_back" }; /* ----------------------------------------------------------------------- */ static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); encoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int adv7170_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); int ret = -1; u8 reg; /* the adv7170 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = encoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, 
block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = adv7170_write(sd, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } /* ----------------------------------------------------------------------- */ #define TR0MODE 0x4c #define TR0RST 0x80 #define TR1CAPT 0x00 #define TR1PLAY 0x00 static const unsigned char init_NTSC[] = { 0x00, 0x10, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0x16, /* Fsc0 */ 0x0a, 0x7c, /* Fsc1 */ 0x0b, 0xf0, /* Fsc2 */ 0x0c, 0x21, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static const unsigned char init_PAL[] = { 0x00, 0x71, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0xcb, /* Fsc0 */ 0x0a, 0x8a, /* Fsc1 */ 0x0b, 0x09, /* Fsc2 */ 0x0c, 0x2a, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 
1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct adv7170 *encoder = to_adv7170(sd); v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else if (std & V4L2_STD_PAL) { adv7170_write_block(sd, init_PAL, sizeof(init_PAL)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else { v4l2_dbg(1, debug, sd, "illegal norm: %llx\n", (unsigned long long)std); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std); encoder->norm = std; return 0; } static int adv7170_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct adv7170 *encoder = to_adv7170(sd); /* RJ: input = 0: input is from decoder input = 1: input is from ZR36060 input = 2: color bar */ v4l2_dbg(1, debug, sd, "set input from %s\n", input == 0 ? 
"decoder" : "ZR36060"); switch (input) { case 0: adv7170_write(sd, 0x01, 0x20); adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */ adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; case 1: adv7170_write(sd, 0x01, 0x00); adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */ adv7170_write(sd, 0x02, 0x08); adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; default: v4l2_dbg(1, debug, sd, "illegal input: %d\n", input); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]); encoder->input = input; return 0; } static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7170, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops adv7170_core_ops = { .g_chip_ident = adv7170_g_chip_ident, }; static const struct v4l2_subdev_video_ops adv7170_video_ops = { .s_std_output = adv7170_s_std_output, .s_routing = adv7170_s_routing, }; static const struct v4l2_subdev_ops adv7170_ops = { .core = &adv7170_core_ops, .video = &adv7170_video_ops, }; /* ----------------------------------------------------------------------- */ static int adv7170_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adv7170 *encoder; struct v4l2_subdev *sd; int i; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &adv7170_ops); encoder->norm = V4L2_STD_NTSC; encoder->input = 0; i = 
adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (i >= 0) { i = adv7170_write(sd, 0x07, TR0MODE | TR0RST); i = adv7170_write(sd, 0x07, TR0MODE); i = adv7170_read(sd, 0x12); v4l2_dbg(1, debug, sd, "revision %d\n", i & 1); } if (i < 0) v4l2_dbg(1, debug, sd, "init error 0x%x\n", i); return 0; } static int adv7170_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_adv7170(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7170_id[] = { { "adv7170", 0 }, { "adv7171", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adv7170_id); static struct i2c_driver adv7170_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7170", }, .probe = adv7170_probe, .remove = adv7170_remove, .id_table = adv7170_id, }; static __init int init_adv7170(void) { return i2c_add_driver(&adv7170_driver); } static __exit void exit_adv7170(void) { i2c_del_driver(&adv7170_driver); } module_init(init_adv7170); module_exit(exit_adv7170);
gpl-2.0
EmbeddedAndroid/linaro-android-3.1
drivers/input/keyboard/jornada680_kbd.c
3261
8314
/* * drivers/input/keyboard/jornada680_kbd.c * * HP Jornada 620/660/680/690 scan keyboard platform driver * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com> * * Based on hp680_keyb.c * Copyright (C) 2006 Paul Mundt * Copyright (C) 2005 Andriy Skulysh * Split from drivers/input/keyboard/hp600_keyb.c * Copyright (C) 2000 Yaegashi Takeshi (hp6xx kbd scan routine and translation table) * Copyright (C) 2000 Niibe Yutaka (HP620 Keyb translation table) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/input.h> #include <linux/input-polldev.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/delay.h> #include <asm/io.h> #define PCCR 0xa4000104 #define PDCR 0xa4000106 #define PECR 0xa4000108 #define PFCR 0xa400010a #define PCDR 0xa4000124 #define PDDR 0xa4000126 #define PEDR 0xa4000128 #define PFDR 0xa400012a #define PGDR 0xa400012c #define PHDR 0xa400012e #define PJDR 0xa4000130 #define PKDR 0xa4000132 #define PLDR 0xa4000134 static const unsigned short jornada_scancodes[] = { /* PTD1 */ KEY_CAPSLOCK, KEY_MACRO, KEY_LEFTCTRL, 0, KEY_ESC, KEY_KP5, 0, 0, /* 1 -> 8 */ KEY_F1, KEY_F2, KEY_F3, KEY_F8, KEY_F7, KEY_F6, KEY_F4, KEY_F5, /* 9 -> 16 */ /* PTD5 */ KEY_SLASH, KEY_APOSTROPHE, KEY_ENTER, 0, KEY_Z, 0, 0, 0, /* 17 -> 24 */ KEY_X, KEY_C, KEY_V, KEY_DOT, KEY_COMMA, KEY_M, KEY_B, KEY_N, /* 25 -> 32 */ /* PTD7 */ KEY_KP2, KEY_KP6, KEY_KP3, 0, 0, 0, 0, 0, /* 33 -> 40 */ KEY_F10, KEY_RO, KEY_F9, KEY_KP4, KEY_NUMLOCK, KEY_SCROLLLOCK, KEY_LEFTALT, KEY_HANJA, /* 41 -> 48 */ /* PTE0 */ KEY_KATAKANA, KEY_KP0, KEY_GRAVE, 0, KEY_FINANCE, 0, 0, 0, /* 49 -> 56 */ KEY_KPMINUS, KEY_HIRAGANA, KEY_SPACE, KEY_KPDOT, KEY_VOLUMEUP, 249, 0, 0, /* 57 -> 64 */ /* PTE1 */ 
KEY_SEMICOLON, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, KEY_A, 0, 0, 0, /* 65 -> 72 */ KEY_S, KEY_D, KEY_F, KEY_L, KEY_K, KEY_J, KEY_G, KEY_H, /* 73 -> 80 */ /* PTE3 */ KEY_KP8, KEY_LEFTMETA, KEY_RIGHTSHIFT, 0, KEY_TAB, 0, 0, 0, /* 81 -> 88 */ 0, KEY_LEFTSHIFT, KEY_KP7, KEY_KP9, KEY_KP1, KEY_F11, KEY_KPPLUS, KEY_KPASTERISK, /* 89 -> 96 */ /* PTE6 */ KEY_P, KEY_LEFTBRACE, KEY_BACKSPACE, 0, KEY_Q, 0, 0, 0, /* 97 -> 104 */ KEY_W, KEY_E, KEY_R, KEY_O, KEY_I, KEY_U, KEY_T, KEY_Y, /* 105 -> 112 */ /* PTE7 */ KEY_0, KEY_MINUS, KEY_EQUAL, 0, KEY_1, 0, 0, 0, /* 113 -> 120 */ KEY_2, KEY_3, KEY_4, KEY_9, KEY_8, KEY_7, KEY_5, KEY_6, /* 121 -> 128 */ /* **** */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #define JORNADA_SCAN_SIZE 18 struct jornadakbd { struct input_polled_dev *poll_dev; unsigned short keymap[ARRAY_SIZE(jornada_scancodes)]; unsigned char length; unsigned char old_scan[JORNADA_SCAN_SIZE]; unsigned char new_scan[JORNADA_SCAN_SIZE]; }; static void jornada_parse_kbd(struct jornadakbd *jornadakbd) { struct input_dev *input_dev = jornadakbd->poll_dev->input; unsigned short *keymap = jornadakbd->keymap; unsigned int sync_me = 0; unsigned int i, j; for (i = 0; i < JORNADA_SCAN_SIZE; i++) { unsigned char new = jornadakbd->new_scan[i]; unsigned char old = jornadakbd->old_scan[i]; unsigned int xor = new ^ old; if (xor == 0) continue; for (j = 0; j < 8; j++) { unsigned int bit = 1 << j; if (xor & bit) { unsigned int scancode = (i << 3) + j; input_event(input_dev, EV_MSC, MSC_SCAN, scancode); input_report_key(input_dev, keymap[scancode], !(new & bit)); sync_me = 1; } } } if (sync_me) input_sync(input_dev); } static void jornada_scan_keyb(unsigned char *s) { int i; unsigned short ec_static, dc_static; /* = UINT16_t */ unsigned char matrix_switch[] = { 0xfd, 0xff, /* PTD1 PD(1) */ 0xdf, 0xff, /* PTD5 PD(5) */ 0x7f, 0xff, /* PTD7 PD(7) */ 0xff, 0xfe, /* PTE0 PE(0) */ 0xff, 0xfd, /* PTE1 PE(1) */ 0xff, 0xf7, /* PTE3 PE(3) */ 0xff, 0xbf, /* PTE6 PE(6) */ 0xff, 0x7f, /* PTE7 PE(7) */ 
}, *t = matrix_switch; /* PD(x) : 1. 0xcc0c & (1~(1 << (2*(x)+1))))) 2. (0xf0cf & 0xfffff) */ /* PE(x) : 1. 0xcc0c & 0xffff 2. 0xf0cf & (1~(1 << (2*(x)+1))))) */ unsigned short matrix_PDE[] = { 0xcc04, 0xf0cf, /* PD(1) */ 0xc40c, 0xf0cf, /* PD(5) */ 0x4c0c, 0xf0cf, /* PD(7) */ 0xcc0c, 0xf0cd, /* PE(0) */ 0xcc0c, 0xf0c7, /* PE(1) */ 0xcc0c, 0xf04f, /* PE(3) */ 0xcc0c, 0xd0cf, /* PE(6) */ 0xcc0c, 0x70cf, /* PE(7) */ }, *y = matrix_PDE; /* Save these control reg bits */ dc_static = (__raw_readw(PDCR) & (~0xcc0c)); ec_static = (__raw_readw(PECR) & (~0xf0cf)); for (i = 0; i < 8; i++) { /* disable output for all but the one we want to scan */ __raw_writew((dc_static | *y++), PDCR); __raw_writew((ec_static | *y++), PECR); udelay(5); /* Get scanline row */ __raw_writeb(*t++, PDDR); __raw_writeb(*t++, PEDR); udelay(50); /* Read data */ *s++ = __raw_readb(PCDR); *s++ = __raw_readb(PFDR); } /* Scan no lines */ __raw_writeb(0xff, PDDR); __raw_writeb(0xff, PEDR); /* Enable all scanlines */ __raw_writew((dc_static | (0x5555 & 0xcc0c)),PDCR); __raw_writew((ec_static | (0x5555 & 0xf0cf)),PECR); /* Ignore extra keys and events */ *s++ = __raw_readb(PGDR); *s++ = __raw_readb(PHDR); } static void jornadakbd680_poll(struct input_polled_dev *dev) { struct jornadakbd *jornadakbd = dev->private; jornada_scan_keyb(jornadakbd->new_scan); jornada_parse_kbd(jornadakbd); memcpy(jornadakbd->old_scan, jornadakbd->new_scan, JORNADA_SCAN_SIZE); } static int __devinit jornada680kbd_probe(struct platform_device *pdev) { struct jornadakbd *jornadakbd; struct input_polled_dev *poll_dev; struct input_dev *input_dev; int i, error; jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL); if (!jornadakbd) return -ENOMEM; poll_dev = input_allocate_polled_device(); if (!poll_dev) { error = -ENOMEM; goto failed; } platform_set_drvdata(pdev, jornadakbd); jornadakbd->poll_dev = poll_dev; memcpy(jornadakbd->keymap, jornada_scancodes, sizeof(jornadakbd->keymap)); poll_dev->private = jornadakbd; 
poll_dev->poll = jornadakbd680_poll; poll_dev->poll_interval = 50; /* msec */ input_dev = poll_dev->input; input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); input_dev->name = "HP Jornada 680 keyboard"; input_dev->phys = "jornadakbd/input0"; input_dev->keycode = jornadakbd->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(jornada_scancodes); input_dev->dev.parent = &pdev->dev; input_dev->id.bustype = BUS_HOST; for (i = 0; i < 128; i++) if (jornadakbd->keymap[i]) __set_bit(jornadakbd->keymap[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); error = input_register_polled_device(jornadakbd->poll_dev); if (error) goto failed; return 0; failed: printk(KERN_ERR "Jornadakbd: failed to register driver, error: %d\n", error); platform_set_drvdata(pdev, NULL); input_free_polled_device(poll_dev); kfree(jornadakbd); return error; } static int __devexit jornada680kbd_remove(struct platform_device *pdev) { struct jornadakbd *jornadakbd = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); input_unregister_polled_device(jornadakbd->poll_dev); input_free_polled_device(jornadakbd->poll_dev); kfree(jornadakbd); return 0; } static struct platform_driver jornada680kbd_driver = { .driver = { .name = "jornada680_kbd", .owner = THIS_MODULE, }, .probe = jornada680kbd_probe, .remove = __devexit_p(jornada680kbd_remove), }; static int __init jornada680kbd_init(void) { return platform_driver_register(&jornada680kbd_driver); } static void __exit jornada680kbd_exit(void) { platform_driver_unregister(&jornada680kbd_driver); } module_init(jornada680kbd_init); module_exit(jornada680kbd_exit); MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:jornada680_kbd");
gpl-2.0
djcapelis/linux-kernel-opensparc-fpga
drivers/media/dvb-frontends/lgs8gxx.c
3773
24166
/* * Support for Legend Silicon GB20600 (a.k.a DMB-TH) demodulator * LGS8913, LGS8GL5, LGS8G75 * experimental support LGS8G42, LGS8G52 * * Copyright (C) 2007-2009 David T.L. Wong <davidtlwong@gmail.com> * Copyright (C) 2008 Sirius International (Hong Kong) Limited * Timothy Lee <timothy.lee@siriushk.com> (for initial work on LGS8GL5) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <asm/div64.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "lgs8gxx.h" #include "lgs8gxx_priv.h" #define dprintk(args...) \ do { \ if (debug) \ printk(KERN_DEBUG "lgs8gxx: " args); \ } while (0) static int debug; static int fake_signal_str = 1; #define LGS8GXX_FIRMWARE "lgs8g75.fw" module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); module_param(fake_signal_str, int, 0644); MODULE_PARM_DESC(fake_signal_str, "fake signal strength for LGS8913." 
"Signal strength calculation is slow.(default:on)."); /* LGS8GXX internal helper functions */ static int lgs8gxx_write_reg(struct lgs8gxx_state *priv, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .flags = 0, .buf = buf, .len = 2 }; msg.addr = priv->config->demod_address; if (priv->config->prod != LGS8GXX_PROD_LGS8G75 && reg >= 0xC0) msg.addr += 0x02; if (debug >= 2) dprintk("%s: reg=0x%02X, data=0x%02X\n", __func__, reg, data); ret = i2c_transfer(priv->i2c, &msg, 1); if (ret != 1) dprintk("%s: error reg=0x%x, data=0x%x, ret=%i\n", __func__, reg, data, ret); return (ret != 1) ? -1 : 0; } static int lgs8gxx_read_reg(struct lgs8gxx_state *priv, u8 reg, u8 *p_data) { int ret; u8 dev_addr; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .flags = 0, .buf = b0, .len = 1 }, { .flags = I2C_M_RD, .buf = b1, .len = 1 }, }; dev_addr = priv->config->demod_address; if (priv->config->prod != LGS8GXX_PROD_LGS8G75 && reg >= 0xC0) dev_addr += 0x02; msg[1].addr = msg[0].addr = dev_addr; ret = i2c_transfer(priv->i2c, msg, 2); if (ret != 2) { dprintk("%s: error reg=0x%x, ret=%i\n", __func__, reg, ret); return -1; } *p_data = b1[0]; if (debug >= 2) dprintk("%s: reg=0x%02X, data=0x%02X\n", __func__, reg, b1[0]); return 0; } static int lgs8gxx_soft_reset(struct lgs8gxx_state *priv) { lgs8gxx_write_reg(priv, 0x02, 0x00); msleep(1); lgs8gxx_write_reg(priv, 0x02, 0x01); msleep(100); return 0; } static int wait_reg_mask(struct lgs8gxx_state *priv, u8 reg, u8 mask, u8 val, u8 delay, u8 tries) { u8 t; int i; for (i = 0; i < tries; i++) { lgs8gxx_read_reg(priv, reg, &t); if ((t & mask) == val) return 0; msleep(delay); } return 1; } static int lgs8gxx_set_ad_mode(struct lgs8gxx_state *priv) { const struct lgs8gxx_config *config = priv->config; u8 if_conf; if_conf = 0x10; /* AGC output on, RF_AGC output off; */ if_conf |= ((config->ext_adc) ? 0x80 : 0x00) | ((config->if_neg_center) ? 0x04 : 0x00) | ((config->if_freq == 0) ? 
0x08 : 0x00) | /* Baseband */ ((config->adc_signed) ? 0x02 : 0x00) | ((config->if_neg_edge) ? 0x01 : 0x00); if (config->ext_adc && (config->prod == LGS8GXX_PROD_LGS8G52)) { lgs8gxx_write_reg(priv, 0xBA, 0x40); } lgs8gxx_write_reg(priv, 0x07, if_conf); return 0; } static int lgs8gxx_set_if_freq(struct lgs8gxx_state *priv, u32 freq /*in kHz*/) { u64 val; u32 v32; u32 if_clk; if_clk = priv->config->if_clk_freq; val = freq; if (freq != 0) { val <<= 32; if (if_clk != 0) do_div(val, if_clk); v32 = val & 0xFFFFFFFF; dprintk("Set IF Freq to %dkHz\n", freq); } else { v32 = 0; dprintk("Set IF Freq to baseband\n"); } dprintk("AFC_INIT_FREQ = 0x%08X\n", v32); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_write_reg(priv, 0x08, 0xFF & (v32)); lgs8gxx_write_reg(priv, 0x09, 0xFF & (v32 >> 8)); lgs8gxx_write_reg(priv, 0x0A, 0xFF & (v32 >> 16)); lgs8gxx_write_reg(priv, 0x0B, 0xFF & (v32 >> 24)); } else { lgs8gxx_write_reg(priv, 0x09, 0xFF & (v32)); lgs8gxx_write_reg(priv, 0x0A, 0xFF & (v32 >> 8)); lgs8gxx_write_reg(priv, 0x0B, 0xFF & (v32 >> 16)); lgs8gxx_write_reg(priv, 0x0C, 0xFF & (v32 >> 24)); } return 0; } static int lgs8gxx_get_afc_phase(struct lgs8gxx_state *priv) { u64 val; u32 v32 = 0; u8 reg_addr, t; int i; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) reg_addr = 0x23; else reg_addr = 0x48; for (i = 0; i < 4; i++) { lgs8gxx_read_reg(priv, reg_addr, &t); v32 <<= 8; v32 |= t; reg_addr--; } val = v32; val *= priv->config->if_clk_freq; val >>= 32; dprintk("AFC = %u kHz\n", (u32)val); return 0; } static int lgs8gxx_set_mode_auto(struct lgs8gxx_state *priv) { u8 t; u8 prod = priv->config->prod; if (prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC6, 0x01); if (prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x0C, &t); t &= (~0x04); lgs8gxx_write_reg(priv, 0x0C, t | 0x80); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3D, 0x04); } else if (prod == LGS8GXX_PROD_LGS8913 || prod == LGS8GXX_PROD_LGS8GL5 || prod == LGS8GXX_PROD_LGS8G42 || 
prod == LGS8GXX_PROD_LGS8G52 || prod == LGS8GXX_PROD_LGS8G54) { lgs8gxx_read_reg(priv, 0x7E, &t); lgs8gxx_write_reg(priv, 0x7E, t | 0x01); /* clear FEC self reset */ lgs8gxx_read_reg(priv, 0xC5, &t); lgs8gxx_write_reg(priv, 0xC5, t & 0xE0); } if (prod == LGS8GXX_PROD_LGS8913) { /* FEC auto detect */ lgs8gxx_write_reg(priv, 0xC1, 0x03); lgs8gxx_read_reg(priv, 0x7C, &t); t = (t & 0x8C) | 0x03; lgs8gxx_write_reg(priv, 0x7C, t); /* BER test mode */ lgs8gxx_read_reg(priv, 0xC3, &t); t = (t & 0xEF) | 0x10; lgs8gxx_write_reg(priv, 0xC3, t); } if (priv->config->prod == LGS8GXX_PROD_LGS8G52) lgs8gxx_write_reg(priv, 0xD9, 0x40); return 0; } static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv) { u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { u8 t2; lgs8gxx_read_reg(priv, 0x0C, &t); t &= (~0x80); lgs8gxx_write_reg(priv, 0x0C, t); lgs8gxx_read_reg(priv, 0x0C, &t); lgs8gxx_read_reg(priv, 0x19, &t2); if (((t&0x03) == 0x01) && (t2&0x01)) { lgs8gxx_write_reg(priv, 0x6E, 0x05); lgs8gxx_write_reg(priv, 0x39, 0x02); lgs8gxx_write_reg(priv, 0x39, 0x03); lgs8gxx_write_reg(priv, 0x3D, 0x05); lgs8gxx_write_reg(priv, 0x3E, 0x28); lgs8gxx_write_reg(priv, 0x53, 0x80); } else { lgs8gxx_write_reg(priv, 0x6E, 0x3F); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3D, 0x04); } lgs8gxx_soft_reset(priv); return 0; } /* turn off auto-detect; manual settings */ lgs8gxx_write_reg(priv, 0x7E, 0); if (priv->config->prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC1, 0); lgs8gxx_read_reg(priv, 0xC5, &t); t = (t & 0xE0) | 0x06; lgs8gxx_write_reg(priv, 0xC5, t); lgs8gxx_soft_reset(priv); return 0; } static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked) { int ret = 0; u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) ret = lgs8gxx_read_reg(priv, 0x13, &t); else ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return ret; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) *locked = ((t & 0x80) == 0x80) ? 
1 : 0; else *locked = ((t & 0xC0) == 0xC0) ? 1 : 0; return 0; } /* Wait for Code Acquisition Lock */ static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked) { int ret = 0; u8 reg, mask, val; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg = 0x13; mask = 0x80; val = 0x80; } else { reg = 0x4B; mask = 0xC0; val = 0xC0; } ret = wait_reg_mask(priv, reg, mask, val, 50, 40); *locked = (ret == 0) ? 1 : 0; return 0; } static int lgs8gxx_is_autodetect_finished(struct lgs8gxx_state *priv, u8 *finished) { int ret = 0; u8 reg, mask, val; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg = 0x1f; mask = 0xC0; val = 0x80; } else { reg = 0xA4; mask = 0x03; val = 0x01; } ret = wait_reg_mask(priv, reg, mask, val, 10, 20); *finished = (ret == 0) ? 1 : 0; return 0; } static int lgs8gxx_autolock_gi(struct lgs8gxx_state *priv, u8 gi, u8 cpn, u8 *locked) { int err = 0; u8 ad_fini = 0; u8 t1, t2; if (gi == GI_945) dprintk("try GI 945\n"); else if (gi == GI_595) dprintk("try GI 595\n"); else if (gi == GI_420) dprintk("try GI 420\n"); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x0C, &t1); lgs8gxx_read_reg(priv, 0x18, &t2); t1 &= ~(GI_MASK); t1 |= gi; t2 &= 0xFE; t2 |= cpn ? 
0x01 : 0x00; lgs8gxx_write_reg(priv, 0x0C, t1); lgs8gxx_write_reg(priv, 0x18, t2); } else { lgs8gxx_write_reg(priv, 0x04, gi); } lgs8gxx_soft_reset(priv); err = lgs8gxx_wait_ca_lock(priv, locked); if (err || !(*locked)) return err; err = lgs8gxx_is_autodetect_finished(priv, &ad_fini); if (err != 0) return err; if (ad_fini) { dprintk("auto detect finished\n"); } else *locked = 0; return 0; } static int lgs8gxx_auto_detect(struct lgs8gxx_state *priv, u8 *detected_param, u8 *gi) { int i, j; int err = 0; u8 locked = 0, tmp_gi; dprintk("%s\n", __func__); lgs8gxx_set_mode_auto(priv); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_write_reg(priv, 0x67, 0xAA); lgs8gxx_write_reg(priv, 0x6E, 0x3F); } else { /* Guard Interval */ lgs8gxx_write_reg(priv, 0x03, 00); } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { tmp_gi = GI_945; err = lgs8gxx_autolock_gi(priv, GI_945, j, &locked); if (err) goto out; if (locked) goto locked; } for (j = 0; j < 2; j++) { tmp_gi = GI_420; err = lgs8gxx_autolock_gi(priv, GI_420, j, &locked); if (err) goto out; if (locked) goto locked; } tmp_gi = GI_595; err = lgs8gxx_autolock_gi(priv, GI_595, 1, &locked); if (err) goto out; if (locked) goto locked; } locked: if ((err == 0) && (locked == 1)) { u8 t; if (priv->config->prod != LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0xA2, &t); *detected_param = t; } else { lgs8gxx_read_reg(priv, 0x1F, &t); *detected_param = t & 0x3F; } if (tmp_gi == GI_945) dprintk("GI 945 locked\n"); else if (tmp_gi == GI_595) dprintk("GI 595 locked\n"); else if (tmp_gi == GI_420) dprintk("GI 420 locked\n"); *gi = tmp_gi; } if (!locked) err = -1; out: return err; } static void lgs8gxx_auto_lock(struct lgs8gxx_state *priv) { s8 err; u8 gi = 0x2; u8 detected_param = 0; err = lgs8gxx_auto_detect(priv, &detected_param, &gi); if (err != 0) { dprintk("lgs8gxx_auto_detect failed\n"); } else dprintk("detected param = 0x%02X\n", detected_param); /* Apply detected parameters */ if (priv->config->prod == 
LGS8GXX_PROD_LGS8913) { u8 inter_leave_len = detected_param & TIM_MASK ; /* Fix 8913 time interleaver detection bug */ inter_leave_len = (inter_leave_len == TIM_MIDDLE) ? 0x60 : 0x40; detected_param &= CF_MASK | SC_MASK | LGS_FEC_MASK; detected_param |= inter_leave_len; } if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { u8 t; lgs8gxx_read_reg(priv, 0x19, &t); t &= 0x81; t |= detected_param << 1; lgs8gxx_write_reg(priv, 0x19, t); } else { lgs8gxx_write_reg(priv, 0x7D, detected_param); if (priv->config->prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC0, detected_param); } /* lgs8gxx_soft_reset(priv); */ /* Enter manual mode */ lgs8gxx_set_mode_manual(priv); switch (gi) { case GI_945: priv->curr_gi = 945; break; case GI_595: priv->curr_gi = 595; break; case GI_420: priv->curr_gi = 420; break; default: priv->curr_gi = 945; break; } } static int lgs8gxx_set_mpeg_mode(struct lgs8gxx_state *priv, u8 serial, u8 clk_pol, u8 clk_gated) { int ret = 0; u8 t, reg_addr; reg_addr = (priv->config->prod == LGS8GXX_PROD_LGS8G75) ? 0x30 : 0xC2; ret = lgs8gxx_read_reg(priv, reg_addr, &t); if (ret != 0) return ret; t &= 0xF8; t |= serial ? TS_SERIAL : TS_PARALLEL; t |= clk_pol ? TS_CLK_INVERTED : TS_CLK_NORMAL; t |= clk_gated ? 
TS_CLK_GATED : TS_CLK_FREERUN; ret = lgs8gxx_write_reg(priv, reg_addr, t); if (ret != 0) return ret; return 0; } /* A/D input peak-to-peak voltage range */ static int lgs8g75_set_adc_vpp(struct lgs8gxx_state *priv, u8 sel) { u8 r26 = 0x73, r27 = 0x90; if (priv->config->prod != LGS8GXX_PROD_LGS8G75) return 0; r26 |= (sel & 0x01) << 7; r27 |= (sel & 0x02) >> 1; lgs8gxx_write_reg(priv, 0x26, r26); lgs8gxx_write_reg(priv, 0x27, r27); return 0; } /* LGS8913 demod frontend functions */ static int lgs8913_init(struct lgs8gxx_state *priv) { u8 t; /* LGS8913 specific */ lgs8gxx_write_reg(priv, 0xc1, 0x3); lgs8gxx_read_reg(priv, 0x7c, &t); lgs8gxx_write_reg(priv, 0x7c, (t&0x8c) | 0x3); /* LGS8913 specific */ lgs8gxx_read_reg(priv, 0xc3, &t); lgs8gxx_write_reg(priv, 0xc3, t&0x10); return 0; } static int lgs8g75_init_data(struct lgs8gxx_state *priv) { const struct firmware *fw; int rc; int i; rc = request_firmware(&fw, LGS8GXX_FIRMWARE, &priv->i2c->dev); if (rc) return rc; lgs8gxx_write_reg(priv, 0xC6, 0x40); lgs8gxx_write_reg(priv, 0x3D, 0x04); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3A, 0x00); lgs8gxx_write_reg(priv, 0x38, 0x00); lgs8gxx_write_reg(priv, 0x3B, 0x00); lgs8gxx_write_reg(priv, 0x38, 0x00); for (i = 0; i < fw->size; i++) { lgs8gxx_write_reg(priv, 0x38, 0x00); lgs8gxx_write_reg(priv, 0x3A, (u8)(i&0xff)); lgs8gxx_write_reg(priv, 0x3B, (u8)(i>>8)); lgs8gxx_write_reg(priv, 0x3C, fw->data[i]); } lgs8gxx_write_reg(priv, 0x38, 0x00); release_firmware(fw); return 0; } static int lgs8gxx_init(struct dvb_frontend *fe) { struct lgs8gxx_state *priv = (struct lgs8gxx_state *)fe->demodulator_priv; const struct lgs8gxx_config *config = priv->config; u8 data = 0; s8 err; dprintk("%s\n", __func__); lgs8gxx_read_reg(priv, 0, &data); dprintk("reg 0 = 0x%02X\n", data); if (config->prod == LGS8GXX_PROD_LGS8G75) lgs8g75_set_adc_vpp(priv, config->adc_vpp); /* Setup MPEG output format */ err = lgs8gxx_set_mpeg_mode(priv, config->serial_ts, config->ts_clk_pol, 
config->ts_clk_gated); if (err != 0) return -EIO; if (config->prod == LGS8GXX_PROD_LGS8913) lgs8913_init(priv); lgs8gxx_set_if_freq(priv, priv->config->if_freq); lgs8gxx_set_ad_mode(priv); return 0; } static void lgs8gxx_release(struct dvb_frontend *fe) { struct lgs8gxx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (len != 2) return -EINVAL; return lgs8gxx_write_reg(priv, buf[0], buf[1]); } static int lgs8gxx_set_fe(struct dvb_frontend *fe) { struct lgs8gxx_state *priv = fe->demodulator_priv; dprintk("%s\n", __func__); /* set frequency */ if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* start auto lock */ lgs8gxx_auto_lock(priv); msleep(10); return 0; } static int lgs8gxx_get_fe(struct dvb_frontend *fe) { struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; dprintk("%s\n", __func__); /* TODO: get real readings from device */ /* inversion status */ fe_params->inversion = INVERSION_OFF; /* bandwidth */ fe_params->bandwidth_hz = 8000000; fe_params->code_rate_HP = FEC_AUTO; fe_params->code_rate_LP = FEC_AUTO; fe_params->modulation = QAM_AUTO; /* transmission mode */ fe_params->transmission_mode = TRANSMISSION_MODE_AUTO; /* guard interval */ fe_params->guard_interval = GUARD_INTERVAL_AUTO; /* hierarchy */ fe_params->hierarchy = HIERARCHY_NONE; return 0; } static int lgs8gxx_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fesettings) { /* FIXME: copy from tda1004x.c */ fesettings->min_delay_ms = 800; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static int lgs8gxx_read_status(struct dvb_frontend *fe, fe_status_t *fe_status) { struct lgs8gxx_state *priv = fe->demodulator_priv; s8 ret; u8 t, locked = 0; dprintk("%s\n", __func__); *fe_status = 0; 
lgs8gxx_get_afc_phase(priv); lgs8gxx_is_locked(priv, &locked); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { if (locked) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; return 0; } ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return -EIO; dprintk("Reg 0x4B: 0x%02X\n", t); *fe_status = 0; if (priv->config->prod == LGS8GXX_PROD_LGS8913) { if ((t & 0x40) == 0x40) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER; if ((t & 0x80) == 0x80) *fe_status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } else { if ((t & 0x80) == 0x80) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } /* success */ dprintk("%s: fe_status=0x%x\n", __func__, *fe_status); return 0; } static int lgs8gxx_read_signal_agc(struct lgs8gxx_state *priv, u16 *signal) { u16 v; u8 agc_lvl[2], cat; dprintk("%s()\n", __func__); lgs8gxx_read_reg(priv, 0x3F, &agc_lvl[0]); lgs8gxx_read_reg(priv, 0x3E, &agc_lvl[1]); v = agc_lvl[0]; v <<= 8; v |= agc_lvl[1]; dprintk("agc_lvl: 0x%04X\n", v); if (v < 0x100) cat = 0; else if (v < 0x190) cat = 5; else if (v < 0x2A8) cat = 4; else if (v < 0x381) cat = 3; else if (v < 0x400) cat = 2; else if (v == 0x400) cat = 1; else cat = 0; *signal = cat * 65535 / 5; return 0; } static int lgs8913_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal) { u8 t; s8 ret; s16 max_strength = 0; u8 str; u16 i, gi = priv->curr_gi; dprintk("%s\n", __func__); ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return -EIO; if (fake_signal_str) { if ((t & 0xC0) == 0xC0) { dprintk("Fake signal strength\n"); *signal = 0x7FFF; } else *signal = 0; return 0; } dprintk("gi = %d\n", gi); for (i = 0; i < gi; i++) { if ((i & 0xFF) == 0) lgs8gxx_write_reg(priv, 0x84, 0x03 & (i >> 8)); lgs8gxx_write_reg(priv, 0x83, i & 0xFF); lgs8gxx_read_reg(priv, 0x94, &str); if (max_strength < str) max_strength = str; } *signal = max_strength; dprintk("%s: signal=0x%02X\n", __func__, *signal); lgs8gxx_read_reg(priv, 
0x95, &t); dprintk("%s: AVG Noise=0x%02X\n", __func__, t); return 0; } static int lgs8g75_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal) { u8 t; s16 v = 0; dprintk("%s\n", __func__); lgs8gxx_read_reg(priv, 0xB1, &t); v |= t; v <<= 8; lgs8gxx_read_reg(priv, 0xB0, &t); v |= t; *signal = v; dprintk("%s: signal=0x%02X\n", __func__, *signal); return 0; } static int lgs8gxx_read_signal_strength(struct dvb_frontend *fe, u16 *signal) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (priv->config->prod == LGS8GXX_PROD_LGS8913) return lgs8913_read_signal_strength(priv, signal); else if (priv->config->prod == LGS8GXX_PROD_LGS8G75) return lgs8g75_read_signal_strength(priv, signal); else return lgs8gxx_read_signal_agc(priv, signal); } static int lgs8gxx_read_snr(struct dvb_frontend *fe, u16 *snr) { struct lgs8gxx_state *priv = fe->demodulator_priv; u8 t; *snr = 0; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) lgs8gxx_read_reg(priv, 0x34, &t); else lgs8gxx_read_reg(priv, 0x95, &t); dprintk("AVG Noise=0x%02X\n", t); *snr = 256 - t; *snr <<= 8; dprintk("snr=0x%x\n", *snr); return 0; } static int lgs8gxx_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { *ucblocks = 0; dprintk("%s: ucblocks=0x%x\n", __func__, *ucblocks); return 0; } static void packet_counter_start(struct lgs8gxx_state *priv) { u8 orig, t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x30, &orig); orig &= 0xE7; t = orig | 0x10; lgs8gxx_write_reg(priv, 0x30, t); t = orig | 0x18; lgs8gxx_write_reg(priv, 0x30, t); t = orig | 0x10; lgs8gxx_write_reg(priv, 0x30, t); } else { lgs8gxx_write_reg(priv, 0xC6, 0x01); lgs8gxx_write_reg(priv, 0xC6, 0x41); lgs8gxx_write_reg(priv, 0xC6, 0x01); } } static void packet_counter_stop(struct lgs8gxx_state *priv) { u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x30, &t); t &= 0xE7; lgs8gxx_write_reg(priv, 0x30, t); } else { lgs8gxx_write_reg(priv, 0xC6, 0x81); } } static int lgs8gxx_read_ber(struct 
dvb_frontend *fe, u32 *ber) { struct lgs8gxx_state *priv = fe->demodulator_priv; u8 reg_err, reg_total, t; u32 total_cnt = 0, err_cnt = 0; int i; dprintk("%s\n", __func__); packet_counter_start(priv); msleep(200); packet_counter_stop(priv); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg_total = 0x28; reg_err = 0x2C; } else { reg_total = 0xD0; reg_err = 0xD4; } for (i = 0; i < 4; i++) { total_cnt <<= 8; lgs8gxx_read_reg(priv, reg_total+3-i, &t); total_cnt |= t; } for (i = 0; i < 4; i++) { err_cnt <<= 8; lgs8gxx_read_reg(priv, reg_err+3-i, &t); err_cnt |= t; } dprintk("error=%d total=%d\n", err_cnt, total_cnt); if (total_cnt == 0) *ber = 0; else *ber = err_cnt * 100 / total_cnt; dprintk("%s: ber=0x%x\n", __func__, *ber); return 0; } static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (priv->config->tuner_address == 0) return 0; if (enable) { u8 v = 0x80 | priv->config->tuner_address; return lgs8gxx_write_reg(priv, 0x01, v); } return lgs8gxx_write_reg(priv, 0x01, 0); } static struct dvb_frontend_ops lgs8gxx_ops = { .delsys = { SYS_DTMB }, .info = { .name = "Legend Silicon LGS8913/LGS8GXX DMB-TH", .frequency_min = 474000000, .frequency_max = 858000000, .frequency_stepsize = 10000, .caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO }, .release = lgs8gxx_release, .init = lgs8gxx_init, .write = lgs8gxx_write, .i2c_gate_ctrl = lgs8gxx_i2c_gate_ctrl, .set_frontend = lgs8gxx_set_fe, .get_frontend = lgs8gxx_get_fe, .get_tune_settings = lgs8gxx_get_tune_settings, .read_status = lgs8gxx_read_status, .read_ber = lgs8gxx_read_ber, .read_signal_strength = lgs8gxx_read_signal_strength, .read_snr = lgs8gxx_read_snr, .read_ucblocks = lgs8gxx_read_ucblocks, }; struct dvb_frontend *lgs8gxx_attach(const struct lgs8gxx_config *config, struct i2c_adapter *i2c) { struct lgs8gxx_state *priv = NULL; u8 data = 0; dprintk("%s()\n", __func__); if (config == NULL 
|| i2c == NULL) return NULL; priv = kzalloc(sizeof(struct lgs8gxx_state), GFP_KERNEL); if (priv == NULL) goto error_out; priv->config = config; priv->i2c = i2c; /* check if the demod is there */ if (lgs8gxx_read_reg(priv, 0, &data) != 0) { dprintk("%s lgs8gxx not found at i2c addr 0x%02X\n", __func__, priv->config->demod_address); goto error_out; } lgs8gxx_read_reg(priv, 1, &data); memcpy(&priv->frontend.ops, &lgs8gxx_ops, sizeof(struct dvb_frontend_ops)); priv->frontend.demodulator_priv = priv; if (config->prod == LGS8GXX_PROD_LGS8G75) lgs8g75_init_data(priv); return &priv->frontend; error_out: dprintk("%s() error_out\n", __func__); kfree(priv); return NULL; } EXPORT_SYMBOL(lgs8gxx_attach); MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver"); MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(LGS8GXX_FIRMWARE);
gpl-2.0
jxxhwy/NewWorld_f160_JB_kernel
drivers/net/usb/rtl8150.c
4797
22754
/* * Copyright (c) 2002 Petko Manolov (petkan@users.sourceforge.net) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <asm/uaccess.h> /* Version Information */ #define DRIVER_VERSION "v0.6.2 (2004/08/27)" #define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>" #define DRIVER_DESC "rtl8150 based usb-ethernet driver" #define IDR 0x0120 #define MAR 0x0126 #define CR 0x012e #define TCR 0x012f #define RCR 0x0130 #define TSR 0x0132 #define RSR 0x0133 #define CON0 0x0135 #define CON1 0x0136 #define MSR 0x0137 #define PHYADD 0x0138 #define PHYDAT 0x0139 #define PHYCNT 0x013b #define GPPC 0x013d #define BMCR 0x0140 #define BMSR 0x0142 #define ANAR 0x0144 #define ANLP 0x0146 #define AER 0x0148 #define CSCR 0x014C /* This one has the link status */ #define CSCR_LINK_STATUS (1 << 3) #define IDR_EEPROM 0x1202 #define PHY_READ 0 #define PHY_WRITE 0x20 #define PHY_GO 0x40 #define MII_TIMEOUT 10 #define INTBUFSIZE 8 #define RTL8150_REQT_READ 0xc0 #define RTL8150_REQT_WRITE 0x40 #define RTL8150_REQ_GET_REGS 0x05 #define RTL8150_REQ_SET_REGS 0x05 /* Transmit status register errors */ #define TSR_ECOL (1<<5) #define TSR_LCOL (1<<4) #define TSR_LOSS_CRS (1<<3) #define TSR_JBR (1<<2) #define TSR_ERRORS (TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR) /* Receive status register errors */ #define RSR_CRC (1<<2) #define RSR_FAE (1<<1) #define RSR_ERRORS (RSR_CRC | RSR_FAE) /* Media status register definitions */ #define MSR_DUPLEX (1<<4) #define MSR_SPEED (1<<3) #define MSR_LINK (1<<2) /* Interrupt pipe data */ #define INT_TSR 0x00 #define INT_RSR 0x01 #define INT_MSR 0x02 #define INT_WAKSR 0x03 #define 
INT_TXOK_CNT 0x04 #define INT_RXLOST_CNT 0x05 #define INT_CRERR_CNT 0x06 #define INT_COL_CNT 0x07 #define RTL8150_MTU 1540 #define RTL8150_TX_TIMEOUT (HZ) #define RX_SKB_POOL_SIZE 4 /* rtl8150 flags */ #define RTL8150_HW_CRC 0 #define RX_REG_SET 1 #define RTL8150_UNPLUG 2 #define RX_URB_FAIL 3 /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_MELCO 0x0411 #define VENDOR_ID_MICRONET 0x3980 #define VENDOR_ID_LONGSHINE 0x07b8 #define VENDOR_ID_OQO 0x1557 #define VENDOR_ID_ZYXEL 0x0586 #define PRODUCT_ID_RTL8150 0x8150 #define PRODUCT_ID_LUAKTX 0x0012 #define PRODUCT_ID_LCS8138TX 0x401a #define PRODUCT_ID_SP128AR 0x0003 #define PRODUCT_ID_PRESTIGE 0x401a #undef EEPROM_WRITE /* table of devices that work with this driver */ static struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, {USB_DEVICE(VENDOR_ID_LONGSHINE, PRODUCT_ID_LCS8138TX)}, {USB_DEVICE(VENDOR_ID_OQO, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_ZYXEL, PRODUCT_ID_PRESTIGE)}, {} }; MODULE_DEVICE_TABLE(usb, rtl8150_table); struct rtl8150 { unsigned long flags; struct usb_device *udev; struct tasklet_struct tl; struct net_device *netdev; struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; __le16 rx_creg; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; static const char driver_name [] = "rtl8150"; /* ** ** device related part of the code ** */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, indx, 0, data, size, 500); } static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg(dev->udev, 
usb_sndctrlpipe(dev->udev, 0), RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, data, size, 500); } static void ctrl_callback(struct urb *urb) { rtl8150_t *dev; int status = urb->status; switch (status) { case 0: break; case -EINPROGRESS: break; case -ENOENT: break; default: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status); } dev = urb->context; clear_bit(RX_REG_SET, &dev->flags); } static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) { int ret; if (test_bit(RX_REG_SET, &dev->flags)) return -EAGAIN; dev->dr.bRequestType = RTL8150_REQT_WRITE; dev->dr.bRequest = RTL8150_REQ_SET_REGS; dev->dr.wValue = cpu_to_le16(indx); dev->dr.wIndex = 0; dev->dr.wLength = cpu_to_le16(size); dev->ctrl_urb->transfer_buffer_length = size; usb_fill_control_urb(dev->ctrl_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, &dev->rx_creg, size, ctrl_callback, dev); if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { if (ret == -ENODEV) netif_device_detach(dev->netdev); err("control request submission failed: %d", ret); } else set_bit(RX_REG_SET, &dev->flags); return ret; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = data[2] = 0; tmp = indx | PHY_READ | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i <= MII_TIMEOUT) { get_registers(dev, PHYDAT, 2, data); *reg = data[0] | (data[1] << 8); return 0; } else return 1; } static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = reg & 0xff; data[2] = (reg >> 8) & 0xff; tmp = indx | PHY_WRITE | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i 
<= MII_TIMEOUT) return 0; else return 1; } static inline void set_ethernet_addr(rtl8150_t * dev) { u8 node_id[6]; get_registers(dev, IDR, sizeof(node_id), node_id); memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) { struct sockaddr *addr = p; rtl8150_t *dev = netdev_priv(netdev); if (netif_running(netdev)) return -EBUSY; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); /* Set the IDR registers. */ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); #ifdef EEPROM_WRITE { int i; u8 cr; /* Get the CR contents. */ get_registers(dev, CR, 1, &cr); /* Set the WEPROM bit (eeprom write enable). */ cr |= 0x20; set_registers(dev, CR, 1, &cr); /* Write the MAC address into eeprom. Eeprom writes must be word-sized, so we need to split them up. */ for (i = 0; i * 2 < netdev->addr_len; i++) { set_registers(dev, IDR_EEPROM + (i * 2), 2, netdev->dev_addr + (i * 2)); } /* Clear the WEPROM bit (preventing accidental eeprom writes). */ cr &= 0xdf; set_registers(dev, CR, 1, &cr); } #endif return 0; } static int rtl8150_reset(rtl8150_t * dev) { u8 data = 0x10; int i = HZ; set_registers(dev, CR, 1, &data); do { get_registers(dev, CR, 1, &data); } while ((data & 0x10) && --i); return (i > 0) ? 
1 : 0; } static int alloc_all_urbs(rtl8150_t * dev) { dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->rx_urb) return 0; dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tx_urb) { usb_free_urb(dev->rx_urb); return 0; } dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->intr_urb) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); return 0; } dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->ctrl_urb) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); return 0; } return 1; } static void free_all_urbs(rtl8150_t * dev) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); usb_free_urb(dev->ctrl_urb); } static void unlink_all_urbs(rtl8150_t * dev) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); usb_kill_urb(dev->ctrl_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) { skb = dev->rx_skb_pool[i]; dev->rx_skb_pool[i] = NULL; return skb; } } return NULL; } static void read_bulk_callback(struct urb *urb) { rtl8150_t *dev; unsigned pkt_len, res; struct sk_buff *skb; struct net_device *netdev; u16 rx_stat; int status = urb->status; int result; dev = urb->context; if (!dev) return; if (test_bit(RTL8150_UNPLUG, &dev->flags)) return; netdev = dev->netdev; if (!netif_device_present(netdev)) return; switch (status) { case 0: break; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "may be reset is needed?..\n"); goto goon; default: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "Rx status %d\n", status); goto goon; } if (!dev->rx_skb) goto resched; /* protect against short packets (tell me why we got some?!?) 
*/ if (urb->actual_length < 4) goto goon; res = urb->actual_length; rx_stat = le16_to_cpu(*(__le16 *)(urb->transfer_buffer + res - 4)); pkt_len = res - 4; skb_put(dev->rx_skb, pkt_len); dev->rx_skb->protocol = eth_type_trans(dev->rx_skb, netdev); netif_rx(dev->rx_skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; spin_lock(&dev->rx_pool_lock); skb = pull_skb(dev); spin_unlock(&dev->rx_pool_lock); if (!skb) goto resched; dev->rx_skb = skb; goon: usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); result = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (result == -ENODEV) netif_device_detach(dev->netdev); else if (result) { set_bit(RX_URB_FAIL, &dev->flags); goto resched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; resched: tasklet_schedule(&dev->tl); } static void write_bulk_callback(struct urb *urb) { rtl8150_t *dev; int status = urb->status; dev = urb->context; if (!dev) return; dev_kfree_skb_irq(dev->tx_skb); if (!netif_device_present(dev->netdev)) return; if (status) dev_info(&urb->dev->dev, "%s: Tx status %d\n", dev->netdev->name, status); dev->netdev->trans_start = jiffies; netif_wake_queue(dev->netdev); } static void intr_callback(struct urb *urb) { rtl8150_t *dev; __u8 *d; int status = urb->status; int res; dev = urb->context; if (!dev) return; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: dev_info(&urb->dev->dev, "%s: intr status %d\n", dev->netdev->name, status); goto resubmit; } d = urb->transfer_buffer; if (d[0] & TSR_ERRORS) { dev->netdev->stats.tx_errors++; if (d[INT_TSR] & (TSR_ECOL | TSR_JBR)) dev->netdev->stats.tx_aborted_errors++; if (d[INT_TSR] & TSR_LCOL) dev->netdev->stats.tx_window_errors++; if (d[INT_TSR] & TSR_LOSS_CRS) dev->netdev->stats.tx_carrier_errors++; } /* Report link status changes to the network stack */ if 
((d[INT_MSR] & MSR_LINK) == 0) { if (netif_carrier_ok(dev->netdev)) { netif_carrier_off(dev->netdev); dbg("%s: LINK LOST\n", __func__); } } else { if (!netif_carrier_ok(dev->netdev)) { netif_carrier_on(dev->netdev); dbg("%s: LINK CAME BACK\n", __func__); } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res == -ENODEV) netif_device_detach(dev->netdev); else if (res) err ("can't resubmit intr, %s-%s/input0, status %d", dev->udev->bus->bus_name, dev->udev->devpath, res); } static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_detach(dev->netdev); if (netif_running(dev->netdev)) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->intr_urb); } return 0; } static int rtl8150_resume(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_attach(dev->netdev); if (netif_running(dev->netdev)) { dev->rx_urb->status = 0; dev->rx_urb->actual_length = 0; read_bulk_callback(dev->rx_urb); dev->intr_urb->status = 0; dev->intr_urb->actual_length = 0; intr_callback(dev->intr_urb); } return 0; } /* ** ** network related part of the code ** */ static void fill_skb_pool(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) continue; skb = dev_alloc_skb(RTL8150_MTU + 2); if (!skb) { return; } skb_reserve(skb, 2); dev->rx_skb_pool[i] = skb; } } static void free_skb_pool(rtl8150_t *dev) { int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) if (dev->rx_skb_pool[i]) dev_kfree_skb(dev->rx_skb_pool[i]); } static void rx_fixup(unsigned long data) { struct rtl8150 *dev = (struct rtl8150 *)data; struct sk_buff *skb; int status; spin_lock_irq(&dev->rx_pool_lock); fill_skb_pool(dev); spin_unlock_irq(&dev->rx_pool_lock); if (test_bit(RX_URB_FAIL, &dev->flags)) if (dev->rx_skb) goto try_again; spin_lock_irq(&dev->rx_pool_lock); skb = pull_skb(dev); spin_unlock_irq(&dev->rx_pool_lock); if (skb == NULL) goto tlsched; dev->rx_skb = skb; 
usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); try_again: status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (status == -ENODEV) { netif_device_detach(dev->netdev); } else if (status) { set_bit(RX_URB_FAIL, &dev->flags); goto tlsched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; tlsched: tasklet_schedule(&dev->tl); } static int enable_net_traffic(rtl8150_t * dev) { u8 cr, tcr, rcr, msr; if (!rtl8150_reset(dev)) { dev_warn(&dev->udev->dev, "device reset failed\n"); } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; dev->rx_creg = cpu_to_le16(rcr); tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) set_bit(RTL8150_HW_CRC, &dev->flags); set_registers(dev, RCR, 1, &rcr); set_registers(dev, TCR, 1, &tcr); set_registers(dev, CR, 1, &cr); get_registers(dev, MSR, 1, &msr); return 0; } static void disable_net_traffic(rtl8150_t * dev) { u8 cr; get_registers(dev, CR, 1, &cr); cr &= 0xf3; set_registers(dev, CR, 1, &cr); } static void rtl8150_tx_timeout(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); dev_warn(&netdev->dev, "Tx timeout.\n"); usb_unlink_urb(dev->tx_urb); netdev->stats.tx_errors++; } static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { dev->rx_creg |= cpu_to_le16(0x0001); dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { dev->rx_creg &= cpu_to_le16(0xfffe); dev->rx_creg |= cpu_to_le16(0x0002); dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ dev->rx_creg &= cpu_to_le16(0x00fc); } async_set_registers(dev, RCR, 2); netif_wake_queue(netdev); } static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int count, 
res; netif_stop_queue(netdev); count = (skb->len < 60) ? 60 : skb->len; count = (count & 0x3f) ? count : count + 1; dev->tx_skb = skb; usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), skb->data, count, write_bulk_callback, dev); if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) { /* Can we get/handle EPIPE here? */ if (res == -ENODEV) netif_device_detach(dev->netdev); else { dev_warn(&netdev->dev, "failed tx_urb %d\n", res); netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; netdev->trans_start = jiffies; } return NETDEV_TX_OK; } static void set_carrier(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); short tmp; get_registers(dev, CSCR, 2, &tmp); if (tmp & CSCR_LINK_STATUS) netif_carrier_on(netdev); else netif_carrier_off(netdev); } static int rtl8150_open(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int res; if (dev->rx_skb == NULL) dev->rx_skb = pull_skb(dev); if (!dev->rx_skb) return -ENOMEM; set_registers(dev, IDR, 6, netdev->dev_addr); usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "rx_urb submit failed: %d\n", res); return res; } usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3), dev->intr_buff, INTBUFSIZE, intr_callback, dev, dev->intr_interval); if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "intr_urb submit failed: %d\n", res); usb_kill_urb(dev->rx_urb); return res; } enable_net_traffic(dev); set_carrier(netdev); netif_start_queue(netdev); return res; } static int rtl8150_close(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int res = 0; netif_stop_queue(netdev); if 
(!test_bit(RTL8150_UNPLUG, &dev->flags)) disable_net_traffic(dev); unlink_all_urbs(dev); return res; } static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { rtl8150_t *dev = netdev_priv(netdev); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info); } static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { rtl8150_t *dev = netdev_priv(netdev); short lpa, bmcr; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; ecmd->phy_address = dev->phy; get_registers(dev, BMCR, 2, &bmcr); get_registers(dev, ANLP, 2, &lpa); if (bmcr & BMCR_ANENABLE) { u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); ethtool_cmd_speed_set(ecmd, speed); ecmd->autoneg = AUTONEG_ENABLE; if (speed == SPEED_100) ecmd->duplex = (lpa & LPA_100FULL) ? DUPLEX_FULL : DUPLEX_HALF; else ecmd->duplex = (lpa & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF; } else { ecmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; } return 0; } static const struct ethtool_ops ops = { .get_drvinfo = rtl8150_get_drvinfo, .get_settings = rtl8150_get_settings, .get_link = ethtool_op_get_link }; static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { rtl8150_t *dev = netdev_priv(netdev); u16 *data = (u16 *) & rq->ifr_ifru; int res = 0; switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) return -EPERM; write_mii_word(dev, dev->phy, (data[1] & 0x1f), data[2]); break; default: res = -EOPNOTSUPP; } return res; } static const struct net_device_ops rtl8150_netdev_ops = { .ndo_open = rtl8150_open, .ndo_stop = rtl8150_close, .ndo_do_ioctl = rtl8150_ioctl, .ndo_start_xmit = rtl8150_start_xmit, .ndo_tx_timeout = rtl8150_tx_timeout, .ndo_set_rx_mode = rtl8150_set_multicast, .ndo_set_mac_address = rtl8150_set_mac_address, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int rtl8150_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); rtl8150_t *dev; struct net_device *netdev; netdev = alloc_etherdev(sizeof(rtl8150_t)); if (!netdev) return -ENOMEM; dev = netdev_priv(netdev); dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!dev->intr_buff) { free_netdev(netdev); return -ENOMEM; } tasklet_init(&dev->tl, rx_fixup, (unsigned long)dev); spin_lock_init(&dev->rx_pool_lock); dev->udev = udev; dev->netdev = netdev; netdev->netdev_ops = &rtl8150_netdev_ops; netdev->watchdog_timeo = RTL8150_TX_TIMEOUT; SET_ETHTOOL_OPS(netdev, &ops); dev->intr_interval = 100; /* 100ms */ if (!alloc_all_urbs(dev)) { err("out of memory"); goto out; } if (!rtl8150_reset(dev)) { err("couldn't reset the device"); goto out1; } fill_skb_pool(dev); set_ethernet_addr(dev); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); if 
(register_netdev(netdev) != 0) { err("couldn't register the device"); goto out2; } dev_info(&intf->dev, "%s: rtl8150 is detected\n", netdev->name); return 0; out2: usb_set_intfdata(intf, NULL); free_skb_pool(dev); out1: free_all_urbs(dev); out: kfree(dev->intr_buff); free_netdev(netdev); return -EIO; } static void rtl8150_disconnect(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (dev) { set_bit(RTL8150_UNPLUG, &dev->flags); tasklet_kill(&dev->tl); unregister_netdev(dev->netdev); unlink_all_urbs(dev); free_all_urbs(dev); free_skb_pool(dev); if (dev->rx_skb) dev_kfree_skb(dev->rx_skb); kfree(dev->intr_buff); free_netdev(dev->netdev); } } static struct usb_driver rtl8150_driver = { .name = driver_name, .probe = rtl8150_probe, .disconnect = rtl8150_disconnect, .id_table = rtl8150_table, .suspend = rtl8150_suspend, .resume = rtl8150_resume }; module_usb_driver(rtl8150_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
syhost/android_kernel_kitkat
arch/powerpc/perf/power7-pmu.c
4797
9850
/* * Performance counter support for POWER7 processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> /* * Bits in event code for POWER7 */ #define PM_PMC_SH 16 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0xf #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) #define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf #define PM_COMBINE_SH 11 /* Combined event bit */ #define PM_COMBINE_MSK 1 #define PM_COMBINE_MSKS 0x800 #define PM_L2SEL_SH 8 /* L2 event select */ #define PM_L2SEL_MSK 7 #define PM_PMCSEL_MSK 0xff /* * Bits in MMCR1 for POWER7 */ #define MMCR1_TTM0SEL_SH 60 #define MMCR1_TTM1SEL_SH 56 #define MMCR1_TTM2SEL_SH 52 #define MMCR1_TTM3SEL_SH 48 #define MMCR1_TTMSEL_MSK 0xf #define MMCR1_L2SEL_SH 45 #define MMCR1_L2SEL_MSK 7 #define MMCR1_PMC1_COMBINE_SH 35 #define MMCR1_PMC2_COMBINE_SH 34 #define MMCR1_PMC3_COMBINE_SH 33 #define MMCR1_PMC4_COMBINE_SH 32 #define MMCR1_PMC1SEL_SH 24 #define MMCR1_PMC2SEL_SH 16 #define MMCR1_PMC3SEL_SH 8 #define MMCR1_PMC4SEL_SH 0 #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 * [ ><><><><><><> * NC P6P5P4P3P2P1 * * NC - number of counters * 15: NC error 0x8000 * 12-14: number of events needing PMC1-4 0x7000 * * P6 * 11: P6 error 0x800 * 10-11: Count of events needing PMC6 * * P1..P5 * 0-9: Count of events needing PMC1..PMC5 */ static int power7_get_constraint(u64 event, unsigned long *maskp, 
unsigned long *valp) { int pmc, sh; unsigned long mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) return -1; } if (pmc < 5) { /* need a counter from PMC1-4 set */ mask |= 0x8000; value |= 0x1000; } *maskp = mask; *valp = value; return 0; } #define MAX_ALT 2 /* at most 2 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x200f2, 0x300f2 }, /* PM_INST_DISP */ { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */ { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */ }; /* * Scan the alternatives table for a match and return the * index into the alternatives table if found, else -1. */ static int find_alternative(u64 event) { int i, j; for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { if (event < event_alternatives[i][0]) break; for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) if (event == event_alternatives[i][j]) return i; } return -1; } static s64 find_alternative_decode(u64 event) { int pmc, psel; /* this only handles the 4x decode events */ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = event & PM_PMCSEL_MSK; if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40) return event - (1 << PM_PMC_SH) + 8; if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48) return event + (1 << PM_PMC_SH) - 8; return -1; } static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; s64 ae; alt[0] = event; nalt = 1; i = find_alternative(event); if (i >= 0) { for (j = 0; j < MAX_ALT; ++j) { ae = event_alternatives[i][j]; if (ae && ae != event) alt[nalt++] = ae; } } else { ae = find_alternative_decode(event); if (ae > 0) alt[nalt++] = ae; } if (flags & PPMU_ONLY_COUNT_RUN) { /* * We're only counting in RUN state, * so PM_CYC is equivalent to PM_RUN_CYC * and PM_INST_CMPL === PM_RUN_INST_CMPL. 
* This doesn't include alternatives that don't provide * any extra flexibility in assigning PMCs. */ j = nalt; for (i = 0; i < nalt; ++i) { switch (alt[i]) { case 0x1e: /* PM_CYC */ alt[j++] = 0x600f4; /* PM_RUN_CYC */ break; case 0x600f4: /* PM_RUN_CYC */ alt[j++] = 0x1e; break; case 0x2: /* PM_PPC_CMPL */ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ break; case 0x500fa: /* PM_RUN_INST_CMPL */ alt[j++] = 0x2; /* PM_PPC_CMPL */ break; } } nalt = j; } return nalt; } /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ static int power7_marked_instr_event(u64 event) { int pmc, psel; int unit; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ if (pmc >= 5) return 0; switch (psel >> 4) { case 2: return pmc == 2 || pmc == 4; case 3: if (psel == 0x3c) return pmc == 1; if (psel == 0x3e) return pmc != 2; return 1; case 4: case 5: return unit == 0xd; case 6: if (psel == 0x64) return pmc >= 3; case 8: return unit == 0xd; } return 0; } static int power7_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; unsigned int pmc, unit, combine, l2sel, psel; unsigned int pmc_inuse = 0; int i; /* First pass to count resource use */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; if (pmc_inuse & (1 << (pmc - 1))) return -1; pmc_inuse |= 1 << (pmc - 1); } } /* Second pass: assign PMCs, set all MMCR1 fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; psel = event[i] & PM_PMCSEL_MSK; if (!pmc) { /* Bus event or any-PMC direct event */ for (pmc = 0; pmc 
< 4; ++pmc) { if (!(pmc_inuse & (1 << pmc))) break; } if (pmc >= 4) return -1; pmc_inuse |= 1 << pmc; } else { /* Direct or decoded event */ --pmc; } if (pmc <= 3) { mmcr1 |= (unsigned long) unit << (MMCR1_TTM0SEL_SH - 4 * pmc); mmcr1 |= (unsigned long) combine << (MMCR1_PMC1_COMBINE_SH - pmc); mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); if (unit == 6) /* L2 events */ mmcr1 |= (unsigned long) l2sel << MMCR1_L2SEL_SH; } if (power7_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; hwc[i] = pmc; } /* Return MMCRx values */ mmcr[0] = 0; if (pmc_inuse & 1) mmcr[0] = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) mmcr[0] |= MMCR0_PMCjCE; mmcr[1] = mmcr1; mmcr[2] = mmcra; return 0; } static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) { if (pmc <= 3) mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power7_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ [PERF_COUNT_HW_INSTRUCTIONS] = 2, [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. 
*/ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0xc880, 0x400f0 }, [C(OP_WRITE)] = { 0, 0x300f0 }, [C(OP_PREFETCH)] = { 0xd8b8, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x200fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0x408a, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x16080, 0x26080 }, [C(OP_WRITE)] = { 0x16082, 0x26082 }, [C(OP_PREFETCH)] = { 0, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x300fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x400fc }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x10068, 0x400f6 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static struct power_pmu power7_pmu = { .name = "POWER7", .n_counter = 6, .max_alternatives = MAX_ALT + 1, .add_fields = 0x1555ul, .test_adder = 0x3000ul, .compute_mmcr = power7_compute_mmcr, .get_constraint = power7_get_constraint, .get_alternatives = power7_get_alternatives, .disable_pmc = power7_disable_pmc, .flags = PPMU_ALT_SIPR, .n_generic = ARRAY_SIZE(power7_generic_events), .generic_events = power7_generic_events, .cache_events = &power7_cache_events, }; static int __init init_power7_pmu(void) { if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) return -ENODEV; return register_power_pmu(&power7_pmu); } early_initcall(init_power7_pmu);
gpl-2.0
Arc-Team/android_kernel_samsung_afyonlte
arch/arm/mach-sa1100/ssp.c
9661
4892
/* * linux/arch/arm/mach-sa1100/ssp.c * * Copyright (C) 2003 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Generic SSP driver. This provides the generic core for simple * IO-based SSP applications. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <asm/hardware/ssp.h> #define TIMEOUT 100000 static irqreturn_t ssp_interrupt(int irq, void *dev_id) { unsigned int status = Ser4SSSR; if (status & SSSR_ROR) printk(KERN_WARNING "SSP: receiver overrun\n"); Ser4SSSR = SSSR_ROR; return status ? IRQ_HANDLED : IRQ_NONE; } /** * ssp_write_word - write a word to the SSP port * @data: 16-bit, MSB justified data to write. * * Wait for a free entry in the SSP transmit FIFO, and write a data * word to the SSP port. Wait for the SSP port to start sending * the data. * * The caller is expected to perform the necessary locking. * * Returns: * %-ETIMEDOUT timeout occurred * 0 success */ int ssp_write_word(u16 data) { int timeout = TIMEOUT; while (!(Ser4SSSR & SSSR_TNF)) { if (!--timeout) return -ETIMEDOUT; cpu_relax(); } Ser4SSDR = data; timeout = TIMEOUT; while (!(Ser4SSSR & SSSR_BSY)) { if (!--timeout) return -ETIMEDOUT; cpu_relax(); } return 0; } /** * ssp_read_word - read a word from the SSP port * * Wait for a data word in the SSP receive FIFO, and return the * received data. Data is LSB justified. * * Note: Currently, if data is not expected to be received, this * function will wait for ever. * * The caller is expected to perform the necessary locking. 
* * Returns: * %-ETIMEDOUT timeout occurred * 16-bit data success */ int ssp_read_word(u16 *data) { int timeout = TIMEOUT; while (!(Ser4SSSR & SSSR_RNE)) { if (!--timeout) return -ETIMEDOUT; cpu_relax(); } *data = (u16)Ser4SSDR; return 0; } /** * ssp_flush - flush the transmit and receive FIFOs * * Wait for the SSP to idle, and ensure that the receive FIFO * is empty. * * The caller is expected to perform the necessary locking. * * Returns: * %-ETIMEDOUT timeout occurred * 0 success */ int ssp_flush(void) { int timeout = TIMEOUT * 2; do { while (Ser4SSSR & SSSR_RNE) { if (!--timeout) return -ETIMEDOUT; (void) Ser4SSDR; } if (!--timeout) return -ETIMEDOUT; } while (Ser4SSSR & SSSR_BSY); return 0; } /** * ssp_enable - enable the SSP port * * Turn on the SSP port. */ void ssp_enable(void) { Ser4SSCR0 |= SSCR0_SSE; } /** * ssp_disable - shut down the SSP port * * Turn off the SSP port, optionally powering it down. */ void ssp_disable(void) { Ser4SSCR0 &= ~SSCR0_SSE; } /** * ssp_save_state - save the SSP configuration * @ssp: pointer to structure to save SSP configuration * * Save the configured SSP state for suspend. */ void ssp_save_state(struct ssp_state *ssp) { ssp->cr0 = Ser4SSCR0; ssp->cr1 = Ser4SSCR1; Ser4SSCR0 &= ~SSCR0_SSE; } /** * ssp_restore_state - restore a previously saved SSP configuration * @ssp: pointer to configuration saved by ssp_save_state * * Restore the SSP configuration saved previously by ssp_save_state. */ void ssp_restore_state(struct ssp_state *ssp) { Ser4SSSR = SSSR_ROR; Ser4SSCR0 = ssp->cr0 & ~SSCR0_SSE; Ser4SSCR1 = ssp->cr1; Ser4SSCR0 = ssp->cr0; } /** * ssp_init - setup the SSP port * * initialise and claim resources for the SSP port. 
* * Returns: * %-ENODEV if the SSP port is unavailable * %-EBUSY if the resources are already in use * %0 on success */ int ssp_init(void) { int ret; if (!(PPAR & PPAR_SPR) && (Ser4MCCR0 & MCCR0_MCE)) return -ENODEV; if (!request_mem_region(__PREG(Ser4SSCR0), 0x18, "SSP")) { return -EBUSY; } Ser4SSSR = SSSR_ROR; ret = request_irq(IRQ_Ser4SSP, ssp_interrupt, 0, "SSP", NULL); if (ret) goto out_region; return 0; out_region: release_mem_region(__PREG(Ser4SSCR0), 0x18); return ret; } /** * ssp_exit - undo the effects of ssp_init * * release and free resources for the SSP port. */ void ssp_exit(void) { Ser4SSCR0 &= ~SSCR0_SSE; free_irq(IRQ_Ser4SSP, NULL); release_mem_region(__PREG(Ser4SSCR0), 0x18); } MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("SA11x0 SSP PIO driver"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ssp_write_word); EXPORT_SYMBOL(ssp_read_word); EXPORT_SYMBOL(ssp_flush); EXPORT_SYMBOL(ssp_enable); EXPORT_SYMBOL(ssp_disable); EXPORT_SYMBOL(ssp_save_state); EXPORT_SYMBOL(ssp_restore_state); EXPORT_SYMBOL(ssp_init); EXPORT_SYMBOL(ssp_exit);
gpl-2.0
bshiznit/android_kernel_asus_grouper
drivers/media/dvb/mantis/mantis_ioc.c
10429
3192
/* Mantis PCI bridge driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <asm/io.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "mantis_common.h" #include "mantis_reg.h" #include "mantis_ioc.h" static int read_eeprom_bytes(struct mantis_pci *mantis, u8 reg, u8 *data, u8 length) { struct i2c_adapter *adapter = &mantis->adapter; int err; u8 buf = reg; struct i2c_msg msg[] = { { .addr = 0x50, .flags = 0, .buf = &buf, .len = 1 }, { .addr = 0x50, .flags = I2C_M_RD, .buf = data, .len = length }, }; err = i2c_transfer(adapter, msg, 2); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >", err, data[0], data[1]); return err; } return 0; } int mantis_get_mac(struct mantis_pci *mantis) { int err; u8 mac_addr[6] = {0}; err = read_eeprom_bytes(mantis, 0x08, mac_addr, 6); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis EEPROM read error <%d>", err); return err; } dprintk(MANTIS_ERROR, 0, " MAC Address=[%pM]\n", mac_addr); return 0; } EXPORT_SYMBOL_GPL(mantis_get_mac); /* Turn the given bit on or off. 
*/ void mantis_gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value) { u32 cur; dprintk(MANTIS_DEBUG, 1, "Set Bit <%d> to <%d>", bitpos, value); cur = mmread(MANTIS_GPIF_ADDR); if (value) mantis->gpio_status = cur | (1 << bitpos); else mantis->gpio_status = cur & (~(1 << bitpos)); dprintk(MANTIS_DEBUG, 1, "GPIO Value <%02x>", mantis->gpio_status); mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR); mmwrite(0x00, MANTIS_GPIF_DOUT); } EXPORT_SYMBOL_GPL(mantis_gpio_set_bits); int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl) { u32 reg; reg = mmread(MANTIS_CONTROL); switch (stream_ctl) { case STREAM_TO_HIF: dprintk(MANTIS_DEBUG, 1, "Set stream to HIF"); reg &= 0xff - MANTIS_BYPASS; mmwrite(reg, MANTIS_CONTROL); reg |= MANTIS_BYPASS; mmwrite(reg, MANTIS_CONTROL); break; case STREAM_TO_CAM: dprintk(MANTIS_DEBUG, 1, "Set stream to CAM"); reg |= MANTIS_BYPASS; mmwrite(reg, MANTIS_CONTROL); reg &= 0xff - MANTIS_BYPASS; mmwrite(reg, MANTIS_CONTROL); break; default: dprintk(MANTIS_ERROR, 1, "Unknown MODE <%02x>", stream_ctl); return -1; } return 0; } EXPORT_SYMBOL_GPL(mantis_stream_control);
gpl-2.0
silence-star/android_kernel_nubia_NX503A
fs/ocfs2/uptodate.c
10941
18082
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * uptodate.c * * Tracking the up-to-date-ness of a local buffer_head with respect to * the cluster. * * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * * Standard buffer head caching flags (uptodate, etc) are insufficient * in a clustered environment - a buffer may be marked up to date on * our local node but could have been modified by another cluster * member. As a result an additional (and performant) caching scheme * is required. A further requirement is that we consume as little * memory as possible - we never pin buffer_head structures in order * to cache them. * * We track the existence of up to date buffers on the inodes which * are associated with them. Because we don't want to pin * buffer_heads, this is only a (strong) hint and several other checks * are made in the I/O path to ensure that we don't use a stale or * invalid buffer without going to disk: * - buffer_jbd is used liberally - if a bh is in the journal on * this node then it *must* be up to date. 
* - the standard buffer_uptodate() macro is used to detect buffers * which may be invalid (even if we have an up to date tracking * item for them) * * For a full understanding of how this code works together, one * should read the callers in dlmglue.c, the I/O functions in * buffer_head_io.c and ocfs2_journal_access in journal.c */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/buffer_head.h> #include <linux/rbtree.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "inode.h" #include "uptodate.h" #include "ocfs2_trace.h" struct ocfs2_meta_cache_item { struct rb_node c_node; sector_t c_block; }; static struct kmem_cache *ocfs2_uptodate_cachep = NULL; u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); return ci->ci_ops->co_owner(ci); } struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); return ci->ci_ops->co_get_super(ci); } static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); ci->ci_ops->co_cache_lock(ci); } static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); ci->ci_ops->co_cache_unlock(ci); } void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); ci->ci_ops->co_io_lock(ci); } void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci) { BUG_ON(!ci || !ci->ci_ops); ci->ci_ops->co_io_unlock(ci); } static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci, int clear) { ci->ci_flags |= OCFS2_CACHE_FL_INLINE; ci->ci_num_cached = 0; if (clear) { ci->ci_created_trans = 0; ci->ci_last_trans = 0; } } void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci, const struct ocfs2_caching_operations *ops) { BUG_ON(!ops); ci->ci_ops = ops; ocfs2_metadata_cache_reset(ci, 1); } void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci) { 
ocfs2_metadata_cache_purge(ci); ocfs2_metadata_cache_reset(ci, 1); } /* No lock taken here as 'root' is not expected to be visible to other * processes. */ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root) { unsigned int purged = 0; struct rb_node *node; struct ocfs2_meta_cache_item *item; while ((node = rb_last(root)) != NULL) { item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); trace_ocfs2_purge_copied_metadata_tree( (unsigned long long) item->c_block); rb_erase(&item->c_node, root); kmem_cache_free(ocfs2_uptodate_cachep, item); purged++; } return purged; } /* Called from locking and called from ocfs2_clear_inode. Dump the * cache for a given inode. * * This function is a few more lines longer than necessary due to some * accounting done here, but I think it's worth tracking down those * bugs sooner -- Mark */ void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci) { unsigned int tree, to_purge, purged; struct rb_root root = RB_ROOT; BUG_ON(!ci || !ci->ci_ops); ocfs2_metadata_cache_lock(ci); tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); to_purge = ci->ci_num_cached; trace_ocfs2_metadata_cache_purge( (unsigned long long)ocfs2_metadata_cache_owner(ci), to_purge, tree); /* If we're a tree, save off the root so that we can safely * initialize the cache. We do the work to free tree members * without the spinlock. */ if (tree) root = ci->ci_cache.ci_tree; ocfs2_metadata_cache_reset(ci, 0); ocfs2_metadata_cache_unlock(ci); purged = ocfs2_purge_copied_metadata_tree(&root); /* If possible, track the number wiped so that we can more * easily detect counting errors. Unfortunately, this is only * meaningful for trees. */ if (tree && purged != to_purge) mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n", (unsigned long long)ocfs2_metadata_cache_owner(ci), to_purge, purged); } /* Returns the index in the cache array, -1 if not found. * Requires ip_lock. 
*/ static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci, sector_t item) { int i; for (i = 0; i < ci->ci_num_cached; i++) { if (item == ci->ci_cache.ci_array[i]) return i; } return -1; } /* Returns the cache item if found, otherwise NULL. * Requires ip_lock. */ static struct ocfs2_meta_cache_item * ocfs2_search_cache_tree(struct ocfs2_caching_info *ci, sector_t block) { struct rb_node * n = ci->ci_cache.ci_tree.rb_node; struct ocfs2_meta_cache_item *item = NULL; while (n) { item = rb_entry(n, struct ocfs2_meta_cache_item, c_node); if (block < item->c_block) n = n->rb_left; else if (block > item->c_block) n = n->rb_right; else return item; } return NULL; } static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, struct buffer_head *bh) { int index = -1; struct ocfs2_meta_cache_item *item = NULL; ocfs2_metadata_cache_lock(ci); trace_ocfs2_buffer_cached_begin( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long) bh->b_blocknr, !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) index = ocfs2_search_cache_array(ci, bh->b_blocknr); else item = ocfs2_search_cache_tree(ci, bh->b_blocknr); ocfs2_metadata_cache_unlock(ci); trace_ocfs2_buffer_cached_end(index, item); return (index != -1) || (item != NULL); } /* Warning: even if it returns true, this does *not* guarantee that * the block is stored in our inode metadata cache. * * This can be called under lock_buffer() */ int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { /* Doesn't matter if the bh is in our cache or not -- if it's * not marked uptodate then we know it can't have correct * data. */ if (!buffer_uptodate(bh)) return 0; /* OCFS2 does not allow multiple nodes to be changing the same * block at the same time. */ if (buffer_jbd(bh)) return 1; /* Ok, locally the buffer is marked as up to date, now search * our cache to see if we can trust that. 
*/ return ocfs2_buffer_cached(ci, bh); } /* * Determine whether a buffer is currently out on a read-ahead request. * ci_io_sem should be held to serialize submitters with the logic here. */ int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci, struct buffer_head *bh) { return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh); } /* Requires ip_lock */ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, sector_t block) { BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); trace_ocfs2_append_cache_array( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)block, ci->ci_num_cached); ci->ci_cache.ci_array[ci->ci_num_cached] = block; ci->ci_num_cached++; } /* By now the caller should have checked that the item does *not* * exist in the tree. * Requires ip_lock. */ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci, struct ocfs2_meta_cache_item *new) { sector_t block = new->c_block; struct rb_node *parent = NULL; struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; struct ocfs2_meta_cache_item *tmp; trace_ocfs2_insert_cache_tree( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)block, ci->ci_num_cached); while(*p) { parent = *p; tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node); if (block < tmp->c_block) p = &(*p)->rb_left; else if (block > tmp->c_block) p = &(*p)->rb_right; else { /* This should never happen! */ mlog(ML_ERROR, "Duplicate block %llu cached!\n", (unsigned long long) block); BUG(); } } rb_link_node(&new->c_node, parent, p); rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree); ci->ci_num_cached++; } /* co_cache_lock() must be held */ static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci) { return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) && (ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY); } /* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. 
NULL the * pointers in tree after we use them - this allows caller to detect * when to free in case of error. * * The co_cache_lock() must be held. */ static void ocfs2_expand_cache(struct ocfs2_caching_info *ci, struct ocfs2_meta_cache_item **tree) { int i; mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY, "Owner %llu, num cached = %u, should be %u\n", (unsigned long long)ocfs2_metadata_cache_owner(ci), ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY); mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE), "Owner %llu not marked as inline anymore!\n", (unsigned long long)ocfs2_metadata_cache_owner(ci)); /* Be careful to initialize the tree members *first* because * once the ci_tree is used, the array is junk... */ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) tree[i]->c_block = ci->ci_cache.ci_array[i]; ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE; ci->ci_cache.ci_tree = RB_ROOT; /* this will be set again by __ocfs2_insert_cache_tree */ ci->ci_num_cached = 0; for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { __ocfs2_insert_cache_tree(ci, tree[i]); tree[i] = NULL; } trace_ocfs2_expand_cache( (unsigned long long)ocfs2_metadata_cache_owner(ci), ci->ci_flags, ci->ci_num_cached); } /* Slow path function - memory allocation is necessary. See the * comment above ocfs2_set_buffer_uptodate for more information. */ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, sector_t block, int expand_tree) { int i; struct ocfs2_meta_cache_item *new = NULL; struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = { NULL, }; trace_ocfs2_set_buffer_uptodate( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)block, expand_tree); new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); if (!new) { mlog_errno(-ENOMEM); return; } new->c_block = block; if (expand_tree) { /* Do *not* allocate an array here - the removal code * has no way of tracking that. 
*/ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); if (!tree[i]) { mlog_errno(-ENOMEM); goto out_free; } /* These are initialized in ocfs2_expand_cache! */ } } ocfs2_metadata_cache_lock(ci); if (ocfs2_insert_can_use_array(ci)) { /* Ok, items were removed from the cache in between * locks. Detect this and revert back to the fast path */ ocfs2_append_cache_array(ci, block); ocfs2_metadata_cache_unlock(ci); goto out_free; } if (expand_tree) ocfs2_expand_cache(ci, tree); __ocfs2_insert_cache_tree(ci, new); ocfs2_metadata_cache_unlock(ci); new = NULL; out_free: if (new) kmem_cache_free(ocfs2_uptodate_cachep, new); /* If these were used, then ocfs2_expand_cache re-set them to * NULL for us. */ if (tree[0]) { for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) if (tree[i]) kmem_cache_free(ocfs2_uptodate_cachep, tree[i]); } } /* Item insertion is guarded by co_io_lock(), so the insertion path takes * advantage of this by not rechecking for a duplicate insert during * the slow case. Additionally, if the cache needs to be bumped up to * a tree, the code will not recheck after acquiring the lock -- * multiple paths cannot be expanding to a tree at the same time. * * The slow path takes into account that items can be removed * (including the whole tree wiped and reset) when this process it out * allocating memory. In those cases, it reverts back to the fast * path. * * Note that this function may actually fail to insert the block if * memory cannot be allocated. This is not fatal however (but may * result in a performance penalty) * * Readahead buffers can be passed in here before the I/O request is * completed. */ void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { int expand; /* The block may very well exist in our cache already, so avoid * doing any more work in that case. 
*/ if (ocfs2_buffer_cached(ci, bh)) return; trace_ocfs2_set_buffer_uptodate_begin( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)bh->b_blocknr); /* No need to recheck under spinlock - insertion is guarded by * co_io_lock() */ ocfs2_metadata_cache_lock(ci); if (ocfs2_insert_can_use_array(ci)) { /* Fast case - it's an array and there's a free * spot. */ ocfs2_append_cache_array(ci, bh->b_blocknr); ocfs2_metadata_cache_unlock(ci); return; } expand = 0; if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { /* We need to bump things up to a tree. */ expand = 1; } ocfs2_metadata_cache_unlock(ci); __ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand); } /* Called against a newly allocated buffer. Most likely nobody should * be able to read this sort of metadata while it's still being * allocated, but this is careful to take co_io_lock() anyway. */ void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { /* This should definitely *not* exist in our cache */ BUG_ON(ocfs2_buffer_cached(ci, bh)); set_buffer_uptodate(bh); ocfs2_metadata_cache_io_lock(ci); ocfs2_set_buffer_uptodate(ci, bh); ocfs2_metadata_cache_io_unlock(ci); } /* Requires ip_lock. */ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci, int index) { sector_t *array = ci->ci_cache.ci_array; int bytes; BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY); BUG_ON(index >= ci->ci_num_cached); BUG_ON(!ci->ci_num_cached); trace_ocfs2_remove_metadata_array( (unsigned long long)ocfs2_metadata_cache_owner(ci), index, ci->ci_num_cached); ci->ci_num_cached--; /* don't need to copy if the array is now empty, or if we * removed at the tail */ if (ci->ci_num_cached && index < ci->ci_num_cached) { bytes = sizeof(sector_t) * (ci->ci_num_cached - index); memmove(&array[index], &array[index + 1], bytes); } } /* Requires ip_lock. 
*/ static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, struct ocfs2_meta_cache_item *item) { trace_ocfs2_remove_metadata_tree( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)item->c_block); rb_erase(&item->c_node, &ci->ci_cache.ci_tree); ci->ci_num_cached--; } static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci, sector_t block) { int index; struct ocfs2_meta_cache_item *item = NULL; ocfs2_metadata_cache_lock(ci); trace_ocfs2_remove_block_from_cache( (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long) block, ci->ci_num_cached, ci->ci_flags); if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { index = ocfs2_search_cache_array(ci, block); if (index != -1) ocfs2_remove_metadata_array(ci, index); } else { item = ocfs2_search_cache_tree(ci, block); if (item) ocfs2_remove_metadata_tree(ci, item); } ocfs2_metadata_cache_unlock(ci); if (item) kmem_cache_free(ocfs2_uptodate_cachep, item); } /* * Called when we remove a chunk of metadata from an inode. We don't * bother reverting things to an inlined array in the case of a remove * which moves us back under the limit. */ void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci, struct buffer_head *bh) { sector_t block = bh->b_blocknr; ocfs2_remove_block_from_cache(ci, block); } /* Called when we remove xattr clusters from an inode. 
*/ void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci, sector_t block, u32 c_len) { struct super_block *sb = ocfs2_metadata_cache_get_super(ci); unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len; for (i = 0; i < b_len; i++, block++) ocfs2_remove_block_from_cache(ci, block); } int __init init_ocfs2_uptodate_cache(void) { ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate", sizeof(struct ocfs2_meta_cache_item), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ocfs2_uptodate_cachep) return -ENOMEM; return 0; } void exit_ocfs2_uptodate_cache(void) { if (ocfs2_uptodate_cachep) kmem_cache_destroy(ocfs2_uptodate_cachep); }
gpl-2.0
ryncsn/jordan-kernel
arch/mips/dec/prom/identify.c
13757
4690
/*
 * identify.c: machine identification code.
 *
 * Copyright (C) 1998 Harald Koerfgen and Paul M. Antoine
 * Copyright (C) 2002, 2003, 2004, 2005  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/bootinfo.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/kn02ba.h>
#include <asm/dec/kn02ca.h>
#include <asm/dec/kn03.h>
#include <asm/dec/kn230.h>
#include <asm/dec/prom.h>
#include <asm/dec/system.h>

#include "dectypes.h"

/*
 * Human-readable model names, indexed by MACH_* machine type.
 *
 * Fix: the original used the obsolete GNU designated-initializer
 * syntax "[index] value" (without '='); standard C99 requires
 * "[index] = value", which modern compilers enforce.
 */
static const char *dec_system_strings[] = {
	[MACH_DSUNKNOWN]	= "unknown DECstation",
	[MACH_DS23100]		= "DECstation 2100/3100",
	[MACH_DS5100]		= "DECsystem 5100",
	[MACH_DS5000_200]	= "DECstation 5000/200",
	[MACH_DS5000_1XX]	= "DECstation 5000/1xx",
	[MACH_DS5000_XX]	= "Personal DECstation 5000/xx",
	[MACH_DS5000_2X0]	= "DECstation 5000/2x0",
	[MACH_DS5400]		= "DECsystem 5400",
	[MACH_DS5500]		= "DECsystem 5500",
	[MACH_DS5800]		= "DECsystem 5800",
	[MACH_DS5900]		= "DECsystem 5900",
};

/*
 * Return "Digital <model>" for the detected machine type.  The string
 * is formatted once and cached in a static buffer.
 */
const char *get_system_type(void)
{
#define STR_BUF_LEN	64
	static char system[STR_BUF_LEN];
	static int called = 0;	/* format the buffer only on first call */

	if (called == 0) {
		called = 1;
		snprintf(system, STR_BUF_LEN, "Digital %s",
			 dec_system_strings[mips_machtype]);
	}

	return system;
}

/*
 * Setup essential system-specific memory addresses.  We need them
 * early.  Semantically the functions belong to prom/init.c, but they
 * are compact enough we want them inlined. --macro
 */
volatile u8 *dec_rtc_base;

EXPORT_SYMBOL(dec_rtc_base);

static inline void prom_init_kn01(void)
{
	dec_kn_slot_base = KN01_SLOT_BASE;
	dec_kn_slot_size = KN01_SLOT_SIZE;

	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
}

static inline void prom_init_kn230(void)
{
	dec_kn_slot_base = KN01_SLOT_BASE;
	dec_kn_slot_size = KN01_SLOT_SIZE;

	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
}

static inline void prom_init_kn02(void)
{
	dec_kn_slot_base = KN02_SLOT_BASE;
	dec_kn_slot_size = KN02_SLOT_SIZE;
	dec_tc_bus = 1;

	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN02_RTC);
}

static inline void prom_init_kn02xa(void)
{
	dec_kn_slot_base = KN02XA_SLOT_BASE;
	dec_kn_slot_size = IOASIC_SLOT_SIZE;
	dec_tc_bus = 1;

	ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
}

static inline void prom_init_kn03(void)
{
	dec_kn_slot_base = KN03_SLOT_BASE;
	dec_kn_slot_size = IOASIC_SLOT_SIZE;
	dec_tc_bus = 1;

	ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
}

/*
 * Decode the PROM system ID, set mips_machtype accordingly and run the
 * model-specific early address setup.
 */
void __init prom_identify_arch(u32 magic)
{
	unsigned char dec_cpunum, dec_firmrev, dec_etc, dec_systype;
	u32 dec_sysid;

	if (!prom_is_rex(magic)) {
		dec_sysid = simple_strtoul(prom_getenv("systype"),
					   (char **)0, 0);
	} else {
		dec_sysid = rex_getsysid();
		if (dec_sysid == 0) {
			printk("Zero sysid returned from PROM! "
			       "Assuming a PMAX-like machine.\n");
			dec_sysid = 1;
		}
	}

	dec_cpunum = (dec_sysid & 0xff000000) >> 24;
	dec_systype = (dec_sysid & 0xff0000) >> 16;
	dec_firmrev = (dec_sysid & 0xff00) >> 8;
	dec_etc = dec_sysid & 0xff;

	/*
	 * FIXME: This may not be an exhaustive list of DECStations/Servers!
	 * Put all model-specific initialisation calls here.
	 */
	switch (dec_systype) {
	case DS2100_3100:
		mips_machtype = MACH_DS23100;
		prom_init_kn01();
		break;
	case DS5100:		/* DS5100 MIPSMATE */
		mips_machtype = MACH_DS5100;
		prom_init_kn230();
		break;
	case DS5000_200:	/* DS5000 3max */
		mips_machtype = MACH_DS5000_200;
		prom_init_kn02();
		break;
	case DS5000_1XX:	/* DS5000/100 3min */
		mips_machtype = MACH_DS5000_1XX;
		prom_init_kn02xa();
		break;
	case DS5000_2X0:	/* DS5000/240 3max+ or DS5900 bigmax */
		mips_machtype = MACH_DS5000_2X0;
		prom_init_kn03();
		if (!(ioasic_read(IO_REG_SIR) & KN03_IO_INR_3MAXP))
			mips_machtype = MACH_DS5900;
		break;
	case DS5000_XX:		/* Personal DS5000/xx maxine */
		mips_machtype = MACH_DS5000_XX;
		prom_init_kn02xa();
		break;
	case DS5800:		/* DS5800 Isis */
		mips_machtype = MACH_DS5800;
		break;
	case DS5400:		/* DS5400 MIPSfair */
		mips_machtype = MACH_DS5400;
		break;
	case DS5500:		/* DS5500 MIPSfair-2 */
		mips_machtype = MACH_DS5500;
		break;
	default:
		mips_machtype = MACH_DSUNKNOWN;
		break;
	}

	if (mips_machtype == MACH_DSUNKNOWN)
		printk("This is an %s, id is %x\n",
		       dec_system_strings[mips_machtype],
		       dec_systype);
	else
		printk("This is a %s\n", dec_system_strings[mips_machtype]);
}
gpl-2.0
Outernet-Project/rpi-linux
arch/alpha/boot/tools/mkbb.c
13757
3562
/* This utility makes a bootblock suitable for the SRM console/miniloader */
/* Usage:
 *	mkbb <device> <lxboot>
 *
 * Where <device> is the name of the device to install the bootblock on,
 * and <lxboot> is the name of a bootblock to merge in.  This bootblock
 * contains the offset and size of the bootloader.  It must be exactly
 * 512 bytes long.
 */

#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>

/* Minimal definition of disklabel, so we don't have to include
 * asm/disklabel.h (confuses make)
 */
#ifndef MAXPARTITIONS
#define MAXPARTITIONS	8			/* max. # of partitions */
#endif

#ifndef u8
#define u8 unsigned char
#endif

#ifndef u16
#define u16 unsigned short
#endif

#ifndef u32
#define u32 unsigned int
#endif

struct disklabel {
    u32	d_magic;				/* must be DISKLABELMAGIC */
    u16	d_type, d_subtype;
    u8	d_typename[16];
    u8	d_packname[16];
    u32	d_secsize;
    u32	d_nsectors;
    u32	d_ntracks;
    u32	d_ncylinders;
    u32	d_secpercyl;
    u32	d_secprtunit;
    u16	d_sparespertrack;
    u16	d_sparespercyl;
    u32	d_acylinders;
    u16	d_rpm, d_interleave, d_trackskew, d_cylskew;
    u32	d_headswitch, d_trkseek, d_flags;
    u32	d_drivedata[5];
    u32	d_spare[5];
    u32	d_magic2;				/* must be DISKLABELMAGIC */
    u16	d_checksum;
    u16	d_npartitions;
    u32	d_bbsize, d_sbsize;
    struct d_partition {
	u32	p_size;
	u32	p_offset;
	u32	p_fsize;
	u8	p_fstype;
	u8	p_frag;
	u16	p_cpg;
    } d_partitions[MAXPARTITIONS];
};

/* A 512-byte sector viewed three ways: as the disklabel that lives at
 * offset 64, as 64 quadwords (the last one is the checksum), and as
 * raw bytes.
 */
typedef union __bootblock {
    struct {
	char			__pad1[64];
	struct disklabel	__label;
    } __u1;
    struct {
	unsigned long		__pad2[63];
	unsigned long		__checksum;
    } __u2;
    char		bootblock_bytes[512];
    unsigned long	bootblock_quadwords[64];
} bootblock;

#define	bootblock_label		__u1.__label
#define bootblock_checksum	__u2.__checksum

/*
 * Merge the disklabel already on <device> into the <lxboot> image,
 * recompute the SRM checksum and write the result back to sector 0.
 *
 * Fixes over the original:
 *  - all failure paths used exit(0), reporting *success* to the shell;
 *    they now exit(1) so scripts can detect errors.
 *  - sizeof yields size_t, so the diagnostic format is %zu, not %zd.
 */
int main(int argc, char ** argv)
{
    bootblock		bootblock_from_disk;
    bootblock		bootloader_image;
    int			dev, fd;
    int			i;
    int			nread;

    /* Make sure of the arg count */
    if(argc != 3) {
	fprintf(stderr, "Usage: %s device lxboot\n", argv[0]);
	exit(1);
    }

    /* First, open the device and make sure it's accessible */
    dev = open(argv[1], O_RDWR);
    if(dev < 0) {
	perror(argv[1]);
	exit(1);
    }

    /* Now open the lxboot and make sure it's reasonable */
    fd = open(argv[2], O_RDONLY);
    if(fd < 0) {
	perror(argv[2]);
	close(dev);
	exit(1);
    }

    /* Read in the lxboot */
    nread = read(fd, &bootloader_image, sizeof(bootblock));
    if(nread != sizeof(bootblock)) {
	perror("lxboot read");
	fprintf(stderr, "expected %zu, got %d\n", sizeof(bootblock), nread);
	exit(1);
    }

    /* Read in the bootblock from disk. */
    nread = read(dev, &bootblock_from_disk, sizeof(bootblock));
    if(nread != sizeof(bootblock)) {
	perror("bootblock read");
	fprintf(stderr, "expected %zu, got %d\n", sizeof(bootblock), nread);
	exit(1);
    }

    /* Swap the bootblock's disklabel into the bootloader */
    bootloader_image.bootblock_label = bootblock_from_disk.bootblock_label;

    /* Calculate the bootblock checksum: sum of the first 63 quadwords */
    bootloader_image.bootblock_checksum = 0;
    for(i = 0; i < 63; i++) {
	bootloader_image.bootblock_checksum +=
			bootloader_image.bootblock_quadwords[i];
    }

    /* Write the whole thing out! */
    lseek(dev, 0L, SEEK_SET);
    if(write(dev, &bootloader_image, sizeof(bootblock)) != sizeof(bootblock)) {
	perror("bootblock write");
	exit(1);
    }

    close(fd);
    close(dev);
    exit(0);
}
gpl-2.0
etnie/Huawei-Ascend-m860-kernel-source
arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
190
11471
/* * Intel SpeedStep SMI driver. * * (C) 2003 Hiroshi Miura <miura@da-cha.org> * * Licensed under the terms of the GNU GPL License version 2. * */ /********************************************************************* * SPEEDSTEP - DEFINITIONS * *********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/delay.h> #include <asm/ist.h> #include <asm/io.h> #include "speedstep-lib.h" /* speedstep system management interface port/command. * * These parameters are got from IST-SMI BIOS call. * If user gives it, these are used. * */ static int smi_port = 0; static int smi_cmd = 0; static unsigned int smi_sig = 0; /* info about the processor */ static unsigned int speedstep_processor = 0; /* * There are only two frequency states for each processor. Values * are in kHz for the time being. */ static struct cpufreq_frequency_table speedstep_freqs[] = { {SPEEDSTEP_HIGH, 0}, {SPEEDSTEP_LOW, 0}, {0, CPUFREQ_TABLE_END}, }; #define GET_SPEEDSTEP_OWNER 0 #define GET_SPEEDSTEP_STATE 1 #define SET_SPEEDSTEP_STATE 2 #define GET_SPEEDSTEP_FREQS 4 /* how often shall the SMI call be tried if it failed, e.g. because * of DMA activity going on? */ #define SMI_TRIES 5 #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg) /** * speedstep_smi_ownership */ static int speedstep_smi_ownership (void) { u32 command, result, magic, dummy; u32 function = GET_SPEEDSTEP_OWNER; unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation"; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" : "=D" (result), "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic) : "memory" ); dprintk("result is %x\n", result); return result; } /** * speedstep_smi_get_freqs - get SpeedStep preferred & current freq. * @low: the low frequency value is placed here * @high: the high frequency value is placed here * * Only available on later SpeedStep-enabled systems, returns false results or * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing * shows that the latter occurs if !(ist_info.event & 0xFFFF). */ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) { u32 command, result = 0, edi, high_mhz, low_mhz, dummy; u32 state=0; u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { dprintk("bug #1422 -- can't read freqs from BIOS\n"); return -ENODEV; } command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi), "=S" (dummy) : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) ); dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); /* abort if results are obviously incorrect... 
*/ if ((high_mhz + low_mhz) < 600) return -EINVAL; *high = high_mhz * 1000; *low = low_mhz * 1000; return result; } /** * speedstep_get_state - set the SpeedStep state * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ static int speedstep_get_state (void) { u32 function=GET_SPEEDSTEP_STATE; u32 result, state, edi, command, dummy; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" : "=a" (result), "=b" (state), "=D" (edi), "=c" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0), "D" (0) ); dprintk("state is %x, result is %x\n", state, result); return (state & 1); } /** * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ static void speedstep_set_state (unsigned int state) { unsigned int result = 0, command, new_state, dummy; unsigned long flags; unsigned int function=SET_SPEEDSTEP_STATE; unsigned int retry = 0; if (state > 0x1) return; /* Disable IRQs */ local_irq_save(flags); command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port); do { if (retry) { dprintk("retry %u, previous result %u, waiting...\n", retry, result); mdelay(retry * 50); } retry++; __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" : "=b" (new_state), "=D" (result), "=c" (dummy), "=a" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) ); } while ((new_state != state) && (retry <= SMI_TRIES)); /* enable IRQs */ local_irq_restore(flags); if (new_state == state) { dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); } else { 
printk(KERN_ERR "cpufreq: change to state %u failed with new_state %u and result %u\n", state, new_state, result); } return; } /** * speedstep_target - set a new CPUFreq policy * @policy: new policy * @target_freq: new freq * @relation: * * Sets a new CPUFreq policy/freq. */ static int speedstep_target (struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; struct cpufreq_freqs freqs; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; freqs.old = speedstep_freqs[speedstep_get_state()].frequency; freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = 0; /* speedstep.c is UP only driver */ if (freqs.old == freqs.new) return 0; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); speedstep_set_state(newstate); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; } /** * speedstep_verify - verifies a new CPUFreq policy * @policy: new policy * * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. 
*/ static int speedstep_verify (struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } static int speedstep_cpu_init(struct cpufreq_policy *policy) { int result; unsigned int speed,state; /* capability check */ if (policy->cpu != 0) return -ENODEV; result = speedstep_smi_ownership(); if (result) { dprintk("fails in aquiring ownership of a SMI interface.\n"); return -EINVAL; } /* detect low and high frequency */ result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency, &speedstep_freqs[SPEEDSTEP_HIGH].frequency); if (result) { /* fall back to speedstep_lib.c dection mechanism: try both states out */ dprintk("could not detect low and high frequencies by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, &speedstep_freqs[SPEEDSTEP_LOW].frequency, &speedstep_freqs[SPEEDSTEP_HIGH].frequency, NULL, &speedstep_set_state); if (result) { dprintk("could not detect two different speeds -- aborting.\n"); return result; } else dprintk("workaround worked.\n"); } /* get current speed setting */ state = speedstep_get_state(); speed = speedstep_freqs[state].frequency; dprintk("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->cur = speed; result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) return (result); cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); return 0; } static int speedstep_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static unsigned int speedstep_get(unsigned int cpu) { if (cpu) return -ENODEV; return speedstep_get_processor_frequency(speedstep_processor); } static int speedstep_resume(struct cpufreq_policy *policy) { int result = speedstep_smi_ownership(); if (result) dprintk("fails in re-aquiring ownership of a SMI interface.\n"); return result; } static struct freq_attr* speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver speedstep_driver = { .name = "speedstep-smi", .verify = speedstep_verify, .target = speedstep_target, .init = speedstep_cpu_init, .exit = speedstep_cpu_exit, .get = speedstep_get, .resume = speedstep_resume, .owner = THIS_MODULE, .attr = speedstep_attr, }; /** * speedstep_init - initializes the SpeedStep CPUFreq driver * * Initializes the SpeedStep support. Returns -ENODEV on unsupported * BIOS, -EINVAL on problems during initiatization, and zero on * success. 
*/ static int __init speedstep_init(void) { speedstep_processor = speedstep_detect_processor(); switch (speedstep_processor) { case SPEEDSTEP_PROCESSOR_PIII_T: case SPEEDSTEP_PROCESSOR_PIII_C: case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: break; default: speedstep_processor = 0; } if (!speedstep_processor) { dprintk ("No supported Intel CPU detected.\n"); return -ENODEV; } dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); /* Error if no IST-SMI BIOS or no PARM sig= 'ISGE' aka 'Intel Speedstep Gate E' */ if ((ist_info.signature != 0x47534943) && ( (smi_port == 0) || (smi_cmd == 0))) return -ENODEV; if (smi_sig == 1) smi_sig = 0x47534943; else smi_sig = ist_info.signature; /* setup smi_port from MODLULE_PARM or BIOS */ if ((smi_port > 0xff) || (smi_port < 0)) return -EINVAL; else if (smi_port == 0) smi_port = ist_info.command & 0xff; if ((smi_cmd > 0xff) || (smi_cmd < 0)) return -EINVAL; else if (smi_cmd == 0) smi_cmd = (ist_info.command >> 16) & 0xff; return cpufreq_register_driver(&speedstep_driver); } /** * speedstep_exit - unregisters SpeedStep support * * Unregisters SpeedStep support. */ static void __exit speedstep_exit(void) { cpufreq_unregister_driver(&speedstep_driver); } module_param(smi_port, int, 0444); module_param(smi_cmd, int, 0444); module_param(smi_sig, uint, 0444); MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2"); MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82"); MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface."); MODULE_AUTHOR ("Hiroshi Miura"); MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface."); MODULE_LICENSE ("GPL"); module_init(speedstep_init); module_exit(speedstep_exit);
gpl-2.0
geneyeung/linux-3.10.17
drivers/scsi/qla2xxx/qla_target.c
446
141734
/* * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx * * based on qla2x00t.c code: * * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> * Copyright (C) 2004 - 2005 Leonid Stoljar * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> * Copyright (C) 2006 - 2010 ID7 Ltd. * * Forward port and refactoring to modern qla2xxx and target/configfs * * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2 * of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/workqueue.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include "qla_def.h" #include "qla_target.h" static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; module_param(qlini_mode, charp, S_IRUGO); MODULE_PARM_DESC(qlini_mode, "Determines when initiator mode will be enabled. 
Possible values: " "\"exclusive\" - initiator mode will be enabled on load, " "disabled on enabling target mode and then on disabling target mode " "enabled back; " "\"disabled\" - initiator mode will never be enabled; " "\"enabled\" (default) - initiator mode will always stay enabled."); int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; /* * From scsi/fc/fc_fcp.h */ enum fcp_resp_rsp_codes { FCP_TMF_CMPL = 0, FCP_DATA_LEN_INVALID = 1, FCP_CMND_FIELDS_INVALID = 2, FCP_DATA_PARAM_MISMATCH = 3, FCP_TMF_REJECTED = 4, FCP_TMF_FAILED = 5, FCP_TMF_INVALID_LUN = 9, }; /* * fc_pri_ta from scsi/fc/fc_fcp.h */ #define FCP_PTA_SIMPLE 0 /* simple task attribute */ #define FCP_PTA_HEADQ 1 /* head of queue task attribute */ #define FCP_PTA_ORDERED 2 /* ordered task attribute */ #define FCP_PTA_ACA 4 /* auto. contingent allegiance */ #define FCP_PTA_MASK 7 /* mask for task attribute field */ #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ /* * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which * must be called under HW lock and could unlock/lock it inside. * It isn't an issue, since in the current implementation on the time when * those functions are called: * * - Either context is IRQ and only IRQ handler can modify HW data, * including rings related fields, * * - Or access to target mode variables from struct qla_tgt doesn't * cross those functions boundaries, except tgt_stop, which * additionally protected by irq_cmd_count. 
*/ /* Predefs for callbacks handed to qla2xxx LLD */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt); static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked); static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, struct qla_tgt_srr_imm *imm, int ha_lock); /* * Global Variables */ static struct kmem_cache *qla_tgt_cmd_cachep; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ static struct qla_tgt_sess *qlt_find_sess_by_port_name( struct qla_tgt *tgt, const uint8_t *port_name) { struct qla_tgt_sess *sess; list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { if (!memcmp(sess->port_name, port_name, WWN_SIZE)) return sess; } return NULL; } /* Might release hw lock, then reaquire!! 
*/ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) { /* Send marker if required */ if (unlikely(vha->marker_needed != 0)) { int rc = qla2x00_issue_marker(vha, vha_locked); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt, vha, 0xe03d, "qla_target(%d): issue_marker() failed\n", vha->vp_idx); } return rc; } return QLA_SUCCESS; } static inline struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, uint8_t *d_id) { struct qla_hw_data *ha = vha->hw; uint8_t vp_idx; if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) return NULL; if (vha->d_id.b.al_pa == d_id[2]) return vha; BUG_ON(ha->tgt.tgt_vp_map == NULL); vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; if (likely(test_bit(vp_idx, ha->vp_idx_map))) return ha->tgt.tgt_vp_map[vp_idx].vha; return NULL; } static inline struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) { struct qla_hw_data *ha = vha->hw; if (vha->vp_idx == vp_idx) return vha; BUG_ON(ha->tgt.tgt_vp_map == NULL); if (likely(test_bit(vp_idx, ha->vp_idx_map))) return ha->tgt.tgt_vp_map[vp_idx].vha; return NULL; } void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio) { switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, atio->u.isp24.fcp_hdr.d_id); if (unlikely(NULL == host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03e, "qla_target(%d): Received ATIO_TYPE7 " "with unknown d_id %x:%x:%x\n", vha->vp_idx, atio->u.isp24.fcp_hdr.d_id[0], atio->u.isp24.fcp_hdr.d_id[1], atio->u.isp24.fcp_hdr.d_id[2]); break; } qlt_24xx_atio_pkt(host, atio); break; } case IMMED_NOTIFY_TYPE: { struct scsi_qla_host *host = vha; struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)atio; if ((entry->u.isp24.vp_index != 0xFF) && (entry->u.isp24.nport_handle != 0xFFFF)) { host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03f, "qla_target(%d): Received " 
"ATIO (IMMED_NOTIFY_TYPE) " "with unknown vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; } } qlt_24xx_atio_pkt(host, atio); break; } default: ql_dbg(ql_dbg_tgt, vha, 0xe040, "qla_target(%d): Received unknown ATIO atio " "type %x\n", vha->vp_idx, atio->u.raw.entry_type); break; } return; } void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) { switch (pkt->entry_type) { case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe041, "qla_target(%d): Response pkt (CTIO_TYPE7) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->vp_index); break; } qlt_response_pkt(host, pkt); break; } case IMMED_NOTIFY_TYPE: { struct scsi_qla_host *host = vha; struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)pkt; host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe042, "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; } qlt_response_pkt(host, pkt); break; } case NOTIFY_ACK_TYPE: { struct scsi_qla_host *host = vha; struct nack_to_isp *entry = (struct nack_to_isp *)pkt; if (0xFF != entry->u.isp24.vp_index) { host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe043, "qla_target(%d): Response " "pkt (NOTIFY_ACK_TYPE) " "received, with unknown " "vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; } } qlt_response_pkt(host, pkt); break; } case ABTS_RECV_24XX: { struct abts_recv_from_24xx *entry = (struct abts_recv_from_24xx *)pkt; struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe044, "qla_target(%d): Response pkt " "(ABTS_RECV_24XX) received, with unknown " "vp_index %d\n", vha->vp_idx, 
entry->vp_index); break; } qlt_response_pkt(host, pkt); break; } case ABTS_RESP_24XX: { struct abts_resp_to_24xx *entry = (struct abts_resp_to_24xx *)pkt; struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe045, "qla_target(%d): Response pkt " "(ABTS_RECV_24XX) received, with unknown " "vp_index %d\n", vha->vp_idx, entry->vp_index); break; } qlt_response_pkt(host, pkt); break; } default: qlt_response_pkt(vha, pkt); break; } } static void qlt_free_session_done(struct work_struct *work) { struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, free_work); struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; BUG_ON(!tgt); /* * Release the target session for FC Nexus from fabric module code. */ if (sess->se_sess != NULL) ha->tgt.tgt_ops->free_session(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, "Unregistration of sess %p finished\n", sess); kfree(sess); /* * We need to protect against race, when tgt is freed before or * inside wake_up() */ tgt->sess_count--; if (tgt->sess_count == 0) wake_up_all(&tgt->waitQ); } /* ha->hardware_lock supposed to be held on entry */ void qlt_unreg_sess(struct qla_tgt_sess *sess) { struct scsi_qla_host *vha = sess->vha; vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); list_del(&sess->sess_list_entry); if (sess->deleted) list_del(&sess->del_list_entry); INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); /* ha->hardware_lock supposed to be held on entry */ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; uint32_t unpacked_lun, lun = 0; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; struct atio_from_isp *a = (struct atio_from_isp *)iocb; loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 
0xFFFF) { #if 0 /* FIXME: Re-enable Global event handling.. */ /* Global event */ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { sess = list_entry(ha->tgt.qla_tgt->sess_list.next, typeof(*sess), sess_list_entry); switch (mcmd) { case QLA_TGT_NEXUS_LOSS_SESS: mcmd = QLA_TGT_NEXUS_LOSS; break; case QLA_TGT_ABORT_ALL_SESS: mcmd = QLA_TGT_ABORT_ALL; break; case QLA_TGT_NEXUS_LOSS: case QLA_TGT_ABORT_ALL: break; default: ql_dbg(ql_dbg_tgt, vha, 0xe046, "qla_target(%d): Not allowed " "command %x in %s", vha->vp_idx, mcmd, __func__); sess = NULL; break; } } else sess = NULL; #endif } else { sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); } ql_dbg(ql_dbg_tgt, vha, 0xe000, "Using sess for qla_tgt_reset: %p\n", sess); if (!sess) { res = -ESRCH; return res; } ql_dbg(ql_dbg_tgt, vha, 0xe047, "scsi(%ld): resetting (session %p from port " "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " "mcmd %x, loop_id %d)\n", vha->host_no, sess, sess->port_name[0], sess->port_name[1], sess->port_name[2], sess->port_name[3], sess->port_name[4], sess->port_name[5], sess->port_name[6], sess->port_name[7], mcmd, loop_id); lun = a->u.isp24.fcp_cmnd.lun; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } /* ha->hardware_lock supposed to be held on entry */ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, bool immediate) { struct qla_tgt *tgt = sess->tgt; uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; if (sess->deleted) return; ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); sess->deleted = 1; if (immediate) dev_loss_tmo = 0; sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, "qla_target(%d): session for port %02x:%02x:%02x:" "%02x:%02x:%02x:%02x:%02x 
(loop ID %d) scheduled for " "deletion in %u secs (expires: %lu) immed: %d\n", sess->vha->vp_idx, sess->port_name[0], sess->port_name[1], sess->port_name[2], sess->port_name[3], sess->port_name[4], sess->port_name[5], sess->port_name[6], sess->port_name[7], sess->loop_id, dev_loss_tmo, sess->expires, immediate); if (immediate) schedule_delayed_work(&tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, jiffies - sess->expires); } /* ha->hardware_lock supposed to be held on entry */ static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) { struct qla_tgt_sess *sess; list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) qlt_schedule_sess_for_deletion(sess, true); /* At this point tgt could be already dead */ } static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, uint16_t *loop_id) { struct qla_hw_data *ha = vha->hw; dma_addr_t gid_list_dma; struct gid_list_info *gid_list; char *id_iter; int res, rc, i; uint16_t entries; gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &gid_list_dma, GFP_KERNEL); if (!gid_list) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, "qla_target(%d): DMA Alloc failed of %u\n", vha->vp_idx, qla2x00_gid_list_size(ha)); return -ENOMEM; } /* Get list of logged in devices */ rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", vha->vp_idx, rc); res = -1; goto out_free_id_list; } id_iter = (char *)gid_list; res = -1; for (i = 0; i < entries; i++) { struct gid_list_info *gid = (struct gid_list_info *)id_iter; if ((gid->al_pa == s_id[2]) && (gid->area == s_id[1]) && (gid->domain == s_id[0])) { *loop_id = le16_to_cpu(gid->loop_id); res = 0; break; } id_iter += ha->gid_list_info_size; } out_free_id_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), gid_list, gid_list_dma); return res; } static bool qlt_check_fcport_exist(struct scsi_qla_host *vha, 
struct qla_tgt_sess *sess) { struct qla_hw_data *ha = vha->hw; struct qla_port_24xx_data *pmap24; bool res, found = false; int rc, i; uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */ uint16_t entries; void *pmap; int pmap_len; fc_port_t *fcport; int global_resets; unsigned long flags; retry: global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len); if (rc != QLA_SUCCESS) { res = false; goto out; } pmap24 = pmap; entries = pmap_len/sizeof(*pmap24); for (i = 0; i < entries; ++i) { if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) { loop_id = le16_to_cpu(pmap24[i].loop_id); found = true; break; } } kfree(pmap); if (!found) { res = false; goto out; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046, "qlt_check_fcport_exist(): loop_id %d", loop_id); fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); if (fcport == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047, "qla_target(%d): Allocation of tmp FC port failed", vha->vp_idx); res = false; goto out; } fcport->loop_id = loop_id; rc = qla2x00_get_port_database(vha, fcport, 0); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048, "qla_target(%d): Failed to retrieve fcport " "information -- get_port_database() returned %x " "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); res = false; goto out_free_fcport; } if (global_resets != atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, "qla_target(%d): global reset during session discovery" " (counter was %d, new %d), retrying", vha->vp_idx, global_resets, atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); goto retry; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, " "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa, sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); spin_lock_irqsave(&ha->hardware_lock, flags); 
ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, (fcport->flags & FCF_CONF_COMP_SUPPORTED)); spin_unlock_irqrestore(&ha->hardware_lock, flags); res = true; out_free_fcport: kfree(fcport); out: return res; } /* ha->hardware_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { BUG_ON(!sess->deleted); list_del(&sess->del_list_entry); sess->deleted = 0; } static void qlt_del_sess_work_fn(struct delayed_work *work) { struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_del_work); struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess; unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); while (!list_empty(&tgt->del_sess_list)) { sess = list_entry(tgt->del_sess_list.next, typeof(*sess), del_list_entry); if (time_after_eq(jiffies, sess->expires)) { bool cancel; qlt_undelete_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); cancel = qlt_check_fcport_exist(vha, sess); if (cancel) { if (sess->deleted) { /* * sess was again deleted while we were * discovering it */ spin_lock_irqsave(&ha->hardware_lock, flags); continue; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049, "qla_target(%d): cancel deletion of " "session for port %02x:%02x:%02x:%02x:%02x:" "%02x:%02x:%02x (loop ID %d), because " " it isn't deleted by firmware", vha->vp_idx, sess->port_name[0], sess->port_name[1], sess->port_name[2], sess->port_name[3], sess->port_name[4], sess->port_name[5], sess->port_name[6], sess->port_name[7], sess->loop_id); } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "Timeout: sess %p about to be deleted\n", sess); ha->tgt.tgt_ops->shutdown_sess(sess); ha->tgt.tgt_ops->put_sess(sess); } spin_lock_irqsave(&ha->hardware_lock, flags); } else { schedule_delayed_work(&tgt->sess_del_work, jiffies - sess->expires); break; } } spin_unlock_irqrestore(&ha->hardware_lock, flags); } /* * Adds an extra ref to allow to drop hw lock after adding sess to the list. 
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
				sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			/* Extra ref for the caller (see header comment) */
			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));

			/* A non-local login upgrades a local session */
			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, "
		    "all commands from port %02x:%02x:%02x:%02x:"
		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7]);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	/* Build big-endian S_ID (domain/area/al_pa) for the fabric callback */
	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explict NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reaquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
	    " completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name[0],
	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
"" : "not "); return sess; } /* * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() */ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; if (!tgt || (fcport->port_type != FCT_INITIATOR)) return; spin_lock_irqsave(&ha->hardware_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&ha->hardware_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_lock(&ha->tgt.tgt_mutex); sess = qlt_create_sess(vha, fcport, false); mutex_unlock(&ha->tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); } else { kref_get(&sess->se_sess->sess_kref); if (sess->deleted) { qlt_undelete_sess(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, "qla_target(%u): %ssession for port %02x:" "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " "reappeared\n", vha->vp_idx, sess->local ? 
"local " : "", sess->port_name[0], sess->port_name[1], sess->port_name[2], sess->port_name[3], sess->port_name[4], sess->port_name[5], sess->port_name[6], sess->port_name[7], sess->loop_id); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, "Reappeared sess %p\n", sess); } ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, (fcport->flags & FCF_CONF_COMP_SUPPORTED)); } if (sess && sess->local) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, "qla_target(%u): local session for " "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "(loop ID %d) became global\n", vha->vp_idx, fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], sess->loop_id); sess->local = 0; } spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->tgt.tgt_ops->put_sess(sess); } void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; if (!tgt || (fcport->port_type != FCT_INITIATOR)) return; spin_lock_irqsave(&ha->hardware_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&ha->hardware_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); return; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1; qlt_schedule_sess_for_deletion(sess, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; unsigned long flags; int res; /* * We need to protect against race, when tgt is freed before or * inside wake_up() */ spin_lock_irqsave(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, "tgt %p, empty(sess_list)=%d sess_count=%d\n", tgt, list_empty(&tgt->sess_list), tgt->sess_count); res = 
(tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	/* Let the pending session-deletion work run to completion */
	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Busy-wait (with the lock dropped) for in-flight IRQ commands */
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;

	/* Run phase 2 ourselves if the configfs path did not */
	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	ha->tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	/* GFP_ATOMIC: caller holds the hardware spinlock */
	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy, uint32_t add_flags,
	uint16_t resp_code, int resp_code_valid, uint16_t srr_flags,
	uint16_t srr_reject_code, uint8_t srr_explan)
{
	/*
	 * Builds and queues a NOTIFY_ACK IOCB answering an immediate-notify
	 * message from the ISP.  NOTE(review): add_flags, resp_code and
	 * resp_code_valid are not referenced in this body — presumably kept
	 * for other ISP generations; confirm before removing.
	 */
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (ha->tgt.qla_tgt != NULL)
		ha->tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	/* Most fields are echoed back from the incoming notify */
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	/*
	 * Queues a BA_ACC (status == FCP_TMF_CMPL) or BA_RJT response to a
	 * received ABTS.  ids_reversed selects whether d_id/s_id are copied
	 * straight or swapped (the firmware echo of our own response already
	 * has them reversed).
	 */
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* f_ctl is a 24-bit field — copy the three low-order bytes */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	/* ids_reversed=true: 'entry' is a firmware echo of our response */
	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	/*
	 * Finds the command being aborted by its exchange address within the
	 * session's command list and hands an ABORT_TASK TMR to the fabric
	 * module.  Returns 0 on success, -ENOENT if no matching command,
	 * -ENOMEM / -EFAULT on allocation or handle_tmr failure.
	 */
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* fcp_hdr_le S_ID bytes are little-endian: reverse into wire order */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		/* Defer the abort to process context via the sess work list */
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	/*
	 * Queues a status-mode-1 CTIO7 carrying the TM response code.
	 * NOTE(review): the first parameter is a scsi_qla_host despite being
	 * named "ha" — do not confuse with qla_hw_data.
	 */
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	/* S_ID bytes come little-endian in the ATIO; reverse them */
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	/*
	 * DMA-maps the command's scatterlist and computes how many request
	 * ring entries (including continuation IOCBs) the transfer needs.
	 * Returns 0 on success, -1 on mapping failure.
	 */
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If greater than four sg entries then we need to allocate
	 * the continuation entries
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

/* Undo qlt_pci_map_calc_cnt()'s DMA mapping */
static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	/*
	 * Reserves req_cnt entries on the request ring (+2 slack), refreshing
	 * the cached free count from the hardware out-pointer first if the
	 * cache looks exhausted.  Returns 0 on success, -EAGAIN if full.
	 */
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n",
		    cnt, vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index.
	 */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	/*
	 * Allocates the next free command handle in 1..DEFAULT_OUTSTANDING_COMMANDS,
	 * skipping the reserved NULL/SKIP values.  Returns QLA_TGT_NULL_HANDLE
	 * when a full wrap-around finds no free slot.
	 */
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	/* S_ID from the ATIO is little-endian; reverse into initiator_id */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */
		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			/* Each segment: lo-addr [, hi-addr], length */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	/*
	 * Fills the CTIO7's inline data segment descriptors from the mapped
	 * scatterlist, then delegates any overflow to continuation IOCBs.
	 */
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

/* True when the command carries a data payload */
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	/*
	 * Prepares the parameter block for qlt_xmit_response(): initializes
	 * prm, maps data if needed, folds residual under/overflow into the
	 * SCSI status and computes the total request-entry count.
	 */
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}

/* Whether an explicit-confirm CTIO flag should be requested */
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

/* Debug-only: randomly truncates buffers to provoke SRR handling */
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd,
	int *xmit_type)
{}
#endif

/*
 * Fills the status portion of a CTIO7: confirm flags, residual, SCSI status
 * and (if valid) sense data, switching to status mode 1 when sense is sent.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Copy sense as 32-bit words, byte-swapped to big-endian */
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); if (unlikely(res != 0)) { if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) return 0; return res; } spin_lock_irqsave(&ha->hardware_lock, flags); /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(vha, full_req_cnt); if (unlikely(res)) goto out_unmap_unlock; res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unmap_unlock; pkt = (struct ctio7_to_24xx *)prm.pkt; if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | CTIO7_FLAGS_STATUS_MODE_0); qlt_load_data_segments(&prm, vha); if (prm.add_status_pkt == 0) { if (xmit_type & QLA_TGT_XMIT_STATUS) { pkt->u.status0.scsi_status = cpu_to_le16(prm.rq_result); pkt->u.status0.residual = cpu_to_le32(prm.residual); pkt->u.status0.flags |= __constant_cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(ha, cmd, 0)) { pkt->u.status0.flags |= __constant_cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } } } else { /* * We have already made sure that there is sufficient * amount of request entries to not drop HW lock in * req_pkt(). 
*/ struct ctio7_to_24xx *ctio = (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); ql_dbg(ql_dbg_tgt, vha, 0xe019, "Building additional status packet\n"); memcpy(ctio, pkt, sizeof(*ctio)); ctio->entry_count = 1; ctio->dseg_count = 0; ctio->u.status1.flags &= ~__constant_cpu_to_le16( CTIO7_FLAGS_DATA_IN); /* Real finish is ctio_m1's finish */ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; pkt->u.status0.flags |= __constant_cpu_to_le16( CTIO7_FLAGS_DONT_RET_CTIO); qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, &prm); pr_debug("Status CTIO7: %p\n", ctio); } } else qlt_24xx_init_ctio_to_isp(pkt, &prm); cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ ql_dbg(ql_dbg_tgt, vha, 0xe01a, "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", pkt, scsi_status); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; out_unmap_unlock: if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return res; } EXPORT_SYMBOL(qlt_xmit_response); int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) { struct ctio7_to_24xx *pkt; struct scsi_qla_host *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = cmd->tgt; struct qla_tgt_prm prm; unsigned long flags; int res = 0; memset(&prm, 0, sizeof(prm)); prm.cmd = cmd; prm.tgt = tgt; prm.sg = NULL; prm.req_cnt = 1; /* Send marker if required */ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) return -EIO; ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", (int)vha->vp_idx); /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; spin_lock_irqsave(&ha->hardware_lock, flags); /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(vha, prm.req_cnt); if (res != 0) goto out_unlock_free_unmap; res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unlock_free_unmap; pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= 
__constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); qlt_load_data_segments(&prm, vha); cmd->state = QLA_TGT_STATE_NEED_DATA; qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(&ha->hardware_lock, flags); return res; out_unlock_free_unmap: if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return res; } EXPORT_SYMBOL(qlt_rdy_to_xfer); /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) { struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe050, "qla_target(%d): %s failed: unable to allocate " "request packet\n", vha->vp_idx, __func__); return -ENOMEM; } if (cmd != NULL) { if (cmd->state < QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe051, "qla_target(%d): Terminating cmd %p with " "incorrect state %d\n", vha->vp_idx, cmd, cmd->state); } else ret = 1; } pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; ctio24->nport_handle = cmd ? 
cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* s_id is byte-swapped relative to the wire order of the ATIO */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

/*
 * Send a TERM EXCHANGE CTIO for @cmd, taking ha->hardware_lock unless the
 * caller already holds it (@ha_locked).  When __qlt_send_term_exchange()
 * reports rc == 1 (cmd had already reached at least the PROCESSED state),
 * the command is freed here via tgt_ops->free_cmd().
 */
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
	if (rc == 1) {
		/*
		 * Give the hardware a moment before freeing the command;
		 * only safe to sleep when not in atomic context.
		 */
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}

/*
 * Release a target command: its scatterlist must already be unmapped
 * (BUG otherwise).  Frees a driver-allocated SRR scatterlist if present,
 * then returns the command to its slab cache.
 */
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	BUG_ON(cmd->sg_mapped);

	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);

/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue a CTIO-side SRR descriptor for @cmd and, if the matching immediate
 * notify SRR has already arrived (imm_srr_id == ctio_srr_id), schedule the
 * SRR work to pair them up.
 */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n",
	vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		/*
		 * If the matching immediate-notify SRR was already queued,
		 * kick the SRR worker now; otherwise the IMM side will.
		 */
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				/*
				 * IDs match but no IMM entry exists — state
				 * is inconsistent, so drop our CTIO entry.
				 */
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		/*
		 * Allocation failed: reject and free any IMM SRR entries
		 * that were waiting to pair with this CTIO.
		 */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reacquire. */
/*
 * Terminate the exchange for @cmd unless the CTIO status IOCB shows the
 * firmware already terminated it (OF_TERM_EXCH set).  Returns non-zero if
 * a TERM EXCHANGE was sent; a NULL @ctio is treated as "must terminate".
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		/* Only terminate ourselves if the F/W didn't already */
		term = !(c->flags &
		    __constant_cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Claim the command registered under 1-based @handle (slot handle-1) and
 * clear the slot so the same handle cannot be completed twice.
 */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];

		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a CTIO completion handle into its qla_tgt_cmd.  Strips the
 * driver-internal marker bits, validates the handle range, and returns
 * NULL (after logging) for skip/NULL/out-of-range/unknown handles.
 */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire */ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, uint32_t status, void *ctio) { struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd; struct target_core_fabric_ops *tfo; struct qla_tgt_cmd *cmd; ql_dbg(ql_dbg_tgt, vha, 0xe01e, "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", vha->vp_idx, ctio, status, handle); if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { /* That could happen only in case of an error/reset/abort */ if (status != CTIO_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, "Intermediate CTIO received" " (status %x)\n", status); } return; } cmd = qlt_ctio_to_cmd(vha, handle, ctio); if (cmd == NULL) return; se_cmd = &cmd->se_cmd; tfo = se_cmd->se_tfo; if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); if (unlikely(status != CTIO_SUCCESS)) { switch (status & 0xFFFF) { case CTIO_LIP_RESET: case CTIO_TARGET_RESET: case CTIO_ABORTED: case CTIO_TIMEOUT: case CTIO_INVALID_RX_ID: /* They are OK */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, "qla_target(%d): CTIO with " "status %#x received, state %x, se_cmd %p, " "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, status, cmd->state, se_cmd); break; case CTIO_PORT_LOGGED_OUT: case CTIO_PORT_UNAVAILABLE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, "qla_target(%d): CTIO with PORT LOGGED " "OUT (29) or PORT UNAVAILABLE (28) status %x " "received (state %x, se_cmd %p)\n", vha->vp_idx, status, cmd->state, se_cmd); break; case CTIO_SRR_RECEIVED: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, "qla_target(%d): CTIO with SRR_RECEIVED" " status %x received (state %x, se_cmd %p)\n", vha->vp_idx, status, cmd->state, se_cmd); if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) break; else return; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, "qla_target(%d): CTIO with error status " "0x%x received (state %x, se_cmd %p\n", vha->vp_idx, status, cmd->state, se_cmd); break; } if (cmd->state != QLA_TGT_STATE_NEED_DATA) if (qlt_term_ctio_exchange(vha, ctio, cmd, 
status)) return; } if (cmd->state == QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { int rx_status = 0; cmd->state = QLA_TGT_STATE_DATA_IN; if (unlikely(status != CTIO_SUCCESS)) rx_status = -EIO; else cmd->write_data_transferred = 1; ql_dbg(ql_dbg_tgt, vha, 0xe020, "Data received, context %x, rx_status %d\n", 0x0, rx_status); ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->state == QLA_TGT_STATE_ABORTED) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); } if (unlikely(status != CTIO_SUCCESS)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); dump_stack(); } ha->tgt.tgt_ops->free_cmd(cmd); } static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, uint8_t task_codes) { int fcp_task_attr; switch (task_codes) { case ATIO_SIMPLE_QUEUE: fcp_task_attr = MSG_SIMPLE_TAG; break; case ATIO_HEAD_OF_QUEUE: fcp_task_attr = MSG_HEAD_TAG; break; case ATIO_ORDERED_QUEUE: fcp_task_attr = MSG_ORDERED_TAG; break; case ATIO_ACA_QUEUE: fcp_task_attr = MSG_ACA_TAG; break; case ATIO_UNTAGGED: fcp_task_attr = MSG_SIMPLE_TAG; break; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, "qla_target: unknown task code %x, use ORDERED instead\n", task_codes); fcp_task_attr = MSG_ORDERED_TAG; break; } return fcp_task_attr; } static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, uint8_t *); /* * Process context for I/O path into tcm_qla2xxx code */ static void qlt_do_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; struct qla_tgt_sess *sess = NULL; struct atio_from_isp *atio = &cmd->atio; unsigned char 
*cdb; unsigned long flags; uint32_t data_length; int ret, fcp_task_attr, data_dir, bidi = 0; if (tgt->tgt_stop) goto out_term; spin_lock_irqsave(&ha->hardware_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ if (sess) kref_get(&sess->se_sess->sess_kref); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (unlikely(!sess)) { uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, "qla_target(%d): Unable to find wwn login" " (s_id %x:%x:%x), trying to create it manually\n", vha->vp_idx, s_id[0], s_id[1], s_id[2]); if (atio->u.raw.entry_count > 1) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, "Dropping multy entry cmd %p\n", cmd); goto out_term; } mutex_lock(&ha->tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has an extra creation ref. */ mutex_unlock(&ha->tgt.tgt_mutex); if (!sess) goto out_term; } cmd->sess = sess; cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); if (atio->u.isp24.fcp_cmnd.rddata && atio->u.isp24.fcp_cmnd.wrdata) { bidi = 1; data_dir = DMA_TO_DEVICE; } else if (atio->u.isp24.fcp_cmnd.rddata) data_dir = DMA_FROM_DEVICE; else if (atio->u.isp24.fcp_cmnd.wrdata) data_dir = DMA_TO_DEVICE; else data_dir = DMA_NONE; fcp_task_attr = qlt_get_fcp_task_attr(vha, atio->u.isp24.fcp_cmnd.task_attr); data_length = be32_to_cpu(get_unaligned((uint32_t *) &atio->u.isp24.fcp_cmnd.add_cdb[ atio->u.isp24.fcp_cmnd.add_cdb_len])); ql_dbg(ql_dbg_tgt, vha, 0xe022, "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", cmd, cmd->unpacked_lun, cmd->tag); ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, fcp_task_attr, data_dir, bidi); if (ret != 0) goto out_term; /* * Drop extra session reference from 
qla_tgt_handle_cmd_for_atio*( */ ha->tgt.tgt_ops->put_sess(sess); return; out_term: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); /* * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. */ spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); kmem_cache_free(qla_tgt_cmd_cachep, cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (sess) ha->tgt.tgt_ops->put_sess(sess); } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, "New command while device %p is shutting down\n", tgt); return -EFAULT; } cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); if (!cmd) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); return -ENOMEM; } INIT_LIST_HEAD(&cmd->cmd_list); memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = ha->tgt.qla_tgt; cmd->vha = vha; INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, int fn, void *iocb, int flags) { struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; int res; uint8_t tmr_func; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (!mcmd) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, "qla_target(%d): Allocation of management " "command failed, some commands and their data could " "leak\n", vha->vp_idx); return -ENOMEM; } memset(mcmd, 0, sizeof(*mcmd)); mcmd->sess = sess; if (iocb) { memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, sizeof(mcmd->orig_iocb.imm_ntfy)); } 
mcmd->tmr_func = fn; mcmd->flags = flags; switch (fn) { case QLA_TGT_CLEAR_ACA: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); tmr_func = TMR_CLEAR_ACA; break; case QLA_TGT_TARGET_RESET: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, "qla_target(%d): TARGET_RESET received\n", sess->vha->vp_idx); tmr_func = TMR_TARGET_WARM_RESET; break; case QLA_TGT_LUN_RESET: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); tmr_func = TMR_LUN_RESET; break; case QLA_TGT_CLEAR_TS: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); tmr_func = TMR_CLEAR_TASK_SET; break; case QLA_TGT_ABORT_TS: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); tmr_func = TMR_ABORT_TASK_SET; break; #if 0 case QLA_TGT_ABORT_ALL: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, "qla_target(%d): Doing ABORT_ALL_TASKS\n", sess->vha->vp_idx); tmr_func = 0; break; case QLA_TGT_ABORT_ALL_SESS: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", sess->vha->vp_idx); tmr_func = 0; break; case QLA_TGT_NEXUS_LOSS_SESS: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, "qla_target(%d): Doing NEXUS_LOSS_SESS\n", sess->vha->vp_idx); tmr_func = 0; break; case QLA_TGT_NEXUS_LOSS: ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); tmr_func = 0; break; #endif default: ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, "qla_target(%d): Unknown task mgmt fn 0x%x\n", sess->vha->vp_idx, fn); mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); return -ENOSYS; } res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); if (res != 0) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", sess->vha->vp_idx, res); mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); return -EFAULT; } return 0; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_task_mgmt(struct 
scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = ha->tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	/*
	 * No session for this s_id yet: defer the TM request to the
	 * session work queue (presumably re-processed once the session
	 * exists — confirm in qlt_sched_sess_work()).
	 */
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Build an ABORT_TASK management command for @sess from the immediate
 * notify IOCB and hand it to the fabric's handle_tmr() callback.  The
 * original IOCB is copied into the mcmd so the eventual notify-ack can
 * reference it.  Returns 0 on success, -ENOMEM/-EFAULT on failure
 * (mcmd freed on the failure paths).
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha =
vha->hw; struct qla_tgt_sess *sess; int loop_id; loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); if (sess == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, "qla_target(%d): task abort for unexisting " "session\n", vha->vp_idx); return qlt_sched_sess_work(ha->tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); } return __qlt_abort_task(vha, iocb, sess); } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; int res = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, "qla_target(%d): Port ID: 0x%02x:%02x:%02x" " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], iocb->u.isp24.status_subcode); switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: case ELS_FLOGI: case ELS_PRLI: case ELS_LOGO: case ELS_PRLO: res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); break; case ELS_PDISC: case ELS_ADISC: { struct qla_tgt *tgt = ha->tgt.qla_tgt; if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } res = 1; /* send notify ack */ break; } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, "qla_target(%d): Unsupported ELS command %x " "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); break; } return res; } static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) { struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; size_t first_offset = 0, rem_offset = offset, tmp = 0; int i, sg_srr_cnt, bufflen = 0; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " "cmd->sg_cnt: %u, direction: %d\n", cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); /* * FIXME: Reject non zero SRR relative offset 
 * until we can test this code properly.
 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;
	/*
	 * NOTE(review): everything below is currently unreachable because
	 * of the unconditional "return -1" above (see FIXME); it is kept
	 * for when non-zero SRR relative offsets can be exercised.
	 */

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		/*
		 * The requested offset lands inside this entry; remember
		 * the residual offset into it (first_offset).
		 */
		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);
	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
*/ for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { if (first_offset) { sg_set_page(sgp, sg_page(sg), (sg->length - first_offset), first_offset); first_offset = 0; } else { sg_set_page(sgp, sg_page(sg), sg->length, 0); } bufflen += sgp->length; sgp = sg_next(sgp); if (!sgp) break; } cmd->sg = sg_srr; cmd->sg_cnt = sg_srr_cnt; cmd->bufflen = bufflen; cmd->offset += offset; cmd->free_sg = 1; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", cmd->sg_cnt); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", cmd->bufflen); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", cmd->offset); if (cmd->sg_cnt < 0) BUG(); if (cmd->bufflen < 0) BUG(); return 0; } static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, uint32_t srr_rel_offs, int *xmit_type) { int res = 0, rel_offs; rel_offs = srr_rel_offs - cmd->offset; ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", srr_rel_offs, rel_offs); *xmit_type = QLA_TGT_XMIT_ALL; if (rel_offs < 0) { ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, "qla_target(%d): SRR rel_offs (%d) < 0", cmd->vha->vp_idx, rel_offs); res = -1; } else if (rel_offs == cmd->bufflen) *xmit_type = QLA_TGT_XMIT_STATUS; else if (rel_offs > 0) res = qlt_set_data_offset(cmd, rel_offs); return res; } /* No locks, thread context */ static void qlt_handle_srr(struct scsi_qla_host *vha, struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) { struct imm_ntfy_from_isp *ntfy = (struct imm_ntfy_from_isp *)&imm->imm_ntfy; struct qla_hw_data *ha = vha->hw; struct qla_tgt_cmd *cmd = sctio->cmd; struct se_cmd *se_cmd = &cmd->se_cmd; unsigned long flags; int xmit_type = 0, resp = 0; uint32_t offset; uint16_t srr_ui; offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); srr_ui = ntfy->u.isp24.srr_ui; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", cmd, srr_ui); switch (srr_ui) { case SRR_IU_STATUS: spin_lock_irqsave(&ha->hardware_lock, 
flags); qlt_send_notify_ack(vha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); xmit_type = QLA_TGT_XMIT_STATUS; resp = 1; break; case SRR_IU_DATA_IN: if (!cmd->sg || !cmd->sg_cnt) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, "Unable to process SRR_IU_DATA_IN due to" " missing cmd->sg, state: %d\n", cmd->state); dump_stack(); goto out_reject; } if (se_cmd->scsi_status != 0) { ql_dbg(ql_dbg_tgt, vha, 0xe02a, "Rejecting SRR_IU_DATA_IN with non GOOD " "scsi_status\n"); goto out_reject; } cmd->bufflen = se_cmd->data_length; if (qlt_has_data(cmd)) { if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) goto out_reject; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_notify_ack(vha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); resp = 1; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, "qla_target(%d): SRR for in data for cmd " "without them (tag %d, SCSI status %d), " "reject", vha->vp_idx, cmd->tag, cmd->se_cmd.scsi_status); goto out_reject; } break; case SRR_IU_DATA_OUT: if (!cmd->sg || !cmd->sg_cnt) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, "Unable to process SRR_IU_DATA_OUT due to" " missing cmd->sg\n"); dump_stack(); goto out_reject; } if (se_cmd->scsi_status != 0) { ql_dbg(ql_dbg_tgt, vha, 0xe02b, "Rejecting SRR_IU_DATA_OUT" " with non GOOD scsi_status\n"); goto out_reject; } cmd->bufflen = se_cmd->data_length; if (qlt_has_data(cmd)) { if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) goto out_reject; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_notify_ack(vha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (xmit_type & QLA_TGT_XMIT_DATA) qlt_rdy_to_xfer(cmd); } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, "qla_target(%d): SRR for out data for cmd " "without them (tag %d, SCSI status %d), " "reject", vha->vp_idx, cmd->tag, cmd->se_cmd.scsi_status); goto out_reject; } break; default: 
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, "qla_target(%d): Unknown srr_ui value %x", vha->vp_idx, srr_ui); goto out_reject; } /* Transmit response in case of status and data-in cases */ if (resp) qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); return; out_reject: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_notify_ack(vha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT, NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; dump_stack(); } else qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, struct qla_tgt_srr_imm *imm, int ha_locked) { struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT, NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(imm); } static void qlt_handle_srr_work(struct work_struct *work) { struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); struct scsi_qla_host *vha = tgt->vha; struct qla_tgt_srr_ctio *sctio; unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", tgt); restart: spin_lock_irqsave(&tgt->srr_lock, flags); list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { struct qla_tgt_srr_imm *imm, *i, *ti; struct qla_tgt_cmd *cmd; struct se_cmd *se_cmd; imm = NULL; list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, srr_list_entry) { if (i->srr_id == sctio->srr_id) { list_del(&i->srr_list_entry); if (imm) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, "qla_target(%d): There must be " "only one IMM SRR per CTIO SRR " "(IMM SRR %p, id %d, CTIO %p\n", vha->vp_idx, i, i->srr_id, sctio); 
qlt_reject_free_srr_imm(tgt->vha, i, 0); } else imm = i; } } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, sctio->srr_id); if (imm == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, "Not found matching IMM for SRR CTIO (id %d)\n", sctio->srr_id); continue; } else list_del(&sctio->srr_list_entry); spin_unlock_irqrestore(&tgt->srr_lock, flags); cmd = sctio->cmd; /* * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() * logic.. */ cmd->offset = 0; if (cmd->free_sg) { kfree(cmd->sg); cmd->sg = NULL; cmd->free_sg = 0; } se_cmd = &cmd->se_cmd; cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, "SRR cmd %p (se_cmd %p, tag %d, op %x), " "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); kfree(imm); kfree(sctio); goto restart; } spin_unlock_irqrestore(&tgt->srr_lock, flags); } /* ha->hardware_lock supposed to be held on entry */ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_tgt_srr_imm *imm; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; struct qla_tgt_srr_ctio *sctio; tgt->imm_srr_id++; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", vha->vp_idx); imm = kzalloc(sizeof(*imm), GFP_ATOMIC); if (imm != NULL) { memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); /* IRQ is already OFF */ spin_lock(&tgt->srr_lock); imm->srr_id = tgt->imm_srr_id; list_add_tail(&imm->srr_list_entry, &tgt->srr_imm_list); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, "IMM NTFY SRR %p added (id %d, ui %x)\n", imm, imm->srr_id, iocb->u.isp24.srr_ui); if (tgt->imm_srr_id == tgt->ctio_srr_id) { int found = 0; list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { if (sctio->srr_id == imm->srr_id) { found = 1; break; } } if (found) { ql_dbg(ql_dbg_tgt_mgt, vha, 
0xf02f, "%s", "Scheduling srr work\n"); schedule_work(&tgt->srr_work); } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, "qla_target(%d): imm_srr_id " "== ctio_srr_id (%d), but there is no " "corresponding SRR CTIO, deleting IMM " "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, imm); list_del(&imm->srr_list_entry); kfree(imm); spin_unlock(&tgt->srr_lock); goto out_reject; } } spin_unlock(&tgt->srr_lock); } else { struct qla_tgt_srr_ctio *ts; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, "qla_target(%d): Unable to allocate SRR IMM " "entry, SRR request will be rejected\n", vha->vp_idx); /* IRQ is already OFF */ spin_lock(&tgt->srr_lock); list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, srr_list_entry) { if (sctio->srr_id == tgt->imm_srr_id) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, "CTIO SRR %p deleted (id %d)\n", sctio, sctio->srr_id); list_del(&sctio->srr_list_entry); qlt_send_term_exchange(vha, sctio->cmd, &sctio->cmd->atio, 1); kfree(sctio); } } spin_unlock(&tgt->srr_lock); goto out_reject; } return; out_reject: qlt_send_notify_ack(vha, iocb, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT, NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); } /* * ha->hardware_lock supposed to be held on entry. 
 * Might drop it, then reacquire.
 *
 * Central dispatcher for IMMEDIATE NOTIFY IOCBs from the firmware.  Each
 * status code is routed to its handler; a handler that returns 0 has taken
 * over responsibility for acknowledging the IOCB, in which case
 * send_notify_ack is cleared and the generic NOTIFY_ACK at the bottom is
 * suppressed.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;		/* extra flags for the final NOTIFY_ACK */
	int send_notify_ack = 1;	/* cleared when a handler acks itself */
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Abort all outstanding commands; qlt_reset() acks on success */
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Ack any previous LINK REINIT that is still pending */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		/* Stash this IOCB; it is acked later (e.g. on LOOP_UP) */
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		/* Log only; still acked via the generic path below */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		/* SRR handling is deferred; it acks or rejects on its own */
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.  This function sends busy to ISP 2xxx or 24xx.
 */
/*
 * Build and post a CTIO7 returning @status (e.g. SAM BUSY / TASK SET FULL)
 * for an ATIO that cannot be processed right now.  If no session exists
 * for the initiator, the exchange is terminated instead.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	/* No completion processing wanted for this handle */
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* S_ID bytes are reversed from the FCP header's layout */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit conformation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Residual follows the additional CDB bytes in the FCP_CMND */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Entry point for every ATIO-queue packet: dispatches new SCSI commands
 * and task management IOCBs, or hands immediate notifies to
 * qlt_handle_imm_notify().
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
*/ tgt->irq_cmd_count++; switch (atio->u.raw.entry_type) { case ATIO_TYPE7: ql_dbg(ql_dbg_tgt, vha, 0xe02d, "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, atio->u.isp24.fcp_cmnd.rddata, atio->u.isp24.fcp_cmnd.wrdata, atio->u.isp24.fcp_cmnd.add_cdb_len, be32_to_cpu(get_unaligned((uint32_t *) &atio->u.isp24.fcp_cmnd.add_cdb[ atio->u.isp24.fcp_cmnd.add_cdb_len])), atio->u.isp24.fcp_hdr.s_id[0], atio->u.isp24.fcp_hdr.s_id[1], atio->u.isp24.fcp_hdr.s_id[2]); if (unlikely(atio->u.isp24.exchange_addr == ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { ql_dbg(ql_dbg_tgt, vha, 0xe058, "qla_target(%d): ATIO_TYPE7 " "received with UNKNOWN exchange address, " "sending QUEUE_FULL\n", vha->vp_idx); qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); break; } if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) rc = qlt_handle_cmd_for_atio(vha, atio); else rc = qlt_handle_task_mgmt(vha, atio); if (unlikely(rc != 0)) { if (rc == -ESRCH) { #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, SAM_STAT_BUSY); #else qlt_send_term_exchange(vha, NULL, atio, 1); #endif } else { if (tgt->tgt_stop) { ql_dbg(ql_dbg_tgt, vha, 0xe059, "qla_target: Unable to send " "command to target for req, " "ignoring.\n"); } else { ql_dbg(ql_dbg_tgt, vha, 0xe05a, "qla_target(%d): Unable to send " "command to target, sending BUSY " "status.\n", vha->vp_idx); qlt_send_busy(vha, atio, SAM_STAT_BUSY); } } } break; case IMMED_NOTIFY_TYPE: { if (unlikely(atio->u.isp2x.entry_status != 0)) { ql_dbg(ql_dbg_tgt, vha, 0xe05b, "qla_target(%d): Received ATIO packet %x " "with error status %x\n", vha->vp_idx, atio->u.raw.entry_type, atio->u.isp2x.entry_status); break; } ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); break; } default: ql_dbg(ql_dbg_tgt, vha, 0xe05c, "qla_target(%d): Received unknown ATIO atio " "type %x\n", 
vha->vp_idx, atio->u.raw.entry_type); break; } tgt->irq_cmd_count--; } /* ha->hardware_lock supposed to be held on entry */ /* called via callback from qla2xxx */ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = ha->tgt.qla_tgt; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, "qla_target(%d): Response pkt %x received, but no " "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); return; } ql_dbg(ql_dbg_tgt, vha, 0xe02f, "qla_target(%d): response pkt %p: T %02x C %02x S %02x " "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, pkt->entry_count, pkt->entry_status, pkt->handle); /* * In tgt_stop mode we also should allow all requests to pass. * Otherwise, some commands can stuck. */ tgt->irq_cmd_count++; switch (pkt->entry_type) { case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case ACCEPT_TGT_IO_TYPE: { struct atio_from_isp *atio = (struct atio_from_isp *)pkt; int rc; ql_dbg(ql_dbg_tgt, vha, 0xe031, "ACCEPT_TGT_IO instance %d status %04x " "lun %04x read/write %d data_length %04x " "target_id %02x rx_id %04x\n ", vha->vp_idx, le16_to_cpu(atio->u.isp2x.status), le16_to_cpu(atio->u.isp2x.lun), atio->u.isp2x.execution_codes, le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, atio), atio->u.isp2x.rx_id); if (atio->u.isp2x.status != __constant_cpu_to_le16(ATIO_CDB_VALID)) { ql_dbg(ql_dbg_tgt, vha, 0xe05e, "qla_target(%d): ATIO with error " "status %x received\n", vha->vp_idx, le16_to_cpu(atio->u.isp2x.status)); break; } ql_dbg(ql_dbg_tgt, vha, 0xe032, "FCP CDB: 0x%02x, sizeof(cdb): %lu", atio->u.isp2x.cdb[0], (unsigned long int)sizeof(atio->u.isp2x.cdb)); rc = qlt_handle_cmd_for_atio(vha, atio); if (unlikely(rc != 0)) { if (rc == -ESRCH) { #if 1 /* With 
TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, 0); #else qlt_send_term_exchange(vha, NULL, atio, 1); #endif } else { if (tgt->tgt_stop) { ql_dbg(ql_dbg_tgt, vha, 0xe05f, "qla_target: Unable to send " "command to target, sending TERM " "EXCHANGE for rsp\n"); qlt_send_term_exchange(vha, NULL, atio, 1); } else { ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send " "command to target, sending BUSY " "status\n", vha->vp_idx); qlt_send_busy(vha, atio, 0); } } } } break; case CONTINUE_TGT_IO_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe033, "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case CTIO_A64_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case IMMED_NOTIFY_TYPE: ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); break; case NOTIFY_ACK_TYPE: if (tgt->notify_ack_expected > 0) { struct nack_to_isp *entry = (struct nack_to_isp *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe036, "NOTIFY_ACK seq %08x status %x\n", le16_to_cpu(entry->u.isp2x.seq_id), le16_to_cpu(entry->u.isp2x.status)); tgt->notify_ack_expected--; if (entry->u.isp2x.status != __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { ql_dbg(ql_dbg_tgt, vha, 0xe061, "qla_target(%d): NOTIFY_ACK " "failed %x\n", vha->vp_idx, le16_to_cpu(entry->u.isp2x.status)); } } else { ql_dbg(ql_dbg_tgt, vha, 0xe062, "qla_target(%d): Unexpected NOTIFY_ACK received\n", vha->vp_idx); } break; case ABTS_RECV_24XX: ql_dbg(ql_dbg_tgt, vha, 0xe037, "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); break; case ABTS_RESP_24XX: if 
(tgt->abts_resp_expected > 0) { struct abts_resp_from_24xx_fw *entry = (struct abts_resp_from_24xx_fw *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe038, "ABTS_RESP_24XX: compl_status %x\n", entry->compl_status); tgt->abts_resp_expected--; if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { if ((entry->error_subcode1 == 0x1E) && (entry->error_subcode2 == 0)) { /* * We've got a race here: aborted * exchange not terminated, i.e. * response for the aborted command was * sent between the abort request was * received and processed. * Unfortunately, the firmware has a * silly requirement that all aborted * exchanges must be explicitely * terminated, otherwise it refuses to * send responses for the abort * requests. So, we have to * (re)terminate the exchange and retry * the abort response. */ qlt_24xx_retry_term_exchange(vha, entry); } else ql_dbg(ql_dbg_tgt, vha, 0xe063, "qla_target(%d): ABTS_RESP_24XX " "failed %x (subcode %x:%x)", vha->vp_idx, entry->compl_status, entry->error_subcode1, entry->error_subcode2); } } else { ql_dbg(ql_dbg_tgt, vha, 0xe064, "qla_target(%d): Unexpected ABTS_RESP_24XX " "received\n", vha->vp_idx); } break; default: ql_dbg(ql_dbg_tgt, vha, 0xe065, "qla_target(%d): Received unknown response pkt " "type %x\n", vha->vp_idx, pkt->entry_type); break; } tgt->irq_cmd_count--; } /* * ha->hardware_lock supposed to be held on entry. 
 * Might drop it, then reacquire.
 *
 * Handle an asynchronous mailbox event on behalf of target mode: system
 * errors, loop/LIP transitions and port updates are logged, and a pending
 * LINK REINIT notify is acked once the loop comes back up.
 */
void
qlt_async_event(uint16_t code, struct scsi_qla_host *vha, uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		/* Loop is back: ack the LINK REINIT we were holding on to */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * Allocate a temporary fc_port_t and fill it from the firmware's port
 * database for @loop_id.  Returns NULL on failure; on success the caller
 * owns (and must kfree) the returned port.
 */
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}

/* Must be called under tgt_mutex */
/*
 * Look up the initiator with the given S_ID via the firmware, pull its
 * port database entry and create a local session for it.  Restarts the
 * discovery from scratch if a global reset happened meanwhile.  Returns
 * the new session or NULL.
 */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	/* Snapshot the reset counter so we can detect a concurrent reset */
	global_resets =
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

/*
 * Deferred-work handler for an ABTS that could not be completed in IRQ
 * context (no session yet).  Finds or creates the session, then re-runs
 * __qlt_24xx_handle_abts(); any failure rejects the ABTS.
 */
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* fcp_hdr_le holds the S_ID little-endian; reverse to wire order */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	/*
	 * NOTE(review): be_s_id is never assigned before its address is
	 * passed here, while the byte-swapped s_id[] above is not used for
	 * this lookup -- the lookup appears to read an uninitialized value;
	 * presumably s_id was intended.  Verify against the
	 * find_sess_by_s_id() implementation before changing.
	 */
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		/* Drop the IRQ lock to create the session under tgt_mutex */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * Deferred-work handler for a task management IOCB: find or create the
 * session for the initiator's S_ID, then issue the TMF.  On failure the
 * exchange is terminated.
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* Drop the IRQ lock to create the session under tgt_mutex */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * Session work handler: drains tgt->sess_works_list and dispatches each
 * queued item (deferred ABTS or task management) outside IRQ context.
 */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		/* Handlers sleep/take mutexes, so run them unlocked */
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
/*
 * Allocate and initialize the struct qla_tgt for a target-capable HBA and
 * link it into the global qla_tgt_glist.  Returns 0 (also when target
 * mode is compiled out or the adapter is not capable) or -ENOMEM.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	/* Double registration is a programming error */
	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
/*
 * Unlink and release the qla_tgt previously set up by qlt_add_target().
 */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!ha->tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(ha->tgt.qla_tgt);

	return 0;
}

/* Debug dump of the HW WWNs versus the WWPN passed in from configfs */
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx
qla_tgt_ops * @wwpn: Passwd FC target WWPN * @callback: lport initialization callback for tcm_qla2xxx code * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data */ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) { struct qla_tgt *tgt; struct scsi_qla_host *vha; struct qla_hw_data *ha; struct Scsi_Host *host; unsigned long flags; int rc; u8 b[WWN_SIZE]; mutex_lock(&qla_tgt_mutex); list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { vha = tgt->vha; ha = vha->hw; host = vha->host; if (!host) continue; if (ha->tgt.tgt_ops != NULL) continue; if (!(host->hostt->supported_mode & MODE_TARGET)) continue; spin_lock_irqsave(&ha->hardware_lock, flags); if (host->active_mode & MODE_TARGET) { pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); spin_unlock_irqrestore(&ha->hardware_lock, flags); continue; } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!scsi_host_get(host)) { ql_dbg(ql_dbg_tgt, vha, 0xe068, "Unable to scsi_host_get() for" " qla2xxx scsi_host\n"); continue; } qlt_lport_dump(vha, wwpn, b); if (memcmp(vha->port_name, b, WWN_SIZE)) { scsi_host_put(host); continue; } /* * Setup passed parameters ahead of invoking callback */ ha->tgt.tgt_ops = qla_tgt_ops; ha->tgt.target_lport_ptr = target_lport_ptr; rc = (*callback)(vha); if (rc != 0) { ha->tgt.tgt_ops = NULL; ha->tgt.target_lport_ptr = NULL; } mutex_unlock(&qla_tgt_mutex); return rc; } mutex_unlock(&qla_tgt_mutex); return -ENODEV; } EXPORT_SYMBOL(qlt_lport_register); /** * qla_tgt_lport_deregister - Degister lport * * @vha: Registered scsi_qla_host pointer */ void qlt_lport_deregister(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct Scsi_Host *sh = vha->host; /* * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data */ ha->tgt.target_lport_ptr = NULL; ha->tgt.tgt_ops = NULL; /* * Release the Scsi_Host reference for the underlying qla2xxx host */ 
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
/*
 * Turn target mode on for this host according to the ql2x_ini_mode module
 * policy (exclusive target vs. dual initiator+target).
 */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
/*
 * Turn target mode back off, restoring the initiator-mode setting implied
 * by the ql2x_ini_mode policy.
 */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Trigger an ISP abort so the firmware restarts with the new mode */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
}

/*
 * Advertise FC-4 features to the fabric name server: set the target
 * and/or initiator capability bits in the RFF_ID request.
 */
void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @ha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	/* Mark every ring slot consumed so stale entries are ignored */
	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
* @ha: SCSI driver HA context */ void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct atio_from_isp *pkt; int cnt, i; if (!vha->flags.online) return; while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { ha->tgt.atio_ring_index = 0; ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; } else ha->tgt.atio_ring_ptr++; pkt->u.raw.signature = ATIO_PROCESSED; pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; } wmb(); } /* Adjust ring index */ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); } void qlt_24xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; if (!QLA_TGT_MODE_ENABLED()) return; WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); if (IS_ATIO_MSIX_CAPABLE(ha)) { struct qla_msix_entry *msix = &ha->msix_entries[2]; struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; icb->msix_atio = cpu_to_le16(msix->entry); ql_dbg(ql_dbg_init, vha, 0xf072, "Registering ICB vector 0x%x for atio que.\n", msix->entry); } } void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; if (qla_tgt_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } nv->exchange_count = __constant_cpu_to_le16(0xFFFF); /* Enable target mode */ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) 
nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); /* Enable FC tapes support */ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); /* Disable Full Login after LIP */ nv->host_p &= __constant_cpu_to_le32(~BIT_10); /* Enable target PRLI control */ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } return; } /* out-of-order frames reassembly */ nv->firmware_options_3 |= BIT_6|BIT_9; if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); } } void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, struct init_cb_24xx *icb) { struct qla_hw_data *ha = vha->hw; if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); } } void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; if (!QLA_TGT_MODE_ENABLED()) return; if (qla_tgt_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } nv->exchange_count = __constant_cpu_to_le16(0xFFFF); /* 
Enable target mode */ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); /* Enable FC tapes support */ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); /* Disable Full Login after LIP */ nv->host_p &= __constant_cpu_to_le32(~BIT_10); /* Enable target PRLI control */ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } return; } /* out-of-order frames reassembly */ nv->firmware_options_3 |= BIT_6|BIT_9; if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); } } void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, struct init_cb_81xx *icb) { struct qla_hw_data *ha = vha->hw; if (!QLA_TGT_MODE_ENABLED()) return; if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); } } void qlt_83xx_iospace_config(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return; ha->msix_count += 1; /* For ATIO Q */ } int qlt_24xx_process_response_error(struct scsi_qla_host *vha, struct sts_entry_24xx *pkt) { switch (pkt->entry_type) { case ABTS_RECV_24XX: case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: return 1; default: return 0; } 
} void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { if (qla_tgt_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_5; /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_4; } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return; if (ha->mqenable || IS_QLA83XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } mutex_init(&ha->tgt.tgt_mutex); mutex_init(&ha->tgt.tgt_host_action_mutex); qlt_clear_mode(base_vha); } irqreturn_t qla83xx_msix_atio_q(int irq, void *dev_id) { struct rsp_que *rsp; scsi_qla_host_t *vha; struct qla_hw_data *ha; unsigned long flags; rsp = (struct rsp_que *) dev_id; ha = rsp->hw; vha = pci_get_drvdata(ha->pdev); spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } int qlt_mem_alloc(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return 0; ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * MAX_MULTI_ID_FABRIC, GFP_KERNEL); if (!ha->tgt.tgt_vp_map) return -ENOMEM; ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), &ha->tgt.atio_dma, GFP_KERNEL); if (!ha->tgt.atio_ring) { kfree(ha->tgt.tgt_vp_map); return -ENOMEM; } return 0; } void qlt_mem_free(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return; if (ha->tgt.atio_ring) { dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), ha->tgt.atio_ring, ha->tgt.atio_dma); } kfree(ha->tgt.tgt_vp_map); } /* vport_slock to be held by the caller */ void qlt_update_vp_map(struct scsi_qla_host *vha, 
int cmd) { if (!QLA_TGT_MODE_ENABLED()) return; switch (cmd) { case SET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; break; case RESET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; break; } } static int __init qlt_parse_ini_mode(void) { if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; else return false; return true; } int __init qlt_init(void) { int ret; if (!qlt_parse_ini_mode()) { ql_log(ql_log_fatal, NULL, 0xe06b, "qlt_parse_ini_mode() failed\n"); return -EINVAL; } if (!QLA_TGT_MODE_ENABLED()) return 0; qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, NULL); if (!qla_tgt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xe06c, "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); return -ENOMEM; } qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); ret = -ENOMEM; goto out; } qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, mempool_free_slab, qla_tgt_mgmt_cmd_cachep); if (!qla_tgt_mgmt_cmd_mempool) { ql_log(ql_log_fatal, NULL, 0xe06e, "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); ret = -ENOMEM; goto out_mgmt_cmd_cachep; } qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); if (!qla_tgt_wq) { ql_log(ql_log_fatal, NULL, 0xe06f, "alloc_workqueue for qla_tgt_wq failed\n"); ret = -ENOMEM; goto 
out_cmd_mempool; } /* * Return 1 to signal that initiator-mode is being disabled */ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); out: kmem_cache_destroy(qla_tgt_cmd_cachep); return ret; } void qlt_exit(void) { if (!QLA_TGT_MODE_ENABLED()) return; destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); kmem_cache_destroy(qla_tgt_cmd_cachep); }
gpl-2.0
vmobi-d2vmu/android_kernel_samsung_d2vmu
drivers/bluetooth/bluetooth-power.c
958
3157
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Bluetooth Power Switch Module * controls power to external Bluetooth device * with interface to power management device */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/rfkill.h> static bool previous; static int bluetooth_toggle_radio(void *data, bool blocked) { int ret = 0; int (*power_control)(int enable); power_control = data; if (previous != blocked) ret = (*power_control)(!blocked); if (!ret) previous = blocked; return ret; } static const struct rfkill_ops bluetooth_power_rfkill_ops = { .set_block = bluetooth_toggle_radio, }; static int bluetooth_power_rfkill_probe(struct platform_device *pdev) { struct rfkill *rfkill; int ret; rfkill = rfkill_alloc("bt_power", &pdev->dev, RFKILL_TYPE_BLUETOOTH, &bluetooth_power_rfkill_ops, pdev->dev.platform_data); if (!rfkill) { dev_err(&pdev->dev, "rfkill allocate failed\n"); return -ENOMEM; } /* force Bluetooth off during init to allow for user control */ rfkill_init_sw_state(rfkill, 1); previous = 1; ret = rfkill_register(rfkill); if (ret) { dev_err(&pdev->dev, "rfkill register failed=%d\n", ret); rfkill_destroy(rfkill); return ret; } platform_set_drvdata(pdev, rfkill); return 0; } static void bluetooth_power_rfkill_remove(struct platform_device *pdev) { struct rfkill *rfkill; dev_dbg(&pdev->dev, "%s\n", __func__); rfkill = platform_get_drvdata(pdev); if (rfkill) rfkill_unregister(rfkill); rfkill_destroy(rfkill); 
platform_set_drvdata(pdev, NULL); } static int __devinit bt_power_probe(struct platform_device *pdev) { int ret = 0; dev_dbg(&pdev->dev, "%s\n", __func__); if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "platform data not initialized\n"); return -ENOSYS; } ret = bluetooth_power_rfkill_probe(pdev); return ret; } static int __devexit bt_power_remove(struct platform_device *pdev) { dev_dbg(&pdev->dev, "%s\n", __func__); bluetooth_power_rfkill_remove(pdev); return 0; } static struct platform_driver bt_power_driver = { .probe = bt_power_probe, .remove = __devexit_p(bt_power_remove), .driver = { .name = "bt_power", .owner = THIS_MODULE, }, }; static int __init bluetooth_power_init(void) { int ret; ret = platform_driver_register(&bt_power_driver); return ret; } static void __exit bluetooth_power_exit(void) { platform_driver_unregister(&bt_power_driver); } MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM Bluetooth power control driver"); MODULE_VERSION("1.40"); module_init(bluetooth_power_init); module_exit(bluetooth_power_exit);
gpl-2.0
singleman/linux
arch/arm/probes/decode-arm.c
1470
28973
/* * * arch/arm/probes/decode-arm.c * * Some code moved here from arch/arm/kernel/kprobes-arm.c * * Copyright (C) 2006, 2007 Motorola Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/ptrace.h> #include "decode.h" #include "decode-arm.h" #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) /* * To avoid the complications of mimicing single-stepping on a * processor without a Next-PC or a single-step mode, and to * avoid having to deal with the side-effects of boosting, we * simulate or emulate (almost) all ARM instructions. * * "Simulation" is where the instruction's behavior is duplicated in * C code. "Emulation" is where the original instruction is rewritten * and executed, often by altering its registers. * * By having all behavior of the kprobe'd instruction completed before * returning from the kprobe_handler(), all locks (scheduler and * interrupt) can safely be released. There is no need for secondary * breakpoints, no race with MP or preemptable kernels, nor having to * clean up resources counts at a later time impacting overall system * performance. By rewriting the instruction, only the minimum registers * need to be loaded and saved back optimizing performance. * * Calling the insnslot_*_rwflags version of a function doesn't hurt * anything even when the CPSR flags aren't updated by the * instruction. 
It's just a little slower in return for saving * a little space by not having a duplicate function that doesn't * update the flags. (The same optimization can be said for * instructions that do or don't perform register writeback) * Also, instructions can either read the flags, only write the * flags, or read and write the flags. To save combinations * rather than for sheer performance, flag functions just assume * read and write of flags. */ void __kprobes simulate_bbl(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { long iaddr = (long) regs->ARM_pc - 4; int disp = branch_displacement(insn); if (insn & (1 << 24)) regs->ARM_lr = iaddr + 4; regs->ARM_pc = iaddr + 8 + disp; } void __kprobes simulate_blx1(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { long iaddr = (long) regs->ARM_pc - 4; int disp = branch_displacement(insn); regs->ARM_lr = iaddr + 4; regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); regs->ARM_cpsr |= PSR_T_BIT; } void __kprobes simulate_blx2bx(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { int rm = insn & 0xf; long rmv = regs->uregs[rm]; if (insn & (1 << 5)) regs->ARM_lr = (long) regs->ARM_pc; regs->ARM_pc = rmv & ~0x1; regs->ARM_cpsr &= ~PSR_T_BIT; if (rmv & 0x1) regs->ARM_cpsr |= PSR_T_BIT; } void __kprobes simulate_mrs(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { int rd = (insn >> 12) & 0xf; unsigned long mask = 0xf8ff03df; /* Mask out execution state */ regs->uregs[rd] = regs->ARM_cpsr & mask; } void __kprobes simulate_mov_ipsp(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->uregs[12] = regs->uregs[13]; } /* * For the instruction masking and comparisons in all the "space_*" * functions below, Do _not_ rearrange the order of tests unless * you're very, very sure of what you are doing. 
For the sake of * efficiency, the masks for some tests sometimes assume other test * have been done prior to them so the number of patterns to test * for an instruction set can be as broad as possible to reduce the * number of tests needed. */ static const union decode_item arm_1111_table[] = { /* Unconditional instructions */ /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */ /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */ /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */ /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */ DECODE_SIMULATE (0xfe300000, 0xf4100000, PROBES_PRELOAD_IMM), /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */ /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */ /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */ /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */ DECODE_SIMULATE (0xfe300010, 0xf6100000, PROBES_PRELOAD_REG), /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_SIMULATE (0xfe000000, 0xfa000000, PROBES_BRANCH_IMM), /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* Coprocessor instructions... */ /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */ /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */ /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ /* Other unallocated instructions... 
*/ DECODE_END }; static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = { /* Miscellaneous instructions */ /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ DECODE_SIMULATEX(0x0ff000f0, 0x01000000, PROBES_MRS, REGS(0, NOPC, 0, 0, 0)), /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ DECODE_SIMULATE (0x0ff000f0, 0x01200010, PROBES_BRANCH_REG), /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ DECODE_SIMULATEX(0x0ff000f0, 0x01200030, PROBES_BRANCH_REG, REGS(0, 0, 0, 0, NOPC)), /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ DECODE_EMULATEX (0x0ff000f0, 0x01600010, PROBES_CLZ, REGS(0, NOPC, 0, 0, NOPC)), /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */ /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */ /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */ /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */ DECODE_EMULATEX (0x0f9000f0, 0x01000050, PROBES_SATURATING_ARITHMETIC, REGS(NOPC, NOPC, 0, 0, NOPC)), /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ /* And unallocated instructions... 
*/ DECODE_END }; static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = { /* Halfword multiply and multiply-accumulate */ /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ DECODE_EMULATEX (0x0ff00090, 0x01400080, PROBES_MUL1, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ DECODE_OR (0x0ff000b0, 0x012000a0), /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ DECODE_EMULATEX (0x0ff00090, 0x01600080, PROBES_MUL2, REGS(NOPC, 0, NOPC, 0, NOPC)), /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */ DECODE_OR (0x0ff00090, 0x01000080), /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */ DECODE_EMULATEX (0x0ff000b0, 0x01200080, PROBES_MUL2, REGS(NOPC, NOPC, NOPC, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_0000_____1001_table[] = { /* Multiply and multiply-accumulate */ /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */ /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0fe000f0, 0x00000090, PROBES_MUL2, REGS(NOPC, 0, NOPC, 0, NOPC)), /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */ /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */ DECODE_OR (0x0fe000f0, 0x00200090), /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0ff000f0, 0x00600090, PROBES_MUL2, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */ DECODE_OR (0x0ff000f0, 0x00400090), /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */ /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */ /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */ /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */ /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */ /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */ /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */ /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0f8000f0, 0x00800090, PROBES_MUL1, REGS(NOPC, NOPC, NOPC, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_0001_____1001_table[] = { /* 
Synchronization primitives */ #if __LINUX_ARM_ARCH__ < 6 /* Deprecated on ARMv6 and may be UNDEFINED on v7 */ /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0fb000f0, 0x01000090, PROBES_SWP, REGS(NOPC, NOPC, 0, 0, NOPC)), #endif /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ /* And unallocated instructions... */ DECODE_END }; static const union decode_item arm_cccc_000x_____1xx1_table[] = { /* Extra load/store instructions */ /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */ /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */ /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */ /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */ /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */ DECODE_REJECT (0x0f200090, 0x00200090), /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */ DECODE_REJECT (0x0e10e0d0, 0x0000e0d0), /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */ /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0e5000d0, 0x000000d0, PROBES_LDRSTRD, REGS(NOPCWB, NOPCX, 0, 0, NOPC)), /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */ /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0e5000d0, 0x004000d0, PROBES_LDRSTRD, REGS(NOPCWB, NOPCX, 0, 0, 0)), /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */ DECODE_EMULATEX (0x0e5000f0, 0x000000b0, PROBES_STORE_EXTRA, REGS(NOPCWB, NOPC, 0, 0, NOPC)), /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */ /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */ /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0e500090, 0x00100090, PROBES_LOAD_EXTRA, REGS(NOPCWB, NOPC, 0, 0, NOPC)), /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */ DECODE_EMULATEX (0x0e5000f0, 0x004000b0, PROBES_STORE_EXTRA, REGS(NOPCWB, NOPC, 0, 0, 0)), /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */ /* LDRSB (immediate) cccc 000x 
x1x1 xxxx xxxx xxxx 1101 xxxx */ /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0e500090, 0x00500090, PROBES_LOAD_EXTRA, REGS(NOPCWB, NOPC, 0, 0, 0)), DECODE_END }; static const union decode_item arm_cccc_000x_table[] = { /* Data-processing (register) */ /* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ DECODE_REJECT (0x0e10f000, 0x0010f000), /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */ DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, PROBES_MOV_IP_SP), /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */ /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */ /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */ /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */ DECODE_EMULATEX (0x0f900010, 0x01100000, PROBES_DATA_PROCESSING_REG, REGS(ANY, 0, 0, 0, ANY)), /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */ /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */ DECODE_EMULATEX (0x0fa00010, 0x01a00000, PROBES_DATA_PROCESSING_REG, REGS(0, ANY, 0, 0, ANY)), /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */ /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */ /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */ /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */ /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */ /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */ /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */ /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */ /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */ /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */ DECODE_EMULATEX (0x0e000010, 0x00000000, PROBES_DATA_PROCESSING_REG, REGS(ANY, ANY, 0, 0, ANY)), /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */ /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */ /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */ /* CMN (reg-shift reg) cccc 
0001 0111 xxxx xxxx xxxx 0xx1 xxxx */ DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG, REGS(NOPC, 0, NOPC, 0, NOPC)), /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */ /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */ DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG, REGS(0, NOPC, NOPC, 0, NOPC)), /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */ /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */ /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */ /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */ /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */ /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */ /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */ /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */ /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */ /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */ DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG, REGS(NOPC, NOPC, NOPC, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_001x_table[] = { /* Data-processing (immediate) */ /* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ /* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0fb00000, 0x03000000, PROBES_MOV_HALFWORD, REGS(0, NOPC, 0, 0, 0)), /* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ DECODE_OR (0x0fff00ff, 0x03200001), /* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ DECODE_EMULATE (0x0fff00ff, 0x03200004, PROBES_SEV), /* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ /* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ /* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ DECODE_SIMULATE (0x0fff00fc, 0x03200000, PROBES_WFE), /* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */ /* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ /* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */ 
DECODE_REJECT (0x0fb00000, 0x03200000), /* <op>S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ DECODE_REJECT (0x0e10f000, 0x0210f000), /* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */ /* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */ /* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */ /* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0f900000, 0x03100000, PROBES_DATA_PROCESSING_IMM, REGS(ANY, 0, 0, 0, 0)), /* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */ /* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0fa00000, 0x03a00000, PROBES_DATA_PROCESSING_IMM, REGS(0, ANY, 0, 0, 0)), /* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */ /* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */ /* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */ /* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */ /* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */ /* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */ /* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */ /* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */ /* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */ /* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e000000, 0x02000000, PROBES_DATA_PROCESSING_IMM, REGS(ANY, ANY, 0, 0, 0)), DECODE_END }; static const union decode_item arm_cccc_0110_____xxx1_table[] = { /* Media instructions */ /* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */ DECODE_EMULATEX (0x0ff000f0, 0x068000b0, PROBES_SATURATE, REGS(NOPC, NOPC, 0, 0, NOPC)), /* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */ /* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */ DECODE_OR(0x0fa00030, 0x06a00010), /* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */ /* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */ DECODE_EMULATEX (0x0fb000f0, 0x06a00030, PROBES_SATURATE, REGS(0, NOPC, 0, 0, NOPC)), /* REV cccc 
0110 1011 xxxx xxxx xxxx 0011 xxxx */ /* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ /* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ /* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ DECODE_EMULATEX (0x0fb00070, 0x06b00030, PROBES_REV, REGS(0, NOPC, 0, 0, NOPC)), /* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */ DECODE_REJECT (0x0fb00010, 0x06000010), /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */ DECODE_REJECT (0x0f8000f0, 0x060000b0), /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */ DECODE_REJECT (0x0f8000f0, 0x060000d0), /* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */ /* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */ /* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */ /* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */ /* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */ /* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */ /* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */ /* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */ /* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */ /* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */ /* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */ /* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */ /* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */ /* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */ /* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */ /* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */ /* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */ /* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */ /* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */ /* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */ /* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */ /* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */ /* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */ /* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */ /* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */ /* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */ /* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */ /* 
UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */ /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */ /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */ /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */ /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */ /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */ /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */ /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */ /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0f800010, 0x06000010, PROBES_MMI, REGS(NOPC, NOPC, 0, 0, NOPC)), /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */ /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */ DECODE_EMULATEX (0x0ff00030, 0x06800010, PROBES_PACK, REGS(NOPC, NOPC, 0, 0, NOPC)), /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */ /* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */ DECODE_REJECT (0x0fb000f0, 0x06900070), /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */ /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */ /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */ /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */ /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */ /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */ DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, PROBES_EXTEND, REGS(0, NOPC, 0, 0, NOPC)), /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */ /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */ /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */ /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */ /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */ /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */ DECODE_EMULATEX (0x0f8000f0, 0x06800070, PROBES_EXTEND_ADD, REGS(NOPCX, NOPC, 0, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_0111_____xxx1_table[] = { /* Media instructions */ /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ DECODE_REJECT (0x0ff000f0, 0x07f000f0), /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 
xxxx */ DECODE_EMULATEX (0x0ff00090, 0x07400010, PROBES_MUL_ADD_LONG, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */ /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */ DECODE_OR (0x0ff0f090, 0x0700f010), /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */ DECODE_OR (0x0ff0f0d0, 0x0750f010), /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, PROBES_MUL_ADD, REGS(NOPC, 0, NOPC, 0, NOPC)), /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */ DECODE_OR (0x0ff00090, 0x07000010), /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */ DECODE_OR (0x0ff000d0, 0x07500010), /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ DECODE_EMULATEX (0x0ff000f0, 0x07800010, PROBES_MUL_ADD, REGS(NOPC, NOPCX, NOPC, 0, NOPC)), /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */ DECODE_EMULATEX (0x0ff000d0, 0x075000d0, PROBES_MUL_ADD, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */ /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */ DECODE_EMULATEX (0x0fa00070, 0x07a00050, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, NOPC)), /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */ DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, 0)), /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */ DECODE_EMULATEX (0x0fe00070, 0x07c00010, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, NOPCX)), DECODE_END }; static const union decode_item arm_cccc_01xx_table[] = { /* Load/store word and unsigned byte */ /* LDRB/STRB pc,[...] 
cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0c40f000, 0x0440f000), /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0d200000, 0x04200000), /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */ /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x04000000, PROBES_STORE, REGS(NOPCWB, ANY, 0, 0, 0)), /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x04100000, PROBES_LOAD, REGS(NOPCWB, ANY, 0, 0, 0)), /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */ /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x06000000, PROBES_STORE, REGS(NOPCWB, ANY, 0, 0, NOPC)), /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x06100000, PROBES_LOAD, REGS(NOPCWB, ANY, 0, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_100x_table[] = { /* Block data transfer instructions */ /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_CUSTOM (0x0e400000, 0x08000000, PROBES_LDMSTM), /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */ /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ DECODE_END }; const union decode_item probes_decode_arm_table[] = { /* * Unconditional instructions * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table), /* * Miscellaneous instructions * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx */ DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table), /* * Halfword 
multiply and multiply-accumulate * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx */ DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table), /* * Multiply and multiply-accumulate * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx */ DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table), /* * Synchronization primitives * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx */ DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table), /* * Extra load/store instructions * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx */ DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table), /* * Data-processing (register) * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx * Data-processing (register-shifted register) * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx */ DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table), /* * Data-processing (immediate) * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table), /* * Media instructions * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx */ DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table), DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table), /* * Load/store word and unsigned byte * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table), /* * Block data transfer instructions * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table), /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_SIMULATE (0x0e000000, 0x0a000000, PROBES_BRANCH), /* * Supervisor Call, and coprocessor instructions */ /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */ /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */ /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ /* MRC cccc 1110 xxx1 xxxx xxxx 
xxxx xxx1 xxxx */ /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0c000000, 0x0c000000), DECODE_END }; #ifdef CONFIG_ARM_KPROBES_TEST_MODULE EXPORT_SYMBOL_GPL(probes_decode_arm_table); #endif static void __kprobes arm_singlestep(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_pc += 4; asi->insn_handler(insn, asi, regs); } /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. * * For instructions we don't want to kprobe (INSN_REJECTED return result): * These are generally ones that modify the processor state making * them "hard" to simulate such as switches processor modes or * make accesses in alternate modes. Any of these could be simulated * if the work was put into it, but low return considering they * should also be very rare. */ enum probes_insn __kprobes arm_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi, bool emulate, const union decode_action *actions, const struct decode_checker *checkers[]) { asi->insn_singlestep = arm_singlestep; asi->insn_check_cc = probes_condition_checks[insn>>28]; return probes_decode_insn(insn, asi, probes_decode_arm_table, false, emulate, actions, checkers); }
gpl-2.0
lbule/ALPS.L0.MP8.V2.1_LCSH6735_65C_HZ_L_KERNEL
drivers/staging/comedi/drivers/amplc_pci263.c
1726
3841
/* comedi/drivers/amplc_pci263.c Driver for Amplicon PCI263 relay board. Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: amplc_pci263 Description: Amplicon PCI263 Author: Ian Abbott <abbotti@mev.co.uk> Devices: [Amplicon] PCI263 (amplc_pci263) Updated: Fri, 12 Apr 2013 15:19:36 +0100 Status: works Configuration options: not applicable, uses PCI auto config The board appears as one subdevice, with 16 digital outputs, each connected to a reed-relay. Relay contacts are closed when output is 1. The state of the outputs can be read. */ #include <linux/pci.h> #include "../comedidev.h" #define PCI263_DRIVER_NAME "amplc_pci263" /* PCI263 PCI configuration register information */ #define PCI_DEVICE_ID_AMPLICON_PCI263 0x000c static int pci263_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { /* The insn data is a mask in data[0] and the new data * in data[1], each channel cooresponding to a bit. 
*/ if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; /* Write out the new digital output lines */ outb(s->state & 0xFF, dev->iobase); outb(s->state >> 8, dev->iobase + 1); } data[1] = s->state; return insn->n; } static int pci263_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pci_dev = comedi_to_pci_dev(dev); struct comedi_subdevice *s; int ret; ret = comedi_pci_enable(dev); if (ret) return ret; dev->iobase = pci_resource_start(pci_dev, 2); ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; s = &dev->subdevices[0]; /* digital output subdevice */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = pci263_do_insn_bits; /* read initial relay state */ s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8); dev_info(dev->class_dev, "%s (pci %s) attached\n", dev->board_name, pci_name(pci_dev)); return 0; } static struct comedi_driver amplc_pci263_driver = { .driver_name = PCI263_DRIVER_NAME, .module = THIS_MODULE, .auto_attach = pci263_auto_attach, .detach = comedi_pci_disable, }; static DEFINE_PCI_DEVICE_TABLE(pci263_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) }, {0} }; MODULE_DEVICE_TABLE(pci, pci263_pci_table); static int amplc_pci263_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &amplc_pci263_driver, id->driver_data); } static struct pci_driver amplc_pci263_pci_driver = { .name = PCI263_DRIVER_NAME, .id_table = pci263_pci_table, .probe = &amplc_pci263_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(amplc_pci263_driver, amplc_pci263_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi driver for Amplicon PCI263 relay board"); MODULE_LICENSE("GPL");
gpl-2.0
Multirom-mi4i/android_kernel_xiaomi_ferrari
arch/mips/kernel/cevt-sb1250.c
1982
4379
/* * Copyright (C) 2000, 2001 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/smp.h> #include <asm/addrspace.h> #include <asm/io.h> #include <asm/time.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> #define IMR_IP2_VAL K_INT_MAP_I0 #define IMR_IP3_VAL K_INT_MAP_I1 #define IMR_IP4_VAL K_INT_MAP_I2 /* * The general purpose timer ticks at 1MHz independent if * the rest of the system */ static void sibyte_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: __raw_writeq(0, cfg); __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg); break; case CLOCK_EVT_MODE_ONESHOT: /* Stop the timer until we actually program a shot */ case CLOCK_EVT_MODE_SHUTDOWN: __raw_writeq(0, cfg); break; case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ case CLOCK_EVT_MODE_RESUME: ; } } static int 
sibyte_next_event(unsigned long delta, struct clock_event_device *cd) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); __raw_writeq(0, cfg); __raw_writeq(delta - 1, init); __raw_writeq(M_SCD_TIMER_ENABLE, cfg); return 0; } static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) { unsigned int cpu = smp_processor_id(); struct clock_event_device *cd = dev_id; void __iomem *cfg; unsigned long tmode; if (cd->mode == CLOCK_EVT_MODE_PERIODIC) tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; else tmode = 0; /* ACK interrupt */ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); ____raw_writeq(tmode, cfg); cd->event_handler(cd); return IRQ_HANDLED; } static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); static DEFINE_PER_CPU(char [18], sibyte_hpt_name); void __cpuinit sb1250_clockevent_init(void) { unsigned int cpu = smp_processor_id(); unsigned int irq = K_INT_TIMER_0 + cpu; struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); unsigned char *name = per_cpu(sibyte_hpt_name, cpu); /* Only have 4 general purpose timers, and we use last one as hpt */ BUG_ON(cpu > 2); sprintf(name, "sb1250-counter-%d", cpu); cd->name = name; cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; clockevent_set_clock(cd, V_SCD_TIMER_FREQ); cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); cd->min_delta_ns = clockevent_delta2ns(2, cd); cd->rating = 200; cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = sibyte_next_event; cd->set_mode = sibyte_set_mode; clockevents_register_device(cd); sb1250_mask_irq(cpu, irq); /* * Map the timer interrupt to IP[4] of this cpu */ __raw_writeq(IMR_IP4_VAL, IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) + (irq 
<< 3))); sb1250_unmask_irq(cpu, irq); action->handler = sibyte_counter_handler; action->flags = IRQF_PERCPU | IRQF_TIMER; action->name = name; action->dev_id = cd; irq_set_affinity(irq, cpumask_of(cpu)); setup_irq(irq, action); }
gpl-2.0
AOSPA-L/android_kernel_htc_msm8994
drivers/net/ethernet/toshiba/spider_net.c
2238
72130
/* * Network device driver for Cell Processor-Based Blade and Celleb platform * * (C) Copyright IBM Corp. 2005 * (C) Copyright 2006 TOSHIBA CORPORATION * * Authors : Utz Bacher <utz.bacher@de.ibm.com> * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/firmware.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/gfp.h> #include <linux/ioport.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <asm/pci-bridge.h> #include <net/checksum.h> #include "spider_net.h" MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \ "<Jens.Osterkamp@de.ibm.com>"); MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VERSION); MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME); static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; static 
int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; module_param(rx_descriptors, int, 0444); module_param(tx_descriptors, int, 0444); MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ "in rx chains"); MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \ "in tx chain"); char spider_net_driver_name[] = "spidernet"; static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = { { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { 0, } }; MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); /** * spider_net_read_reg - reads an SMMIO register of a card * @card: device structure * @reg: register to read from * * returns the content of the specified SMMIO register. */ static inline u32 spider_net_read_reg(struct spider_net_card *card, u32 reg) { /* We use the powerpc specific variants instead of readl_be() because * we know spidernet is not a real PCI device and we can thus avoid the * performance hit caused by the PCI workarounds. */ return in_be32(card->regs + reg); } /** * spider_net_write_reg - writes to an SMMIO register of a card * @card: device structure * @reg: register to write to * @value: value to write into the specified SMMIO register */ static inline void spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) { /* We use the powerpc specific variants instead of writel_be() because * we know spidernet is not a real PCI device and we can thus avoid the * performance hit caused by the PCI workarounds. */ out_be32(card->regs + reg, value); } /** * spider_net_write_phy - write to phy register * @netdev: adapter to be written to * @mii_id: id of MII * @reg: PHY register * @val: value to be written to phy register * * spider_net_write_phy_register writes to an arbitrary PHY * register via the spider GPCWOPCMD register. We assume the queue does * not run full (not more than 15 commands outstanding). 
**/ static void spider_net_write_phy(struct net_device *netdev, int mii_id, int reg, int val) { struct spider_net_card *card = netdev_priv(netdev); u32 writevalue; writevalue = ((u32)mii_id << 21) | ((u32)reg << 16) | ((u32)val); spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); } /** * spider_net_read_phy - read from phy register * @netdev: network device to be read from * @mii_id: id of MII * @reg: PHY register * * Returns value read from PHY register * * spider_net_write_phy reads from an arbitrary PHY * register via the spider GPCROPCMD register **/ static int spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) { struct spider_net_card *card = netdev_priv(netdev); u32 readvalue; readvalue = ((u32)mii_id << 21) | ((u32)reg << 16); spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue); /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT * interrupt, as we poll for the completion of the read operation * in spider_net_read_phy. Should take about 50 us */ do { readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); } while (readvalue & SPIDER_NET_GPREXEC); readvalue &= SPIDER_NET_GPRDAT_MASK; return readvalue; } /** * spider_net_setup_aneg - initial auto-negotiation setup * @card: device structure **/ static void spider_net_setup_aneg(struct spider_net_card *card) { struct mii_phy *phy = &card->phy; u32 advertise = 0; u16 bmsr, estat; bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS); if (bmsr & BMSR_10HALF) advertise |= ADVERTISED_10baseT_Half; if (bmsr & BMSR_10FULL) advertise |= ADVERTISED_10baseT_Full; if (bmsr & BMSR_100HALF) advertise |= ADVERTISED_100baseT_Half; if (bmsr & BMSR_100FULL) advertise |= ADVERTISED_100baseT_Full; if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL)) advertise |= SUPPORTED_1000baseT_Full; if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF)) advertise |= SUPPORTED_1000baseT_Half; 
sungem_phy_probe(phy, phy->mii_id); phy->def->ops->setup_aneg(phy, advertise); } /** * spider_net_rx_irq_off - switch off rx irq on this spider card * @card: device structure * * switches off rx irq by masking them out in the GHIINTnMSK register */ static void spider_net_rx_irq_off(struct spider_net_card *card) { u32 regvalue; regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT); spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); } /** * spider_net_rx_irq_on - switch on rx irq on this spider card * @card: device structure * * switches on rx irq by enabling them in the GHIINTnMSK register */ static void spider_net_rx_irq_on(struct spider_net_card *card) { u32 regvalue; regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT; spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); } /** * spider_net_set_promisc - sets the unicast address or the promiscuous mode * @card: card structure * * spider_net_set_promisc sets the unicast destination address filter and * thus either allows for non-promisc mode or promisc mode */ static void spider_net_set_promisc(struct spider_net_card *card) { u32 macu, macl; struct net_device *netdev = card->netdev; if (netdev->flags & IFF_PROMISC) { /* clear destination entry 0 */ spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0); spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0); spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, SPIDER_NET_PROMISC_VALUE); } else { macu = netdev->dev_addr[0]; macu <<= 8; macu |= netdev->dev_addr[1]; memcpy(&macl, &netdev->dev_addr[2], sizeof(macl)); macu |= SPIDER_NET_UA_DESCR_VALUE; spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu); spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl); spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, SPIDER_NET_NONPROMISC_VALUE); } } /** * spider_net_get_mac_address - read mac address from spider card * @card: device structure * * reads MAC address from GMACUNIMACU and GMACUNIMACL registers */ static int 
spider_net_get_mac_address(struct net_device *netdev) { struct spider_net_card *card = netdev_priv(netdev); u32 macl, macu; macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL); macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU); netdev->dev_addr[0] = (macu >> 24) & 0xff; netdev->dev_addr[1] = (macu >> 16) & 0xff; netdev->dev_addr[2] = (macu >> 8) & 0xff; netdev->dev_addr[3] = macu & 0xff; netdev->dev_addr[4] = (macl >> 8) & 0xff; netdev->dev_addr[5] = macl & 0xff; if (!is_valid_ether_addr(&netdev->dev_addr[0])) return -EINVAL; return 0; } /** * spider_net_get_descr_status -- returns the status of a descriptor * @descr: descriptor to look at * * returns the status as in the dmac_cmd_status field of the descriptor */ static inline int spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr) { return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; } /** * spider_net_free_chain - free descriptor chain * @card: card structure * @chain: address of chain * */ static void spider_net_free_chain(struct spider_net_card *card, struct spider_net_descr_chain *chain) { struct spider_net_descr *descr; descr = chain->ring; do { descr->bus_addr = 0; descr->hwdescr->next_descr_addr = 0; descr = descr->next; } while (descr != chain->ring); dma_free_coherent(&card->pdev->dev, chain->num_desc, chain->hwring, chain->dma_addr); } /** * spider_net_init_chain - alloc and link descriptor chain * @card: card structure * @chain: address of chain * * We manage a circular list that mirrors the hardware structure, * except that the hardware uses bus addresses. 
* * Returns 0 on success, <0 on failure */ static int spider_net_init_chain(struct spider_net_card *card, struct spider_net_descr_chain *chain) { int i; struct spider_net_descr *descr; struct spider_net_hw_descr *hwdescr; dma_addr_t buf; size_t alloc_size; alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, &chain->dma_addr, GFP_KERNEL); if (!chain->hwring) return -ENOMEM; memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); /* Set up the hardware pointers in each descriptor */ descr = chain->ring; hwdescr = chain->hwring; buf = chain->dma_addr; for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; hwdescr->next_descr_addr = 0; descr->hwdescr = hwdescr; descr->bus_addr = buf; descr->next = descr + 1; descr->prev = descr - 1; buf += sizeof(struct spider_net_hw_descr); } /* do actual circular list */ (descr-1)->next = chain->ring; chain->ring->prev = descr-1; spin_lock_init(&chain->lock); chain->head = chain->ring; chain->tail = chain->ring; return 0; } /** * spider_net_free_rx_chain_contents - frees descr contents in rx chain * @card: card structure * * returns 0 on success, <0 on failure */ static void spider_net_free_rx_chain_contents(struct spider_net_card *card) { struct spider_net_descr *descr; descr = card->rx_chain.head; do { if (descr->skb) { pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); dev_kfree_skb(descr->skb); descr->skb = NULL; } descr = descr->next; } while (descr != card->rx_chain.head); } /** * spider_net_prepare_rx_descr - Reinitialize RX descriptor * @card: card structure * @descr: descriptor to re-init * * Return 0 on success, <0 on failure. * * Allocates a new rx skb, iommu-maps it and attaches it to the * descriptor. Mark the descriptor as activated, ready-to-use. 
*/ static int spider_net_prepare_rx_descr(struct spider_net_card *card, struct spider_net_descr *descr) { struct spider_net_hw_descr *hwdescr = descr->hwdescr; dma_addr_t buf; int offset; int bufsize; /* we need to round up the buffer size to a multiple of 128 */ bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) & (~(SPIDER_NET_RXBUF_ALIGN - 1)); /* and we need to have it 128 byte aligned, therefore we allocate a * bit more */ /* allocate an skb */ descr->skb = netdev_alloc_skb(card->netdev, bufsize + SPIDER_NET_RXBUF_ALIGN - 1); if (!descr->skb) { if (netif_msg_rx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "Not enough memory to allocate rx buffer\n"); card->spider_stats.alloc_rx_skb_error++; return -ENOMEM; } hwdescr->buf_size = bufsize; hwdescr->result_size = 0; hwdescr->valid_size = 0; hwdescr->data_status = 0; hwdescr->data_error = 0; offset = ((unsigned long)descr->skb->data) & (SPIDER_NET_RXBUF_ALIGN - 1); if (offset) skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* iommu-map the skb */ buf = pci_map_single(card->pdev, descr->skb->data, SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(card->pdev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n"); card->spider_stats.rx_iommu_map_error++; hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; } else { hwdescr->buf_addr = buf; wmb(); hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOINTR_COMPLETE; } return 0; } /** * spider_net_enable_rxchtails - sets RX dmac chain tail addresses * @card: card structure * * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the * chip by writing to the appropriate register. DMA is enabled in * spider_net_enable_rxdmac. 
*/ static inline void spider_net_enable_rxchtails(struct spider_net_card *card) { /* assume chain is aligned correctly */ spider_net_write_reg(card, SPIDER_NET_GDADCHA , card->rx_chain.tail->bus_addr); } /** * spider_net_enable_rxdmac - enables a receive DMA controller * @card: card structure * * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN * in the GDADMACCNTR register */ static inline void spider_net_enable_rxdmac(struct spider_net_card *card) { wmb(); spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, SPIDER_NET_DMA_RX_VALUE); } /** * spider_net_disable_rxdmac - disables the receive DMA controller * @card: card structure * * spider_net_disable_rxdmac terminates processing on the DMA controller * by turing off the DMA controller, with the force-end flag set. */ static inline void spider_net_disable_rxdmac(struct spider_net_card *card) { spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, SPIDER_NET_DMA_RX_FEND_VALUE); } /** * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains * @card: card structure * * refills descriptors in the rx chain: allocates skbs and iommu-maps them. */ static void spider_net_refill_rx_chain(struct spider_net_card *card) { struct spider_net_descr_chain *chain = &card->rx_chain; unsigned long flags; /* one context doing the refill (and a second context seeing that * and omitting it) is ok. If called by NAPI, we'll be called again * as spider_net_decode_one_descr is called several times. If some * interrupt calls us, the NAPI is about to clean up anyway. */ if (!spin_trylock_irqsave(&chain->lock, flags)) return; while (spider_net_get_descr_status(chain->head->hwdescr) == SPIDER_NET_DESCR_NOT_IN_USE) { if (spider_net_prepare_rx_descr(card, chain->head)) break; chain->head = chain->head->next; } spin_unlock_irqrestore(&chain->lock, flags); } /** * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains * @card: card structure * * Returns 0 on success, <0 on failure. 
 */
static int
spider_net_alloc_rx_skbs(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start = chain->tail;
	struct spider_net_descr *descr = start;

	/* Link up the hardware chain pointers */
	do {
		descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
		descr = descr->next;
	} while (descr != start);

	/* Put at least one buffer into the chain. if this fails,
	 * we've got a problem. If not, spider_net_refill_rx_chain
	 * will do the rest at the end of this function. */
	if (spider_net_prepare_rx_descr(card, chain->head))
		goto error;
	else
		chain->head = chain->head->next;

	/* This will allocate the rest of the rx buffers;
	 * if not, it's business as usual later on. */
	spider_net_refill_rx_chain(card);
	spider_net_enable_rxdmac(card);
	return 0;

error:
	spider_net_free_rx_chain_contents(card);
	return -ENOMEM;
}

/**
 * spider_net_get_multicast_hash - generates hash for multicast filter table
 * @addr: multicast address
 *
 * returns the hash value.
 *
 * spider_net_get_multicast_hash calculates a hash value for a given multicast
 * address, that is used to set the multicast filter tables
 */
static u8
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
{
	u32 crc;
	u8 hash;
	char addr_for_crc[ETH_ALEN] = { 0, };
	int i, bit;

	/* bit-reverse the address within each byte and mirror the byte
	 * order before running CRC32, to match the hardware's hashing */
	for (i = 0; i < ETH_ALEN * 8; i++) {
		bit = (addr[i / 8] >> (i % 8)) & 1;
		addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
	}

	crc = crc32_be(~0, addr_for_crc, netdev->addr_len);

	/* fold the CRC down to an 8-bit filter-table index */
	hash = (crc >> 27);
	hash <<= 3;
	hash |= crc & 7;
	hash &= 0xff;

	return hash;
}

/**
 * spider_net_set_multi - sets multicast addresses and promisc flags
 * @netdev: interface device structure
 *
 * spider_net_set_multi configures multicast addresses as needed for the
 * netdev interface.
 * It also sets up multicast, allmulti and promisc
 * flags appropriately
 */
static void
spider_net_set_multi(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u8 hash;
	int i;
	u32 reg;
	struct spider_net_card *card = netdev_priv(netdev);
	unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
		{0, };

	spider_net_set_promisc(card);

	if (netdev->flags & IFF_ALLMULTI) {
		/* accept everything: set all hash-table bits */
		for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
			set_bit(i, bitmask);
		}
		goto write_hash;
	}

	/* well, we know, what the broadcast hash value is: it's xfd
	hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
	set_bit(0xfd, bitmask);

	netdev_for_each_mc_addr(ha, netdev) {
		hash = spider_net_get_multicast_hash(netdev, ha->addr);
		set_bit(hash, bitmask);
	}

write_hash:
	/* each filter register holds four hash bits, one per byte */
	for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
		reg = 0;
		if (test_bit(i * 4, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 1, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 2, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 3, bitmask))
			reg += 0x08;
		spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
	}
}

/**
 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
 * @card: card structure
 * @skb: packet to use
 *
 * returns 0 on success, <0 on failure.
 *
 * fills out the descriptor structure with skb data and len. Copies data,
 * if needed (32bit DMA!)
 */
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
			    struct sk_buff *skb)
{
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	unsigned long flags;

	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(card->pdev, buf)) {
		if (netif_msg_tx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
				  "Dropping packet\n", skb->data, skb->len);
		card->spider_stats.tx_iommu_map_error++;
		return -ENOMEM;
	}

	spin_lock_irqsave(&chain->lock, flags);
	descr = card->tx_chain.head;
	/* refuse to fill the last free descriptor: the ring must never
	 * become completely full, so head can be told apart from tail */
	if (descr->next == chain->tail->prev) {
		spin_unlock_irqrestore(&chain->lock, flags);
		pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
		return -ENOMEM;
	}
	hwdescr = descr->hwdescr;
	chain->head = descr->next;

	descr->skb = skb;
	hwdescr->buf_addr = buf;
	hwdescr->buf_size = skb->len;
	hwdescr->next_descr_addr = 0;
	hwdescr->data_status = 0;

	hwdescr->dmac_cmd_status =
			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
	spin_unlock_irqrestore(&chain->lock, flags);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
			break;
		case IPPROTO_UDP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
			break;
		}

	/* Chain the bus address, so that the DMA engine finds this descr. */
	wmb();
	descr->prev->hwdescr->next_descr_addr = descr->bus_addr;

	card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
	return 0;
}

static int
spider_net_set_low_watermark(struct spider_net_card *card)
{
	struct spider_net_descr *descr = card->tx_chain.tail;
	struct spider_net_hw_descr *hwdescr;
	unsigned long flags;
	int status;
	int cnt=0;
	int i;

	/* Measure the length of the queue. Measurement does not
	 * need to be precise -- does not need a lock. */
	while (descr != card->tx_chain.head) {
		status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
		if (status == SPIDER_NET_DESCR_NOT_IN_USE)
			break;
		descr = descr->next;
		cnt++;
	}

	/* If TX queue is short, don't even bother with interrupts */
	if (cnt < card->tx_chain.num_desc/4)
		return cnt;

	/* Set low-watermark 3/4th's of the way into the queue. */
	descr = card->tx_chain.tail;
	cnt = (cnt*3)/4;
	for (i=0;i<cnt; i++)
		descr = descr->next;

	/* Set the new watermark, clear the old watermark */
	spin_lock_irqsave(&card->tx_chain.lock, flags);
	descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
	if (card->low_watermark && card->low_watermark != descr) {
		hwdescr = card->low_watermark->hwdescr;
		hwdescr->dmac_cmd_status =
		     hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
	}
	card->low_watermark = descr;
	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
	return cnt;
}

/**
 * spider_net_release_tx_chain - processes sent tx descriptors
 * @card: adapter structure
 * @brutal: if set, don't care about whether descriptor seems to be in use
 *
 * returns 0 if the tx ring is empty, otherwise 1.
 *
 * spider_net_release_tx_chain releases the tx descriptors that spider has
 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
 * If some other context is calling this function, we return 1 so that we're
 * scheduled again (if we were scheduled) and will not lose initiative.
 */
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	struct sk_buff *skb;
	u32 buf_addr;
	unsigned long flags;
	int status;

	while (1) {
		spin_lock_irqsave(&chain->lock, flags);
		if (chain->tail == chain->head) {
			spin_unlock_irqrestore(&chain->lock, flags);
			return 0;
		}
		descr = chain->tail;
		hwdescr = descr->hwdescr;

		status = spider_net_get_descr_status(hwdescr);
		switch (status) {
		case SPIDER_NET_DESCR_COMPLETE:
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += descr->skb->len;
			break;

		case SPIDER_NET_DESCR_CARDOWNED:
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}

			/* fallthrough, if we release the descriptors
			 * brutally (then we don't care about
			 * SPIDER_NET_DESCR_CARDOWNED) */

		case SPIDER_NET_DESCR_RESPONSE_ERROR:
		case SPIDER_NET_DESCR_PROTECTION_ERROR:
		case SPIDER_NET_DESCR_FORCE_END:
			if (netif_msg_tx_err(card))
				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
				       "with status x%02x\n", status);
			dev->stats.tx_errors++;
			break;

		default:
			dev->stats.tx_dropped++;
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}
		}

		/* reclaim the descriptor while still under the lock,
		 * then unmap/free the skb outside of it */
		chain->tail = descr->next;
		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
		skb = descr->skb;
		descr->skb = NULL;
		buf_addr = hwdescr->buf_addr;
		spin_unlock_irqrestore(&chain->lock, flags);

		/* unmap the skb */
		if (skb) {
			pci_unmap_single(card->pdev, buf_addr, skb->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}
	return 0;
}

/**
 * spider_net_kick_tx_dma - enables TX DMA processing
 * @card: card structure
 *
 * This routine will start the transmit DMA running if
 * it is not already running. This routine ned only be
 * called when queueing a new packet to an empty tx queue.
 * Writes the current tx chain head as start address
 * of the tx descriptor chain and enables the transmission
 * DMA engine.
 */
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
	struct spider_net_descr *descr;

	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
			SPIDER_NET_TX_DMA_EN)
		goto out;

	/* find the first card-owned descriptor and point the DMA
	 * engine at it */
	descr = card->tx_chain.tail;
	for (;;) {
		if (spider_net_get_descr_status(descr->hwdescr) ==
				SPIDER_NET_DESCR_CARDOWNED) {
			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
					descr->bus_addr);
			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
					SPIDER_NET_DMA_TX_VALUE);
			break;
		}
		if (descr == card->tx_chain.head)
			break;
		descr = descr->next;
	}

out:
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}

/**
 * spider_net_xmit - transmits a frame over the device
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns 0 on success, !0 on failure
 */
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int cnt;
	struct spider_net_card *card = netdev_priv(netdev);

	spider_net_release_tx_chain(card, 0);

	if (spider_net_prepare_tx_descr(card, skb) != 0) {
		/* ring full (or mapping failed): stop the queue and ask
		 * the stack to requeue the skb */
		netdev->stats.tx_dropped++;
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	cnt = spider_net_set_low_watermark(card);
	if (cnt < 5)
		spider_net_kick_tx_dma(card);
	return NETDEV_TX_OK;
}

/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @card: card structure
 *
 * spider_net_cleanup_tx_ring is called by either the tx_timer
 * or from the NAPI polling routine.
 * This routine releases resources associted with transmitted
 * packets, including updating the queue tail pointer.
 */
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
	if ((spider_net_release_tx_chain(card, 0) != 0) &&
	    (card->netdev->flags & IFF_UP)) {
		spider_net_kick_tx_dma(card);
		netif_wake_queue(card->netdev);
	}
}

/**
 * spider_net_do_ioctl - called for device ioctls
 * @netdev: interface device structure
 * @ifr: request parameter structure for ioctl
 * @cmd: command code for ioctl
 *
 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
 * -EOPNOTSUPP is returned, if an unknown ioctl was requested
 */
static int
spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
 *
 * Fills out skb structure and passes the data to the stack.
 * The descriptor state is not changed.
 */
static void
spider_net_pass_skb_up(struct spider_net_descr *descr,
		       struct spider_net_card *card)
{
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	struct sk_buff *skb = descr->skb;
	struct net_device *netdev = card->netdev;
	u32 data_status = hwdescr->data_status;
	u32 data_error = hwdescr->data_error;

	skb_put(skb, hwdescr->valid_size);

	/* the card seems to add 2 bytes of junk in front
	 * of the ethernet frame */
#define SPIDER_MISALIGN		2
	skb_pull(skb, SPIDER_MISALIGN);
	skb->protocol = eth_type_trans(skb, netdev);

	/* checksum offload */
	skb_checksum_none_assert(skb);
	if (netdev->features & NETIF_F_RXCSUM) {
		if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
		       SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
		     !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (data_status & SPIDER_NET_VLAN_PACKET) {
		/* further enhancements: HW-accel VLAN */
	}

	/* update netdevice statistics */
	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* pass skb up to stack */
	netif_receive_skb(skb);
}

/* Dump the state of the RX descriptor ring for debugging.
 * Walks the whole ring once, reporting runs of descriptors that share
 * the same status, plus the positions of the software head/tail and the
 * hardware's current and next descriptor pointers. */
static void show_rx_chain(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start= chain->tail;
	struct spider_net_descr *descr= start;
	struct spider_net_hw_descr *hwd = start->hwdescr;
	struct device *dev = &card->netdev->dev;
	u32 curr_desc, next_desc;
	int status;

	int tot = 0;
	int cnt = 0;
	int off = start - chain->ring;
	int cstat = hwd->dmac_cmd_status;

	dev_info(dev, "Total number of descrs=%d\n",
		chain->num_desc);
	dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
		off, cstat);

	curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
	next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);

	status = cstat;
	do
	{
		hwd = descr->hwdescr;
		off = descr - chain->ring;
		status = hwd->dmac_cmd_status;

		if (descr == chain->head)
			dev_info(dev, "Chain head is at %d, head status=0x%x\n",
			         off, status);

		if (curr_desc == descr->bus_addr)
			dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
			         off, status);

		if (next_desc == descr->bus_addr)
			dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
			         off, status);

		if (hwd->next_descr_addr == 0)
			dev_info(dev, "chain is cut at %d\n", off);

		if (cstat != status) {
			int from = (chain->num_desc + off - cnt) % chain->num_desc;
			int to = (chain->num_desc + off - 1) % chain->num_desc;
			dev_info(dev, "Have %d (from %d to %d) descrs "
			         "with stat=0x%08x\n", cnt, from, to, cstat);
			cstat = status;
			cnt = 0;
		}

		cnt ++;
		tot ++;
		descr = descr->next;
	} while (descr != start);

	dev_info(dev, "Last %d descrs with stat=0x%08x "
	         "for a total of %d descrs\n", cnt, cstat, tot);

#ifdef DEBUG
	/* Now dump the whole ring */
	descr = start;
	do
	{
		struct spider_net_hw_descr *hwd = descr->hwdescr;
		status = spider_net_get_descr_status(hwd);
		cnt = descr - chain->ring;
		dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
		         cnt, status, descr->skb);
		dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
		         descr->bus_addr, hwd->buf_addr, hwd->buf_size);
		dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
		         hwd->next_descr_addr, hwd->result_size,
		         hwd->valid_size);
		dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
		         hwd->dmac_cmd_status, hwd->data_status,
		         hwd->data_error);
		dev_info(dev, "\n");

		descr = descr->next;
	} while (descr != start);
#endif

}

/**
 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
 *
 * If the driver fails to keep up and empty the queue, then the
 * hardware wil run out of room to put incoming packets.
 * This
 * will cause the hardware to skip descrs that are full (instead
 * of halting/retrying). Thus, once the driver runs, it will need
 * to "catch up" to where the hardware chain pointer is at.
 */
static void spider_net_resync_head_ptr(struct spider_net_card *card)
{
	unsigned long flags;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr;
	int i, status;

	/* Advance head pointer past any empty descrs */
	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);

	if (status == SPIDER_NET_DESCR_NOT_IN_USE)
		return;

	spin_lock_irqsave(&chain->lock, flags);

	/* re-read head under the lock; it may have moved since the
	 * lock-free check above */
	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);
	for (i=0; i<chain->num_desc; i++) {
		if (status != SPIDER_NET_DESCR_CARDOWNED) break;
		descr = descr->next;
		status = spider_net_get_descr_status(descr->hwdescr);
	}
	chain->head = descr;

	spin_unlock_irqrestore(&chain->lock, flags);
}

/* Advance the RX tail pointer past descriptors that are still card-owned
 * or already reaped. Returns 1 if the whole ring was traversed or no
 * progress was made (ring empty or completely full), 0 otherwise. */
static int spider_net_resync_tail_ptr(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr;
	int i, status;

	/* Advance tail pointer past any empty and reaped descrs */
	descr = chain->tail;
	status = spider_net_get_descr_status(descr->hwdescr);

	for (i=0; i<chain->num_desc; i++) {
		if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
		    (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
		descr = descr->next;
		status = spider_net_get_descr_status(descr->hwdescr);
	}
	chain->tail = descr;

	if ((i == chain->num_desc) || (i == 0))
		return 1;
	return 0;
}

/**
 * spider_net_decode_one_descr - processes an RX descriptor
 * @card: card structure
 *
 * Returns 1 if a packet has been sent to the stack, otherwise 0.
 *
 * Processes an RX descriptor by iommu-unmapping the data buffer
 * and passing the packet up to the stack. This function is called
 * in softirq context, e.g. either bottom half from interrupt or
 * NAPI polling context.
 */
static int
spider_net_decode_one_descr(struct spider_net_card *card)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr = chain->tail;
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	u32 hw_buf_addr;
	int status;

	status = spider_net_get_descr_status(hwdescr);

	/* Nothing in the descriptor, or ring must be empty */
	if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
	    (status == SPIDER_NET_DESCR_NOT_IN_USE))
		return 0;

	/* descriptor definitively used -- move on tail */
	chain->tail = descr->next;

	/* unmap descriptor */
	hw_buf_addr = hwdescr->buf_addr;
	hwdescr->buf_addr = 0xffffffff; /* poison value: no longer mapped */
	pci_unmap_single(card->pdev, hw_buf_addr,
			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);

	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&dev->dev,
			       "dropping RX descriptor with state %d\n", status);
		dev->stats.rx_dropped++;
		goto bad_desc;
	}

	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
			       "RX descriptor with unknown state %d\n", status);
		card->spider_stats.rx_desc_unk_state++;
		goto bad_desc;
	}

	/* The cases we'll throw away the packet immediately */
	if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
			       "error in received descriptor found, "
			       "data_status=x%08x, data_error=x%08x\n",
			       hwdescr->data_status, hwdescr->data_error);
		goto bad_desc;
	}

	if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
		/* dump the full descriptor for diagnosis before dropping */
		dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
			       hwdescr->dmac_cmd_status);
		pr_err("buf_addr=x%08x\n", hw_buf_addr);
		pr_err("buf_size=x%08x\n", hwdescr->buf_size);
		pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
		pr_err("result_size=x%08x\n", hwdescr->result_size);
		pr_err("valid_size=x%08x\n", hwdescr->valid_size);
		pr_err("data_status=x%08x\n", hwdescr->data_status);
		pr_err("data_error=x%08x\n", hwdescr->data_error);
		pr_err("which=%ld\n", descr - card->rx_chain.ring);

		card->spider_stats.rx_desc_error++;
		goto bad_desc;
	}

	/* Ok, we've got a packet in descr */
	spider_net_pass_skb_up(descr, card);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 1;

bad_desc:
	if (netif_msg_rx_err(card))
		show_rx_chain(card);
	dev_kfree_skb_irq(descr->skb);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 0;
}

/**
 * spider_net_poll - NAPI poll function called by the stack to return packets
 * @netdev: interface device structure
 * @budget: number of packets we can pass to the stack at most
 *
 * returns 0 if no more packets available to the driver/stack. Returns 1,
 * if the quota is exceeded, but the driver has still packets.
 *
 * spider_net_poll returns all packets from the rx descriptors to the stack
 * (using netif_receive_skb). If all/enough packets are up, the driver
 * reenables interrupts and returns 0. If not, 1 is returned.
 */
static int spider_net_poll(struct napi_struct *napi, int budget)
{
	struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
	int packets_done = 0;

	while (packets_done < budget) {
		if (!spider_net_decode_one_descr(card))
			break;

		packets_done++;
	}

	if ((packets_done == 0) && (card->num_rx_ints != 0)) {
		/* no progress although an RX interrupt fired: the driver
		 * may have fallen behind the hardware chain pointers;
		 * resync and stay scheduled by claiming the full budget */
		if (!spider_net_resync_tail_ptr(card))
			packets_done = budget;
		spider_net_resync_head_ptr(card);
	}
	card->num_rx_ints = 0;

	spider_net_refill_rx_chain(card);
	spider_net_enable_rxdmac(card);

	spider_net_cleanup_tx_ring(card);

	/* if all packets are in the stack, enable interrupts and return 0 */
	/* if not, return 1 */
	if (packets_done < budget) {
		napi_complete(napi);
		spider_net_rx_irq_on(card);
		card->ignore_rx_ramfull = 0;
	}

	return packets_done;
}

/**
 * spider_net_change_mtu - changes the MTU of an interface
 * @netdev: interface device structure
 * @new_mtu: new MTU value
 *
 * returns 0 on success, <0 on failure
 */
static int
spider_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	/* no need to re-alloc skbs or so -- the max mtu is about 2.3k
	 * and mtu is outbound only anyway */
	if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
		(new_mtu > SPIDER_NET_MAX_MTU) )
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

/**
 * spider_net_set_mac - sets the MAC of an interface
 * @netdev: interface device structure
 * @ptr: pointer to new MAC address
 *
 * Returns 0 on success, -EADDRNOTAVAIL on failure.
 * NOTE(review): an older comment claimed this always returns -EOPNOTSUPP;
 * the implementation does program the MAC address into the hardware.
 */
static int
spider_net_set_mac(struct net_device *netdev, void *p)
{
	struct spider_net_card *card = netdev_priv(netdev);
	u32 macl, macu, regvalue;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* switch off GMACTPE and GMACRPE */
	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
	regvalue &= ~((1 << 5) | (1 << 6));
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);

	/* write mac */
	macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
		(addr->sa_data[2]<<8) + (addr->sa_data[3]);
	macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);

	/* switch GMACTPE and GMACRPE back on */
	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
	regvalue |= ((1 << 5) | (1 << 6));
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);

	spider_net_set_promisc(card);

	/* look up, whether we have been successful */
	if (spider_net_get_mac_address(netdev))
		return -EADDRNOTAVAIL;
	if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
		return -EADDRNOTAVAIL;

	return 0;
}

/**
 * spider_net_link_reset
 * @netdev: net device structure
 *
 * This is called when the PHY_LINK signal is asserted. For the blade this is
 * not connected so we should never get here.
 *
 */
static void
spider_net_link_reset(struct net_device *netdev)
{

	struct spider_net_card *card = netdev_priv(netdev);

	del_timer_sync(&card->aneg_timer);

	/* clear interrupt, block further interrupts */
	spider_net_write_reg(card, SPIDER_NET_GMACST,
			     spider_net_read_reg(card, SPIDER_NET_GMACST));
	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);

	/* reset phy and setup aneg */
	card->aneg_count = 0;
	card->medium = BCM54XX_COPPER;
	spider_net_setup_aneg(card);
	mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);

}

/**
 * spider_net_handle_error_irq - handles errors raised by an interrupt
 * @card: card structure
 * @status_reg: interrupt status register 0 (GHIINT0STS)
 *
 * spider_net_handle_error_irq treats or ignores all error conditions
 * found when an interrupt is presented
 */
static void
spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
			    u32 error_reg1, u32 error_reg2)
{
	u32 i;
	int show_error = 1;

	/* check GHIINT0STS ************************************/
	if (status_reg)
		for (i = 0; i < 32; i++)
			if (status_reg & (1<<i))
				switch (i)
	{
	/* let error_reg1 and error_reg2 evaluation decide, what to do
	case SPIDER_NET_PHYINT:
	case SPIDER_NET_GMAC2INT:
	case SPIDER_NET_GMAC1INT:
	case SPIDER_NET_GFIFOINT:
	case SPIDER_NET_DMACINT:
	case SPIDER_NET_GSYSINT:
		break; */

	case SPIDER_NET_GIPSINT:
		show_error = 0;
		break;

	case SPIDER_NET_GPWOPCMPINT:
		/* PHY write operation completed */
		show_error = 0;
		break;
	case SPIDER_NET_GPROPCMPINT:
		/* PHY read operation completed */
		/* we don't use semaphores, as we poll for the completion
		 * of the read operation in spider_net_read_phy. Should take
		 * about 50 us */
		show_error = 0;
		break;
	case SPIDER_NET_GPWFFINT:
		/* PHY command queue full */
		if (netif_msg_intr(card))
			dev_err(&card->netdev->dev, "PHY write queue full\n");
		show_error = 0;
		break;

	/* case SPIDER_NET_GRMDADRINT: not used. print a message */
	/* case SPIDER_NET_GRMARPINT: not used. print a message */
	/* case SPIDER_NET_GRMMPINT: not used. print a message */

	case SPIDER_NET_GDTDEN0INT:
		/* someone has set TX_DMA_EN to 0 */
		show_error = 0;
		break;

	case SPIDER_NET_GDDDEN0INT: /* fallthrough */
	case SPIDER_NET_GDCDEN0INT: /* fallthrough */
	case SPIDER_NET_GDBDEN0INT: /* fallthrough */
	case SPIDER_NET_GDADEN0INT:
		/* someone has set RX_DMA_EN to 0 */
		show_error = 0;
		break;

	/* RX interrupts */
	case SPIDER_NET_GDDFDCINT:
	case SPIDER_NET_GDCFDCINT:
	case SPIDER_NET_GDBFDCINT:
	case SPIDER_NET_GDAFDCINT:
	/* case SPIDER_NET_GDNMINT: not used. print a message */
	/* case SPIDER_NET_GCNMINT: not used. print a message */
	/* case SPIDER_NET_GBNMINT: not used. print a message */
	/* case SPIDER_NET_GANMINT: not used. print a message */
	/* case SPIDER_NET_GRFNMINT: not used. print a message */
		show_error = 0;
		break;

	/* TX interrupts */
	case SPIDER_NET_GDTFDCINT:
		show_error = 0;
		break;
	case SPIDER_NET_GTTEDINT:
		show_error = 0;
		break;
	case SPIDER_NET_GDTDCEINT:
		/* chain end. If a descriptor should be sent, kick off
		 * tx dma
		if (card->tx_chain.tail != card->tx_chain.head)
			spider_net_kick_tx_dma(card);
		*/
		show_error = 0;
		break;

	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
	/* case SPIDER_NET_GFREECNTINT: not used. print a message */
	}

	/* check GHIINT1STS ************************************/
	if (error_reg1)
		for (i = 0; i < 32; i++)
			if (error_reg1 & (1<<i))
				switch (i)
	{
	case SPIDER_NET_GTMFLLINT:
		/* TX RAM full may happen on a usual case.
		 * Logging is not needed. */
		show_error = 0;
		break;
	case SPIDER_NET_GRFDFLLINT: /* fallthrough */
	case SPIDER_NET_GRFCFLLINT: /* fallthrough */
	case SPIDER_NET_GRFBFLLINT: /* fallthrough */
	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
	case SPIDER_NET_GRMFLLINT:
		/* Could happen when rx chain is full */
		if (card->ignore_rx_ramfull == 0) {
			card->ignore_rx_ramfull = 1;
			spider_net_resync_head_ptr(card);
			spider_net_refill_rx_chain(card);
			spider_net_enable_rxdmac(card);
			card->num_rx_ints ++;
			napi_schedule(&card->napi);
		}
		show_error = 0;
		break;

	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
	case SPIDER_NET_GDTINVDINT:
		/* allrighty. tx from previous descr ok */
		show_error = 0;
		break;

	/* chain end */
	case SPIDER_NET_GDDDCEINT: /* fallthrough */
	case SPIDER_NET_GDCDCEINT: /* fallthrough */
	case SPIDER_NET_GDBDCEINT: /* fallthrough */
	case SPIDER_NET_GDADCEINT:
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* invalid descriptor */
	case SPIDER_NET_GDDINVDINT: /* fallthrough */
	case SPIDER_NET_GDCINVDINT: /* fallthrough */
	case SPIDER_NET_GDBINVDINT: /* fallthrough */
	case SPIDER_NET_GDAINVDINT:
		/* Could happen when rx chain is full */
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* case SPIDER_NET_GDTRSERINT: problem, print a message */
	/* case SPIDER_NET_GDDRSERINT: problem, print a message */
	/* case SPIDER_NET_GDCRSERINT: problem, print a message */
	/* case SPIDER_NET_GDBRSERINT: problem, print a message */
	/* case SPIDER_NET_GDARSERINT: problem, print a message */
	/* case SPIDER_NET_GDSERINT: problem, print a message */
	/* case SPIDER_NET_GDTPTERINT: problem, print a message */
	/* case SPIDER_NET_GDDPTERINT: problem, print a message */
	/* case SPIDER_NET_GDCPTERINT: problem, print a message */
	/* case SPIDER_NET_GDBPTERINT: problem, print a message */
	/* case SPIDER_NET_GDAPTERINT: problem, print a message */
	default:
		show_error = 1;
		break;
	}

	/* check GHIINT2STS ************************************/
	if (error_reg2)
		for (i = 0; i < 32; i++)
			if (error_reg2 & (1<<i))
				switch (i)
	{
	/* there is nothing we can (want to) do at this time. Log a
	 * message, we can switch on and off the specific values later on
	case SPIDER_NET_GPROPERINT:
	case SPIDER_NET_GMCTCRSNGINT:
	case SPIDER_NET_GMCTLCOLINT:
	case SPIDER_NET_GMCTTMOTINT:
	case SPIDER_NET_GMCRCAERINT:
	case SPIDER_NET_GMCRCALERINT:
	case SPIDER_NET_GMCRALNERINT:
	case SPIDER_NET_GMCROVRINT:
	case SPIDER_NET_GMCRRNTINT:
	case SPIDER_NET_GMCRRXERINT:
	case SPIDER_NET_GTITCSERINT:
	case SPIDER_NET_GTIFMTERINT:
	case SPIDER_NET_GTIPKTRVKINT:
	case SPIDER_NET_GTISPINGINT:
	case SPIDER_NET_GTISADNGINT:
	case SPIDER_NET_GTISPDNGINT:
	case SPIDER_NET_GRIFMTERINT:
	case SPIDER_NET_GRIPKTRVKINT:
	case SPIDER_NET_GRISPINGINT:
	case SPIDER_NET_GRISADNGINT:
	case SPIDER_NET_GRISPDNGINT:
		break;
	*/
	default:
		break;
	}

	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
		dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
		       status_reg, error_reg1, error_reg2);

	/* clear interrupt sources */
	spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
}

/**
 * spider_net_interrupt - interrupt handler for spider_net
 * @irq: interrupt number
 * @ptr: pointer to net_device
 *
 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
 * interrupt found raised by card.
 *
 * This is the interrupt handler, that turns off
 * interrupts for this device and makes the stack poll the driver
 */
static irqreturn_t
spider_net_interrupt(int irq, void *ptr)
{
	struct net_device *netdev = ptr;
	struct spider_net_card *card = netdev_priv(netdev);
	u32 status_reg, error_reg1, error_reg2;

	status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
	error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
	error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);

	/* no bit of interest set in any status register: not our irq */
	if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
	    !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
	    !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
		return IRQ_NONE;

	if (status_reg & SPIDER_NET_RXINT ) {
		/* mask further RX interrupts and let NAPI poll */
		spider_net_rx_irq_off(card);
		napi_schedule(&card->napi);
		card->num_rx_ints ++;
	}
	if (status_reg & SPIDER_NET_TXINT)
		napi_schedule(&card->napi);

	if (status_reg & SPIDER_NET_LINKINT)
		spider_net_link_reset(netdev);

	if (status_reg & SPIDER_NET_ERRINT )
		spider_net_handle_error_irq(card, status_reg,
					    error_reg1, error_reg2);

	/* clear interrupt sources */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * spider_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure * * see Documentation/networking/netconsole.txt */ static void spider_net_poll_controller(struct net_device *netdev) { disable_irq(netdev->irq); spider_net_interrupt(netdev->irq, netdev); enable_irq(netdev->irq); } #endif /* CONFIG_NET_POLL_CONTROLLER */ /** * spider_net_enable_interrupts - enable interrupts * @card: card structure * * spider_net_enable_interrupt enables several interrupts */ static void spider_net_enable_interrupts(struct spider_net_card *card) { spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, SPIDER_NET_INT0_MASK_VALUE); spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, SPIDER_NET_INT1_MASK_VALUE); spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, SPIDER_NET_INT2_MASK_VALUE); } /** * spider_net_disable_interrupts - disable interrupts * @card: card structure * * spider_net_disable_interrupts disables all the interrupts */ static void spider_net_disable_interrupts(struct spider_net_card *card) { spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); } /** * spider_net_init_card - initializes the card * @card: card structure * * spider_net_init_card initializes the card so that other registers can * be used */ static void spider_net_init_card(struct spider_net_card *card) { spider_net_write_reg(card, SPIDER_NET_CKRCTRL, SPIDER_NET_CKRCTRL_STOP_VALUE); spider_net_write_reg(card, SPIDER_NET_CKRCTRL, SPIDER_NET_CKRCTRL_RUN_VALUE); /* trigger ETOMOD signal */ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4); spider_net_disable_interrupts(card); } /** * spider_net_enable_card - enables the card by setting all kinds of regs * @card: card structure * * spider_net_enable_card sets a lot of SMMIO registers to enable the device */ static void spider_net_enable_card(struct spider_net_card *card) { int i; /* 
	 * the following array consists of (register),(value) pairs
	 * that are set in this function. A register of 0 ends the list
	 */
	u32 regs[][2] = {
		{ SPIDER_NET_GRESUMINTNUM, 0 },
		{ SPIDER_NET_GREINTNUM, 0 },

		/* set interrupt frame number registers */
		/* clear the single DMA engine registers first */
		{ SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		/* then set, what we really need */
		{ SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },

		/* timer counter registers and stuff */
		{ SPIDER_NET_GFREECNNUM, 0 },
		{ SPIDER_NET_GONETIMENUM, 0 },
		{ SPIDER_NET_GTOUTFRMNUM, 0 },

		/* RX mode setting */
		{ SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
		/* TX mode setting */
		{ SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
		/* IPSEC mode setting */
		{ SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },

		{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },

		{ SPIDER_NET_GMRWOLCTRL, 0 },
		{ SPIDER_NET_GTESTMD, 0x10000000 },
		{ SPIDER_NET_GTTQMSK, 0x00400040 },

		{ SPIDER_NET_GMACINTEN, 0 },

		/* flow control stuff */
		{ SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
		{ SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },

		{ SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
		{ 0, 0}
	};

	/* write the (register, value) table until the 0 sentinel */
	i = 0;
	while (regs[i][0]) {
		spider_net_write_reg(card, regs[i][0], regs[i][1]);
		i++;
	}

	/* clear unicast filter table entries 1 to 14 */
	for (i = 1; i <= 14; i++) {
		spider_net_write_reg(card,
				     SPIDER_NET_GMRUAFILnR + i * 8,
				     0x00080000);
		spider_net_write_reg(card,
				     SPIDER_NET_GMRUAFILnR + i * 8 + 4,
				     0x00000000);
	}

	spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);

	spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);

	/* set chain tail address for RX chains and
	 * enable DMA */
	spider_net_enable_rxchtails(card);
	spider_net_enable_rxdmac(card);

	spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);

	spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
			     SPIDER_NET_LENLMT_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
			     SPIDER_NET_OPMODE_VALUE);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_GDTBSTA);
}

/**
 * spider_net_download_firmware - loads firmware into the adapter
 * @card: card structure
 * @firmware_ptr: pointer to firmware data
 *
 * spider_net_download_firmware loads the firmware data into the
 * adapter. It assumes the length etc. to be allright.
 */
static int
spider_net_download_firmware(struct spider_net_card *card,
			     const void *firmware_ptr)
{
	int sequencer, i;
	const u32 *fw_ptr = firmware_ptr;

	/* stop sequencers */
	spider_net_write_reg(card, SPIDER_NET_GSINIT,
			     SPIDER_NET_STOP_SEQ_VALUE);

	/* feed each sequencer its program words through its own
	 * address/data register pair (8 bytes apart per sequencer) */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		spider_net_write_reg(card,
				     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
					     sequencer * 8, *fw_ptr);
			fw_ptr++;
		}
	}

	/* a non-zero GSINIT here indicates the download failed */
	if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
		return -EIO;

	spider_net_write_reg(card, SPIDER_NET_GSINIT,
			     SPIDER_NET_RUN_SEQ_VALUE);

	return 0;
}

/**
 * spider_net_init_firmware - reads in firmware parts
 * @card: card structure
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_init_firmware opens the sequencer firmware and does some basic
 * checks. This function opens and releases the firmware structure. A call
 * to download the firmware is performed before the release.
 *
 * Firmware format
 * ===============
 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
 * the program for each sequencer.
Use the command * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \ * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \ * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin * * to generate spider_fw.bin, if you have sequencer programs with something * like the following contents for each sequencer: * <ONE LINE COMMENT> * <FIRST 4-BYTES-WORD FOR SEQUENCER> * <SECOND 4-BYTES-WORD FOR SEQUENCER> * ... * <1024th 4-BYTES-WORD FOR SEQUENCER> */ static int spider_net_init_firmware(struct spider_net_card *card) { struct firmware *firmware = NULL; struct device_node *dn; const u8 *fw_prop = NULL; int err = -ENOENT; int fw_size; if (request_firmware((const struct firmware **)&firmware, SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) { if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) && netif_msg_probe(card) ) { dev_err(&card->netdev->dev, "Incorrect size of spidernet firmware in " \ "filesystem. Looking in host firmware...\n"); goto try_host_fw; } err = spider_net_download_firmware(card, firmware->data); release_firmware(firmware); if (err) goto try_host_fw; goto done; } try_host_fw: dn = pci_device_to_OF_node(card->pdev); if (!dn) goto out_err; fw_prop = of_get_property(dn, "firmware", &fw_size); if (!fw_prop) goto out_err; if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) && netif_msg_probe(card) ) { dev_err(&card->netdev->dev, "Incorrect size of spidernet firmware in host firmware\n"); goto done; } err = spider_net_download_firmware(card, fw_prop); done: return err; out_err: if (netif_msg_probe(card)) dev_err(&card->netdev->dev, "Couldn't find spidernet firmware in filesystem " \ "or host firmware\n"); return err; } /** * spider_net_open - called upon ifonfig up * @netdev: interface device structure * * returns 0 on success, <0 on failure * * spider_net_open allocates all the descriptors and memory needed for * operation, sets up multicast list and enables interrupts */ int spider_net_open(struct net_device *netdev) { struct spider_net_card *card = 
		netdev_priv(netdev);
	int result;

	/* firmware must be loaded before the MAC is brought up */
	result = spider_net_init_firmware(card);
	if (result)
		goto init_firmware_failed;

	/* start probing with copper */
	card->aneg_count = 0;
	card->medium = BCM54XX_COPPER;
	spider_net_setup_aneg(card);
	if (card->phy.def->phy_id)
		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);

	result = spider_net_init_chain(card, &card->tx_chain);
	if (result)
		goto alloc_tx_failed;
	card->low_watermark = NULL;

	result = spider_net_init_chain(card, &card->rx_chain);
	if (result)
		goto alloc_rx_failed;

	/* Allocate rx skbs */
	result = spider_net_alloc_rx_skbs(card);
	if (result)
		goto alloc_skbs_failed;

	spider_net_set_multi(netdev);

	/* further enhancement: setup hw vlan, if needed */

	result = -EBUSY;
	if (request_irq(netdev->irq, spider_net_interrupt,
			IRQF_SHARED, netdev->name, netdev))
		goto register_int_failed;

	spider_net_enable_card(card);

	netif_start_queue(netdev);
	netif_carrier_on(netdev);
	napi_enable(&card->napi);

	spider_net_enable_interrupts(card);

	return 0;

	/* unwind in reverse order of the setup above */
register_int_failed:
	spider_net_free_rx_chain_contents(card);
alloc_skbs_failed:
	spider_net_free_chain(card, &card->rx_chain);
alloc_rx_failed:
	spider_net_free_chain(card, &card->tx_chain);
alloc_tx_failed:
	del_timer_sync(&card->aneg_timer);
init_firmware_failed:
	return result;
}

/**
 * spider_net_link_phy
 * @data: used for pointer to card structure
 *
 * Timer callback: retries autonegotiation, cycling the PHY medium
 * copper -> fiber(autoneg) -> fiber(no autoneg) until a link comes up.
 */
static void spider_net_link_phy(unsigned long data)
{
	struct spider_net_card *card = (struct spider_net_card *)data;
	struct mii_phy *phy = &card->phy;

	/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
	if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {

		pr_debug("%s: link is down trying to bring it up\n",
			 card->netdev->name);

		switch (card->medium) {
		case BCM54XX_COPPER:
			/* enable fiber with autonegotiation first */
			if (phy->def->ops->enable_fiber)
				phy->def->ops->enable_fiber(phy, 1);
			card->medium = BCM54XX_FIBER;
			break;

		case BCM54XX_FIBER:
			/* fiber didn't come up, try to disable fiber autoneg */
			if (phy->def->ops->enable_fiber)
				phy->def->ops->enable_fiber(phy, 0);
			card->medium = BCM54XX_UNKNOWN;
			break;

		case BCM54XX_UNKNOWN:
			/* copper, fiber with and without failed,
			 * retry from beginning */
			spider_net_setup_aneg(card);
			card->medium = BCM54XX_COPPER;
			break;
		}

		card->aneg_count = 0;
		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
		return;
	}

	/* link still not up, try again later */
	if (!(phy->def->ops->poll_link(phy))) {
		card->aneg_count++;
		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
		return;
	}

	/* link came up, get abilities */
	phy->def->ops->read_link(phy);

	/* ack MAC status, enable the link-change MAC interrupt */
	spider_net_write_reg(card, SPIDER_NET_GMACST,
			     spider_net_read_reg(card, SPIDER_NET_GMACST));
	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);

	if (phy->speed == 1000)
		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
	else
		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);

	card->aneg_count = 0;

	pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
		card->netdev->name, phy->speed,
		phy->duplex == 1 ? "Full" : "Half",
		phy->autoneg == 1 ? "" : "no ");
}

/**
 * spider_net_setup_phy - setup PHY
 * @card: card structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_setup_phy is used as part of spider_net_probe.
 **/
static int
spider_net_setup_phy(struct spider_net_card *card)
{
	struct mii_phy *phy = &card->phy;

	spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
			     SPIDER_NET_DMASEL_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
			     SPIDER_NET_PHY_CTRL_VALUE);

	phy->dev = card->netdev;
	phy->mdio_read = spider_net_read_phy;
	phy->mdio_write = spider_net_write_phy;

	/* scan MII addresses 1..31 for a responding PHY; BMSR of 0x0000
	 * or 0xffff means nothing answered at that address */
	for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
		unsigned short id;
		id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
		if (id != 0x0000 && id != 0xffff) {
			if (!sungem_phy_probe(phy, phy->mii_id)) {
				pr_info("Found %s.\n", phy->def->name);
				break;
			}
		}
	}

	return 0;
}

/**
 * spider_net_workaround_rxramfull - work around firmware bug
 * @card: card structure
 *
 * no return value
 **/
static void
spider_net_workaround_rxramfull(struct spider_net_card *card)
{
	int i, sequencer = 0;

	/* cancel reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	/* empty sequencer data */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
				     sequencer * 8, 0x0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
					     sequencer * 8, 0x0);
		}
	}

	/* set sequencer operation */
	spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);

	/* reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
}

/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 */
int
spider_net_stop(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	/* quiesce software first: NAPI, queue, timers */
	napi_disable(&card->napi);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	del_timer_sync(&card->tx_timer);
	del_timer_sync(&card->aneg_timer);

	spider_net_disable_interrupts(card);

	free_irq(netdev->irq, netdev);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_FEND_VALUE);

	/* turn off DMA, force end */
	spider_net_disable_rxdmac(card);

	/* release chains */
	spider_net_release_tx_chain(card, 1);
	spider_net_free_rx_chain_contents(card);

	spider_net_free_chain(card, &card->tx_chain);
	spider_net_free_chain(card, &card->rx_chain);

	return 0;
}

/**
 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (to be called not under interrupt status)
 * @work: work struct embedded in the card structure
 *
 * called as task when tx hangs, resets interface (if interface is up)
 */
static void
spider_net_tx_timeout_task(struct work_struct *work)
{
	struct spider_net_card *card =
		container_of(work, struct spider_net_card, tx_timeout_task);
	struct net_device *netdev = card->netdev;

	if (!(netdev->flags & IFF_UP))
		goto out;

	/* full down/up cycle to recover the hardware */
	netif_device_detach(netdev);
	spider_net_stop(netdev);

	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	if (spider_net_setup_phy(card))
		goto out;

	spider_net_open(netdev);
	spider_net_kick_tx_dma(card);
	netif_device_attach(netdev);

out:
	/* balances the atomic_inc done in spider_net_tx_timeout */
	atomic_dec(&card->tx_timeout_task_counter);
}

/**
 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
 * @netdev: interface device structure
 *
 * called, if tx hangs.
 Schedules a task that resets the interface
 */
static void
spider_net_tx_timeout(struct net_device *netdev)
{
	struct spider_net_card *card;

	card = netdev_priv(netdev);
	atomic_inc(&card->tx_timeout_task_counter);
	if (netdev->flags & IFF_UP)
		schedule_work(&card->tx_timeout_task);
	else
		atomic_dec(&card->tx_timeout_task_counter);
	card->spider_stats.tx_timeouts++;
}

static const struct net_device_ops spider_net_ops = {
	.ndo_open		= spider_net_open,
	.ndo_stop		= spider_net_stop,
	.ndo_start_xmit		= spider_net_xmit,
	.ndo_set_rx_mode	= spider_net_set_multi,
	.ndo_set_mac_address	= spider_net_set_mac,
	.ndo_change_mtu		= spider_net_change_mtu,
	.ndo_do_ioctl		= spider_net_do_ioctl,
	.ndo_tx_timeout		= spider_net_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	/* HW VLAN */
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* poll controller */
	.ndo_poll_controller	= spider_net_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};

/**
 * spider_net_setup_netdev_ops - initialization of net_device operations
 * @netdev: net_device structure
 *
 * fills out function pointers in the net_device structure
 */
static void
spider_net_setup_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &spider_net_ops;
	netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
	/* ethtool ops */
	netdev->ethtool_ops = &spider_net_ethtool_ops;
}

/**
 * spider_net_setup_netdev - initialization of net_device
 * @card: card structure
 *
 * Returns 0 on success or <0 on failure
 *
 * spider_net_setup_netdev initializes the net_device structure
 **/
static int
spider_net_setup_netdev(struct spider_net_card *card)
{
	int result;
	struct net_device *netdev = card->netdev;
	struct device_node *dn;
	struct sockaddr addr;
	const u8 *mac;

	SET_NETDEV_DEV(netdev, &card->pdev->dev);

	pci_set_drvdata(card->pdev, netdev);

	init_timer(&card->tx_timer);
	card->tx_timer.function =
		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
	card->tx_timer.data = (unsigned long) card;
	netdev->irq = card->pdev->irq;

	card->aneg_count = 0;
	init_timer(&card->aneg_timer);
	card->aneg_timer.function = spider_net_link_phy;
	card->aneg_timer.data = (unsigned long) card;

	netif_napi_add(netdev, &card->napi,
		       spider_net_poll, SPIDER_NET_NAPI_WEIGHT);

	spider_net_setup_netdev_ops(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
	if (SPIDER_NET_RX_CSUM_DEFAULT)
		netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
	/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
	 *		NETIF_F_HW_VLAN_CTAG_FILTER */

	netdev->irq = card->pdev->irq;
	card->num_rx_ints = 0;
	card->ignore_rx_ramfull = 0;

	/* the MAC address comes from the device tree */
	dn = pci_device_to_OF_node(card->pdev);
	if (!dn)
		return -EIO;

	mac = of_get_property(dn, "local-mac-address", NULL);
	if (!mac)
		return -EIO;
	memcpy(addr.sa_data, mac, ETH_ALEN);

	result = spider_net_set_mac(netdev, &addr);
	if ((result) && (netif_msg_probe(card)))
		dev_err(&card->netdev->dev,
			"Failed to set MAC address: %i\n", result);

	result = register_netdev(netdev);
	if (result) {
		if (netif_msg_probe(card))
			dev_err(&card->netdev->dev,
				"Couldn't register net_device: %i\n", result);
		return result;
	}

	if (netif_msg_probe(card))
		pr_info("Initialized device %s.\n", netdev->name);

	return 0;
}

/**
 * spider_net_alloc_card - allocates net_device and card structure
 *
 * returns the card structure or NULL in case of errors
 *
 * the card and net_device structures are linked to each other
 */
static struct spider_net_card *
spider_net_alloc_card(void)
{
	struct net_device *netdev;
	struct spider_net_card *card;
	size_t alloc_size;

	/* the descriptor rings live in the same allocation, directly
	 * after the card structure */
	alloc_size = sizeof(struct spider_net_card) +
	   (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
	netdev = alloc_etherdev(alloc_size);
	if (!netdev)
		return NULL;

	card = netdev_priv(netdev);
	card->netdev = netdev;
	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
	init_waitqueue_head(&card->waitq);
	atomic_set(&card->tx_timeout_task_counter, 0);

	card->rx_chain.num_desc = rx_descriptors;
	card->rx_chain.ring
		= card->darray;
	card->tx_chain.num_desc = tx_descriptors;
	card->tx_chain.ring = card->darray + rx_descriptors;

	return card;
}

/**
 * spider_net_undo_pci_setup - releases PCI ressources
 * @card: card structure
 *
 * spider_net_undo_pci_setup releases the mapped regions
 */
static void
spider_net_undo_pci_setup(struct spider_net_card *card)
{
	iounmap(card->regs);
	pci_release_regions(card->pdev);
}

/**
 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
 * @pdev: PCI device
 *
 * Returns the card structure or NULL if any errors occur
 *
 * spider_net_setup_pci_dev initializes pdev and together with the
 * functions called in spider_net_open configures the device so that
 * data can be transferred over it
 * The net_device structure is attached to the card structure, if the
 * function returns without error.
 **/
static struct spider_net_card *
spider_net_setup_pci_dev(struct pci_dev *pdev)
{
	struct spider_net_card *card;
	unsigned long mmio_start, mmio_len;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Couldn't enable PCI device\n");
		return NULL;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Couldn't find proper PCI device base address.\n");
		goto out_disable_dev;
	}

	if (pci_request_regions(pdev, spider_net_driver_name)) {
		dev_err(&pdev->dev,
			"Couldn't obtain PCI resources, aborting.\n");
		goto out_disable_dev;
	}

	pci_set_master(pdev);

	card = spider_net_alloc_card();
	if (!card) {
		dev_err(&pdev->dev,
			"Couldn't allocate net_device structure, aborting.\n");
		goto out_release_regions;
	}
	card->pdev = pdev;

	/* fetch base address and length of first resource */
	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	card->netdev->mem_start = mmio_start;
	card->netdev->mem_end = mmio_start + mmio_len;
	card->regs = ioremap(mmio_start, mmio_len);

	/* NOTE(review): on this failure path the net_device allocated by
	 * spider_net_alloc_card() is not freed — looks like a leak;
	 * confirm against upstream. */
	if (!card->regs) {
		dev_err(&pdev->dev,
			"Couldn't obtain PCI resources, aborting.\n");
		goto out_release_regions;
	}

	return card;

out_release_regions:
	pci_release_regions(pdev);
out_disable_dev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return NULL;
}

/**
 * spider_net_probe - initialization of a device
 * @pdev: PCI device
 * @ent: entry in the device id list
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_probe initializes pdev and registers a net_device
 * structure for it. After that, the device can be ifconfig'ed up
 **/
static int
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -EIO;
	struct spider_net_card *card;

	card = spider_net_setup_pci_dev(pdev);
	if (!card)
		goto out;

	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	err = spider_net_setup_phy(card);
	if (err)
		goto out_undo_pci;

	err = spider_net_setup_netdev(card);
	if (err)
		goto out_undo_pci;

	return 0;

out_undo_pci:
	spider_net_undo_pci_setup(card);
	free_netdev(card->netdev);
out:
	return err;
}

/**
 * spider_net_remove - removal of a device
 * @pdev: PCI device
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_remove is called to remove the device and unregisters the
 * net_device
 **/
static void
spider_net_remove(struct pci_dev *pdev)
{
	struct net_device *netdev;
	struct spider_net_card *card;

	netdev = pci_get_drvdata(pdev);
	card = netdev_priv(netdev);

	/* wait for any pending tx-timeout recovery task to finish */
	wait_event(card->waitq,
		   atomic_read(&card->tx_timeout_task_counter) == 0);

	unregister_netdev(netdev);

	/* switch off card */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	spider_net_undo_pci_setup(card);
	free_netdev(netdev);
}

static struct pci_driver spider_net_driver = {
	.name		= spider_net_driver_name,
	.id_table	= spider_net_pci_tbl,
	.probe		= spider_net_probe,
	.remove		= spider_net_remove
};

/**
 * spider_net_init - init function when the driver is loaded
 *
 * spider_net_init registers the device driver
 */
static int __init spider_net_init(void)
{
	printk(KERN_INFO "Spidernet version %s.\n", VERSION);

	/* clamp the module parameters to their legal ranges */
	if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
	}
	if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
	}
	if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
	}
	if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
	}

	return pci_register_driver(&spider_net_driver);
}

/**
 * spider_net_cleanup - exit function when driver is unloaded
 *
 * spider_net_cleanup unregisters the device driver
 */
static void __exit spider_net_cleanup(void)
{
	pci_unregister_driver(&spider_net_driver);
}

module_init(spider_net_init);
module_exit(spider_net_cleanup);
gpl-2.0
arnavgosain/tomato
drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
2494
6604
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/mxm.h>
#include <subdev/bios.h>
#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/mxm.h>

#include "mxms.h"

struct nv50_mxm_priv {
	struct nouveau_mxm base;
};

/* per-DCB-entry state passed through the mxms_foreach callbacks */
struct context {
	u32 *outp;		/* the two DCB output words being sanitised */
	struct mxms_odev desc;	/* MXM output device matched to them */
};

static bool
mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
{
	struct context *ctx = info;
	struct mxms_odev desc;

	/* look for a TMDS device on the same digital connection */
	mxms_output_device(mxm, data, &desc);
	if (desc.outp_type == 2 &&
	    desc.dig_conn == ctx->desc.dig_conn)
		return false;
	return true;
}

static bool
mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
{
	struct nouveau_bios *bios = nouveau_bios(mxm);
	struct context *ctx = info;
	u64 desc = *(u64 *)data;

	mxms_output_device(mxm, data, &ctx->desc);

	/* match dcb encoder type to mxm-ods device type */
	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
		return true;

	/* digital output, have some extra stuff to match here, there's a
	 * table in the vbios that provides a mapping from the mxm digital
	 * connection enum values to SOR/link
	 */
	if ((desc & 0x00000000000000f0) >= 0x20) {
		/* check against sor index */
		u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
			return true;

		/* check dcb entry has a compatible link field */
		link = (link & 0x30) >> 4;
		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
			return true;
	}

	/* mark this descriptor accounted for by setting invalid device type,
	 * except of course some manufactures don't follow specs properly and
	 * we need to avoid killing off the TMDS function on DP connectors
	 * if MXM-SIS is missing an entry for it.
	 */
	data[0] &= ~0xf0;
	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
	    mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
		data[0] |= 0x20; /* modify descriptor to match TMDS now */
	} else {
		data[0] |= 0xf0;
	}

	return false;
}

static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data,
		       int idx, u16 pdcb)
{
	struct nouveau_mxm *mxm = nouveau_mxm(bios);
	struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
	u8 type, i2cidx, link, ver, len;
	u8 *conn;

	/* look for an output device structure that matches this dcb entry.
	 * if one isn't found, disable it.
	 */
	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
			idx, ctx.outp[0], ctx.outp[1]);
		ctx.outp[0] |= 0x0000000f;
		return 0;
	}

	/* modify the output's ddc/aux port, there's a pointer to a table
	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
	 * vbios mxm table
	 */
	i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
	if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
		i2cidx = (i2cidx & 0x0f) << 4;
	else
		i2cidx = (i2cidx & 0xf0);

	if (i2cidx != 0xf0) {
		ctx.outp[0] &= ~0x000000f0;
		ctx.outp[0] |= i2cidx;
	}

	/* override dcb sorconf.link, based on what mxm data says */
	switch (ctx.desc.outp_type) {
	case 0x00: /* Analog CRT */
	case 0x01: /* Analog TV/HDTV */
		break;
	default:
		link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
		ctx.outp[1] &= ~0x00000030;
		ctx.outp[1] |= link;
		break;
	}

	/* we may need to fixup various other vbios tables based on what
	 * the descriptor says the connector type should be.
	 *
	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
	 * and the mxm data says the connector is really HDMI. another
	 * common example is DP->eDP.
	 */
	conn  = bios->data;
	conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
	type  = conn[0];
	switch (ctx.desc.conn_type) {
	case 0x01: /* LVDS */
		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
		/* XXX: modify default link width in LVDS table */
		break;
	case 0x02: /* HDMI */
		type = DCB_CONNECTOR_HDMI_1;
		break;
	case 0x03: /* DVI-D */
		type = DCB_CONNECTOR_DVI_D;
		break;
	case 0x0e: /* eDP, falls through to DPint */
		ctx.outp[1] |= 0x00010000;
	case 0x07: /* DP internal, wtf is this?? HP8670w */
		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
		type = DCB_CONNECTOR_eDP;
		break;
	default:
		break;
	}

	if (mxms_version(mxm) >= 0x0300)
		conn[0] = type;

	return 0;
}

static bool
mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
{
	u64 desc = *(u64 *)data;
	/* 0xf0 in byte 0 marks descriptors consumed by mxm_match_dcb */
	if ((desc & 0xf0) != 0xf0)
		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
	return true;
}

static void
mxm_dcb_sanitise(struct nouveau_mxm *mxm)
{
	struct nouveau_bios *bios = nouveau_bios(mxm);
	u8  ver, hdr, cnt, len;
	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);

	/* only DCB 4.0 is handled here */
	if (dcb == 0x0000 || ver != 0x40) {
		nv_debug(mxm, "unsupported DCB version\n");
		return;
	}

	dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}

static int
nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_mxm_priv *priv;
	int ret;

	ret = nouveau_mxm_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	if (priv->base.action & MXM_SANITISE_DCB)
		mxm_dcb_sanitise(&priv->base);
	return 0;
}

struct nouveau_oclass
nv50_mxm_oclass = {
	.handle = NV_SUBDEV(MXM, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mxm_ctor,
		.dtor = _nouveau_mxm_dtor,
		.init = _nouveau_mxm_init,
		.fini = _nouveau_mxm_fini,
	},
};
gpl-2.0
chevanlol360/Kernel_LGE_W5
net/ipv4/netfilter/ip_tables.c
4542
56134
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

	/* FWINV(cond, flag): evaluate cond, XOR-ing in the corresponding
	 * inversion flag from the rule */
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	/* reject rules with flag/invflag bits we don't know about */
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
	/* NOTE(review): this #undef sits inside unconditional() in the
	 * original — FWINV's textual scope simply ends here. */
#undef FWINV
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		   t->verdict < 0 &&
		   unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	/* walk the chain until we find the entry being traced */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT.
 */
/* Main packet-filter entry point: walk the per-cpu copy of the rule
 * blob for the given hook, evaluating matches and targets.  Jumps to
 * user chains are recorded on a per-cpu jumpstack so XT_RETURN can
 * backtrack; write access to the counters is bracketed by
 * xt_write_recseq_begin/end so readers (get_counters) see consistent
 * snapshots.  Runs with bottom halves disabled. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		/* All extension matches on this entry must agree. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict encoded as -v - 1. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				/* Real jump (not goto, not fallthrough):
				 * push return point; refuse on overflow. */
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "currently on the
			 * walk path" — seeing it again means a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start.
					 */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

/* Release a match extension: run its destructor (if any) and drop the
 * module reference taken when it was looked up. */
static void
cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

/* Basic structural sanity of one entry: valid ipt_ip part and a
 * target record that fits inside [target_offset, next_offset). */
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

/* Run the xt core checkentry validation for one (already resolved)
 * match extension. */
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

/* Resolve a match extension by name/revision (may load a module),
 * then validate it; the module ref is dropped again on failure. */
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match =
xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ipt_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV4, }; int ret; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ip.proto, e->ip.invflags & IPT_INV_PROTO); if (ret < 0) { duprintf("check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; ret = check_entry(e, name); if (ret) return ret; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } return ret; } static bool check_underflow(const struct ipt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if 
(!unconditional(&e->ip)) return false; t = ipt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ipt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ipt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ipt_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, 
void *entry0, const struct ipt_replace *repl) { struct ipt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ipt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } /* And one copy for every other CPU */ for_each_possible_cpu(i) { if (newinfo->entries[i] && newinfo->entries[i] != entry0) memcpy(newinfo->entries[i], entry0, newinfo->size); } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ipt_entry *iter; unsigned int cpu; unsigned int i; 
for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries[cpu], t->size) { u64 bcnt, pcnt; unsigned int start; do { start = read_seqcount_begin(s); bcnt = iter->counters.bcnt; pcnt = iter->counters.pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; /* macro does multi eval of i */ } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ipt_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); /* choose the copy that is on our node/cpu, ... * This choice is lazy (because current thread is * allowed to migrate to another cpu) */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ipt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ipt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ipt_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ipt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct ipt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ipt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ipt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ipt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ipt_entry *iter; void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries[] */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries[raw_smp_processor_id()]; xt_compat_init_offsets(AF_INET, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ipt_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ipt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET, 
name), "iptable_%s", name); if (t && !IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); #endif return ret; } static int get_entries(struct net *net, struct ipt_get_entries __user *uptr, const int *len) { int ret; struct ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ipt_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET, get.name); if (t && !IS_ERR(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; void *loc_cpu_old_entry; struct ipt_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), "iptable_%s", name); if (!t || IS_ERR(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) ret = -EFAULT; vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct 
ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; /* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i, curcpu; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; void *loc_cpu_entry; struct ipt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - 
size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET, name); if (!t || IS_ERR(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; /* Choose the copy that is on our node */ curcpu = smp_processor_id(); loc_cpu_entry = private->entries[curcpu]; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, loc_cpu_entry, private->size) { ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ipt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ipt_entry entries[0]; }; static int compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ipt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ipt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ipt_entry); *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ipt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || 
put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, unsigned int hookmask, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ipt_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ipt_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ipt_entry *)e, name); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, e->comefrom, &off); if 
(ret != 0) goto release_matches; ++j; } t = compat_ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct ipt_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) 
newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_match *ematch; struct xt_mtchk_param mtpar; unsigned int j; int ret = 0; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry *iter0; struct ipt_entry *iter1; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); xt_compat_init_offsets(AF_INET, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries[raw_smp_processor_id()]; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ipt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } /* And one copy for every other CPU */ for_each_possible_cpu(i) if (newinfo->entries[i] && newinfo->entries[i] != entry1) memcpy(newinfo->entries[i], entry1, newinfo->size); *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; /* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) 
cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ipt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ipt_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; const void *loc_cpu_entry; unsigned int i = 0; struct ipt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); /* choose the copy that is on our node/cpu, ... 
* This choice is lazy (because current thread is * allowed to migrate to another cpu) */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; pos = userptr; size = total_size; xt_entry_foreach(iter, loc_cpu_entry, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, int *len) { int ret; struct compat_ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(AF_INET); t = xt_find_table_lock(net, AF_INET, get.name); if (t && !IS_ERR(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET); module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET); return ret; } static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ipt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IPT_SO_GET_REVISION_MATCH: case IPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET, rev.name, rev.revision, target, &ret), "ipt_%s", rev.name); break; } default: duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct xt_table *ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void 
*loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) { ret = -ENOMEM; goto out; } /* choose the copy on our node/cpu, but dont care about preemption */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } return new_table; out_free: xt_free_table_info(newinfo); out: return ERR_PTR(ret); } void ipt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ipt_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) ^ invert; } static bool icmp_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmphdr *ic; struct icmphdr _icmph; const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. 
*/ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->type, ic->code, !!(icmpinfo->invflags&IPT_ICMP_INV)); } static int icmp_checkentry(const struct xt_mtchk_param *par) { const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; } static struct xt_target ipt_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV4, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ipt_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV4, }, }; static struct nf_sockopt_ops ipt_sockopts = { .pf = PF_INET, .set_optmin = IPT_BASE_CTL, .set_optmax = IPT_SO_SET_MAX+1, .set = do_ipt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ipt_set_ctl, #endif .get_optmin = IPT_BASE_CTL, .get_optmax = IPT_SO_GET_MAX+1, .get = do_ipt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ipt_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ipt_builtin_mt[] __read_mostly = { { .name = "icmp", .match = icmp_match, .matchsize = sizeof(struct ipt_icmp), .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, }, }; static int __net_init ip_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV4); } static void __net_exit ip_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV4); } static struct pernet_operations ip_tables_net_ops = { .init = ip_tables_net_init, .exit = ip_tables_net_exit, }; static int __init ip_tables_init(void) { int ret; ret = register_pernet_subsys(&ip_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ipt_builtin_tg, 
ARRAY_SIZE(ipt_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ipt_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); err4: xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); err2: unregister_pernet_subsys(&ip_tables_net_ops); err1: return ret; } static void __exit ip_tables_fini(void) { nf_unregister_sockopt(&ipt_sockopts); xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); unregister_pernet_subsys(&ip_tables_net_ops); } EXPORT_SYMBOL(ipt_register_table); EXPORT_SYMBOL(ipt_unregister_table); EXPORT_SYMBOL(ipt_do_table); module_init(ip_tables_init); module_exit(ip_tables_fini);
gpl-2.0
mcaraman/kvm
arch/powerpc/platforms/512x/clock.c
4798
14068
/* * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby <jrigby@freescale.com> * * Implements the clk api defined in include/linux/clk.h * * Original based on linux/arch/arm/mach-integrator/clock.c * * Copyright (C) 2004 ARM Limited. * Written by Deep Blue Solutions Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/module.h> #include <linux/string.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/of_platform.h> #include <asm/mpc5xxx.h> #include <asm/clk_interface.h> #undef CLK_DEBUG static int clocks_initialized; #define CLK_HAS_RATE 0x1 /* has rate in MHz */ #define CLK_HAS_CTRL 0x2 /* has control reg and bit */ struct clk { struct list_head node; char name[32]; int flags; struct device *dev; unsigned long rate; struct module *owner; void (*calc) (struct clk *); struct clk *parent; int reg, bit; /* CLK_HAS_CTRL */ int div_shift; /* only used by generic_div_clk_calc */ }; static LIST_HEAD(clocks); static DEFINE_MUTEX(clocks_mutex); static struct clk *mpc5121_clk_get(struct device *dev, const char *id) { struct clk *p, *clk = ERR_PTR(-ENOENT); int dev_match = 0; int id_match = 0; if (dev == NULL || id == NULL) return clk; mutex_lock(&clocks_mutex); list_for_each_entry(p, &clocks, node) { if (dev == p->dev) dev_match++; if (strcmp(id, p->name) == 0) id_match++; if ((dev_match || id_match) && try_module_get(p->owner)) { clk = p; break; } } mutex_unlock(&clocks_mutex); return clk; } #ifdef CLK_DEBUG static void dump_clocks(void) { struct clk *p; mutex_lock(&clocks_mutex); printk(KERN_INFO "CLOCKS:\n"); list_for_each_entry(p, &clocks, node) { pr_info(" %s=%ld", p->name, p->rate); if (p->parent) pr_cont(" %s=%ld", 
p->parent->name, p->parent->rate); if (p->flags & CLK_HAS_CTRL) pr_cont(" reg/bit=%d/%d", p->reg, p->bit); pr_cont("\n"); } mutex_unlock(&clocks_mutex); } #define DEBUG_CLK_DUMP() dump_clocks() #else #define DEBUG_CLK_DUMP() #endif static void mpc5121_clk_put(struct clk *clk) { module_put(clk->owner); } #define NRPSC 12 struct mpc512x_clockctl { u32 spmr; /* System PLL Mode Reg */ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */ u32 scfr1; /* System Clk Freq Reg 1 */ u32 scfr2; /* System Clk Freq Reg 2 */ u32 reserved; u32 bcr; /* Bread Crumb Reg */ u32 pccr[NRPSC]; /* PSC Clk Ctrl Reg 0-11 */ u32 spccr; /* SPDIF Clk Ctrl Reg */ u32 cccr; /* CFM Clk Ctrl Reg */ u32 dccr; /* DIU Clk Cnfg Reg */ }; struct mpc512x_clockctl __iomem *clockctl; static int mpc5121_clk_enable(struct clk *clk) { unsigned int mask; if (clk->flags & CLK_HAS_CTRL) { mask = in_be32(&clockctl->sccr[clk->reg]); mask |= 1 << clk->bit; out_be32(&clockctl->sccr[clk->reg], mask); } return 0; } static void mpc5121_clk_disable(struct clk *clk) { unsigned int mask; if (clk->flags & CLK_HAS_CTRL) { mask = in_be32(&clockctl->sccr[clk->reg]); mask &= ~(1 << clk->bit); out_be32(&clockctl->sccr[clk->reg], mask); } } static unsigned long mpc5121_clk_get_rate(struct clk *clk) { if (clk->flags & CLK_HAS_RATE) return clk->rate; else return 0; } static long mpc5121_clk_round_rate(struct clk *clk, unsigned long rate) { return rate; } static int mpc5121_clk_set_rate(struct clk *clk, unsigned long rate) { return 0; } static int clk_register(struct clk *clk) { mutex_lock(&clocks_mutex); list_add(&clk->node, &clocks); mutex_unlock(&clocks_mutex); return 0; } static unsigned long spmf_mult(void) { /* * Convert spmf to multiplier */ static int spmf_to_mult[] = { 68, 1, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64 }; int spmf = (clockctl->spmr >> 24) & 0xf; return spmf_to_mult[spmf]; } static unsigned long sysdiv_div_x_2(void) { /* * Convert sysdiv to divisor x 2 * Some divisors have fractional parts so * multiply 
by 2 then divide by this value */ static int sysdiv_to_div_x_2[] = { 4, 5, 6, 7, 8, 9, 10, 14, 12, 16, 18, 22, 20, 24, 26, 30, 28, 32, 34, 38, 36, 40, 42, 46, 44, 48, 50, 54, 52, 56, 58, 62, 60, 64, 66, }; int sysdiv = (clockctl->scfr2 >> 26) & 0x3f; return sysdiv_to_div_x_2[sysdiv]; } static unsigned long ref_to_sys(unsigned long rate) { rate *= spmf_mult(); rate *= 2; rate /= sysdiv_div_x_2(); return rate; } static unsigned long sys_to_ref(unsigned long rate) { rate *= sysdiv_div_x_2(); rate /= 2; rate /= spmf_mult(); return rate; } static long ips_to_ref(unsigned long rate) { int ips_div = (clockctl->scfr1 >> 23) & 0x7; rate *= ips_div; /* csb_clk = ips_clk * ips_div */ rate *= 2; /* sys_clk = csb_clk * 2 */ return sys_to_ref(rate); } static unsigned long devtree_getfreq(char *clockname) { struct device_node *np; const unsigned int *prop; unsigned int val = 0; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr"); if (np) { prop = of_get_property(np, clockname, NULL); if (prop) val = *prop; of_node_put(np); } return val; } static void ref_clk_calc(struct clk *clk) { unsigned long rate; rate = devtree_getfreq("bus-frequency"); if (rate == 0) { printk(KERN_ERR "No bus-frequency in dev tree\n"); clk->rate = 0; return; } clk->rate = ips_to_ref(rate); } static struct clk ref_clk = { .name = "ref_clk", .calc = ref_clk_calc, }; static void sys_clk_calc(struct clk *clk) { clk->rate = ref_to_sys(ref_clk.rate); } static struct clk sys_clk = { .name = "sys_clk", .calc = sys_clk_calc, }; static void diu_clk_calc(struct clk *clk) { int diudiv_x_2 = clockctl->scfr1 & 0xff; unsigned long rate; rate = sys_clk.rate; rate *= 2; rate /= diudiv_x_2; clk->rate = rate; } static void viu_clk_calc(struct clk *clk) { unsigned long rate; rate = sys_clk.rate; rate /= 2; clk->rate = rate; } static void half_clk_calc(struct clk *clk) { clk->rate = clk->parent->rate / 2; } static void generic_div_clk_calc(struct clk *clk) { int div = (clockctl->scfr1 >> clk->div_shift) & 0x7; 
clk->rate = clk->parent->rate / div; } static void unity_clk_calc(struct clk *clk) { clk->rate = clk->parent->rate; } static struct clk csb_clk = { .name = "csb_clk", .calc = half_clk_calc, .parent = &sys_clk, }; static void e300_clk_calc(struct clk *clk) { int spmf = (clockctl->spmr >> 16) & 0xf; int ratex2 = clk->parent->rate * spmf; clk->rate = ratex2 / 2; } static struct clk e300_clk = { .name = "e300_clk", .calc = e300_clk_calc, .parent = &csb_clk, }; static struct clk ips_clk = { .name = "ips_clk", .calc = generic_div_clk_calc, .parent = &csb_clk, .div_shift = 23, }; /* * Clocks controlled by SCCR1 (.reg = 0) */ static struct clk lpc_clk = { .name = "lpc_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 30, .calc = generic_div_clk_calc, .parent = &ips_clk, .div_shift = 11, }; static struct clk nfc_clk = { .name = "nfc_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 29, .calc = generic_div_clk_calc, .parent = &ips_clk, .div_shift = 8, }; static struct clk pata_clk = { .name = "pata_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 28, .calc = unity_clk_calc, .parent = &ips_clk, }; /* * PSC clocks (bits 27 - 16) * are setup elsewhere */ static struct clk sata_clk = { .name = "sata_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 14, .calc = unity_clk_calc, .parent = &ips_clk, }; static struct clk fec_clk = { .name = "fec_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 13, .calc = unity_clk_calc, .parent = &ips_clk, }; static struct clk pci_clk = { .name = "pci_clk", .flags = CLK_HAS_CTRL, .reg = 0, .bit = 11, .calc = generic_div_clk_calc, .parent = &csb_clk, .div_shift = 20, }; /* * Clocks controlled by SCCR2 (.reg = 1) */ static struct clk diu_clk = { .name = "diu_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 31, .calc = diu_clk_calc, }; static struct clk viu_clk = { .name = "viu_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 18, .calc = viu_clk_calc, }; static struct clk axe_clk = { .name = "axe_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 30, .calc = unity_clk_calc, 
.parent = &csb_clk, }; static struct clk usb1_clk = { .name = "usb1_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 28, .calc = unity_clk_calc, .parent = &csb_clk, }; static struct clk usb2_clk = { .name = "usb2_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 27, .calc = unity_clk_calc, .parent = &csb_clk, }; static struct clk i2c_clk = { .name = "i2c_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 26, .calc = unity_clk_calc, .parent = &ips_clk, }; static struct clk mscan_clk = { .name = "mscan_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 25, .calc = unity_clk_calc, .parent = &ips_clk, }; static struct clk sdhc_clk = { .name = "sdhc_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 24, .calc = unity_clk_calc, .parent = &ips_clk, }; static struct clk mbx_bus_clk = { .name = "mbx_bus_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 22, .calc = half_clk_calc, .parent = &csb_clk, }; static struct clk mbx_clk = { .name = "mbx_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 21, .calc = unity_clk_calc, .parent = &csb_clk, }; static struct clk mbx_3d_clk = { .name = "mbx_3d_clk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 20, .calc = generic_div_clk_calc, .parent = &mbx_bus_clk, .div_shift = 14, }; static void psc_mclk_in_calc(struct clk *clk) { clk->rate = devtree_getfreq("psc_mclk_in"); if (!clk->rate) clk->rate = 25000000; } static struct clk psc_mclk_in = { .name = "psc_mclk_in", .calc = psc_mclk_in_calc, }; static struct clk spdif_txclk = { .name = "spdif_txclk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 23, }; static struct clk spdif_rxclk = { .name = "spdif_rxclk", .flags = CLK_HAS_CTRL, .reg = 1, .bit = 23, }; static void ac97_clk_calc(struct clk *clk) { /* ac97 bit clock is always 24.567 MHz */ clk->rate = 24567000; } static struct clk ac97_clk = { .name = "ac97_clk_in", .calc = ac97_clk_calc, }; struct clk *rate_clks[] = { &ref_clk, &sys_clk, &diu_clk, &viu_clk, &csb_clk, &e300_clk, &ips_clk, &fec_clk, &sata_clk, &pata_clk, &nfc_clk, &lpc_clk, &mbx_bus_clk, &mbx_clk, &mbx_3d_clk, 
&axe_clk, &usb1_clk, &usb2_clk, &i2c_clk, &mscan_clk, &sdhc_clk, &pci_clk, &psc_mclk_in, &spdif_txclk, &spdif_rxclk, &ac97_clk, NULL }; static void rate_clk_init(struct clk *clk) { if (clk->calc) { clk->calc(clk); clk->flags |= CLK_HAS_RATE; clk_register(clk); } else { printk(KERN_WARNING "Could not initialize clk %s without a calc routine\n", clk->name); } } static void rate_clks_init(void) { struct clk **cpp, *clk; cpp = rate_clks; while ((clk = *cpp++)) rate_clk_init(clk); } /* * There are two clk enable registers with 32 enable bits each * psc clocks and device clocks are all stored in dev_clks */ struct clk dev_clks[2][32]; /* * Given a psc number return the dev_clk * associated with it */ static struct clk *psc_dev_clk(int pscnum) { int reg, bit; struct clk *clk; reg = 0; bit = 27 - pscnum; clk = &dev_clks[reg][bit]; clk->reg = 0; clk->bit = bit; return clk; } /* * PSC clock rate calculation */ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np) { unsigned long mclk_src = sys_clk.rate; unsigned long mclk_div; /* * Can only change value of mclk divider * when the divider is disabled. 
* * Zero is not a valid divider so minimum * divider is 1 * * disable/set divider/enable */ out_be32(&clockctl->pccr[pscnum], 0); out_be32(&clockctl->pccr[pscnum], 0x00020000); out_be32(&clockctl->pccr[pscnum], 0x00030000); if (clockctl->pccr[pscnum] & 0x80) { clk->rate = spdif_rxclk.rate; return; } switch ((clockctl->pccr[pscnum] >> 14) & 0x3) { case 0: mclk_src = sys_clk.rate; break; case 1: mclk_src = ref_clk.rate; break; case 2: mclk_src = psc_mclk_in.rate; break; case 3: mclk_src = spdif_txclk.rate; break; } mclk_div = ((clockctl->pccr[pscnum] >> 17) & 0x7fff) + 1; clk->rate = mclk_src / mclk_div; } /* * Find all psc nodes in device tree and assign a clock * with name "psc%d_mclk" and dev pointing at the device * returned from of_find_device_by_node */ static void psc_clks_init(void) { struct device_node *np; const u32 *cell_index; struct platform_device *ofdev; for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") { cell_index = of_get_property(np, "cell-index", NULL); if (cell_index) { int pscnum = *cell_index; struct clk *clk = psc_dev_clk(pscnum); clk->flags = CLK_HAS_RATE | CLK_HAS_CTRL; ofdev = of_find_device_by_node(np); clk->dev = &ofdev->dev; /* * AC97 is special rate clock does * not go through normal path */ if (strcmp("ac97", np->name) == 0) clk->rate = ac97_clk.rate; else psc_calc_rate(clk, pscnum, np); sprintf(clk->name, "psc%d_mclk", pscnum); clk_register(clk); clk_enable(clk); } } } static struct clk_interface mpc5121_clk_functions = { .clk_get = mpc5121_clk_get, .clk_enable = mpc5121_clk_enable, .clk_disable = mpc5121_clk_disable, .clk_get_rate = mpc5121_clk_get_rate, .clk_put = mpc5121_clk_put, .clk_round_rate = mpc5121_clk_round_rate, .clk_set_rate = mpc5121_clk_set_rate, .clk_set_parent = NULL, .clk_get_parent = NULL, }; int __init mpc5121_clk_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock"); if (np) { clockctl = of_iomap(np, 0); of_node_put(np); } if (!clockctl) { printk(KERN_ERR "Could not 
map clock control registers\n"); return 0; } rate_clks_init(); psc_clks_init(); /* leave clockctl mapped forever */ /*iounmap(clockctl); */ DEBUG_CLK_DUMP(); clocks_initialized++; clk_functions = mpc5121_clk_functions; return 0; }
gpl-2.0
utkaar099/m7ul_kernel
drivers/video/via/via_aux_vt1631.c
9662
1347
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * driver for VIA VT1631 LVDS Transmitter */ #include <linux/slab.h> #include "via_aux.h" static const char *name = "VT1631 LVDS Transmitter"; void via_aux_vt1631_probe(struct via_aux_bus *bus) { struct via_aux_drv drv = { .bus = bus, .addr = 0x38, .name = name}; /* check vendor id and device id */ const u8 id[] = {0x06, 0x11, 0x91, 0x31}, len = ARRAY_SIZE(id); u8 tmp[len]; if (!via_aux_read(&drv, 0x00, tmp, len) || memcmp(id, tmp, len)) return; printk(KERN_INFO "viafb: Found %s\n", name); via_aux_add(&drv); }
gpl-2.0
minipli/linux-grsec
sound/soc/rockchip/rockchip_rt5645.c
191
5987
/* * Rockchip machine ASoC driver for boards using a RT5645/RT5650 CODEC. * * Copyright (c) 2015, ROCKCHIP CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "rockchip_i2s.h" #define DRV_NAME "rockchip-snd-rt5645" static struct snd_soc_jack headset_jack; /* Jack detect via rt5645 driver. 
*/ extern int rt5645_set_jack_detect(struct snd_soc_codec *codec, struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack, struct snd_soc_jack *btn_jack); static const struct snd_soc_dapm_widget rk_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphones", NULL), SND_SOC_DAPM_SPK("Speakers", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Int Mic", NULL), }; static const struct snd_soc_dapm_route rk_audio_map[] = { /* Input Lines */ {"DMIC L2", NULL, "Int Mic"}, {"DMIC R2", NULL, "Int Mic"}, {"RECMIXL", NULL, "Headset Mic"}, {"RECMIXR", NULL, "Headset Mic"}, /* Output Lines */ {"Headphones", NULL, "HPOR"}, {"Headphones", NULL, "HPOL"}, {"Speakers", NULL, "SPOL"}, {"Speakers", NULL, "SPOR"}, }; static const struct snd_kcontrol_new rk_mc_controls[] = { SOC_DAPM_PIN_SWITCH("Headphones"), SOC_DAPM_PIN_SWITCH("Speakers"), SOC_DAPM_PIN_SWITCH("Headset Mic"), SOC_DAPM_PIN_SWITCH("Int Mic"), }; static int rk_aif1_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; int mclk; switch (params_rate(params)) { case 8000: case 16000: case 24000: case 32000: case 48000: case 64000: case 96000: mclk = 12288000; break; case 11025: case 22050: case 44100: case 88200: mclk = 11289600; break; default: return -EINVAL; } ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk, SND_SOC_CLOCK_OUT); if (ret < 0) { dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret); return ret; } ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret); return ret; } return ret; } static int rk_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_card *card = runtime->card; int ret; /* Enable Headset and 4 Buttons Jack detection */ ret = snd_soc_card_jack_new(card, "Headset Jack", SND_JACK_HEADPHONE | 
SND_JACK_MICROPHONE | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, &headset_jack, NULL, 0); if (ret) { dev_err(card->dev, "New Headset Jack failed! (%d)\n", ret); return ret; } return rt5645_set_jack_detect(runtime->codec, &headset_jack, &headset_jack, &headset_jack); } static const struct snd_soc_ops rk_aif1_ops = { .hw_params = rk_aif1_hw_params, }; static struct snd_soc_dai_link rk_dailink = { .name = "rt5645", .stream_name = "rt5645 PCM", .codec_dai_name = "rt5645-aif1", .init = rk_init, .ops = &rk_aif1_ops, /* set rt5645 as slave */ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, }; static struct snd_soc_card snd_soc_card_rk = { .name = "I2S-RT5650", .owner = THIS_MODULE, .dai_link = &rk_dailink, .num_links = 1, .dapm_widgets = rk_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets), .dapm_routes = rk_audio_map, .num_dapm_routes = ARRAY_SIZE(rk_audio_map), .controls = rk_mc_controls, .num_controls = ARRAY_SIZE(rk_mc_controls), }; static int snd_rk_mc_probe(struct platform_device *pdev) { int ret = 0; struct snd_soc_card *card = &snd_soc_card_rk; struct device_node *np = pdev->dev.of_node; /* register the soc card */ card->dev = &pdev->dev; rk_dailink.codec_of_node = of_parse_phandle(np, "rockchip,audio-codec", 0); if (!rk_dailink.codec_of_node) { dev_err(&pdev->dev, "Property 'rockchip,audio-codec' missing or invalid\n"); return -EINVAL; } rk_dailink.cpu_of_node = of_parse_phandle(np, "rockchip,i2s-controller", 0); if (!rk_dailink.cpu_of_node) { dev_err(&pdev->dev, "Property 'rockchip,i2s-controller' missing or invalid\n"); return -EINVAL; } rk_dailink.platform_of_node = rk_dailink.cpu_of_node; ret = snd_soc_of_parse_card_name(card, "rockchip,model"); if (ret) { dev_err(&pdev->dev, "Soc parse card name failed %d\n", ret); return ret; } ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "Soc register card failed %d\n", ret); return ret; } return ret; } static const 
struct of_device_id rockchip_rt5645_of_match[] = { { .compatible = "rockchip,rockchip-audio-rt5645", }, {}, }; MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match); static struct platform_driver snd_rk_mc_driver = { .probe = snd_rk_mc_probe, .driver = { .name = DRV_NAME, .pm = &snd_soc_pm_ops, .of_match_table = rockchip_rt5645_of_match, }, }; module_platform_driver(snd_rk_mc_driver); MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip rt5645 machine ASoC driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
loxdegio/linux-patched
drivers/media/platform/exynos4-is/media-dev.c
447
37858
/* * S5P/EXYNOS4 SoC series camera host interface media device driver * * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd. * Author: Sylwester Nawrocki <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 2 of the License, * or (at your option) any later version. */ #include <linux/bug.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/types.h> #include <linux/slab.h> #include <media/v4l2-async.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-of.h> #include <media/media-device.h> #include <media/exynos-fimc.h> #include "media-dev.h" #include "fimc-core.h" #include "fimc-is.h" #include "fimc-lite.h" #include "mipi-csis.h" /* Set up image sensor subdev -> FIMC capture node notifications. */ static void __setup_sensor_notification(struct fimc_md *fmd, struct v4l2_subdev *sensor, struct v4l2_subdev *fimc_sd) { struct fimc_source_info *src_inf; struct fimc_sensor_info *md_si; unsigned long flags; src_inf = v4l2_get_subdev_hostdata(sensor); if (!src_inf || WARN_ON(fmd == NULL)) return; md_si = source_to_sensor_info(src_inf); spin_lock_irqsave(&fmd->slock, flags); md_si->host = v4l2_get_subdevdata(fimc_sd); spin_unlock_irqrestore(&fmd->slock, flags); } /** * fimc_pipeline_prepare - update pipeline information with subdevice pointers * @me: media entity terminating the pipeline * * Caller holds the graph mutex. 
*/ static void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me) { struct fimc_md *fmd = entity_to_fimc_mdev(me); struct v4l2_subdev *sd; struct v4l2_subdev *sensor = NULL; int i; for (i = 0; i < IDX_MAX; i++) p->subdevs[i] = NULL; while (1) { struct media_pad *pad = NULL; /* Find remote source pad */ for (i = 0; i < me->num_pads; i++) { struct media_pad *spad = &me->pads[i]; if (!(spad->flags & MEDIA_PAD_FL_SINK)) continue; pad = media_entity_remote_pad(spad); if (pad) break; } if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); switch (sd->grp_id) { case GRP_ID_SENSOR: sensor = sd; /* fall through */ case GRP_ID_FIMC_IS_SENSOR: p->subdevs[IDX_SENSOR] = sd; break; case GRP_ID_CSIS: p->subdevs[IDX_CSIS] = sd; break; case GRP_ID_FLITE: p->subdevs[IDX_FLITE] = sd; break; case GRP_ID_FIMC: p->subdevs[IDX_FIMC] = sd; break; case GRP_ID_FIMC_IS: p->subdevs[IDX_IS_ISP] = sd; break; default: break; } me = &sd->entity; if (me->num_pads == 1) break; } if (sensor && p->subdevs[IDX_FIMC]) __setup_sensor_notification(fmd, sensor, p->subdevs[IDX_FIMC]); } /** * __subdev_set_power - change power state of a single subdev * @sd: subdevice to change power state for * @on: 1 to enable power or 0 to disable * * Return result of s_power subdev operation or -ENXIO if sd argument * is NULL. Return 0 if the subdevice does not implement s_power. */ static int __subdev_set_power(struct v4l2_subdev *sd, int on) { int *use_count; int ret; if (sd == NULL) return -ENXIO; use_count = &sd->entity.use_count; if (on && (*use_count)++ > 0) return 0; else if (!on && (*use_count == 0 || --(*use_count) > 0)) return 0; ret = v4l2_subdev_call(sd, core, s_power, on); return ret != -ENOIOCTLCMD ? 
ret : 0; } /** * fimc_pipeline_s_power - change power state of all pipeline subdevs * @fimc: fimc device terminating the pipeline * @state: true to power on, false to power off * * Needs to be called with the graph mutex held. */ static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on) { static const u8 seq[2][IDX_MAX - 1] = { { IDX_IS_ISP, IDX_SENSOR, IDX_CSIS, IDX_FLITE }, { IDX_CSIS, IDX_FLITE, IDX_SENSOR, IDX_IS_ISP }, }; int i, ret = 0; if (p->subdevs[IDX_SENSOR] == NULL) return -ENXIO; for (i = 0; i < IDX_MAX - 1; i++) { unsigned int idx = seq[on][i]; ret = __subdev_set_power(p->subdevs[idx], on); if (ret < 0 && ret != -ENXIO) goto error; } return 0; error: for (; i >= 0; i--) { unsigned int idx = seq[on][i]; __subdev_set_power(p->subdevs[idx], !on); } return ret; } /** * __fimc_pipeline_open - update the pipeline information, enable power * of all pipeline subdevs and the sensor clock * @me: media entity to start graph walk with * @prepare: true to walk the current pipeline and acquire all subdevs * * Called with the graph mutex held. 
*/ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep, struct media_entity *me, bool prepare) { struct fimc_md *fmd = entity_to_fimc_mdev(me); struct fimc_pipeline *p = to_fimc_pipeline(ep); struct v4l2_subdev *sd; int ret; if (WARN_ON(p == NULL || me == NULL)) return -EINVAL; if (prepare) fimc_pipeline_prepare(p, me); sd = p->subdevs[IDX_SENSOR]; if (sd == NULL) return -EINVAL; /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) { ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]); if (ret < 0) return ret; } ret = fimc_pipeline_s_power(p, 1); if (!ret) return 0; if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]); return ret; } /** * __fimc_pipeline_close - disable the sensor clock and pipeline power * @fimc: fimc device terminating the pipeline * * Disable power of all subdevs and turn the external sensor clock off. */ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep) { struct fimc_pipeline *p = to_fimc_pipeline(ep); struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL; struct fimc_md *fmd; int ret; if (sd == NULL) { pr_warn("%s(): No sensor subdev\n", __func__); return 0; } ret = fimc_pipeline_s_power(p, 0); fmd = entity_to_fimc_mdev(&sd->entity); /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]); return ret == -ENXIO ? 
0 : ret; } /** * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs * @pipeline: video pipeline structure * @on: passed as the s_stream() callback argument */ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on) { static const u8 seq[2][IDX_MAX] = { { IDX_FIMC, IDX_SENSOR, IDX_IS_ISP, IDX_CSIS, IDX_FLITE }, { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP }, }; struct fimc_pipeline *p = to_fimc_pipeline(ep); int i, ret = 0; if (p->subdevs[IDX_SENSOR] == NULL) return -ENODEV; for (i = 0; i < IDX_MAX; i++) { unsigned int idx = seq[on][i]; ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) goto error; } return 0; error: for (; i >= 0; i--) { unsigned int idx = seq[on][i]; v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on); } return ret; } /* Media pipeline operations for the FIMC/FIMC-LITE video device driver */ static const struct exynos_media_pipeline_ops fimc_pipeline_ops = { .open = __fimc_pipeline_open, .close = __fimc_pipeline_close, .set_stream = __fimc_pipeline_s_stream, }; static struct exynos_media_pipeline *fimc_md_pipeline_create( struct fimc_md *fmd) { struct fimc_pipeline *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return NULL; list_add_tail(&p->list, &fmd->pipelines); p->ep.ops = &fimc_pipeline_ops; return &p->ep; } static void fimc_md_pipelines_free(struct fimc_md *fmd) { while (!list_empty(&fmd->pipelines)) { struct fimc_pipeline *p; p = list_entry(fmd->pipelines.next, typeof(*p), list); list_del(&p->list); kfree(p); } } /* Parse port node and register as a sub-device any sensor specified there. */ static int fimc_md_parse_port_node(struct fimc_md *fmd, struct device_node *port, unsigned int index) { struct fimc_source_info *pd = &fmd->sensor[index].pdata; struct device_node *rem, *ep, *np; struct v4l2_of_endpoint endpoint; /* Assume here a port node can have only one endpoint node. 
*/ ep = of_get_next_child(port, NULL); if (!ep) return 0; v4l2_of_parse_endpoint(ep, &endpoint); if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS) return -EINVAL; pd->mux_id = (endpoint.base.port - 1) & 0x1; rem = of_graph_get_remote_port_parent(ep); of_node_put(ep); if (rem == NULL) { v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n", ep->full_name); return 0; } if (fimc_input_is_parallel(endpoint.base.port)) { if (endpoint.bus_type == V4L2_MBUS_PARALLEL) pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601; else pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656; pd->flags = endpoint.bus.parallel.flags; } else if (fimc_input_is_mipi_csi(endpoint.base.port)) { /* * MIPI CSI-2: only input mux selection and * the sensor's clock frequency is needed. */ pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2; } else { v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n", endpoint.base.port, rem->full_name); } /* * For FIMC-IS handled sensors, that are placed under i2c-isp device * node, FIMC is connected to the FIMC-IS through its ISP Writeback * input. Sensors are attached to the FIMC-LITE hostdata interface * directly or through MIPI-CSIS, depending on the external media bus * used. This needs to be handled in a more reliable way, not by just * checking parent's node name. 
*/ np = of_get_parent(rem); if (np && !of_node_cmp(np->name, "i2c-isp")) pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK; else pd->fimc_bus_type = pd->sensor_bus_type; if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) return -EINVAL; fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF; fmd->sensor[index].asd.match.of.node = rem; fmd->async_subdevs[index] = &fmd->sensor[index].asd; fmd->num_sensors++; of_node_put(rem); return 0; } /* Register all SoC external sub-devices */ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) { struct device_node *parent = fmd->pdev->dev.of_node; struct device_node *node, *ports; int index = 0; int ret; /* * Runtime resume one of the FIMC entities to make sure * the sclk_cam clocks are not globally disabled. */ if (!fmd->pmf) return -ENXIO; ret = pm_runtime_get_sync(fmd->pmf); if (ret < 0) return ret; fmd->num_sensors = 0; /* Attach sensors linked to MIPI CSI-2 receivers */ for_each_available_child_of_node(parent, node) { struct device_node *port; if (of_node_cmp(node->name, "csis")) continue; /* The csis node can have only port subnode. */ port = of_get_next_child(node, NULL); if (!port) continue; ret = fimc_md_parse_port_node(fmd, port, index); if (ret < 0) goto rpm_put; index++; } /* Attach sensors listed in the parallel-ports node */ ports = of_get_child_by_name(parent, "parallel-ports"); if (!ports) goto rpm_put; for_each_child_of_node(ports, node) { ret = fimc_md_parse_port_node(fmd, node, index); if (ret < 0) break; index++; } rpm_put: pm_runtime_put(fmd->pmf); return ret; } static int __of_get_csis_id(struct device_node *np) { u32 reg = 0; np = of_get_child_by_name(np, "port"); if (!np) return -EINVAL; of_property_read_u32(np, "reg", &reg); return reg - FIMC_INPUT_MIPI_CSI2_0; } /* * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration. 
*/ static int register_fimc_lite_entity(struct fimc_md *fmd, struct fimc_lite *fimc_lite) { struct v4l2_subdev *sd; struct exynos_media_pipeline *ep; int ret; if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS || fmd->fimc_lite[fimc_lite->index])) return -EBUSY; sd = &fimc_lite->subdev; sd->grp_id = GRP_ID_FLITE; ep = fimc_md_pipeline_create(fmd); if (!ep) return -ENOMEM; v4l2_set_subdev_hostdata(sd, ep); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) fmd->fimc_lite[fimc_lite->index] = fimc_lite; else v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n", fimc_lite->index); return ret; } static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc) { struct v4l2_subdev *sd; struct exynos_media_pipeline *ep; int ret; if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id])) return -EBUSY; sd = &fimc->vid_cap.subdev; sd->grp_id = GRP_ID_FIMC; ep = fimc_md_pipeline_create(fmd); if (!ep) return -ENOMEM; v4l2_set_subdev_hostdata(sd, ep); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) { if (!fmd->pmf && fimc->pdev) fmd->pmf = &fimc->pdev->dev; fmd->fimc[fimc->id] = fimc; fimc->vid_cap.user_subdev_api = fmd->user_subdev_api; } else { v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n", fimc->id, ret); } return ret; } static int register_csis_entity(struct fimc_md *fmd, struct platform_device *pdev, struct v4l2_subdev *sd) { struct device_node *node = pdev->dev.of_node; int id, ret; id = node ? 
__of_get_csis_id(node) : max(0, pdev->id); if (WARN_ON(id < 0 || id >= CSIS_MAX_ENTITIES)) return -ENOENT; if (WARN_ON(fmd->csis[id].sd)) return -EBUSY; sd->grp_id = GRP_ID_CSIS; ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) fmd->csis[id].sd = sd; else v4l2_err(&fmd->v4l2_dev, "Failed to register MIPI-CSIS.%d (%d)\n", id, ret); return ret; } static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is) { struct v4l2_subdev *sd = &is->isp.subdev; struct exynos_media_pipeline *ep; int ret; /* Allocate pipeline object for the ISP capture video node. */ ep = fimc_md_pipeline_create(fmd); if (!ep) return -ENOMEM; v4l2_set_subdev_hostdata(sd, ep); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (ret) { v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC-ISP (%d)\n", ret); return ret; } fmd->fimc_is = is; return 0; } static int fimc_md_register_platform_entity(struct fimc_md *fmd, struct platform_device *pdev, int plat_entity) { struct device *dev = &pdev->dev; int ret = -EPROBE_DEFER; void *drvdata; /* Lock to ensure dev->driver won't change. 
*/ device_lock(dev); if (!dev->driver || !try_module_get(dev->driver->owner)) goto dev_unlock; drvdata = dev_get_drvdata(dev); /* Some subdev didn't probe successfully id drvdata is NULL */ if (drvdata) { switch (plat_entity) { case IDX_FIMC: ret = register_fimc_entity(fmd, drvdata); break; case IDX_FLITE: ret = register_fimc_lite_entity(fmd, drvdata); break; case IDX_CSIS: ret = register_csis_entity(fmd, pdev, drvdata); break; case IDX_IS_ISP: ret = register_fimc_is_entity(fmd, drvdata); break; default: ret = -ENODEV; } } module_put(dev->driver->owner); dev_unlock: device_unlock(dev); if (ret == -EPROBE_DEFER) dev_info(&fmd->pdev->dev, "deferring %s device registration\n", dev_name(dev)); else if (ret < 0) dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n", dev_name(dev), ret); return ret; } /* Register FIMC, FIMC-LITE and CSIS media entities */ static int fimc_md_register_platform_entities(struct fimc_md *fmd, struct device_node *parent) { struct device_node *node; int ret = 0; for_each_available_child_of_node(parent, node) { struct platform_device *pdev; int plat_entity = -1; pdev = of_find_device_by_node(node); if (!pdev) continue; /* If driver of any entity isn't ready try all again later. 
*/ if (!strcmp(node->name, CSIS_OF_NODE_NAME)) plat_entity = IDX_CSIS; else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME)) plat_entity = IDX_IS_ISP; else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME)) plat_entity = IDX_FLITE; else if (!strcmp(node->name, FIMC_OF_NODE_NAME) && !of_property_read_bool(node, "samsung,lcd-wb")) plat_entity = IDX_FIMC; if (plat_entity >= 0) ret = fimc_md_register_platform_entity(fmd, pdev, plat_entity); put_device(&pdev->dev); if (ret < 0) break; } return ret; } static void fimc_md_unregister_entities(struct fimc_md *fmd) { int i; for (i = 0; i < FIMC_MAX_DEVS; i++) { struct fimc_dev *dev = fmd->fimc[i]; if (dev == NULL) continue; v4l2_device_unregister_subdev(&dev->vid_cap.subdev); dev->vid_cap.ve.pipe = NULL; fmd->fimc[i] = NULL; } for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { struct fimc_lite *dev = fmd->fimc_lite[i]; if (dev == NULL) continue; v4l2_device_unregister_subdev(&dev->subdev); dev->ve.pipe = NULL; fmd->fimc_lite[i] = NULL; } for (i = 0; i < CSIS_MAX_ENTITIES; i++) { if (fmd->csis[i].sd == NULL) continue; v4l2_device_unregister_subdev(fmd->csis[i].sd); fmd->csis[i].sd = NULL; } if (fmd->fimc_is) v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev); v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n"); } /** * __fimc_md_create_fimc_links - create links to all FIMC entities * @fmd: fimc media device * @source: the source entity to create links to all fimc entities from * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null * @pad: the source entity pad index * @link_mask: bitmask of the fimc devices for which link should be enabled */ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd, struct media_entity *source, struct v4l2_subdev *sensor, int pad, int link_mask) { struct fimc_source_info *si = NULL; struct media_entity *sink; unsigned int flags = 0; int i, ret = 0; if (sensor) { si = v4l2_get_subdev_hostdata(sensor); /* Skip direct FIMC links in the logical FIMC-IS sensor path */ if (si && 
si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) ret = 1; } for (i = 0; !ret && i < FIMC_MAX_DEVS; i++) { if (!fmd->fimc[i]) continue; /* * Some FIMC variants are not fitted with camera capture * interface. Skip creating a link from sensor for those. */ if (!fmd->fimc[i]->variant->has_cam_if) continue; flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0; sink = &fmd->fimc[i]->vid_cap.subdev.entity; ret = media_entity_create_link(source, pad, sink, FIMC_SD_PAD_SINK_CAM, flags); if (ret) return ret; /* Notify FIMC capture subdev entity */ ret = media_entity_call(sink, link_setup, &sink->pads[0], &source->pads[pad], flags); if (ret) break; v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n", source->name, flags ? '=' : '-', sink->name); } for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { if (!fmd->fimc_lite[i]) continue; sink = &fmd->fimc_lite[i]->subdev.entity; ret = media_entity_create_link(source, pad, sink, FLITE_SD_PAD_SINK, 0); if (ret) return ret; /* Notify FIMC-LITE subdev entity */ ret = media_entity_call(sink, link_setup, &sink->pads[0], &source->pads[pad], 0); if (ret) break; v4l2_info(&fmd->v4l2_dev, "created link [%s] -> [%s]\n", source->name, sink->name); } return 0; } /* Create links from FIMC-LITE source pads to other entities */ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd) { struct media_entity *source, *sink; int i, ret = 0; for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { struct fimc_lite *fimc = fmd->fimc_lite[i]; if (fimc == NULL) continue; source = &fimc->subdev.entity; sink = &fimc->ve.vdev.entity; /* FIMC-LITE's subdev and video node */ ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_DMA, sink, 0, 0); if (ret) break; /* Link from FIMC-LITE to IS-ISP subdev */ sink = &fmd->fimc_is->isp.subdev.entity; ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_ISP, sink, 0, 0); if (ret) break; } return ret; } /* Create FIMC-IS links */ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd) { struct fimc_isp 
*isp = &fmd->fimc_is->isp; struct media_entity *source, *sink; int i, ret; source = &isp->subdev.entity; for (i = 0; i < FIMC_MAX_DEVS; i++) { if (fmd->fimc[i] == NULL) continue; /* Link from FIMC-IS-ISP subdev to FIMC */ sink = &fmd->fimc[i]->vid_cap.subdev.entity; ret = media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_FIFO, sink, FIMC_SD_PAD_SINK_FIFO, 0); if (ret) return ret; } /* Link from FIMC-IS-ISP subdev to fimc-is-isp.capture video node */ sink = &isp->video_capture.ve.vdev.entity; /* Skip this link if the fimc-is-isp video node driver isn't built-in */ if (sink->num_pads == 0) return 0; return media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_DMA, sink, 0, 0); } /** * fimc_md_create_links - create default links between registered entities * * Parallel interface sensor entities are connected directly to FIMC capture * entities. The sensors using MIPI CSIS bus are connected through immutable * link with CSI receiver entity specified by mux_id. Any registered CSIS * entity has a link to each registered FIMC capture entity. Enabled links * are created by default between each subsequent registered sensor and * subsequent FIMC capture entity. The number of default active links is * determined by the number of available sensors or FIMC entities, * whichever is less. 
*/ static int fimc_md_create_links(struct fimc_md *fmd) { struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL }; struct v4l2_subdev *sensor, *csis; struct fimc_source_info *pdata; struct media_entity *source, *sink; int i, pad, fimc_id = 0, ret = 0; u32 flags, link_mask = 0; for (i = 0; i < fmd->num_sensors; i++) { if (fmd->sensor[i].subdev == NULL) continue; sensor = fmd->sensor[i].subdev; pdata = v4l2_get_subdev_hostdata(sensor); if (!pdata) continue; source = NULL; switch (pdata->sensor_bus_type) { case FIMC_BUS_TYPE_MIPI_CSI2: if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES, "Wrong CSI channel id: %d\n", pdata->mux_id)) return -EINVAL; csis = fmd->csis[pdata->mux_id].sd; if (WARN(csis == NULL, "MIPI-CSI interface specified " "but s5p-csis module is not loaded!\n")) return -EINVAL; pad = sensor->entity.num_pads - 1; ret = media_entity_create_link(&sensor->entity, pad, &csis->entity, CSIS_PAD_SINK, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) return ret; v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n", sensor->entity.name, csis->entity.name); source = NULL; csi_sensors[pdata->mux_id] = sensor; break; case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656: source = &sensor->entity; pad = 0; break; default: v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n", pdata->sensor_bus_type); return -EINVAL; } if (source == NULL) continue; link_mask = 1 << fimc_id++; ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor, pad, link_mask); } for (i = 0; i < CSIS_MAX_ENTITIES; i++) { if (fmd->csis[i].sd == NULL) continue; source = &fmd->csis[i].sd->entity; pad = CSIS_PAD_SOURCE; sensor = csi_sensors[i]; link_mask = 1 << fimc_id++; ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor, pad, link_mask); } /* Create immutable links between each FIMC's subdev and video node */ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED; for (i = 0; i < FIMC_MAX_DEVS; i++) { if (!fmd->fimc[i]) continue; source = &fmd->fimc[i]->vid_cap.subdev.entity; sink = 
&fmd->fimc[i]->vid_cap.ve.vdev.entity; ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE, sink, 0, flags); if (ret) break; } ret = __fimc_md_create_flite_source_links(fmd); if (ret < 0) return ret; if (fmd->use_isp) ret = __fimc_md_create_fimc_is_links(fmd); return ret; } /* * The peripheral sensor and CAM_BLK (PIXELASYNCMx) clocks management. */ static void fimc_md_put_clocks(struct fimc_md *fmd) { int i = FIMC_MAX_CAMCLKS; while (--i >= 0) { if (IS_ERR(fmd->camclk[i].clock)) continue; clk_put(fmd->camclk[i].clock); fmd->camclk[i].clock = ERR_PTR(-EINVAL); } /* Writeback (PIXELASYNCMx) clocks */ for (i = 0; i < FIMC_MAX_WBCLKS; i++) { if (IS_ERR(fmd->wbclk[i])) continue; clk_put(fmd->wbclk[i]); fmd->wbclk[i] = ERR_PTR(-EINVAL); } } static int fimc_md_get_clocks(struct fimc_md *fmd) { struct device *dev = &fmd->pdev->dev; char clk_name[32]; struct clk *clock; int i, ret = 0; for (i = 0; i < FIMC_MAX_CAMCLKS; i++) fmd->camclk[i].clock = ERR_PTR(-EINVAL); for (i = 0; i < FIMC_MAX_CAMCLKS; i++) { snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i); clock = clk_get(dev, clk_name); if (IS_ERR(clock)) { dev_err(dev, "Failed to get clock: %s\n", clk_name); ret = PTR_ERR(clock); break; } fmd->camclk[i].clock = clock; } if (ret) fimc_md_put_clocks(fmd); if (!fmd->use_isp) return 0; /* * For now get only PIXELASYNCM1 clock (Writeback B/ISP), * leave PIXELASYNCM0 out for the LCD Writeback driver. 
*/ fmd->wbclk[CLK_IDX_WB_A] = ERR_PTR(-EINVAL); for (i = CLK_IDX_WB_B; i < FIMC_MAX_WBCLKS; i++) { snprintf(clk_name, sizeof(clk_name), "pxl_async%u", i); clock = clk_get(dev, clk_name); if (IS_ERR(clock)) { v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s\n", clk_name); ret = PTR_ERR(clock); break; } fmd->wbclk[i] = clock; } if (ret) fimc_md_put_clocks(fmd); return ret; } static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable) { struct exynos_video_entity *ve; struct fimc_pipeline *p; struct video_device *vdev; int ret; vdev = media_entity_to_video_device(entity); if (vdev->entity.use_count == 0) return 0; ve = vdev_to_exynos_video_entity(vdev); p = to_fimc_pipeline(ve->pipe); /* * Nothing to do if we are disabling the pipeline, some link * has been disconnected and p->subdevs array is cleared now. */ if (!enable && p->subdevs[IDX_SENSOR] == NULL) return 0; if (enable) ret = __fimc_pipeline_open(ve->pipe, entity, true); else ret = __fimc_pipeline_close(ve->pipe); if (ret == 0 && !enable) memset(p->subdevs, 0, sizeof(p->subdevs)); return ret; } /* Locking: called with entity->parent->graph_mutex mutex held. */ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable) { struct media_entity *entity_err = entity; struct media_entity_graph graph; int ret; /* * Walk current graph and call the pipeline open/close routine for each * opened video node that belongs to the graph of entities connected * through active links. This is needed as we cannot power on/off the * subdevs in random order. 
*/ media_entity_graph_walk_start(&graph, entity); while ((entity = media_entity_graph_walk_next(&graph))) { if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE) continue; ret = __fimc_md_modify_pipeline(entity, enable); if (ret < 0) goto err; } return 0; err: media_entity_graph_walk_start(&graph, entity_err); while ((entity_err = media_entity_graph_walk_next(&graph))) { if (media_entity_type(entity_err) != MEDIA_ENT_T_DEVNODE) continue; __fimc_md_modify_pipeline(entity_err, !enable); if (entity_err == entity) break; } return ret; } static int fimc_md_link_notify(struct media_link *link, unsigned int flags, unsigned int notification) { struct media_entity *sink = link->sink->entity; int ret = 0; /* Before link disconnection */ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) { if (!(flags & MEDIA_LNK_FL_ENABLED)) ret = __fimc_md_modify_pipelines(sink, false); #if 0 else /* TODO: Link state change validation */ #endif /* After link activation */ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && (link->flags & MEDIA_LNK_FL_ENABLED)) { ret = __fimc_md_modify_pipelines(sink, true); } return ret ? 
-EPIPE : 0; } static ssize_t fimc_md_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct fimc_md *fmd = platform_get_drvdata(pdev); if (fmd->user_subdev_api) return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE); return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE); } static ssize_t fimc_md_sysfs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); struct fimc_md *fmd = platform_get_drvdata(pdev); bool subdev_api; int i; if (!strcmp(buf, "vid-dev\n")) subdev_api = false; else if (!strcmp(buf, "sub-dev\n")) subdev_api = true; else return count; fmd->user_subdev_api = subdev_api; for (i = 0; i < FIMC_MAX_DEVS; i++) if (fmd->fimc[i]) fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api; return count; } /* * This device attribute is to select video pipeline configuration method. * There are following valid values: * vid-dev - for V4L2 video node API only, subdevice will be configured * by the host driver. * sub-dev - for media controller API, subdevs must be configured in user * space before starting streaming. 
*/ static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO, fimc_md_sysfs_show, fimc_md_sysfs_store); static int fimc_md_get_pinctrl(struct fimc_md *fmd) { struct device *dev = &fmd->pdev->dev; struct fimc_pinctrl *pctl = &fmd->pinctl; pctl->pinctrl = devm_pinctrl_get(dev); if (IS_ERR(pctl->pinctrl)) return PTR_ERR(pctl->pinctrl); pctl->state_default = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_DEFAULT); if (IS_ERR(pctl->state_default)) return PTR_ERR(pctl->state_default); pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_IDLE); return 0; } static int cam_clk_prepare(struct clk_hw *hw) { struct cam_clk *camclk = to_cam_clk(hw); int ret; if (camclk->fmd->pmf == NULL) return -ENODEV; ret = pm_runtime_get_sync(camclk->fmd->pmf); return ret < 0 ? ret : 0; } static void cam_clk_unprepare(struct clk_hw *hw) { struct cam_clk *camclk = to_cam_clk(hw); if (camclk->fmd->pmf == NULL) return; pm_runtime_put_sync(camclk->fmd->pmf); } static const struct clk_ops cam_clk_ops = { .prepare = cam_clk_prepare, .unprepare = cam_clk_unprepare, }; static void fimc_md_unregister_clk_provider(struct fimc_md *fmd) { struct cam_clk_provider *cp = &fmd->clk_provider; unsigned int i; if (cp->of_node) of_clk_del_provider(cp->of_node); for (i = 0; i < cp->num_clocks; i++) clk_unregister(cp->clks[i]); } static int fimc_md_register_clk_provider(struct fimc_md *fmd) { struct cam_clk_provider *cp = &fmd->clk_provider; struct device *dev = &fmd->pdev->dev; int i, ret; for (i = 0; i < FIMC_MAX_CAMCLKS; i++) { struct cam_clk *camclk = &cp->camclk[i]; struct clk_init_data init; const char *p_name; ret = of_property_read_string_index(dev->of_node, "clock-output-names", i, &init.name); if (ret < 0) break; p_name = __clk_get_name(fmd->camclk[i].clock); /* It's safe since clk_register() will duplicate the string. 
*/ init.parent_names = &p_name; init.num_parents = 1; init.ops = &cam_clk_ops; init.flags = CLK_SET_RATE_PARENT; camclk->hw.init = &init; camclk->fmd = fmd; cp->clks[i] = clk_register(NULL, &camclk->hw); if (IS_ERR(cp->clks[i])) { dev_err(dev, "failed to register clock: %s (%ld)\n", init.name, PTR_ERR(cp->clks[i])); ret = PTR_ERR(cp->clks[i]); goto err; } cp->num_clocks++; } if (cp->num_clocks == 0) { dev_warn(dev, "clk provider not registered\n"); return 0; } cp->clk_data.clks = cp->clks; cp->clk_data.clk_num = cp->num_clocks; cp->of_node = dev->of_node; ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, &cp->clk_data); if (ret == 0) return 0; err: fimc_md_unregister_clk_provider(fmd); return ret; } static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { struct fimc_md *fmd = notifier_to_fimc_md(notifier); struct fimc_sensor_info *si = NULL; int i; /* Find platform data for this sensor subdev */ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++) if (fmd->sensor[i].asd.match.of.node == subdev->dev->of_node) si = &fmd->sensor[i]; if (si == NULL) return -EINVAL; v4l2_set_subdev_hostdata(subdev, &si->pdata); if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) subdev->grp_id = GRP_ID_FIMC_IS_SENSOR; else subdev->grp_id = GRP_ID_SENSOR; si->subdev = subdev; v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n", subdev->name, fmd->num_sensors); fmd->num_sensors++; return 0; } static int subdev_notifier_complete(struct v4l2_async_notifier *notifier) { struct fimc_md *fmd = notifier_to_fimc_md(notifier); int ret; mutex_lock(&fmd->media_dev.graph_mutex); ret = fimc_md_create_links(fmd); if (ret < 0) goto unlock; ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev); unlock: mutex_unlock(&fmd->media_dev.graph_mutex); return ret; } static int fimc_md_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct v4l2_device *v4l2_dev; struct fimc_md *fmd; 
int ret; fmd = devm_kzalloc(dev, sizeof(*fmd), GFP_KERNEL); if (!fmd) return -ENOMEM; spin_lock_init(&fmd->slock); INIT_LIST_HEAD(&fmd->pipelines); fmd->pdev = pdev; strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC", sizeof(fmd->media_dev.model)); fmd->media_dev.link_notify = fimc_md_link_notify; fmd->media_dev.dev = dev; v4l2_dev = &fmd->v4l2_dev; v4l2_dev->mdev = &fmd->media_dev; v4l2_dev->notify = fimc_sensor_notify; strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name)); fmd->use_isp = fimc_md_is_isp_available(dev->of_node); fmd->user_subdev_api = true; ret = v4l2_device_register(dev, &fmd->v4l2_dev); if (ret < 0) { v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret); return ret; } ret = media_device_register(&fmd->media_dev); if (ret < 0) { v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret); goto err_v4l2_dev; } ret = fimc_md_get_clocks(fmd); if (ret) goto err_md; ret = fimc_md_get_pinctrl(fmd); if (ret < 0) { if (ret != EPROBE_DEFER) dev_err(dev, "Failed to get pinctrl: %d\n", ret); goto err_clk; } platform_set_drvdata(pdev, fmd); /* Protect the media graph while we're registering entities */ mutex_lock(&fmd->media_dev.graph_mutex); ret = fimc_md_register_platform_entities(fmd, dev->of_node); if (ret) { mutex_unlock(&fmd->media_dev.graph_mutex); goto err_clk; } ret = fimc_md_register_sensor_entities(fmd); if (ret) { mutex_unlock(&fmd->media_dev.graph_mutex); goto err_m_ent; } mutex_unlock(&fmd->media_dev.graph_mutex); ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode); if (ret) goto err_m_ent; /* * FIMC platform devices need to be registered before the sclk_cam * clocks provider, as one of these devices needs to be activated * to enable the clock. 
*/ ret = fimc_md_register_clk_provider(fmd); if (ret < 0) { v4l2_err(v4l2_dev, "clock provider registration failed\n"); goto err_attr; } if (fmd->num_sensors > 0) { fmd->subdev_notifier.subdevs = fmd->async_subdevs; fmd->subdev_notifier.num_subdevs = fmd->num_sensors; fmd->subdev_notifier.bound = subdev_notifier_bound; fmd->subdev_notifier.complete = subdev_notifier_complete; fmd->num_sensors = 0; ret = v4l2_async_notifier_register(&fmd->v4l2_dev, &fmd->subdev_notifier); if (ret) goto err_clk_p; } return 0; err_clk_p: fimc_md_unregister_clk_provider(fmd); err_attr: device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode); err_clk: fimc_md_put_clocks(fmd); err_m_ent: fimc_md_unregister_entities(fmd); err_md: media_device_unregister(&fmd->media_dev); err_v4l2_dev: v4l2_device_unregister(&fmd->v4l2_dev); return ret; } static int fimc_md_remove(struct platform_device *pdev) { struct fimc_md *fmd = platform_get_drvdata(pdev); if (!fmd) return 0; fimc_md_unregister_clk_provider(fmd); v4l2_async_notifier_unregister(&fmd->subdev_notifier); v4l2_device_unregister(&fmd->v4l2_dev); device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode); fimc_md_unregister_entities(fmd); fimc_md_pipelines_free(fmd); media_device_unregister(&fmd->media_dev); fimc_md_put_clocks(fmd); return 0; } static struct platform_device_id fimc_driver_ids[] __always_unused = { { .name = "s5p-fimc-md" }, { }, }; MODULE_DEVICE_TABLE(platform, fimc_driver_ids); static const struct of_device_id fimc_md_of_match[] = { { .compatible = "samsung,fimc" }, { }, }; MODULE_DEVICE_TABLE(of, fimc_md_of_match); static struct platform_driver fimc_md_driver = { .probe = fimc_md_probe, .remove = fimc_md_remove, .driver = { .of_match_table = of_match_ptr(fimc_md_of_match), .name = "s5p-fimc-md", } }; static int __init fimc_md_init(void) { int ret; request_module("s5p-csis"); ret = fimc_register_driver(); if (ret) return ret; return platform_driver_register(&fimc_md_driver); } static void __exit fimc_md_exit(void) { 
platform_driver_unregister(&fimc_md_driver); fimc_unregister_driver(); } module_init(fimc_md_init); module_exit(fimc_md_exit); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("2.0.1");
gpl-2.0
parheliamm/T440p-kernel
drivers/pcmcia/m32r_cfc.c
959
20408
/* * drivers/pcmcia/m32r_cfc.c * * Device driver for the CFC functionality of M32R. * * Copyright (c) 2001, 2002, 2003, 2004 * Hiroyuki Kondo, Naoto Sugai, Hayato Fujiwara */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/irq.h> #include <asm/io.h> #include <pcmcia/ss.h> #undef MAX_IO_WIN /* FIXME */ #define MAX_IO_WIN 1 #undef MAX_WIN /* FIXME */ #define MAX_WIN 1 #include "m32r_cfc.h" /* Poll status interval -- 0 means default to interrupt */ static int poll_interval = 0; typedef enum pcc_space { as_none = 0, as_comm, as_attr, as_io } pcc_as_t; typedef struct pcc_socket { u_short type, flags; struct pcmcia_socket socket; unsigned int number; unsigned int ioaddr; u_long mapaddr; u_long base; /* PCC register base */ u_char cs_irq1, cs_irq2, intr; pccard_io_map io_map[MAX_IO_WIN]; pccard_mem_map mem_map[MAX_WIN]; u_char io_win; u_char mem_win; pcc_as_t current_space; u_char last_iodbex; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc; #endif } pcc_socket_t; static int pcc_sockets = 0; static pcc_socket_t socket[M32R_MAX_PCC] = { { 0, }, /* ... 
*/ }; /*====================================================================*/ static unsigned int pcc_get(u_short, unsigned int); static void pcc_set(u_short, unsigned int , unsigned int ); static DEFINE_SPINLOCK(pcc_lock); #if !defined(CONFIG_PLAT_USRV) static inline u_long pcc_port2addr(unsigned long port, int size) { u_long addr = 0; u_long odd; if (size == 1) { /* byte access */ odd = (port&1) << 11; port -= port & 1; addr = CFC_IO_MAPBASE_BYTE - CFC_IOPORT_BASE + odd + port; } else if (size == 2) addr = CFC_IO_MAPBASE_WORD - CFC_IOPORT_BASE + port; return addr; } #else /* CONFIG_PLAT_USRV */ static inline u_long pcc_port2addr(unsigned long port, int size) { u_long odd; u_long addr = ((port - CFC_IOPORT_BASE) & 0xf000) << 8; if (size == 1) { /* byte access */ odd = port & 1; port -= odd; odd <<= 11; addr = (addr | CFC_IO_MAPBASE_BYTE) + odd + (port & 0xfff); } else if (size == 2) /* word access */ addr = (addr | CFC_IO_MAPBASE_WORD) + (port & 0xfff); return addr; } #endif /* CONFIG_PLAT_USRV */ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { u_long addr; unsigned char *bp = (unsigned char *)buf; unsigned long flags; pr_debug("m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, " "size=%u, nmemb=%d, flag=%d\n", sock, port, buf, size, nmemb, flag); addr = pcc_port2addr(port, 1); if (!addr) { printk("m32r_cfc:ioread_byte null port :%#lx\n",port); return; } pr_debug("m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr); spin_lock_irqsave(&pcc_lock, flags); /* read Byte */ while (nmemb--) *bp++ = readb(addr); spin_unlock_irqrestore(&pcc_lock, flags); } void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { u_long addr; unsigned short *bp = (unsigned short *)buf; unsigned long flags; pr_debug("m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, " "buf=%p, size=%u, nmemb=%d, flag=%d\n", sock, port, buf, size, nmemb, flag); if (size != 2) printk("m32r_cfc: ioread_word :illigal 
size %u : %#lx\n", size, port); if (size == 9) printk("m32r_cfc: ioread_word :insw \n"); addr = pcc_port2addr(port, 2); if (!addr) { printk("m32r_cfc:ioread_word null port :%#lx\n",port); return; } pr_debug("m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr); spin_lock_irqsave(&pcc_lock, flags); /* read Word */ while (nmemb--) *bp++ = readw(addr); spin_unlock_irqrestore(&pcc_lock, flags); } void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { u_long addr; unsigned char *bp = (unsigned char *)buf; unsigned long flags; pr_debug("m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, " "buf=%p, size=%u, nmemb=%d, flag=%d\n", sock, port, buf, size, nmemb, flag); /* write Byte */ addr = pcc_port2addr(port, 1); if (!addr) { printk("m32r_cfc:iowrite_byte null port:%#lx\n",port); return; } pr_debug("m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr); spin_lock_irqsave(&pcc_lock, flags); while (nmemb--) writeb(*bp++, addr); spin_unlock_irqrestore(&pcc_lock, flags); } void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { u_long addr; unsigned short *bp = (unsigned short *)buf; unsigned long flags; pr_debug("m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, " "buf=%p, size=%u, nmemb=%d, flag=%d\n", sock, port, buf, size, nmemb, flag); if(size != 2) printk("m32r_cfc: iowrite_word :illigal size %u : %#lx\n", size, port); if(size == 9) printk("m32r_cfc: iowrite_word :outsw \n"); addr = pcc_port2addr(port, 2); if (!addr) { printk("m32r_cfc:iowrite_word null addr :%#lx\n",port); return; } #if 1 if (addr & 1) { printk("m32r_cfc:iowrite_word port addr (%#lx):%#lx\n", port, addr); return; } #endif pr_debug("m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr); spin_lock_irqsave(&pcc_lock, flags); while (nmemb--) writew(*bp++, addr); spin_unlock_irqrestore(&pcc_lock, flags); } /*====================================================================*/ #define IS_REGISTERED 0x2000 #define IS_ALIVE 0x8000 typedef 
struct pcc_t { char *name; u_short flags; } pcc_t; static pcc_t pcc[] = { #if !defined(CONFIG_PLAT_USRV) { "m32r_cfc", 0 }, { "", 0 }, #else /* CONFIG_PLAT_USRV */ { "m32r_cfc", 0 }, { "m32r_cfc", 0 }, { "m32r_cfc", 0 }, { "m32r_cfc", 0 }, { "m32r_cfc", 0 }, { "", 0 }, #endif /* CONFIG_PLAT_USRV */ }; static irqreturn_t pcc_interrupt(int, void *); /*====================================================================*/ static struct timer_list poll_timer; static unsigned int pcc_get(u_short sock, unsigned int reg) { unsigned int val = inw(reg); pr_debug("m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val); return val; } static void pcc_set(u_short sock, unsigned int reg, unsigned int data) { outw(data, reg); pr_debug("m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data); } /*====================================================================== See if a card is present, powered up, in IO mode, and already bound to a (non PC Card) Linux driver. We leave these alone. We make an exception for cards that seem to be serial devices. 
======================================================================*/ static int __init is_alive(u_short sock) { unsigned int stat; pr_debug("m32r_cfc: is_alive:\n"); printk("CF: "); stat = pcc_get(sock, (unsigned int)PLD_CFSTS); if (!stat) printk("No "); printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat); pr_debug("m32r_cfc: is_alive: sock stat is 0x%04x\n", stat); return 0; } static void add_pcc_socket(ulong base, int irq, ulong mapaddr, unsigned int ioaddr) { pcc_socket_t *t = &socket[pcc_sockets]; pr_debug("m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, " "mapaddr=%#lx, ioaddr=%08x\n", base, irq, mapaddr, ioaddr); /* add sockets */ t->ioaddr = ioaddr; t->mapaddr = mapaddr; #if !defined(CONFIG_PLAT_USRV) t->base = 0; t->flags = 0; t->cs_irq1 = irq; // insert irq t->cs_irq2 = irq + 1; // eject irq #else /* CONFIG_PLAT_USRV */ t->base = base; t->flags = 0; t->cs_irq1 = 0; // insert irq t->cs_irq2 = 0; // eject irq #endif /* CONFIG_PLAT_USRV */ if (is_alive(pcc_sockets)) t->flags |= IS_ALIVE; /* add pcc */ #if !defined(CONFIG_PLAT_USRV) request_region((unsigned int)PLD_CFRSTCR, 0x20, "m32r_cfc"); #else /* CONFIG_PLAT_USRV */ { unsigned int reg_base; reg_base = (unsigned int)PLD_CFRSTCR; reg_base |= pcc_sockets << 8; request_region(reg_base, 0x20, "m32r_cfc"); } #endif /* CONFIG_PLAT_USRV */ printk(KERN_INFO " %s ", pcc[pcc_sockets].name); printk("pcc at 0x%08lx\n", t->base); /* Update socket interrupt information, capabilities */ t->socket.features |= (SS_CAP_PCCARD | SS_CAP_STATIC_MAP); t->socket.map_size = M32R_PCC_MAPSIZE; t->socket.io_offset = ioaddr; /* use for io access offset */ t->socket.irq_mask = 0; #if !defined(CONFIG_PLAT_USRV) t->socket.pci_irq = PLD_IRQ_CFIREQ ; /* card interrupt */ #else /* CONFIG_PLAT_USRV */ t->socket.pci_irq = PLD_IRQ_CF0 + pcc_sockets; #endif /* CONFIG_PLAT_USRV */ #ifndef CONFIG_PLAT_USRV /* insert interrupt */ request_irq(irq, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); #ifndef CONFIG_PLAT_MAPPI3 /* eject 
interrupt */ request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); #endif pr_debug("m32r_cfc: enable CFMSK, RDYSEL\n"); pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01); #endif /* CONFIG_PLAT_USRV */ #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) pcc_set(pcc_sockets, (unsigned int)PLD_CFCR1, 0x0200); #endif pcc_sockets++; return; } /*====================================================================*/ static irqreturn_t pcc_interrupt(int irq, void *dev) { int i; u_int events = 0; int handled = 0; pr_debug("m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev); for (i = 0; i < pcc_sockets; i++) { if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq) continue; handled = 1; pr_debug("m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ", i, irq); events |= SS_DETECT; /* insert or eject */ if (events) pcmcia_parse_events(&socket[i].socket, events); } pr_debug("m32r_cfc: pcc_interrupt: done\n"); return IRQ_RETVAL(handled); } /* pcc_interrupt */ static void pcc_interrupt_wrapper(u_long data) { pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n"); pcc_interrupt(0, NULL); init_timer(&poll_timer); poll_timer.expires = jiffies + poll_interval; add_timer(&poll_timer); } /*====================================================================*/ static int _pcc_get_status(u_short sock, u_int *value) { u_int status; pr_debug("m32r_cfc: _pcc_get_status:\n"); status = pcc_get(sock, (unsigned int)PLD_CFSTS); *value = (status) ? 
SS_DETECT : 0; pr_debug("m32r_cfc: _pcc_get_status: status=0x%08x\n", status); #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) if ( status ) { /* enable CF power */ status = inw((unsigned int)PLD_CPCR); if (!(status & PLD_CPCR_CF)) { pr_debug("m32r_cfc: _pcc_get_status: " "power on (CPCR=0x%08x)\n", status); status |= PLD_CPCR_CF; outw(status, (unsigned int)PLD_CPCR); udelay(100); } *value |= SS_POWERON; pcc_set(sock, (unsigned int)PLD_CFBUFCR,0);/* enable buffer */ udelay(100); *value |= SS_READY; /* always ready */ *value |= SS_3VCARD; } else { /* disable CF power */ status = inw((unsigned int)PLD_CPCR); status &= ~PLD_CPCR_CF; outw(status, (unsigned int)PLD_CPCR); udelay(100); pr_debug("m32r_cfc: _pcc_get_status: " "power off (CPCR=0x%08x)\n", status); } #elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) if ( status ) { status = pcc_get(sock, (unsigned int)PLD_CPCR); if (status == 0) { /* power off */ pcc_set(sock, (unsigned int)PLD_CPCR, 1); pcc_set(sock, (unsigned int)PLD_CFBUFCR,0); /* force buffer off for ZA-36 */ udelay(50); } *value |= SS_POWERON; pcc_set(sock, (unsigned int)PLD_CFBUFCR,0); udelay(50); pcc_set(sock, (unsigned int)PLD_CFRSTCR, 0x0101); udelay(25); /* for IDE reset */ pcc_set(sock, (unsigned int)PLD_CFRSTCR, 0x0100); mdelay(2); /* for IDE reset */ *value |= SS_READY; *value |= SS_3VCARD; } else { /* disable CF power */ pcc_set(sock, (unsigned int)PLD_CPCR, 0); udelay(100); pr_debug("m32r_cfc: _pcc_get_status: " "power off (CPCR=0x%08x)\n", status); } #else #error no platform configuration #endif pr_debug("m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n", sock, *value); return 0; } /* _get_status */ /*====================================================================*/ static int _pcc_set_socket(u_short sock, socket_state_t *state) { pr_debug("m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, state->Vcc, state->Vpp, 
state->io_irq, state->csc_mask); #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) || defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) if (state->Vcc) { if ((state->Vcc != 50) && (state->Vcc != 33)) return -EINVAL; /* accept 5V and 3.3V */ } #endif if (state->flags & SS_RESET) { pr_debug(":RESET\n"); pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101); }else{ pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100); } if (state->flags & SS_OUTPUT_ENA){ pr_debug(":OUTPUT_ENA\n"); /* bit clear */ pcc_set(sock,(unsigned int)PLD_CFBUFCR,0); } else { pcc_set(sock,(unsigned int)PLD_CFBUFCR,1); } if(state->flags & SS_IOCARD){ pr_debug(":IOCARD"); } if (state->flags & SS_PWR_AUTO) { pr_debug(":PWR_AUTO"); } if (state->csc_mask & SS_DETECT) pr_debug(":csc-SS_DETECT"); if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) pr_debug(":STSCHG"); } else { if (state->csc_mask & SS_BATDEAD) pr_debug(":BATDEAD"); if (state->csc_mask & SS_BATWARN) pr_debug(":BATWARN"); if (state->csc_mask & SS_READY) pr_debug(":READY"); } pr_debug("\n"); return 0; } /* _set_socket */ /*====================================================================*/ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) { u_char map; pr_debug("m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); map = io->map; return 0; } /* _set_io_map */ /*====================================================================*/ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) { u_char map = mem->map; u_long addr; pcc_socket_t *t = &socket[sock]; pr_debug("m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx, %#x)\n", sock, map, mem->flags, mem->speed, (unsigned long long)mem->static_start, mem->card_start); /* * sanity check */ if ((map > MAX_WIN) || (mem->card_start > 0x3ffffff)){ return -EINVAL; } /* * de-activate */ if ((mem->flags & 
MAP_ACTIVE) == 0) { t->current_space = as_none; return 0; } /* * Set mode */ if (mem->flags & MAP_ATTRIB) { t->current_space = as_attr; } else { t->current_space = as_comm; } /* * Set address */ addr = t->mapaddr + (mem->card_start & M32R_PCC_MAPMASK); mem->static_start = addr + mem->card_start; return 0; } /* _set_mem_map */ #if 0 /* driver model ordering issue */ /*====================================================================== Routines for accessing socket information and register dumps via /proc/bus/pccard/... ======================================================================*/ static ssize_t show_info(struct class_device *class_dev, char *buf) { pcc_socket_t *s = container_of(class_dev, struct pcc_socket, socket.dev); return sprintf(buf, "type: %s\nbase addr: 0x%08lx\n", pcc[s->type].name, s->base); } static ssize_t show_exca(struct class_device *class_dev, char *buf) { /* FIXME */ return 0; } static CLASS_DEVICE_ATTR(info, S_IRUGO, show_info, NULL); static CLASS_DEVICE_ATTR(exca, S_IRUGO, show_exca, NULL); #endif /*====================================================================*/ /* this is horribly ugly... proper locking needs to be done here at * some time... 
*/ #define LOCKED(x) do { \ int retval; \ unsigned long flags; \ spin_lock_irqsave(&pcc_lock, flags); \ retval = x; \ spin_unlock_irqrestore(&pcc_lock, flags); \ return retval; \ } while (0) static int pcc_get_status(struct pcmcia_socket *s, u_int *value) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) { dev_dbg(&s->dev, "pcc_get_status: sock(%d) -EINVAL\n", sock); *value = 0; return -EINVAL; } dev_dbg(&s->dev, "pcc_get_status: sock(%d)\n", sock); LOCKED(_pcc_get_status(sock, value)); } static int pcc_set_socket(struct pcmcia_socket *s, socket_state_t *state) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) { dev_dbg(&s->dev, "pcc_set_socket: sock(%d) -EINVAL\n", sock); return -EINVAL; } dev_dbg(&s->dev, "pcc_set_socket: sock(%d)\n", sock); LOCKED(_pcc_set_socket(sock, state)); } static int pcc_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) { dev_dbg(&s->dev, "pcc_set_io_map: sock(%d) -EINVAL\n", sock); return -EINVAL; } dev_dbg(&s->dev, "pcc_set_io_map: sock(%d)\n", sock); LOCKED(_pcc_set_io_map(sock, io)); } static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) { dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d) -EINVAL\n", sock); return -EINVAL; } dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d)\n", sock); LOCKED(_pcc_set_mem_map(sock, mem)); } static int pcc_init(struct pcmcia_socket *s) { dev_dbg(&s->dev, "pcc_init()\n"); return 0; } static struct pccard_operations pcc_operations = { .init = pcc_init, .get_status = pcc_get_status, .set_socket = pcc_set_socket, .set_io_map = pcc_set_io_map, .set_mem_map = pcc_set_mem_map, }; /*====================================================================*/ 
static struct platform_driver pcc_driver = { .driver = { .name = "cfc", }, }; static struct platform_device pcc_device = { .name = "cfc", .id = 0, }; /*====================================================================*/ static int __init init_m32r_pcc(void) { int i, ret; ret = platform_driver_register(&pcc_driver); if (ret) return ret; ret = platform_device_register(&pcc_device); if (ret){ platform_driver_unregister(&pcc_driver); return ret; } #if defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) pcc_set(0, (unsigned int)PLD_CFCR0, 0x0f0f); pcc_set(0, (unsigned int)PLD_CFCR1, 0x0200); #endif pcc_sockets = 0; #if !defined(CONFIG_PLAT_USRV) add_pcc_socket(M32R_PCC0_BASE, PLD_IRQ_CFC_INSERT, CFC_ATTR_MAPBASE, CFC_IOPORT_BASE); #else /* CONFIG_PLAT_USRV */ { ulong base, mapaddr; unsigned int ioaddr; for (i = 0 ; i < M32R_MAX_PCC ; i++) { base = (ulong)PLD_CFRSTCR; base = base | (i << 8); ioaddr = (i + 1) << 12; mapaddr = CFC_ATTR_MAPBASE | (i << 20); add_pcc_socket(base, 0, mapaddr, ioaddr); } } #endif /* CONFIG_PLAT_USRV */ if (pcc_sockets == 0) { printk("socket is not found.\n"); platform_device_unregister(&pcc_device); platform_driver_unregister(&pcc_driver); return -ENODEV; } /* Set up interrupt handler(s) */ for (i = 0 ; i < pcc_sockets ; i++) { socket[i].socket.dev.parent = &pcc_device.dev; socket[i].socket.ops = &pcc_operations; socket[i].socket.resource_ops = &pccard_static_ops; socket[i].socket.owner = THIS_MODULE; socket[i].number = i; ret = pcmcia_register_socket(&socket[i].socket); if (!ret) socket[i].flags |= IS_REGISTERED; } /* Finally, schedule a polling interrupt */ if (poll_interval != 0) { poll_timer.function = pcc_interrupt_wrapper; poll_timer.data = 0; init_timer(&poll_timer); poll_timer.expires = jiffies + poll_interval; add_timer(&poll_timer); } return 0; } /* init_m32r_pcc */ static void __exit exit_m32r_pcc(void) { int i; for (i = 0; i < pcc_sockets; i++) if (socket[i].flags & IS_REGISTERED) 
pcmcia_unregister_socket(&socket[i].socket); platform_device_unregister(&pcc_device); if (poll_interval != 0) del_timer_sync(&poll_timer); platform_driver_unregister(&pcc_driver); } /* exit_m32r_pcc */ module_init(init_m32r_pcc); module_exit(exit_m32r_pcc); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/
gpl-2.0
latlontude/linux
drivers/gpu/host1x/hw/host1x01.c
2239
1228
/*
 * Host1x init for T20 and T30 Architecture Chips
 *
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* include hw specification */
#include "host1x01.h"
#include "host1x01_hardware.h"

/* include code */
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"

#include "../dev.h"

/*
 * Wire up the host1x01-generation op tables.  Each ops structure is
 * defined in one of the *_hw.c files included above.  Always returns 0.
 */
int host1x01_init(struct host1x *host)
{
	host->channel_op = &host1x_channel_ops;
	host->cdma_op = &host1x_cdma_ops;
	host->cdma_pb_op = &host1x_pushbuffer_ops;
	host->syncpt_op = &host1x_syncpt_ops;
	host->intr_op = &host1x_intr_ops;
	host->debug_op = &host1x_debug_ops;

	return 0;
}
gpl-2.0
cpaasch/mptcp-net-next
drivers/media/common/saa7146/saa7146_fops.c
2751
17062
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <media/saa7146_vv.h> #include <linux/module.h> /****************************************************************************/ /* resource management functions, shamelessly stolen from saa7134 driver */ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit) { struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; if (fh->resources & bit) { DEB_D("already allocated! want: 0x%02x, cur:0x%02x\n", bit, vv->resources); /* have it already allocated */ return 1; } /* is it free? */ if (vv->resources & bit) { DEB_D("locked! vv->resources:0x%02x, we want:0x%02x\n", vv->resources, bit); /* no, someone else uses it */ return 0; } /* it's free, grab it */ fh->resources |= bit; vv->resources |= bit; DEB_D("res: get 0x%02x, cur:0x%02x\n", bit, vv->resources); return 1; } void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits) { struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; BUG_ON((fh->resources & bits) != bits); fh->resources &= ~bits; vv->resources &= ~bits; DEB_D("res: put 0x%02x, cur:0x%02x\n", bits, vv->resources); } /********************************************************************************/ /* common dma functions */ void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q, struct saa7146_buf *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); DEB_EE("dev:%p, buf:%p\n", dev, buf); BUG_ON(in_interrupt()); videobuf_waiton(q, &buf->vb, 0, 0); videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); buf->vb.state = VIDEOBUF_NEEDS_INIT; } /********************************************************************************/ /* common buffer functions */ int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf) { assert_spin_locked(&dev->slock); DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf); BUG_ON(!q); if (NULL == q->curr) { q->curr = buf; DEB_D("immediately activating buffer %p\n", buf); 
buf->activate(dev,buf,NULL); } else { list_add_tail(&buf->vb.queue,&q->queue); buf->vb.state = VIDEOBUF_QUEUED; DEB_D("adding buffer %p to queue. (active buffer present)\n", buf); } return 0; } void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state) { assert_spin_locked(&dev->slock); DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state); DEB_EE("q->curr:%p\n", q->curr); BUG_ON(!q->curr); /* finish current buffer */ if (NULL == q->curr) { DEB_D("aiii. no current buffer\n"); return; } q->curr->vb.state = state; v4l2_get_timestamp(&q->curr->vb.ts); wake_up(&q->curr->vb.done); q->curr = NULL; } void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int vbi) { struct saa7146_buf *buf,*next = NULL; BUG_ON(!q); DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi); assert_spin_locked(&dev->slock); if (!list_empty(&q->queue)) { /* activate next one from queue */ buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue); list_del(&buf->vb.queue); if (!list_empty(&q->queue)) next = list_entry(q->queue.next,struct saa7146_buf, vb.queue); q->curr = buf; DEB_INT("next buffer: buf:%p, prev:%p, next:%p\n", buf, q->queue.prev, q->queue.next); buf->activate(dev,buf,next); } else { DEB_INT("no next buffer. 
stopping.\n"); if( 0 != vbi ) { /* turn off video-dma3 */ saa7146_write(dev,MC1, MASK_20); } else { /* nothing to do -- just prevent next video-dma1 transfer by lowering the protection address */ // fixme: fix this for vflip != 0 saa7146_write(dev, PROT_ADDR1, 0); saa7146_write(dev, MC2, (MASK_02|MASK_18)); /* write the address of the rps-program */ saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle); /* turn on rps */ saa7146_write(dev, MC1, (MASK_12 | MASK_28)); /* printk("vdma%d.base_even: 0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1)); printk("vdma%d.base_odd: 0x%08x\n", 1,saa7146_read(dev,BASE_ODD1)); printk("vdma%d.prot_addr: 0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1)); printk("vdma%d.base_page: 0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1)); printk("vdma%d.pitch: 0x%08x\n", 1,saa7146_read(dev,PITCH1)); printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1)); */ } del_timer(&q->timeout); } } void saa7146_buffer_timeout(unsigned long data) { struct saa7146_dmaqueue *q = (struct saa7146_dmaqueue*)data; struct saa7146_dev *dev = q->dev; unsigned long flags; DEB_EE("dev:%p, dmaq:%p\n", dev, q); spin_lock_irqsave(&dev->slock,flags); if (q->curr) { DEB_D("timeout on %p\n", q->curr); saa7146_buffer_finish(dev,q,VIDEOBUF_ERROR); } /* we don't restart the transfer here like other drivers do. when a streaming capture is disabled, the timeout function will be called for the current buffer. if we activate the next buffer now, we mess up our capture logic. if a timeout occurs on another buffer, then something is seriously broken before, so no need to buffer the next capture IMHO... 
*/ /* saa7146_buffer_next(dev,q); */ spin_unlock_irqrestore(&dev->slock,flags); } /********************************************************************************/ /* file operations */ static int fops_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct saa7146_dev *dev = video_drvdata(file); struct saa7146_fh *fh = NULL; int result = 0; DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev)); if (mutex_lock_interruptible(vdev->lock)) return -ERESTARTSYS; DEB_D("using: %p\n", dev); /* check if an extension is registered */ if( NULL == dev->ext ) { DEB_S("no extension registered for this device\n"); result = -ENODEV; goto out; } /* allocate per open data */ fh = kzalloc(sizeof(*fh),GFP_KERNEL); if (NULL == fh) { DEB_S("cannot allocate memory for per open data\n"); result = -ENOMEM; goto out; } v4l2_fh_init(&fh->fh, vdev); file->private_data = &fh->fh; fh->dev = dev; if (vdev->vfl_type == VFL_TYPE_VBI) { DEB_S("initializing vbi...\n"); if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) result = saa7146_vbi_uops.open(dev,file); if (dev->ext_vv_data->vbi_fops.open) dev->ext_vv_data->vbi_fops.open(file); } else { DEB_S("initializing video...\n"); result = saa7146_video_uops.open(dev,file); } if (0 != result) { goto out; } if( 0 == try_module_get(dev->ext->module)) { result = -EINVAL; goto out; } result = 0; v4l2_fh_add(&fh->fh); out: if (fh && result != 0) { kfree(fh); file->private_data = NULL; } mutex_unlock(vdev->lock); return result; } static int fops_release(struct file *file) { struct video_device *vdev = video_devdata(file); struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; DEB_EE("file:%p\n", file); mutex_lock(vdev->lock); if (vdev->vfl_type == VFL_TYPE_VBI) { if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) saa7146_vbi_uops.release(dev,file); if (dev->ext_vv_data->vbi_fops.release) dev->ext_vv_data->vbi_fops.release(file); } else { saa7146_video_uops.release(dev,file); } 
v4l2_fh_del(&fh->fh); v4l2_fh_exit(&fh->fh); module_put(dev->ext->module); file->private_data = NULL; kfree(fh); mutex_unlock(vdev->lock); return 0; } static int fops_mmap(struct file *file, struct vm_area_struct * vma) { struct video_device *vdev = video_devdata(file); struct saa7146_fh *fh = file->private_data; struct videobuf_queue *q; int res; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: { DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n", file, vma); q = &fh->video_q; break; } case VFL_TYPE_VBI: { DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n", file, vma); if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT) return -ENODEV; q = &fh->vbi_q; break; } default: BUG(); return 0; } if (mutex_lock_interruptible(vdev->lock)) return -ERESTARTSYS; res = videobuf_mmap_mapper(q, vma); mutex_unlock(vdev->lock); return res; } static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait) { struct video_device *vdev = video_devdata(file); struct saa7146_fh *fh = file->private_data; struct videobuf_buffer *buf = NULL; struct videobuf_queue *q; unsigned int res = v4l2_ctrl_poll(file, wait); DEB_EE("file:%p, poll:%p\n", file, wait); if (vdev->vfl_type == VFL_TYPE_VBI) { if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT) return res | POLLOUT | POLLWRNORM; if( 0 == fh->vbi_q.streaming ) return res | videobuf_poll_stream(file, &fh->vbi_q, wait); q = &fh->vbi_q; } else { DEB_D("using video queue\n"); q = &fh->video_q; } if (!list_empty(&q->stream)) buf = list_entry(q->stream.next, struct videobuf_buffer, stream); if (!buf) { DEB_D("buf == NULL!\n"); return res | POLLERR; } poll_wait(file, &buf->done, wait); if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) { DEB_D("poll succeeded!\n"); return res | POLLIN | POLLRDNORM; } DEB_D("nothing to poll for, buf->state:%d\n", buf->state); return res; } static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait) { struct video_device 
*vdev = video_devdata(file); unsigned int res; mutex_lock(vdev->lock); res = __fops_poll(file, wait); mutex_unlock(vdev->lock); return res; } static ssize_t fops_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct video_device *vdev = video_devdata(file); struct saa7146_fh *fh = file->private_data; int ret; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: /* DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun", file, data, (unsigned long)count); */ return saa7146_video_uops.read(file,data,count,ppos); case VFL_TYPE_VBI: /* DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n", file, data, (unsigned long)count); */ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) { if (mutex_lock_interruptible(vdev->lock)) return -ERESTARTSYS; ret = saa7146_vbi_uops.read(file, data, count, ppos); mutex_unlock(vdev->lock); return ret; } return -EINVAL; default: BUG(); return 0; } } static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct video_device *vdev = video_devdata(file); struct saa7146_fh *fh = file->private_data; int ret; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: return -EINVAL; case VFL_TYPE_VBI: if (fh->dev->ext_vv_data->vbi_fops.write) { if (mutex_lock_interruptible(vdev->lock)) return -ERESTARTSYS; ret = fh->dev->ext_vv_data->vbi_fops.write(file, data, count, ppos); mutex_unlock(vdev->lock); return ret; } return -EINVAL; default: BUG(); return -EINVAL; } } static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = fops_open, .release = fops_release, .read = fops_read, .write = fops_write, .poll = fops_poll, .mmap = fops_mmap, .unlocked_ioctl = video_ioctl2, }; static void vv_callback(struct saa7146_dev *dev, unsigned long status) { u32 isr = status; DEB_INT("dev:%p, isr:0x%08x\n", dev, (u32)status); if (0 != (isr & (MASK_27))) { DEB_INT("irq: RPS0 (0x%08x)\n", isr); saa7146_video_uops.irq_done(dev,isr); } if (0 != (isr & 
(MASK_28))) { u32 mc2 = saa7146_read(dev, MC2); if( 0 != (mc2 & MASK_15)) { DEB_INT("irq: RPS1 vbi workaround (0x%08x)\n", isr); wake_up(&dev->vv_data->vbi_wq); saa7146_write(dev,MC2, MASK_31); return; } DEB_INT("irq: RPS1 (0x%08x)\n", isr); saa7146_vbi_uops.irq_done(dev,isr); } } static const struct v4l2_ctrl_ops saa7146_ctrl_ops = { .s_ctrl = saa7146_s_ctrl, }; int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) { struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler; struct v4l2_pix_format *fmt; struct v4l2_vbi_format *vbi; struct saa7146_vv *vv; int err; err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev); if (err) return err; v4l2_ctrl_handler_init(hdl, 6); v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_SATURATION, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); if (hdl->error) { err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } dev->v4l2_dev.ctrl_handler = hdl; vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL); if (vv == NULL) { ERR("out of memory. aborting.\n"); v4l2_ctrl_handler_free(hdl); return -ENOMEM; } ext_vv->vid_ops = saa7146_video_ioctl_ops; ext_vv->vbi_ops = saa7146_vbi_ioctl_ops; ext_vv->core_ops = &saa7146_video_ioctl_ops; DEB_EE("dev:%p\n", dev); /* set default values for video parts of the saa7146 */ saa7146_write(dev, BCS_CTRL, 0x80400040); /* enable video-port pins */ saa7146_write(dev, MC1, (MASK_10 | MASK_26)); /* save per-device extension data (one extension can handle different devices that might need different configuration data) */ dev->ext_vv_data = ext_vv; vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle); if( NULL == vv->d_clipping.cpu_addr ) { ERR("out of memory. 
aborting.\n"); kfree(vv); v4l2_ctrl_handler_free(hdl); return -1; } memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM); saa7146_video_uops.init(dev,vv); if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) saa7146_vbi_uops.init(dev,vv); fmt = &vv->ov_fb.fmt; fmt->width = vv->standard->h_max_out; fmt->height = vv->standard->v_max_out; fmt->pixelformat = V4L2_PIX_FMT_RGB565; fmt->bytesperline = 2 * fmt->width; fmt->sizeimage = fmt->bytesperline * fmt->height; fmt->colorspace = V4L2_COLORSPACE_SRGB; fmt = &vv->video_fmt; fmt->width = 384; fmt->height = 288; fmt->pixelformat = V4L2_PIX_FMT_BGR24; fmt->field = V4L2_FIELD_ANY; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; fmt->bytesperline = 3 * fmt->width; fmt->sizeimage = fmt->bytesperline * fmt->height; vbi = &vv->vbi_fmt; vbi->sampling_rate = 27000000; vbi->offset = 248; /* todo */ vbi->samples_per_line = 720 * 2; vbi->sample_format = V4L2_PIX_FMT_GREY; /* fixme: this only works for PAL */ vbi->start[0] = 5; vbi->count[0] = 16; vbi->start[1] = 312; vbi->count[1] = 16; init_timer(&vv->vbi_read_timeout); vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING; vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY; dev->vv_data = vv; dev->vv_callback = &vv_callback; return 0; } EXPORT_SYMBOL_GPL(saa7146_vv_init); int saa7146_vv_release(struct saa7146_dev* dev) { struct saa7146_vv *vv = dev->vv_data; DEB_EE("dev:%p\n", dev); v4l2_device_unregister(&dev->v4l2_dev); pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle); v4l2_ctrl_handler_free(&dev->ctrl_handler); kfree(vv); dev->vv_data = NULL; dev->vv_callback = NULL; return 0; } EXPORT_SYMBOL_GPL(saa7146_vv_release); int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, char *name, int type) { struct video_device *vfd; int err; int i; DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type); // released by vfd->release vfd = video_device_alloc(); if (vfd == NULL) return -ENOMEM; vfd->fops = 
&video_fops; if (type == VFL_TYPE_GRABBER) vfd->ioctl_ops = &dev->ext_vv_data->vid_ops; else vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops; vfd->release = video_device_release; vfd->lock = &dev->v4l2_lock; vfd->v4l2_dev = &dev->v4l2_dev; vfd->tvnorms = 0; set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags); for (i = 0; i < dev->ext_vv_data->num_stds; i++) vfd->tvnorms |= dev->ext_vv_data->stds[i].id; strlcpy(vfd->name, name, sizeof(vfd->name)); video_set_drvdata(vfd, dev); err = video_register_device(vfd, type, -1); if (err < 0) { ERR("cannot register v4l2 device. skipping.\n"); video_device_release(vfd); return err; } pr_info("%s: registered device %s [v4l2]\n", dev->name, video_device_node_name(vfd)); *vid = vfd; return 0; } EXPORT_SYMBOL_GPL(saa7146_register_device); int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev) { DEB_EE("dev:%p\n", dev); video_unregister_device(*vid); *vid = NULL; return 0; } EXPORT_SYMBOL_GPL(saa7146_unregister_device); static int __init saa7146_vv_init_module(void) { return 0; } static void __exit saa7146_vv_cleanup_module(void) { } module_init(saa7146_vv_init_module); module_exit(saa7146_vv_cleanup_module); MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("video4linux driver for saa7146-based hardware"); MODULE_LICENSE("GPL");
gpl-2.0
boyan3010/Villec2_ShooterU_Kernel_3.0.X
drivers/infiniband/hw/ehca/ehca_eq.c
3263
5128
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * Event queue handling * * Authors: Waleri Fomin <fomin@de.ibm.com> * Khadija Souissi <souissi@de.ibm.com> * Reinhard Ernst <rernst@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ehca_classes.h" #include "ehca_irq.h" #include "ehca_iverbs.h" #include "ehca_qes.h" #include "hcp_if.h" #include "ipz_pt_fn.h" int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq, const enum ehca_eq_type type, const u32 length) { int ret; u64 h_ret; u32 nr_pages; u32 i; void *vpage; struct ib_device *ib_dev = &shca->ib_device; spin_lock_init(&eq->spinlock); spin_lock_init(&eq->irq_spinlock); eq->is_initialized = 0; if (type != EHCA_EQ && type != EHCA_NEQ) { ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq); return -EINVAL; } if (!length) { ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq); return -EINVAL; } h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle, &eq->pf, type, length, &eq->ipz_eq_handle, &eq->length, &nr_pages, &eq->ist); if (h_ret != H_SUCCESS) { ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq); return -EINVAL; } ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages, EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0); if (!ret) { ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq); goto create_eq_exit1; } for (i = 0; i < nr_pages; i++) { u64 rpage; vpage = ipz_qpageit_get_inc(&eq->ipz_queue); if (!vpage) goto create_eq_exit2; rpage = virt_to_abs(vpage); h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle, eq->ipz_eq_handle, &eq->pf, 0, 0, rpage, 1); if (i == (nr_pages - 1)) { /* last page */ vpage = ipz_qpageit_get_inc(&eq->ipz_queue); if (h_ret != H_SUCCESS || vpage) goto create_eq_exit2; } else { if (h_ret != H_PAGE_REGISTERED) goto create_eq_exit2; } } ipz_qeit_reset(&eq->ipz_queue); /* register interrupt handlers and initialize work queues */ if (type == EHCA_EQ) { tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, IRQF_DISABLED, "ehca_eq", (void *)shca); if (ret < 0) ehca_err(ib_dev, "Can't map interrupt handler."); } else if (type == EHCA_NEQ) { tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); ret = 
ibmebus_request_irq(eq->ist, ehca_interrupt_neq, IRQF_DISABLED, "ehca_neq", (void *)shca); if (ret < 0) ehca_err(ib_dev, "Can't map interrupt handler."); } eq->is_initialized = 1; return 0; create_eq_exit2: ipz_queue_dtor(NULL, &eq->ipz_queue); create_eq_exit1: hipz_h_destroy_eq(shca->ipz_hca_handle, eq); return -EINVAL; } void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq) { unsigned long flags; void *eqe; spin_lock_irqsave(&eq->spinlock, flags); eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue); spin_unlock_irqrestore(&eq->spinlock, flags); return eqe; } int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq) { unsigned long flags; u64 h_ret; ibmebus_free_irq(eq->ist, (void *)shca); spin_lock_irqsave(&shca_list_lock, flags); eq->is_initialized = 0; spin_unlock_irqrestore(&shca_list_lock, flags); tasklet_kill(&eq->interrupt_task); h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "Can't free EQ resources."); return -EINVAL; } ipz_queue_dtor(NULL, &eq->ipz_queue); return 0; }
gpl-2.0
h8rift/android_kernel_htc_msm8960-evita
net/ipv4/tunnel4.c
3263
4396
/* tunnel4.c: Generic IP tunnel transformer. * * Copyright (C) 2003 David S. Miller (davem@redhat.com) */ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/icmp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/xfrm.h> static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly; static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly; static DEFINE_MUTEX(tunnel4_mutex); static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family) { return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers; } int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family) { struct xfrm_tunnel __rcu **pprev; struct xfrm_tunnel *t; int ret = -EEXIST; int priority = handler->priority; mutex_lock(&tunnel4_mutex); for (pprev = fam_handlers(family); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&tunnel4_mutex))) != NULL; pprev = &t->next) { if (t->priority > priority) break; if (t->priority == priority) goto err; } handler->next = *pprev; rcu_assign_pointer(*pprev, handler); ret = 0; err: mutex_unlock(&tunnel4_mutex); return ret; } EXPORT_SYMBOL(xfrm4_tunnel_register); int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) { struct xfrm_tunnel __rcu **pprev; struct xfrm_tunnel *t; int ret = -ENOENT; mutex_lock(&tunnel4_mutex); for (pprev = fam_handlers(family); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&tunnel4_mutex))) != NULL; pprev = &t->next) { if (t == handler) { *pprev = handler->next; ret = 0; break; } } mutex_unlock(&tunnel4_mutex); synchronize_net(); return ret; } EXPORT_SYMBOL(xfrm4_tunnel_deregister); #define for_each_tunnel_rcu(head, handler) \ for (handler = rcu_dereference(head); \ handler != NULL; \ handler = rcu_dereference(handler->next)) \ static int tunnel4_rcv(struct sk_buff *skb) { struct xfrm_tunnel *handler; if (!pskb_may_pull(skb, 
sizeof(struct iphdr))) goto drop; for_each_tunnel_rcu(tunnel4_handlers, handler) if (!handler->handler(skb)) return 0; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); drop: kfree_skb(skb); return 0; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int tunnel64_rcv(struct sk_buff *skb) { struct xfrm_tunnel *handler; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto drop; for_each_tunnel_rcu(tunnel64_handlers, handler) if (!handler->handler(skb)) return 0; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); drop: kfree_skb(skb); return 0; } #endif static void tunnel4_err(struct sk_buff *skb, u32 info) { struct xfrm_tunnel *handler; for_each_tunnel_rcu(tunnel4_handlers, handler) if (!handler->err_handler(skb, info)) break; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static void tunnel64_err(struct sk_buff *skb, u32 info) { struct xfrm_tunnel *handler; for_each_tunnel_rcu(tunnel64_handlers, handler) if (!handler->err_handler(skb, info)) break; } #endif static const struct net_protocol tunnel4_protocol = { .handler = tunnel4_rcv, .err_handler = tunnel4_err, .no_policy = 1, .netns_ok = 1, }; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static const struct net_protocol tunnel64_protocol = { .handler = tunnel64_rcv, .err_handler = tunnel64_err, .no_policy = 1, .netns_ok = 1, }; #endif static int __init tunnel4_init(void) { if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) { printk(KERN_ERR "tunnel4 init: can't add protocol\n"); return -EAGAIN; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) { printk(KERN_ERR "tunnel64 init: can't add protocol\n"); inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); return -EAGAIN; } #endif return 0; } static void __exit tunnel4_fini(void) { #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6)) printk(KERN_ERR "tunnel64 close: can't remove protocol\n"); 
#endif if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP)) printk(KERN_ERR "tunnel4 close: can't remove protocol\n"); } module_init(tunnel4_init); module_exit(tunnel4_fini); MODULE_LICENSE("GPL");
gpl-2.0
mongoose700/xen-coalesce-kernel
drivers/usb/storage/sddr55.c
3519
25447
/* Driver for SanDisk SDDR-55 SmartMedia reader * * SDDR55 driver v0.1: * * First release * * Current development and maintenance by: * (c) 2002 Simon Munton * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for SanDisk SDDR-55 SmartMedia reader"); MODULE_AUTHOR("Simon Munton"); MODULE_LICENSE("GPL"); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static struct usb_device_id sddr55_usb_ids[] = { # include "unusual_sddr55.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, sddr55_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct 
us_unusual_dev sddr55_unusual_dev_list[] = { # include "unusual_sddr55.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) ) #define LSB_of(s) ((s)&0xFF) #define MSB_of(s) ((s)>>8) #define PAGESIZE 512 #define set_sense_info(sk, asc, ascq) \ do { \ info->sense_data[2] = sk; \ info->sense_data[12] = asc; \ info->sense_data[13] = ascq; \ } while (0) struct sddr55_card_info { unsigned long capacity; /* Size of card in bytes */ int max_log_blks; /* maximum number of logical blocks */ int pageshift; /* log2 of pagesize */ int smallpageshift; /* 1 if pagesize == 256 */ int blocksize; /* Size of block in pages */ int blockshift; /* log2 of blocksize */ int blockmask; /* 2^blockshift - 1 */ int read_only; /* non zero if card is write protected */ int force_read_only; /* non zero if we find a map error*/ int *lba_to_pba; /* logical to physical map */ int *pba_to_lba; /* physical to logical map */ int fatal_error; /* set if we detect something nasty */ unsigned long last_access; /* number of jiffies since we last talked to device */ unsigned char sense_data[18]; }; #define NOT_ALLOCATED 0xffffffff #define BAD_BLOCK 0xffff #define CIS_BLOCK 0x400 #define UNUSED_BLOCK 0x3ff static int sddr55_bulk_transport(struct us_data *us, int direction, unsigned char *data, unsigned int len) { struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; unsigned int pipe = (direction == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; if (!len) return USB_STOR_XFER_GOOD; info->last_access = jiffies; return usb_stor_bulk_transfer_buf(us, pipe, data, len, NULL); } /* check if card inserted, if there is, update read_only status * return non zero if no card */ static int sddr55_status(struct us_data *us) { int result; unsigned char *command = us->iobuf; unsigned char *status = us->iobuf; struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; /* send command */ memset(command, 0, 8); command[5] = 0xB0; command[7] = 0x80; result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8); usb_stor_dbg(us, "Result for send_command in status %d\n", result); if (result != USB_STOR_XFER_GOOD) { set_sense_info (4, 0, 0); /* hardware error */ return USB_STOR_TRANSPORT_ERROR; } result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, status, 4); /* expect to get short transfer if no card fitted */ if (result == USB_STOR_XFER_SHORT || result == USB_STOR_XFER_STALLED) { /* had a short transfer, no card inserted, free map memory */ kfree(info->lba_to_pba); kfree(info->pba_to_lba); info->lba_to_pba = NULL; info->pba_to_lba = NULL; info->fatal_error = 0; info->force_read_only = 0; set_sense_info (2, 0x3a, 0); /* not ready, medium not present */ return USB_STOR_TRANSPORT_FAILED; } if (result != USB_STOR_XFER_GOOD) { set_sense_info (4, 0, 0); /* hardware error */ return USB_STOR_TRANSPORT_FAILED; } /* check write protect status */ info->read_only = (status[0] & 0x20); /* now read status */ result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, status, 2); if (result != USB_STOR_XFER_GOOD) { set_sense_info (4, 0, 0); /* hardware error */ } return (result == USB_STOR_XFER_GOOD ? 
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_FAILED); } static int sddr55_read_data(struct us_data *us, unsigned int lba, unsigned int page, unsigned short sectors) { int result = USB_STOR_TRANSPORT_GOOD; unsigned char *command = us->iobuf; unsigned char *status = us->iobuf; struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; unsigned char *buffer; unsigned int pba; unsigned long address; unsigned short pages; unsigned int len, offset; struct scatterlist *sg; // Since we only read in one block at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. len = min((unsigned int) sectors, (unsigned int) info->blocksize >> info->smallpageshift) * PAGESIZE; buffer = kmalloc(len, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; /* out of memory */ offset = 0; sg = NULL; while (sectors>0) { /* have we got to end? */ if (lba >= info->max_log_blks) break; pba = info->lba_to_pba[lba]; // Read as many sectors as possible in this block pages = min((unsigned int) sectors << info->smallpageshift, info->blocksize - page); len = pages << info->pageshift; usb_stor_dbg(us, "Read %02X pages, from PBA %04X (LBA %04X) page %02X\n", pages, pba, lba, page); if (pba == NOT_ALLOCATED) { /* no pba for this lba, fill with zeroes */ memset (buffer, 0, len); } else { address = (pba << info->blockshift) + page; command[0] = 0; command[1] = LSB_of(address>>16); command[2] = LSB_of(address>>8); command[3] = LSB_of(address); command[4] = 0; command[5] = 0xB0; command[6] = LSB_of(pages << (1 - info->smallpageshift)); command[7] = 0x85; /* send command */ result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8); usb_stor_dbg(us, "Result for send_command in read_data %d\n", result); if (result != USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; goto leave; } /* read data */ result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, buffer, len); if (result != USB_STOR_XFER_GOOD) 
{ result = USB_STOR_TRANSPORT_ERROR; goto leave; } /* now read status */ result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, status, 2); if (result != USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; goto leave; } /* check status for error */ if (status[0] == 0xff && status[1] == 0x4) { set_sense_info (3, 0x11, 0); result = USB_STOR_TRANSPORT_FAILED; goto leave; } } // Store the data in the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &offset, TO_XFER_BUF); page = 0; lba++; sectors -= pages >> info->smallpageshift; } result = USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return result; } static int sddr55_write_data(struct us_data *us, unsigned int lba, unsigned int page, unsigned short sectors) { int result = USB_STOR_TRANSPORT_GOOD; unsigned char *command = us->iobuf; unsigned char *status = us->iobuf; struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; unsigned char *buffer; unsigned int pba; unsigned int new_pba; unsigned long address; unsigned short pages; int i; unsigned int len, offset; struct scatterlist *sg; /* check if we are allowed to write */ if (info->read_only || info->force_read_only) { set_sense_info (7, 0x27, 0); /* read only */ return USB_STOR_TRANSPORT_FAILED; } // Since we only write one block at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. len = min((unsigned int) sectors, (unsigned int) info->blocksize >> info->smallpageshift) * PAGESIZE; buffer = kmalloc(len, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; offset = 0; sg = NULL; while (sectors > 0) { /* have we got to end? 
*/ if (lba >= info->max_log_blks) break; pba = info->lba_to_pba[lba]; // Write as many sectors as possible in this block pages = min((unsigned int) sectors << info->smallpageshift, info->blocksize - page); len = pages << info->pageshift; // Get the data from the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &offset, FROM_XFER_BUF); usb_stor_dbg(us, "Write %02X pages, to PBA %04X (LBA %04X) page %02X\n", pages, pba, lba, page); command[4] = 0; if (pba == NOT_ALLOCATED) { /* no pba allocated for this lba, find a free pba to use */ int max_pba = (info->max_log_blks / 250 ) * 256; int found_count = 0; int found_pba = -1; /* set pba to first block in zone lba is in */ pba = (lba / 1000) * 1024; usb_stor_dbg(us, "No PBA for LBA %04X\n", lba); if (max_pba > 1024) max_pba = 1024; /* * Scan through the map looking for an unused block * leave 16 unused blocks at start (or as many as * possible) since the sddr55 seems to reuse a used * block when it shouldn't if we don't leave space. 
*/ for (i = 0; i < max_pba; i++, pba++) { if (info->pba_to_lba[pba] == UNUSED_BLOCK) { found_pba = pba; if (found_count++ > 16) break; } } pba = found_pba; if (pba == -1) { /* oh dear */ usb_stor_dbg(us, "Couldn't find unallocated block\n"); set_sense_info (3, 0x31, 0); /* medium error */ result = USB_STOR_TRANSPORT_FAILED; goto leave; } usb_stor_dbg(us, "Allocating PBA %04X for LBA %04X\n", pba, lba); /* set writing to unallocated block flag */ command[4] = 0x40; } address = (pba << info->blockshift) + page; command[1] = LSB_of(address>>16); command[2] = LSB_of(address>>8); command[3] = LSB_of(address); /* set the lba into the command, modulo 1000 */ command[0] = LSB_of(lba % 1000); command[6] = MSB_of(lba % 1000); command[4] |= LSB_of(pages >> info->smallpageshift); command[5] = 0xB0; command[7] = 0x86; /* send command */ result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Result for send_command in write_data %d\n", result); /* set_sense_info is superfluous here? */ set_sense_info (3, 0x3, 0);/* peripheral write error */ result = USB_STOR_TRANSPORT_FAILED; goto leave; } /* send the data */ result = sddr55_bulk_transport(us, DMA_TO_DEVICE, buffer, len); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Result for send_data in write_data %d\n", result); /* set_sense_info is superfluous here? */ set_sense_info (3, 0x3, 0);/* peripheral write error */ result = USB_STOR_TRANSPORT_FAILED; goto leave; } /* now read status */ result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, status, 6); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Result for get_status in write_data %d\n", result); /* set_sense_info is superfluous here? 
*/ set_sense_info (3, 0x3, 0);/* peripheral write error */ result = USB_STOR_TRANSPORT_FAILED; goto leave; } new_pba = (status[3] + (status[4] << 8) + (status[5] << 16)) >> info->blockshift; /* check status for error */ if (status[0] == 0xff && status[1] == 0x4) { info->pba_to_lba[new_pba] = BAD_BLOCK; set_sense_info (3, 0x0c, 0); result = USB_STOR_TRANSPORT_FAILED; goto leave; } usb_stor_dbg(us, "Updating maps for LBA %04X: old PBA %04X, new PBA %04X\n", lba, pba, new_pba); /* update the lba<->pba maps, note new_pba might be the same as pba */ info->lba_to_pba[lba] = new_pba; info->pba_to_lba[pba] = UNUSED_BLOCK; /* check that new_pba wasn't already being used */ if (info->pba_to_lba[new_pba] != UNUSED_BLOCK) { printk(KERN_ERR "sddr55 error: new PBA %04X already in use for LBA %04X\n", new_pba, info->pba_to_lba[new_pba]); info->fatal_error = 1; set_sense_info (3, 0x31, 0); result = USB_STOR_TRANSPORT_FAILED; goto leave; } /* update the pba<->lba maps for new_pba */ info->pba_to_lba[new_pba] = lba % 1000; page = 0; lba++; sectors -= pages >> info->smallpageshift; } result = USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return result; } static int sddr55_read_deviceID(struct us_data *us, unsigned char *manufacturerID, unsigned char *deviceID) { int result; unsigned char *command = us->iobuf; unsigned char *content = us->iobuf; memset(command, 0, 8); command[5] = 0xB0; command[7] = 0x84; result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8); usb_stor_dbg(us, "Result of send_control for device ID is %d\n", result); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, content, 4); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; *manufacturerID = content[0]; *deviceID = content[1]; if (content[0] != 0xff) { result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, content, 2); } return USB_STOR_TRANSPORT_GOOD; } static int sddr55_reset(struct us_data *us) { return 0; } static 
unsigned long sddr55_get_capacity(struct us_data *us) { unsigned char uninitialized_var(manufacturerID); unsigned char uninitialized_var(deviceID); int result; struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; usb_stor_dbg(us, "Reading capacity...\n"); result = sddr55_read_deviceID(us, &manufacturerID, &deviceID); usb_stor_dbg(us, "Result of read_deviceID is %d\n", result); if (result != USB_STOR_XFER_GOOD) return 0; usb_stor_dbg(us, "Device ID = %02X\n", deviceID); usb_stor_dbg(us, "Manuf ID = %02X\n", manufacturerID); info->pageshift = 9; info->smallpageshift = 0; info->blocksize = 16; info->blockshift = 4; info->blockmask = 15; switch (deviceID) { case 0x6e: // 1MB case 0xe8: case 0xec: info->pageshift = 8; info->smallpageshift = 1; return 0x00100000; case 0xea: // 2MB case 0x64: info->pageshift = 8; info->smallpageshift = 1; case 0x5d: // 5d is a ROM card with pagesize 512. return 0x00200000; case 0xe3: // 4MB case 0xe5: case 0x6b: case 0xd5: return 0x00400000; case 0xe6: // 8MB case 0xd6: return 0x00800000; case 0x73: // 16MB info->blocksize = 32; info->blockshift = 5; info->blockmask = 31; return 0x01000000; case 0x75: // 32MB info->blocksize = 32; info->blockshift = 5; info->blockmask = 31; return 0x02000000; case 0x76: // 64MB info->blocksize = 32; info->blockshift = 5; info->blockmask = 31; return 0x04000000; case 0x79: // 128MB info->blocksize = 32; info->blockshift = 5; info->blockmask = 31; return 0x08000000; default: // unknown return 0; } } static int sddr55_read_map(struct us_data *us) { struct sddr55_card_info *info = (struct sddr55_card_info *)(us->extra); int numblocks; unsigned char *buffer; unsigned char *command = us->iobuf; int i; unsigned short lba; unsigned short max_lba; int result; if (!info->capacity) return -1; numblocks = info->capacity >> (info->blockshift + info->pageshift); buffer = kmalloc( numblocks * 2, GFP_NOIO ); if (!buffer) return -1; memset(command, 0, 8); command[5] = 0xB0; command[6] = numblocks * 2 / 
256; command[7] = 0x8A; result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8); if ( result != USB_STOR_XFER_GOOD) { kfree (buffer); return -1; } result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, buffer, numblocks * 2); if ( result != USB_STOR_XFER_GOOD) { kfree (buffer); return -1; } result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, command, 2); if ( result != USB_STOR_XFER_GOOD) { kfree (buffer); return -1; } kfree(info->lba_to_pba); kfree(info->pba_to_lba); info->lba_to_pba = kmalloc(numblocks*sizeof(int), GFP_NOIO); info->pba_to_lba = kmalloc(numblocks*sizeof(int), GFP_NOIO); if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) { kfree(info->lba_to_pba); kfree(info->pba_to_lba); info->lba_to_pba = NULL; info->pba_to_lba = NULL; kfree(buffer); return -1; } memset(info->lba_to_pba, 0xff, numblocks*sizeof(int)); memset(info->pba_to_lba, 0xff, numblocks*sizeof(int)); /* set maximum lba */ max_lba = info->max_log_blks; if (max_lba > 1000) max_lba = 1000; // Each block is 64 bytes of control data, so block i is located in // scatterlist block i*64/128k = i*(2^6)*(2^-17) = i*(2^-11) for (i=0; i<numblocks; i++) { int zone = i / 1024; lba = short_pack(buffer[i * 2], buffer[i * 2 + 1]); /* Every 1024 physical blocks ("zone"), the LBA numbers * go back to zero, but are within a higher * block of LBA's. Also, there is a maximum of * 1000 LBA's per zone. In other words, in PBA * 1024-2047 you will find LBA 0-999 which are * really LBA 1000-1999. Yes, this wastes 24 * physical blocks per zone. Go figure. * These devices can have blocks go bad, so there * are 24 spare blocks to use when blocks do go bad. */ /* SDDR55 returns 0xffff for a bad block, and 0x400 for the * CIS block. (Is this true for cards 8MB or less??) 
* Record these in the physical to logical map */ info->pba_to_lba[i] = lba; if (lba >= max_lba) { continue; } if (info->lba_to_pba[lba + zone * 1000] != NOT_ALLOCATED && !info->force_read_only) { printk(KERN_WARNING "sddr55: map inconsistency at LBA %04X\n", lba + zone * 1000); info->force_read_only = 1; } if (lba<0x10 || (lba>=0x3E0 && lba<0x3EF)) usb_stor_dbg(us, "LBA %04X <-> PBA %04X\n", lba, i); info->lba_to_pba[lba + zone * 1000] = i; } kfree(buffer); return 0; } static void sddr55_card_info_destructor(void *extra) { struct sddr55_card_info *info = (struct sddr55_card_info *)extra; if (!extra) return; kfree(info->lba_to_pba); kfree(info->pba_to_lba); } /* * Transport for the Sandisk SDDR-55 */ static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us) { int result; static unsigned char inquiry_response[8] = { 0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00 }; // write-protected for now, no block descriptor support static unsigned char mode_page_01[20] = { 0x0, 0x12, 0x00, 0x80, 0x0, 0x0, 0x0, 0x0, 0x01, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; unsigned char *ptr = us->iobuf; unsigned long capacity; unsigned int lba; unsigned int pba; unsigned int page; unsigned short pages; struct sddr55_card_info *info; if (!us->extra) { us->extra = kzalloc( sizeof(struct sddr55_card_info), GFP_NOIO); if (!us->extra) return USB_STOR_TRANSPORT_ERROR; us->extra_destructor = sddr55_card_info_destructor; } info = (struct sddr55_card_info *)(us->extra); if (srb->cmnd[0] == REQUEST_SENSE) { usb_stor_dbg(us, "request sense %02x/%02x/%02x\n", info->sense_data[2], info->sense_data[12], info->sense_data[13]); memcpy (ptr, info->sense_data, sizeof info->sense_data); ptr[0] = 0x70; ptr[7] = 11; usb_stor_set_xfer_buf (ptr, sizeof info->sense_data, srb); memset (info->sense_data, 0, sizeof info->sense_data); return USB_STOR_TRANSPORT_GOOD; } memset (info->sense_data, 0, sizeof info->sense_data); /* Dummy up a response for INQUIRY since SDDR55 doesn't 
respond to INQUIRY commands */ if (srb->cmnd[0] == INQUIRY) { memcpy(ptr, inquiry_response, 8); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } /* only check card status if the map isn't allocated, ie no card seen yet * or if it's been over half a second since we last accessed it */ if (info->lba_to_pba == NULL || time_after(jiffies, info->last_access + HZ/2)) { /* check to see if a card is fitted */ result = sddr55_status (us); if (result) { result = sddr55_status (us); if (!result) { set_sense_info (6, 0x28, 0); /* new media, set unit attention, not ready to ready */ } return USB_STOR_TRANSPORT_FAILED; } } /* if we detected a problem with the map when writing, don't allow any more access */ if (info->fatal_error) { set_sense_info (3, 0x31, 0); return USB_STOR_TRANSPORT_FAILED; } if (srb->cmnd[0] == READ_CAPACITY) { capacity = sddr55_get_capacity(us); if (!capacity) { set_sense_info (3, 0x30, 0); /* incompatible medium */ return USB_STOR_TRANSPORT_FAILED; } info->capacity = capacity; /* figure out the maximum logical block number, allowing for * the fact that only 250 out of every 256 are used */ info->max_log_blks = ((info->capacity >> (info->pageshift + info->blockshift)) / 256) * 250; /* Last page in the card, adjust as we only use 250 out of * every 256 pages */ capacity = (capacity / 256) * 250; capacity /= PAGESIZE; capacity--; ((__be32 *) ptr)[0] = cpu_to_be32(capacity); ((__be32 *) ptr)[1] = cpu_to_be32(PAGESIZE); usb_stor_set_xfer_buf(ptr, 8, srb); sddr55_read_map(us); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SENSE_10) { memcpy(ptr, mode_page_01, sizeof mode_page_01); ptr[3] = (info->read_only || info->force_read_only) ? 
0x80 : 0; usb_stor_set_xfer_buf(ptr, sizeof(mode_page_01), srb); if ( (srb->cmnd[2] & 0x3F) == 0x01 ) { usb_stor_dbg(us, "Dummy up request for mode page 1\n"); return USB_STOR_TRANSPORT_GOOD; } else if ( (srb->cmnd[2] & 0x3F) == 0x3F ) { usb_stor_dbg(us, "Dummy up request for all mode pages\n"); return USB_STOR_TRANSPORT_GOOD; } set_sense_info (5, 0x24, 0); /* invalid field in command */ return USB_STOR_TRANSPORT_FAILED; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { usb_stor_dbg(us, "%s medium removal. Not that I can do anything about it...\n", (srb->cmnd[4]&0x03) ? "Prevent" : "Allow"); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) { page = short_pack(srb->cmnd[3], srb->cmnd[2]); page <<= 16; page |= short_pack(srb->cmnd[5], srb->cmnd[4]); pages = short_pack(srb->cmnd[8], srb->cmnd[7]); page <<= info->smallpageshift; // convert page to block and page-within-block lba = page >> info->blockshift; page = page & info->blockmask; // locate physical block corresponding to logical block if (lba >= info->max_log_blks) { usb_stor_dbg(us, "Error: Requested LBA %04X exceeds maximum block %04X\n", lba, info->max_log_blks - 1); set_sense_info (5, 0x24, 0); /* invalid field in command */ return USB_STOR_TRANSPORT_FAILED; } pba = info->lba_to_pba[lba]; if (srb->cmnd[0] == WRITE_10) { usb_stor_dbg(us, "WRITE_10: write block %04X (LBA %04X) page %01X pages %d\n", pba, lba, page, pages); return sddr55_write_data(us, lba, page, pages); } else { usb_stor_dbg(us, "READ_10: read block %04X (LBA %04X) page %01X pages %d\n", pba, lba, page, pages); return sddr55_read_data(us, lba, page, pages); } } if (srb->cmnd[0] == TEST_UNIT_READY) { return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == START_STOP) { return USB_STOR_TRANSPORT_GOOD; } set_sense_info (5, 0x20, 0); /* illegal command */ return USB_STOR_TRANSPORT_FAILED; // FIXME: sense buffer? 
} static int sddr55_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - sddr55_usb_ids) + sddr55_unusual_dev_list); if (result) return result; us->transport_name = "SDDR55"; us->transport = sddr55_transport; us->transport_reset = sddr55_reset; us->max_lun = 0; result = usb_stor_probe2(us); return result; } static struct usb_driver sddr55_driver = { .name = "ums-sddr55", .probe = sddr55_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = sddr55_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(sddr55_driver);
gpl-2.0
TeamCarbonXtremeARMv7/android_kernel_samsung_golden
sound/isa/sscape.c
4031
33649
/* * Low-level ALSA driver for the ENSONIQ SoundScape * Copyright (c) by Chris Rankin * * This driver was written in part using information obtained from * the OSS/Free SoundScape driver, written by Hannu Savolainen. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/pnp.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/initval.h> MODULE_AUTHOR("Chris Rankin"); MODULE_DESCRIPTION("ENSONIQ SoundScape driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("sndscape.co0"); MODULE_FIRMWARE("sndscape.co1"); MODULE_FIRMWARE("sndscape.co2"); MODULE_FIRMWARE("sndscape.co3"); MODULE_FIRMWARE("sndscape.co4"); MODULE_FIRMWARE("scope.cod"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int dma[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; static bool joystick[SNDRV_CARDS]; 
module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index number for SoundScape soundcard"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "Description for SoundScape card"); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for SoundScape driver."); module_param_array(wss_port, long, NULL, 0444); MODULE_PARM_DESC(wss_port, "WSS Port # for SoundScape driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for SoundScape driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU401 IRQ # for SoundScape driver."); module_param_array(dma, int, NULL, 0444); MODULE_PARM_DESC(dma, "DMA # for SoundScape driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for SoundScape driver."); module_param_array(joystick, bool, NULL, 0444); MODULE_PARM_DESC(joystick, "Enable gameport."); #ifdef CONFIG_PNP static int isa_registered; static int pnp_registered; static struct pnp_card_device_id sscape_pnpids[] = { { .id = "ENS3081", .devs = { { "ENS0000" } } }, /* Soundscape PnP */ { .id = "ENS4081", .devs = { { "ENS1011" } } }, /* VIVO90 */ { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, sscape_pnpids); #endif #define HOST_CTRL_IO(i) ((i) + 2) #define HOST_DATA_IO(i) ((i) + 3) #define ODIE_ADDR_IO(i) ((i) + 4) #define ODIE_DATA_IO(i) ((i) + 5) #define CODEC_IO(i) ((i) + 8) #define IC_ODIE 1 #define IC_OPUS 2 #define RX_READY 0x01 #define TX_READY 0x02 #define CMD_ACK 0x80 #define CMD_SET_MIDI_VOL 0x84 #define CMD_GET_MIDI_VOL 0x85 #define CMD_XXX_MIDI_VOL 0x86 #define CMD_SET_EXTMIDI 0x8a #define CMD_GET_EXTMIDI 0x8b #define CMD_SET_MT32 0x8c #define CMD_GET_MT32 0x8d enum GA_REG { GA_INTSTAT_REG = 0, GA_INTENA_REG, GA_DMAA_REG, GA_DMAB_REG, GA_INTCFG_REG, GA_DMACFG_REG, GA_CDCFG_REG, GA_SMCFGA_REG, GA_SMCFGB_REG, GA_HMCTL_REG }; #define DMA_8BIT 0x80 enum card_type { MEDIA_FX, /* Sequoia S-1000 */ SSCAPE, /* Sequoia S-2000 */ 
SSCAPE_PNP, SSCAPE_VIVO, }; struct soundscape { spinlock_t lock; unsigned io_base; int ic_type; enum card_type type; struct resource *io_res; struct resource *wss_res; struct snd_wss *chip; unsigned char midi_vol; }; #define INVALID_IRQ ((unsigned)-1) static inline struct soundscape *get_card_soundscape(struct snd_card *c) { return (struct soundscape *) (c->private_data); } /* * Allocates some kernel memory that we can use for DMA. * I think this means that the memory has to map to * contiguous pages of physical memory. */ static struct snd_dma_buffer *get_dmabuf(struct snd_dma_buffer *buf, unsigned long size) { if (buf) { if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), size, buf) < 0) { snd_printk(KERN_ERR "sscape: Failed to allocate " "%lu bytes for DMA\n", size); return NULL; } } return buf; } /* * Release the DMA-able kernel memory ... */ static void free_dmabuf(struct snd_dma_buffer *buf) { if (buf && buf->area) snd_dma_free_pages(buf); } /* * This function writes to the SoundScape's control registers, * but doesn't do any locking. It's up to the caller to do that. * This is why this function is "unsafe" ... */ static inline void sscape_write_unsafe(unsigned io_base, enum GA_REG reg, unsigned char val) { outb(reg, ODIE_ADDR_IO(io_base)); outb(val, ODIE_DATA_IO(io_base)); } /* * Write to the SoundScape's control registers, and do the * necessary locking ... */ static void sscape_write(struct soundscape *s, enum GA_REG reg, unsigned char val) { unsigned long flags; spin_lock_irqsave(&s->lock, flags); sscape_write_unsafe(s->io_base, reg, val); spin_unlock_irqrestore(&s->lock, flags); } /* * Read from the SoundScape's control registers, but leave any * locking to the caller. This is why the function is "unsafe" ... 
*/ static inline unsigned char sscape_read_unsafe(unsigned io_base, enum GA_REG reg) { outb(reg, ODIE_ADDR_IO(io_base)); return inb(ODIE_DATA_IO(io_base)); } /* * Puts the SoundScape into "host" mode, as compared to "MIDI" mode */ static inline void set_host_mode_unsafe(unsigned io_base) { outb(0x0, HOST_CTRL_IO(io_base)); } /* * Puts the SoundScape into "MIDI" mode, as compared to "host" mode */ static inline void set_midi_mode_unsafe(unsigned io_base) { outb(0x3, HOST_CTRL_IO(io_base)); } /* * Read the SoundScape's host-mode control register, but leave * any locking issues to the caller ... */ static inline int host_read_unsafe(unsigned io_base) { int data = -1; if ((inb(HOST_CTRL_IO(io_base)) & RX_READY) != 0) data = inb(HOST_DATA_IO(io_base)); return data; } /* * Read the SoundScape's host-mode control register, performing * a limited amount of busy-waiting if the register isn't ready. * Also leaves all locking-issues to the caller ... */ static int host_read_ctrl_unsafe(unsigned io_base, unsigned timeout) { int data; while (((data = host_read_unsafe(io_base)) < 0) && (timeout != 0)) { udelay(100); --timeout; } /* while */ return data; } /* * Write to the SoundScape's host-mode control registers, but * leave any locking issues to the caller ... */ static inline int host_write_unsafe(unsigned io_base, unsigned char data) { if ((inb(HOST_CTRL_IO(io_base)) & TX_READY) != 0) { outb(data, HOST_DATA_IO(io_base)); return 1; } return 0; } /* * Write to the SoundScape's host-mode control registers, performing * a limited amount of busy-waiting if the register isn't ready. * Also leaves all locking-issues to the caller ... */ static int host_write_ctrl_unsafe(unsigned io_base, unsigned char data, unsigned timeout) { int err; while (!(err = host_write_unsafe(io_base, data)) && (timeout != 0)) { udelay(100); --timeout; } /* while */ return err; } /* * Check that the MIDI subsystem is operational. If it isn't, * then we will hang the computer if we try to use it ... 
* * NOTE: This check is based upon observation, not documentation. */ static inline int verify_mpu401(const struct snd_mpu401 *mpu) { return ((inb(MPU401C(mpu)) & 0xc0) == 0x80); } /* * This is apparently the standard way to initailise an MPU-401 */ static inline void initialise_mpu401(const struct snd_mpu401 *mpu) { outb(0, MPU401D(mpu)); } /* * Tell the SoundScape to activate the AD1845 chip (I think). * The AD1845 detection fails if we *don't* do this, so I * think that this is a good idea ... */ static void activate_ad1845_unsafe(unsigned io_base) { unsigned char val = sscape_read_unsafe(io_base, GA_HMCTL_REG); sscape_write_unsafe(io_base, GA_HMCTL_REG, (val & 0xcf) | 0x10); sscape_write_unsafe(io_base, GA_CDCFG_REG, 0x80); } /* * Do the necessary ALSA-level cleanup to deallocate our driver ... */ static void soundscape_free(struct snd_card *c) { struct soundscape *sscape = get_card_soundscape(c); release_and_free_resource(sscape->io_res); release_and_free_resource(sscape->wss_res); free_dma(sscape->chip->dma1); } /* * Tell the SoundScape to begin a DMA tranfer using the given channel. * All locking issues are left to the caller. */ static void sscape_start_dma_unsafe(unsigned io_base, enum GA_REG reg) { sscape_write_unsafe(io_base, reg, sscape_read_unsafe(io_base, reg) | 0x01); sscape_write_unsafe(io_base, reg, sscape_read_unsafe(io_base, reg) & 0xfe); } /* * Wait for a DMA transfer to complete. This is a "limited busy-wait", * and all locking issues are left to the caller. */ static int sscape_wait_dma_unsafe(unsigned io_base, enum GA_REG reg, unsigned timeout) { while (!(sscape_read_unsafe(io_base, reg) & 0x01) && (timeout != 0)) { udelay(100); --timeout; } /* while */ return sscape_read_unsafe(io_base, reg) & 0x01; } /* * Wait for the On-Board Processor to return its start-up * acknowledgement sequence. This wait is too long for * us to perform "busy-waiting", and so we must sleep. 
* This in turn means that we must not be holding any * spinlocks when we call this function. */ static int obp_startup_ack(struct soundscape *s, unsigned timeout) { unsigned long end_time = jiffies + msecs_to_jiffies(timeout); do { unsigned long flags; int x; spin_lock_irqsave(&s->lock, flags); x = host_read_unsafe(s->io_base); spin_unlock_irqrestore(&s->lock, flags); if (x == 0xfe || x == 0xff) return 1; msleep(10); } while (time_before(jiffies, end_time)); return 0; } /* * Wait for the host to return its start-up acknowledgement * sequence. This wait is too long for us to perform * "busy-waiting", and so we must sleep. This in turn means * that we must not be holding any spinlocks when we call * this function. */ static int host_startup_ack(struct soundscape *s, unsigned timeout) { unsigned long end_time = jiffies + msecs_to_jiffies(timeout); do { unsigned long flags; int x; spin_lock_irqsave(&s->lock, flags); x = host_read_unsafe(s->io_base); spin_unlock_irqrestore(&s->lock, flags); if (x == 0xfe) return 1; msleep(10); } while (time_before(jiffies, end_time)); return 0; } /* * Upload a byte-stream into the SoundScape using DMA channel A. */ static int upload_dma_data(struct soundscape *s, const unsigned char *data, size_t size) { unsigned long flags; struct snd_dma_buffer dma; int ret; unsigned char val; if (!get_dmabuf(&dma, PAGE_ALIGN(32 * 1024))) return -ENOMEM; spin_lock_irqsave(&s->lock, flags); /* * Reset the board ... */ val = sscape_read_unsafe(s->io_base, GA_HMCTL_REG); sscape_write_unsafe(s->io_base, GA_HMCTL_REG, val & 0x3f); /* * Enable the DMA channels and configure them ... */ val = (s->chip->dma1 << 4) | DMA_8BIT; sscape_write_unsafe(s->io_base, GA_DMAA_REG, val); sscape_write_unsafe(s->io_base, GA_DMAB_REG, 0x20); /* * Take the board out of reset ... */ val = sscape_read_unsafe(s->io_base, GA_HMCTL_REG); sscape_write_unsafe(s->io_base, GA_HMCTL_REG, val | 0x80); /* * Upload the firmware to the SoundScape * board through the DMA channel ... 
*/ while (size != 0) { unsigned long len; len = min(size, dma.bytes); memcpy(dma.area, data, len); data += len; size -= len; snd_dma_program(s->chip->dma1, dma.addr, len, DMA_MODE_WRITE); sscape_start_dma_unsafe(s->io_base, GA_DMAA_REG); if (!sscape_wait_dma_unsafe(s->io_base, GA_DMAA_REG, 5000)) { /* * Don't forget to release this spinlock we're holding */ spin_unlock_irqrestore(&s->lock, flags); snd_printk(KERN_ERR "sscape: DMA upload has timed out\n"); ret = -EAGAIN; goto _release_dma; } } /* while */ set_host_mode_unsafe(s->io_base); outb(0x0, s->io_base); /* * Boot the board ... (I think) */ val = sscape_read_unsafe(s->io_base, GA_HMCTL_REG); sscape_write_unsafe(s->io_base, GA_HMCTL_REG, val | 0x40); spin_unlock_irqrestore(&s->lock, flags); /* * If all has gone well, then the board should acknowledge * the new upload and tell us that it has rebooted OK. We * give it 5 seconds (max) ... */ ret = 0; if (!obp_startup_ack(s, 5000)) { snd_printk(KERN_ERR "sscape: No response " "from on-board processor after upload\n"); ret = -EAGAIN; } else if (!host_startup_ack(s, 5000)) { snd_printk(KERN_ERR "sscape: SoundScape failed to initialise\n"); ret = -EAGAIN; } _release_dma: /* * NOTE!!! We are NOT holding any spinlocks at this point !!! */ sscape_write(s, GA_DMAA_REG, (s->ic_type == IC_OPUS ? 0x40 : 0x70)); free_dmabuf(&dma); return ret; } /* * Upload the bootblock(?) into the SoundScape. The only * purpose of this block of code seems to be to tell * us which version of the microcode we should be using. 
*/ static int sscape_upload_bootblock(struct snd_card *card) { struct soundscape *sscape = get_card_soundscape(card); unsigned long flags; const struct firmware *init_fw = NULL; int data = 0; int ret; ret = request_firmware(&init_fw, "scope.cod", card->dev); if (ret < 0) { snd_printk(KERN_ERR "sscape: Error loading scope.cod"); return ret; } ret = upload_dma_data(sscape, init_fw->data, init_fw->size); release_firmware(init_fw); spin_lock_irqsave(&sscape->lock, flags); if (ret == 0) data = host_read_ctrl_unsafe(sscape->io_base, 100); if (data & 0x10) sscape_write_unsafe(sscape->io_base, GA_SMCFGA_REG, 0x2f); spin_unlock_irqrestore(&sscape->lock, flags); data &= 0xf; if (ret == 0 && data > 7) { snd_printk(KERN_ERR "sscape: timeout reading firmware version\n"); ret = -EAGAIN; } return (ret == 0) ? data : ret; } /* * Upload the microcode into the SoundScape. */ static int sscape_upload_microcode(struct snd_card *card, int version) { struct soundscape *sscape = get_card_soundscape(card); const struct firmware *init_fw = NULL; char name[14]; int err; snprintf(name, sizeof(name), "sndscape.co%d", version); err = request_firmware(&init_fw, name, card->dev); if (err < 0) { snd_printk(KERN_ERR "sscape: Error loading sndscape.co%d", version); return err; } err = upload_dma_data(sscape, init_fw->data, init_fw->size); if (err == 0) snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n", init_fw->size >> 10); release_firmware(init_fw); return err; } /* * Mixer control for the SoundScape's MIDI device. 
*/ static int sscape_midi_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 127; return 0; } static int sscape_midi_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct snd_wss *chip = snd_kcontrol_chip(kctl); struct snd_card *card = chip->card; register struct soundscape *s = get_card_soundscape(card); unsigned long flags; spin_lock_irqsave(&s->lock, flags); uctl->value.integer.value[0] = s->midi_vol; spin_unlock_irqrestore(&s->lock, flags); return 0; } static int sscape_midi_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct snd_wss *chip = snd_kcontrol_chip(kctl); struct snd_card *card = chip->card; struct soundscape *s = get_card_soundscape(card); unsigned long flags; int change; unsigned char new_val; spin_lock_irqsave(&s->lock, flags); new_val = uctl->value.integer.value[0] & 127; /* * We need to put the board into HOST mode before we * can send any volume-changing HOST commands ... */ set_host_mode_unsafe(s->io_base); /* * To successfully change the MIDI volume setting, you seem to * have to write a volume command, write the new volume value, * and then perform another volume-related command. Perhaps the * first command is an "open" and the second command is a "close"? */ if (s->midi_vol == new_val) { change = 0; goto __skip_change; } change = host_write_ctrl_unsafe(s->io_base, CMD_SET_MIDI_VOL, 100) && host_write_ctrl_unsafe(s->io_base, new_val, 100) && host_write_ctrl_unsafe(s->io_base, CMD_XXX_MIDI_VOL, 100) && host_write_ctrl_unsafe(s->io_base, new_val, 100); s->midi_vol = new_val; __skip_change: /* * Take the board out of HOST mode and back into MIDI mode ... 
*/ set_midi_mode_unsafe(s->io_base); spin_unlock_irqrestore(&s->lock, flags); return change; } static struct snd_kcontrol_new midi_mixer_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "MIDI", .info = sscape_midi_info, .get = sscape_midi_get, .put = sscape_midi_put }; /* * The SoundScape can use two IRQs from a possible set of four. * These IRQs are encoded as bit patterns so that they can be * written to the control registers. */ static unsigned __devinit get_irq_config(int sscape_type, int irq) { static const int valid_irq[] = { 9, 5, 7, 10 }; static const int old_irq[] = { 9, 7, 5, 15 }; unsigned cfg; if (sscape_type == MEDIA_FX) { for (cfg = 0; cfg < ARRAY_SIZE(old_irq); ++cfg) if (irq == old_irq[cfg]) return cfg; } else { for (cfg = 0; cfg < ARRAY_SIZE(valid_irq); ++cfg) if (irq == valid_irq[cfg]) return cfg; } return INVALID_IRQ; } /* * Perform certain arcane port-checks to see whether there * is a SoundScape board lurking behind the given ports. */ static int __devinit detect_sscape(struct soundscape *s, long wss_io) { unsigned long flags; unsigned d; int retval = 0; spin_lock_irqsave(&s->lock, flags); /* * The following code is lifted from the original OSS driver, * and as I don't have a datasheet I cannot really comment * on what it is doing... 
*/ if ((inb(HOST_CTRL_IO(s->io_base)) & 0x78) != 0) goto _done; d = inb(ODIE_ADDR_IO(s->io_base)) & 0xf0; if ((d & 0x80) != 0) goto _done; if (d == 0) s->ic_type = IC_ODIE; else if ((d & 0x60) != 0) s->ic_type = IC_OPUS; else goto _done; outb(0xfa, ODIE_ADDR_IO(s->io_base)); if ((inb(ODIE_ADDR_IO(s->io_base)) & 0x9f) != 0x0a) goto _done; outb(0xfe, ODIE_ADDR_IO(s->io_base)); if ((inb(ODIE_ADDR_IO(s->io_base)) & 0x9f) != 0x0e) goto _done; outb(0xfe, ODIE_ADDR_IO(s->io_base)); d = inb(ODIE_DATA_IO(s->io_base)); if (s->type != SSCAPE_VIVO && (d & 0x9f) != 0x0e) goto _done; if (s->ic_type == IC_OPUS) activate_ad1845_unsafe(s->io_base); if (s->type == SSCAPE_VIVO) wss_io += 4; d = sscape_read_unsafe(s->io_base, GA_HMCTL_REG); sscape_write_unsafe(s->io_base, GA_HMCTL_REG, d | 0xc0); /* wait for WSS codec */ for (d = 0; d < 500; d++) { if ((inb(wss_io) & 0x80) == 0) break; spin_unlock_irqrestore(&s->lock, flags); msleep(1); spin_lock_irqsave(&s->lock, flags); } if ((inb(wss_io) & 0x80) != 0) goto _done; if (inb(wss_io + 2) == 0xff) goto _done; d = sscape_read_unsafe(s->io_base, GA_HMCTL_REG) & 0x3f; sscape_write_unsafe(s->io_base, GA_HMCTL_REG, d); if ((inb(wss_io) & 0x80) != 0) s->type = MEDIA_FX; d = sscape_read_unsafe(s->io_base, GA_HMCTL_REG); sscape_write_unsafe(s->io_base, GA_HMCTL_REG, d | 0xc0); /* wait for WSS codec */ for (d = 0; d < 500; d++) { if ((inb(wss_io) & 0x80) == 0) break; spin_unlock_irqrestore(&s->lock, flags); msleep(1); spin_lock_irqsave(&s->lock, flags); } /* * SoundScape successfully detected! */ retval = 1; _done: spin_unlock_irqrestore(&s->lock, flags); return retval; } /* * ALSA callback function, called when attempting to open the MIDI device. * Check that the MIDI firmware has been loaded, because we don't want * to crash the machine. Also check that someone isn't using the hardware * IOCTL device. 
*/ static int mpu401_open(struct snd_mpu401 *mpu) { if (!verify_mpu401(mpu)) { snd_printk(KERN_ERR "sscape: MIDI disabled, " "please load firmware\n"); return -ENODEV; } return 0; } /* * Initialse an MPU-401 subdevice for MIDI support on the SoundScape. */ static int __devinit create_mpu401(struct snd_card *card, int devnum, unsigned long port, int irq) { struct soundscape *sscape = get_card_soundscape(card); struct snd_rawmidi *rawmidi; int err; err = snd_mpu401_uart_new(card, devnum, MPU401_HW_MPU401, port, MPU401_INFO_INTEGRATED, irq, IRQF_DISABLED, &rawmidi); if (err == 0) { struct snd_mpu401 *mpu = rawmidi->private_data; mpu->open_input = mpu401_open; mpu->open_output = mpu401_open; mpu->private_data = sscape; initialise_mpu401(mpu); } return err; } /* * Create an AD1845 PCM subdevice on the SoundScape. The AD1845 * is very much like a CS4231, with a few extra bits. We will * try to support at least some of the extra bits by overriding * some of the CS4231 callback. */ static int __devinit create_ad1845(struct snd_card *card, unsigned port, int irq, int dma1, int dma2) { register struct soundscape *sscape = get_card_soundscape(card); struct snd_wss *chip; int err; int codec_type = WSS_HW_DETECT; switch (sscape->type) { case MEDIA_FX: case SSCAPE: /* * There are some freak examples of early Soundscape cards * with CS4231 instead of AD1848/CS4248. Unfortunately, the * CS4231 works only in CS4248 compatibility mode on * these cards so force it. */ if (sscape->ic_type != IC_OPUS) codec_type = WSS_HW_AD1848; break; case SSCAPE_VIVO: port += 4; break; default: break; } err = snd_wss_create(card, port, -1, irq, dma1, dma2, codec_type, WSS_HWSHARE_DMA1, &chip); if (!err) { unsigned long flags; struct snd_pcm *pcm; if (sscape->type != SSCAPE_VIVO) { /* * The input clock frequency on the SoundScape must * be 14.31818 MHz, because we must set this register * to get the playback to sound correct ... 
*/ snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, AD1845_CLOCK, 0x20); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); } err = snd_wss_pcm(chip, 0, &pcm); if (err < 0) { snd_printk(KERN_ERR "sscape: No PCM device " "for AD1845 chip\n"); goto _error; } err = snd_wss_mixer(chip); if (err < 0) { snd_printk(KERN_ERR "sscape: No mixer device " "for AD1845 chip\n"); goto _error; } if (chip->hardware != WSS_HW_AD1848) { err = snd_wss_timer(chip, 0, NULL); if (err < 0) { snd_printk(KERN_ERR "sscape: No timer device " "for AD1845 chip\n"); goto _error; } } if (sscape->type != SSCAPE_VIVO) { err = snd_ctl_add(card, snd_ctl_new1(&midi_mixer_ctl, chip)); if (err < 0) { snd_printk(KERN_ERR "sscape: Could not create " "MIDI mixer control\n"); goto _error; } } sscape->chip = chip; } _error: return err; } /* * Create an ALSA soundcard entry for the SoundScape, using * the given list of port, IRQ and DMA resources. */ static int __devinit create_sscape(int dev, struct snd_card *card) { struct soundscape *sscape = get_card_soundscape(card); unsigned dma_cfg; unsigned irq_cfg; unsigned mpu_irq_cfg; struct resource *io_res; struct resource *wss_res; unsigned long flags; int err; int val; const char *name; /* * Grab IO ports that we will need to probe so that we * can detect and control this hardware ... */ io_res = request_region(port[dev], 8, "SoundScape"); if (!io_res) { snd_printk(KERN_ERR "sscape: can't grab port 0x%lx\n", port[dev]); return -EBUSY; } wss_res = NULL; if (sscape->type == SSCAPE_VIVO) { wss_res = request_region(wss_port[dev], 4, "SoundScape"); if (!wss_res) { snd_printk(KERN_ERR "sscape: can't grab port 0x%lx\n", wss_port[dev]); err = -EBUSY; goto _release_region; } } /* * Grab one DMA channel ... 
*/ err = request_dma(dma[dev], "SoundScape"); if (err < 0) { snd_printk(KERN_ERR "sscape: can't grab DMA %d\n", dma[dev]); goto _release_region; } spin_lock_init(&sscape->lock); sscape->io_res = io_res; sscape->wss_res = wss_res; sscape->io_base = port[dev]; if (!detect_sscape(sscape, wss_port[dev])) { printk(KERN_ERR "sscape: hardware not detected at 0x%x\n", sscape->io_base); err = -ENODEV; goto _release_dma; } switch (sscape->type) { case MEDIA_FX: name = "MediaFX/SoundFX"; break; case SSCAPE: name = "Soundscape"; break; case SSCAPE_PNP: name = "Soundscape PnP"; break; case SSCAPE_VIVO: name = "Soundscape VIVO"; break; default: name = "unknown Soundscape"; break; } printk(KERN_INFO "sscape: %s card detected at 0x%x, using IRQ %d, DMA %d\n", name, sscape->io_base, irq[dev], dma[dev]); /* * Check that the user didn't pass us garbage data ... */ irq_cfg = get_irq_config(sscape->type, irq[dev]); if (irq_cfg == INVALID_IRQ) { snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", irq[dev]); return -ENXIO; } mpu_irq_cfg = get_irq_config(sscape->type, mpu_irq[dev]); if (mpu_irq_cfg == INVALID_IRQ) { snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", mpu_irq[dev]); return -ENXIO; } /* * Tell the on-board devices where their resources are (I think - * I can't be sure without a datasheet ... So many magic values!) */ spin_lock_irqsave(&sscape->lock, flags); sscape_write_unsafe(sscape->io_base, GA_SMCFGA_REG, 0x2e); sscape_write_unsafe(sscape->io_base, GA_SMCFGB_REG, 0x00); /* * Enable and configure the DMA channels ... */ sscape_write_unsafe(sscape->io_base, GA_DMACFG_REG, 0x50); dma_cfg = (sscape->ic_type == IC_OPUS ? 
0x40 : 0x70); sscape_write_unsafe(sscape->io_base, GA_DMAA_REG, dma_cfg); sscape_write_unsafe(sscape->io_base, GA_DMAB_REG, 0x20); mpu_irq_cfg |= mpu_irq_cfg << 2; val = sscape_read_unsafe(sscape->io_base, GA_HMCTL_REG) & 0xF7; if (joystick[dev]) val |= 8; sscape_write_unsafe(sscape->io_base, GA_HMCTL_REG, val | 0x10); sscape_write_unsafe(sscape->io_base, GA_INTCFG_REG, 0xf0 | mpu_irq_cfg); sscape_write_unsafe(sscape->io_base, GA_CDCFG_REG, 0x09 | DMA_8BIT | (dma[dev] << 4) | (irq_cfg << 1)); /* * Enable the master IRQ ... */ sscape_write_unsafe(sscape->io_base, GA_INTENA_REG, 0x80); spin_unlock_irqrestore(&sscape->lock, flags); /* * We have now enabled the codec chip, and so we should * detect the AD1845 device ... */ err = create_ad1845(card, wss_port[dev], irq[dev], dma[dev], dma2[dev]); if (err < 0) { snd_printk(KERN_ERR "sscape: No AD1845 device at 0x%lx, IRQ %d\n", wss_port[dev], irq[dev]); goto _release_dma; } strcpy(card->driver, "SoundScape"); strcpy(card->shortname, name); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, IRQ %d, DMA1 %d, DMA2 %d\n", name, sscape->chip->port, sscape->chip->irq, sscape->chip->dma1, sscape->chip->dma2); #define MIDI_DEVNUM 0 if (sscape->type != SSCAPE_VIVO) { err = sscape_upload_bootblock(card); if (err >= 0) err = sscape_upload_microcode(card, err); if (err == 0) { err = create_mpu401(card, MIDI_DEVNUM, port[dev], mpu_irq[dev]); if (err < 0) { snd_printk(KERN_ERR "sscape: Failed to create " "MPU-401 device at 0x%lx\n", port[dev]); goto _release_dma; } /* * Initialize mixer */ spin_lock_irqsave(&sscape->lock, flags); sscape->midi_vol = 0; host_write_ctrl_unsafe(sscape->io_base, CMD_SET_MIDI_VOL, 100); host_write_ctrl_unsafe(sscape->io_base, sscape->midi_vol, 100); host_write_ctrl_unsafe(sscape->io_base, CMD_XXX_MIDI_VOL, 100); host_write_ctrl_unsafe(sscape->io_base, sscape->midi_vol, 100); host_write_ctrl_unsafe(sscape->io_base, CMD_SET_EXTMIDI, 100); host_write_ctrl_unsafe(sscape->io_base, 0, 100); 
host_write_ctrl_unsafe(sscape->io_base, CMD_ACK, 100); set_midi_mode_unsafe(sscape->io_base); spin_unlock_irqrestore(&sscape->lock, flags); } } /* * Now that we have successfully created this sound card, * it is safe to store the pointer. * NOTE: we only register the sound card's "destructor" * function now that our "constructor" has completed. */ card->private_free = soundscape_free; return 0; _release_dma: free_dma(dma[dev]); _release_region: release_and_free_resource(wss_res); release_and_free_resource(io_res); return err; } static int __devinit snd_sscape_match(struct device *pdev, unsigned int i) { /* * Make sure we were given ALL of the other parameters. */ if (port[i] == SNDRV_AUTO_PORT) return 0; if (irq[i] == SNDRV_AUTO_IRQ || mpu_irq[i] == SNDRV_AUTO_IRQ || dma[i] == SNDRV_AUTO_DMA) { printk(KERN_INFO "sscape: insufficient parameters, " "need IO, IRQ, MPU-IRQ and DMA\n"); return 0; } return 1; } static int __devinit snd_sscape_probe(struct device *pdev, unsigned int dev) { struct snd_card *card; struct soundscape *sscape; int ret; ret = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct soundscape), &card); if (ret < 0) return ret; sscape = get_card_soundscape(card); sscape->type = SSCAPE; dma[dev] &= 0x03; snd_card_set_dev(card, pdev); ret = create_sscape(dev, card); if (ret < 0) goto _release_card; ret = snd_card_register(card); if (ret < 0) { snd_printk(KERN_ERR "sscape: Failed to register sound card\n"); goto _release_card; } dev_set_drvdata(pdev, card); return 0; _release_card: snd_card_free(card); return ret; } static int __devexit snd_sscape_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); dev_set_drvdata(devptr, NULL); return 0; } #define DEV_NAME "sscape" static struct isa_driver snd_sscape_driver = { .match = snd_sscape_match, .probe = snd_sscape_probe, .remove = __devexit_p(snd_sscape_remove), /* FIXME: suspend/resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static inline int 
__devinit get_next_autoindex(int i) { while (i < SNDRV_CARDS && port[i] != SNDRV_AUTO_PORT) ++i; return i; } static int __devinit sscape_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int idx = 0; struct pnp_dev *dev; struct snd_card *card; struct soundscape *sscape; int ret; /* * Allow this function to fail *quietly* if all the ISA PnP * devices were configured using module parameters instead. */ idx = get_next_autoindex(idx); if (idx >= SNDRV_CARDS) return -ENOSPC; /* * Check that we still have room for another sound card ... */ dev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (!dev) return -ENODEV; if (!pnp_is_active(dev)) { if (pnp_activate_dev(dev) < 0) { snd_printk(KERN_INFO "sscape: device is inactive\n"); return -EBUSY; } } /* * Create a new ALSA sound card entry, in anticipation * of detecting our hardware ... */ ret = snd_card_create(index[idx], id[idx], THIS_MODULE, sizeof(struct soundscape), &card); if (ret < 0) return ret; sscape = get_card_soundscape(card); /* * Identify card model ... */ if (!strncmp("ENS4081", pid->id, 7)) sscape->type = SSCAPE_VIVO; else sscape->type = SSCAPE_PNP; /* * Read the correct parameters off the ISA PnP bus ... 
*/ port[idx] = pnp_port_start(dev, 0); irq[idx] = pnp_irq(dev, 0); mpu_irq[idx] = pnp_irq(dev, 1); dma[idx] = pnp_dma(dev, 0) & 0x03; if (sscape->type == SSCAPE_PNP) { dma2[idx] = dma[idx]; wss_port[idx] = CODEC_IO(port[idx]); } else { wss_port[idx] = pnp_port_start(dev, 1); dma2[idx] = pnp_dma(dev, 1); } snd_card_set_dev(card, &pcard->card->dev); ret = create_sscape(idx, card); if (ret < 0) goto _release_card; ret = snd_card_register(card); if (ret < 0) { snd_printk(KERN_ERR "sscape: Failed to register sound card\n"); goto _release_card; } pnp_set_card_drvdata(pcard, card); ++idx; return 0; _release_card: snd_card_free(card); return ret; } static void __devexit sscape_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static struct pnp_card_driver sscape_pnpc_driver = { .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .name = "sscape", .id_table = sscape_pnpids, .probe = sscape_pnp_detect, .remove = __devexit_p(sscape_pnp_remove), }; #endif /* CONFIG_PNP */ static int __init sscape_init(void) { int err; err = isa_register_driver(&snd_sscape_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&sscape_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit sscape_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&sscape_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_sscape_driver); } module_init(sscape_init); module_exit(sscape_exit);
gpl-2.0
mkasick/android_kernel_samsung_d2spr
drivers/mtd/tests/mtd_oobtest.c
4031
17742
/* * Copyright (C) 2006-2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Test OOB read and write on MTD device. * * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> */ #include <asm/div64.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/err.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include <linux/sched.h> #define PRINT_PREF KERN_INFO "mtd_oobtest: " static int dev; module_param(dev, int, S_IRUGO); MODULE_PARM_DESC(dev, "MTD device number to use"); static struct mtd_info *mtd; static unsigned char *readbuf; static unsigned char *writebuf; static unsigned char *bbt; static int ebcnt; static int pgcnt; static int errcnt; static int use_offset; static int use_len; static int use_len_max; static int vary_offset; static unsigned long next = 1; static inline unsigned int simple_rand(void) { next = next * 1103515245 + 12345; return (unsigned int)((next / 65536) % 32768); } static inline void simple_srand(unsigned long seed) { next = seed; } static void set_random_data(unsigned char *buf, size_t len) { size_t i; for (i = 0; i < len; ++i) buf[i] = simple_rand(); } static int erase_eraseblock(int ebnum) { int err; struct erase_info ei; loff_t addr = ebnum * mtd->erasesize; memset(&ei, 0, sizeof(struct erase_info)); ei.mtd = mtd; ei.addr = addr; ei.len = mtd->erasesize; err = mtd->erase(mtd, 
&ei); if (err) { printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); return err; } if (ei.state == MTD_ERASE_FAILED) { printk(PRINT_PREF "some erase error occurred at EB %d\n", ebnum); return -EIO; } return 0; } static int erase_whole_device(void) { int err; unsigned int i; printk(PRINT_PREF "erasing whole device\n"); for (i = 0; i < ebcnt; ++i) { if (bbt[i]) continue; err = erase_eraseblock(i); if (err) return err; cond_resched(); } printk(PRINT_PREF "erased %u eraseblocks\n", i); return 0; } static void do_vary_offset(void) { use_len -= 1; if (use_len < 1) { use_offset += 1; if (use_offset >= use_len_max) use_offset = 0; use_len = use_len_max - use_offset; } } static int write_eraseblock(int ebnum) { int i; struct mtd_oob_ops ops; int err = 0; loff_t addr = ebnum * mtd->erasesize; for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { set_random_data(writebuf, use_len); ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = use_len; ops.oobretlen = 0; ops.ooboffs = use_offset; ops.datbuf = NULL; ops.oobbuf = writebuf; err = mtd->write_oob(mtd, addr, &ops); if (err || ops.oobretlen != use_len) { printk(PRINT_PREF "error: writeoob failed at %#llx\n", (long long)addr); printk(PRINT_PREF "error: use_len %d, use_offset %d\n", use_len, use_offset); errcnt += 1; return err ? 
err : -1; } if (vary_offset) do_vary_offset(); } return err; } static int write_whole_device(void) { int err; unsigned int i; printk(PRINT_PREF "writing OOBs of whole device\n"); for (i = 0; i < ebcnt; ++i) { if (bbt[i]) continue; err = write_eraseblock(i); if (err) return err; if (i % 256 == 0) printk(PRINT_PREF "written up to eraseblock %u\n", i); cond_resched(); } printk(PRINT_PREF "written %u eraseblocks\n", i); return 0; } static int verify_eraseblock(int ebnum) { int i; struct mtd_oob_ops ops; int err = 0; loff_t addr = ebnum * mtd->erasesize; for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { set_random_data(writebuf, use_len); ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = use_len; ops.oobretlen = 0; ops.ooboffs = use_offset; ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd->read_oob(mtd, addr, &ops); if (err || ops.oobretlen != use_len) { printk(PRINT_PREF "error: readoob failed at %#llx\n", (long long)addr); errcnt += 1; return err ? err : -1; } if (memcmp(readbuf, writebuf, use_len)) { printk(PRINT_PREF "error: verify failed at %#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too many errors\n"); return -1; } } if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { int k; ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd->read_oob(mtd, addr, &ops); if (err || ops.oobretlen != mtd->ecclayout->oobavail) { printk(PRINT_PREF "error: readoob failed at " "%#llx\n", (long long)addr); errcnt += 1; return err ? 
err : -1; } if (memcmp(readbuf + use_offset, writebuf, use_len)) { printk(PRINT_PREF "error: verify failed at " "%#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too many " "errors\n"); return -1; } } for (k = 0; k < use_offset; ++k) if (readbuf[k] != 0xff) { printk(PRINT_PREF "error: verify 0xff " "failed at %#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too " "many errors\n"); return -1; } } for (k = use_offset + use_len; k < mtd->ecclayout->oobavail; ++k) if (readbuf[k] != 0xff) { printk(PRINT_PREF "error: verify 0xff " "failed at %#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too " "many errors\n"); return -1; } } } if (vary_offset) do_vary_offset(); } return err; } static int verify_eraseblock_in_one_go(int ebnum) { struct mtd_oob_ops ops; int err = 0; loff_t addr = ebnum * mtd->erasesize; size_t len = mtd->ecclayout->oobavail * pgcnt; set_random_data(writebuf, len); ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = len; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd->read_oob(mtd, addr, &ops); if (err || ops.oobretlen != len) { printk(PRINT_PREF "error: readoob failed at %#llx\n", (long long)addr); errcnt += 1; return err ? 
err : -1; } if (memcmp(readbuf, writebuf, len)) { printk(PRINT_PREF "error: verify failed at %#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too many errors\n"); return -1; } } return err; } static int verify_all_eraseblocks(void) { int err; unsigned int i; printk(PRINT_PREF "verifying all eraseblocks\n"); for (i = 0; i < ebcnt; ++i) { if (bbt[i]) continue; err = verify_eraseblock(i); if (err) return err; if (i % 256 == 0) printk(PRINT_PREF "verified up to eraseblock %u\n", i); cond_resched(); } printk(PRINT_PREF "verified %u eraseblocks\n", i); return 0; } static int is_block_bad(int ebnum) { int ret; loff_t addr = ebnum * mtd->erasesize; ret = mtd->block_isbad(mtd, addr); if (ret) printk(PRINT_PREF "block %d is bad\n", ebnum); return ret; } static int scan_for_bad_eraseblocks(void) { int i, bad = 0; bbt = kmalloc(ebcnt, GFP_KERNEL); if (!bbt) { printk(PRINT_PREF "error: cannot allocate memory\n"); return -ENOMEM; } printk(PRINT_PREF "scanning for bad eraseblocks\n"); for (i = 0; i < ebcnt; ++i) { bbt[i] = is_block_bad(i) ? 
1 : 0; if (bbt[i]) bad += 1; cond_resched(); } printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); return 0; } static int __init mtd_oobtest_init(void) { int err = 0; unsigned int i; uint64_t tmp; struct mtd_oob_ops ops; loff_t addr = 0, addr0; printk(KERN_INFO "\n"); printk(KERN_INFO "=================================================\n"); printk(PRINT_PREF "MTD device: %d\n", dev); mtd = get_mtd_device(NULL, dev); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); printk(PRINT_PREF "error: cannot get MTD device\n"); return err; } if (mtd->type != MTD_NANDFLASH) { printk(PRINT_PREF "this test requires NAND flash\n"); goto out; } tmp = mtd->size; do_div(tmp, mtd->erasesize); ebcnt = tmp; pgcnt = mtd->erasesize / mtd->writesize; printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " "page size %u, count of eraseblocks %u, pages per " "eraseblock %u, OOB size %u\n", (unsigned long long)mtd->size, mtd->erasesize, mtd->writesize, ebcnt, pgcnt, mtd->oobsize); err = -ENOMEM; readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); if (!readbuf) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out; } writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); if (!writebuf) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out; } err = scan_for_bad_eraseblocks(); if (err) goto out; use_offset = 0; use_len = mtd->ecclayout->oobavail; use_len_max = mtd->ecclayout->oobavail; vary_offset = 0; /* First test: write all OOB, read it back and verify */ printk(PRINT_PREF "test 1 of 5\n"); err = erase_whole_device(); if (err) goto out; simple_srand(1); err = write_whole_device(); if (err) goto out; simple_srand(1); err = verify_all_eraseblocks(); if (err) goto out; /* * Second test: write all OOB, a block at a time, read it back and * verify. 
*/ printk(PRINT_PREF "test 2 of 5\n"); err = erase_whole_device(); if (err) goto out; simple_srand(3); err = write_whole_device(); if (err) goto out; /* Check all eraseblocks */ simple_srand(3); printk(PRINT_PREF "verifying all eraseblocks\n"); for (i = 0; i < ebcnt; ++i) { if (bbt[i]) continue; err = verify_eraseblock_in_one_go(i); if (err) goto out; if (i % 256 == 0) printk(PRINT_PREF "verified up to eraseblock %u\n", i); cond_resched(); } printk(PRINT_PREF "verified %u eraseblocks\n", i); /* * Third test: write OOB at varying offsets and lengths, read it back * and verify. */ printk(PRINT_PREF "test 3 of 5\n"); err = erase_whole_device(); if (err) goto out; /* Write all eraseblocks */ use_offset = 0; use_len = mtd->ecclayout->oobavail; use_len_max = mtd->ecclayout->oobavail; vary_offset = 1; simple_srand(5); err = write_whole_device(); if (err) goto out; /* Check all eraseblocks */ use_offset = 0; use_len = mtd->ecclayout->oobavail; use_len_max = mtd->ecclayout->oobavail; vary_offset = 1; simple_srand(5); err = verify_all_eraseblocks(); if (err) goto out; use_offset = 0; use_len = mtd->ecclayout->oobavail; use_len_max = mtd->ecclayout->oobavail; vary_offset = 0; /* Fourth test: try to write off end of device */ printk(PRINT_PREF "test 4 of 5\n"); err = erase_whole_device(); if (err) goto out; addr0 = 0; for (i = 0; i < ebcnt && bbt[i]; ++i) addr0 += mtd->erasesize; /* Attempt to write off end of OOB */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = 1; ops.oobretlen = 0; ops.ooboffs = mtd->ecclayout->oobavail; ops.datbuf = NULL; ops.oobbuf = writebuf; printk(PRINT_PREF "attempting to start write past end of OOB\n"); printk(PRINT_PREF "an error is expected...\n"); err = mtd->write_oob(mtd, addr0, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: can write past end of OOB\n"); errcnt += 1; } /* Attempt to read off end of OOB */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; 
ops.ooblen = 1; ops.oobretlen = 0; ops.ooboffs = mtd->ecclayout->oobavail; ops.datbuf = NULL; ops.oobbuf = readbuf; printk(PRINT_PREF "attempting to start read past end of OOB\n"); printk(PRINT_PREF "an error is expected...\n"); err = mtd->read_oob(mtd, addr0, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: can read past end of OOB\n"); errcnt += 1; } if (bbt[ebcnt - 1]) printk(PRINT_PREF "skipping end of device tests because last " "block is bad\n"); else { /* Attempt to write off end of device */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail + 1; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = writebuf; printk(PRINT_PREF "attempting to write past end of device\n"); printk(PRINT_PREF "an error is expected...\n"); err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: wrote past end of device\n"); errcnt += 1; } /* Attempt to read off end of device */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail + 1; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = readbuf; printk(PRINT_PREF "attempting to read past end of device\n"); printk(PRINT_PREF "an error is expected...\n"); err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: read past end of device\n"); errcnt += 1; } err = erase_eraseblock(ebcnt - 1); if (err) goto out; /* Attempt to write off end of device */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail; ops.oobretlen = 0; ops.ooboffs = 1; ops.datbuf = NULL; ops.oobbuf = writebuf; printk(PRINT_PREF "attempting to write past end of device\n"); printk(PRINT_PREF "an error is expected...\n"); err = 
mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: wrote past end of device\n"); errcnt += 1; } /* Attempt to read off end of device */ ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail; ops.oobretlen = 0; ops.ooboffs = 1; ops.datbuf = NULL; ops.oobbuf = readbuf; printk(PRINT_PREF "attempting to read past end of device\n"); printk(PRINT_PREF "an error is expected...\n"); err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { printk(PRINT_PREF "error occurred as expected\n"); err = 0; } else { printk(PRINT_PREF "error: read past end of device\n"); errcnt += 1; } } /* Fifth test: write / read across block boundaries */ printk(PRINT_PREF "test 5 of 5\n"); /* Erase all eraseblocks */ err = erase_whole_device(); if (err) goto out; /* Write all eraseblocks */ simple_srand(11); printk(PRINT_PREF "writing OOBs of whole device\n"); for (i = 0; i < ebcnt - 1; ++i) { int cnt = 2; int pg; size_t sz = mtd->ecclayout->oobavail; if (bbt[i] || bbt[i + 1]) continue; addr = (i + 1) * mtd->erasesize - mtd->writesize; for (pg = 0; pg < cnt; ++pg) { set_random_data(writebuf, sz); ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = sz; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = writebuf; err = mtd->write_oob(mtd, addr, &ops); if (err) goto out; if (i % 256 == 0) printk(PRINT_PREF "written up to eraseblock " "%u\n", i); cond_resched(); addr += mtd->writesize; } } printk(PRINT_PREF "written %u eraseblocks\n", i); /* Check all eraseblocks */ simple_srand(11); printk(PRINT_PREF "verifying all eraseblocks\n"); for (i = 0; i < ebcnt - 1; ++i) { if (bbt[i] || bbt[i + 1]) continue; set_random_data(writebuf, mtd->ecclayout->oobavail * 2); addr = (i + 1) * mtd->erasesize - mtd->writesize; ops.mode = MTD_OOB_AUTO; ops.len = 0; ops.retlen = 0; ops.ooblen = mtd->ecclayout->oobavail * 2; 
ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd->read_oob(mtd, addr, &ops); if (err) goto out; if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) { printk(PRINT_PREF "error: verify failed at %#llx\n", (long long)addr); errcnt += 1; if (errcnt > 1000) { printk(PRINT_PREF "error: too many errors\n"); goto out; } } if (i % 256 == 0) printk(PRINT_PREF "verified up to eraseblock %u\n", i); cond_resched(); } printk(PRINT_PREF "verified %u eraseblocks\n", i); printk(PRINT_PREF "finished with %d errors\n", errcnt); out: kfree(bbt); kfree(writebuf); kfree(readbuf); put_mtd_device(mtd); if (err) printk(PRINT_PREF "error %d occurred\n", err); printk(KERN_INFO "=================================================\n"); return err; } module_init(mtd_oobtest_init); static void __exit mtd_oobtest_exit(void) { return; } module_exit(mtd_oobtest_exit); MODULE_DESCRIPTION("Out-of-band test module"); MODULE_AUTHOR("Adrian Hunter"); MODULE_LICENSE("GPL");
gpl-2.0
CyanideL/android_kernel_asus_grouper
arch/arm/kernel/crunch.c
4543
2112
/* * arch/arm/kernel/crunch.c * Cirrus MaverickCrunch context switching and handling * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/io.h> #include <mach/ep93xx-regs.h> #include <asm/thread_notify.h> struct crunch_state *crunch_owner; void crunch_task_release(struct thread_info *thread) { local_irq_disable(); if (crunch_owner == &thread->crunchstate) crunch_owner = NULL; local_irq_enable(); } static int crunch_enabled(u32 devcfg) { return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA); } static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = (struct thread_info *)t; struct crunch_state *crunch_state; u32 devcfg; crunch_state = &thread->crunchstate; switch (cmd) { case THREAD_NOTIFY_FLUSH: memset(crunch_state, 0, sizeof(*crunch_state)); /* * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ case THREAD_NOTIFY_EXIT: crunch_task_release(thread); break; case THREAD_NOTIFY_SWITCH: devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG); if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { /* * We don't use ep93xx_syscon_swlocked_write() here * because we are on the context switch path and * preemption is already disabled. 
*/ devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG); } break; } return NOTIFY_DONE; } static struct notifier_block crunch_notifier_block = { .notifier_call = crunch_do, }; static int __init crunch_init(void) { thread_register_notifier(&crunch_notifier_block); elf_hwcap |= HWCAP_CRUNCH; return 0; } late_initcall(crunch_init);
gpl-2.0
charles1018/kernel_msm
drivers/net/ethernet/micrel/ks8842.c
4799
33329
/* * ks8842.c timberdale KS8842 ethernet driver * Copyright (c) 2009 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: * The Micrel KS8842 behind the timberdale FPGA * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/ks8842.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #define DRV_NAME "ks8842" /* Timberdale specific Registers */ #define REG_TIMB_RST 0x1c #define REG_TIMB_FIFO 0x20 #define REG_TIMB_ISR 0x24 #define REG_TIMB_IER 0x28 #define REG_TIMB_IAR 0x2C #define REQ_TIMB_DMA_RESUME 0x30 /* KS8842 registers */ #define REG_SELECT_BANK 0x0e /* bank 0 registers */ #define REG_QRFCR 0x04 /* bank 2 registers */ #define REG_MARL 0x00 #define REG_MARM 0x02 #define REG_MARH 0x04 /* bank 3 registers */ #define REG_GRR 0x06 /* bank 16 registers */ #define REG_TXCR 0x00 #define REG_TXSR 0x02 #define REG_RXCR 0x04 #define REG_TXMIR 0x08 #define REG_RXMIR 0x0A /* bank 17 registers */ #define REG_TXQCR 0x00 #define REG_RXQCR 0x02 #define REG_TXFDPR 0x04 #define REG_RXFDPR 0x06 #define REG_QMU_DATA_LO 0x08 #define REG_QMU_DATA_HI 0x0A /* bank 18 registers 
*/ #define REG_IER 0x00 #define IRQ_LINK_CHANGE 0x8000 #define IRQ_TX 0x4000 #define IRQ_RX 0x2000 #define IRQ_RX_OVERRUN 0x0800 #define IRQ_TX_STOPPED 0x0200 #define IRQ_RX_STOPPED 0x0100 #define IRQ_RX_ERROR 0x0080 #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) /* When running via timberdale in DMA mode, the RX interrupt should be enabled in the KS8842, but not in the FPGA IP, since the IP handles RX DMA internally. TX interrupts are not needed it is handled by the FPGA the driver is notified via DMA callbacks. */ #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) #define REG_ISR 0x02 #define REG_RXSR 0x04 #define RXSR_VALID 0x8000 #define RXSR_BROADCAST 0x80 #define RXSR_MULTICAST 0x40 #define RXSR_UNICAST 0x20 #define RXSR_FRAMETYPE 0x08 #define RXSR_TOO_LONG 0x04 #define RXSR_RUNT 0x02 #define RXSR_CRC_ERROR 0x01 #define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR) /* bank 32 registers */ #define REG_SW_ID_AND_ENABLE 0x00 #define REG_SGCR1 0x02 #define REG_SGCR2 0x04 #define REG_SGCR3 0x06 /* bank 39 registers */ #define REG_MACAR1 0x00 #define REG_MACAR2 0x02 #define REG_MACAR3 0x04 /* bank 45 registers */ #define REG_P1MBCR 0x00 #define REG_P1MBSR 0x02 /* bank 46 registers */ #define REG_P2MBCR 0x00 #define REG_P2MBSR 0x02 /* bank 48 registers */ #define REG_P1CR2 0x02 /* bank 49 registers */ #define REG_P1CR4 0x02 #define REG_P1SR 0x04 /* flags passed by platform_device for configuration */ #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ #define DMA_BUFFER_SIZE 2048 struct ks8842_tx_dma_ctl { struct dma_chan *chan; struct dma_async_tx_descriptor *adesc; void *buf; struct scatterlist sg; int channel; }; struct ks8842_rx_dma_ctl { struct dma_chan *chan; struct dma_async_tx_descriptor *adesc; 
struct sk_buff *skb; struct scatterlist sg; struct tasklet_struct tasklet; int channel; }; #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ ((adapter)->dma_rx.channel != -1)) struct ks8842_adapter { void __iomem *hw_addr; int irq; unsigned long conf_flags; /* copy of platform_device config */ struct tasklet_struct tasklet; spinlock_t lock; /* spinlock to be interrupt safe */ struct work_struct timeout_work; struct net_device *netdev; struct device *dev; struct ks8842_tx_dma_ctl dma_tx; struct ks8842_rx_dma_ctl dma_rx; }; static void ks8842_dma_rx_cb(void *data); static void ks8842_dma_tx_cb(void *data); static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) { iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); } static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) { iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); } static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank, u8 value, int offset) { ks8842_select_bank(adapter, bank); iowrite8(value, adapter->hw_addr + offset); } static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank, u16 value, int offset) { ks8842_select_bank(adapter, bank); iowrite16(value, adapter->hw_addr + offset); } static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank, u16 bits, int offset) { u16 reg; ks8842_select_bank(adapter, bank); reg = ioread16(adapter->hw_addr + offset); reg |= bits; iowrite16(reg, adapter->hw_addr + offset); } static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank, u16 bits, int offset) { u16 reg; ks8842_select_bank(adapter, bank); reg = ioread16(adapter->hw_addr + offset); reg &= ~bits; iowrite16(reg, adapter->hw_addr + offset); } static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank, u32 value, int offset) { ks8842_select_bank(adapter, bank); iowrite32(value, adapter->hw_addr + offset); } static inline u8 ks8842_read8(struct ks8842_adapter 
*adapter, u16 bank, int offset) { ks8842_select_bank(adapter, bank); return ioread8(adapter->hw_addr + offset); } static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank, int offset) { ks8842_select_bank(adapter, bank); return ioread16(adapter->hw_addr + offset); } static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, int offset) { ks8842_select_bank(adapter, bank); return ioread32(adapter->hw_addr + offset); } static void ks8842_reset(struct ks8842_adapter *adapter) { if (adapter->conf_flags & MICREL_KS884X) { ks8842_write16(adapter, 3, 1, REG_GRR); msleep(10); iowrite16(0, adapter->hw_addr + REG_GRR); } else { /* The KS8842 goes haywire when doing softare reset * a work around in the timberdale IP is implemented to * do a hardware reset instead ks8842_write16(adapter, 3, 1, REG_GRR); msleep(10); iowrite16(0, adapter->hw_addr + REG_GRR); */ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); msleep(20); } } static void ks8842_update_link_status(struct net_device *netdev, struct ks8842_adapter *adapter) { /* check the status of the link */ if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) { netif_carrier_on(netdev); netif_wake_queue(netdev); } else { netif_stop_queue(netdev); netif_carrier_off(netdev); } } static void ks8842_enable_tx(struct ks8842_adapter *adapter) { ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR); } static void ks8842_disable_tx(struct ks8842_adapter *adapter) { ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR); } static void ks8842_enable_rx(struct ks8842_adapter *adapter) { ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR); } static void ks8842_disable_rx(struct ks8842_adapter *adapter) { ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR); } static void ks8842_reset_hw(struct ks8842_adapter *adapter) { /* reset the HW */ ks8842_reset(adapter); /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */ ks8842_write16(adapter, 16, 0x000E, REG_TXCR); /* enable the receiver, uni + multi + broadcast + flow ctrl 
+ crc strip */ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400, REG_RXCR); /* TX frame pointer autoincrement */ ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR); /* RX frame pointer autoincrement */ ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR); /* RX 2 kb high watermark */ ks8842_write16(adapter, 0, 0x1000, REG_QRFCR); /* aggressive back off in half duplex */ ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1); /* enable no excessive collison drop */ ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2); /* Enable port 1 force flow control / back pressure / transmit / recv */ ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2); /* restart port auto-negotiation */ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); /* Enable the transmitter */ ks8842_enable_tx(adapter); /* Enable the receiver */ ks8842_enable_rx(adapter); /* clear all interrupts */ ks8842_write16(adapter, 18, 0xffff, REG_ISR); /* enable interrupts */ if (KS8842_USE_DMA(adapter)) { /* When running in DMA Mode the RX interrupt is not enabled in timberdale because RX data is received by DMA callbacks it must still be enabled in the KS8842 because it indicates to timberdale when there is RX data for it's DMA FIFOs */ iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); } else { if (!(adapter->conf_flags & MICREL_KS884X)) iowrite16(ENABLED_IRQS, adapter->hw_addr + REG_TIMB_IER); ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); } /* enable the switch */ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); } static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) { int i; u16 mac; for (i = 0; i < ETH_ALEN; i++) dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); if (adapter->conf_flags & MICREL_KS884X) { /* the sequence of saving mac addr between MAC and Switch is different. 
*/ mac = ks8842_read16(adapter, 2, REG_MARL); ks8842_write16(adapter, 39, mac, REG_MACAR3); mac = ks8842_read16(adapter, 2, REG_MARM); ks8842_write16(adapter, 39, mac, REG_MACAR2); mac = ks8842_read16(adapter, 2, REG_MARH); ks8842_write16(adapter, 39, mac, REG_MACAR1); } else { /* make sure the switch port uses the same MAC as the QMU */ mac = ks8842_read16(adapter, 2, REG_MARL); ks8842_write16(adapter, 39, mac, REG_MACAR1); mac = ks8842_read16(adapter, 2, REG_MARM); ks8842_write16(adapter, 39, mac, REG_MACAR2); mac = ks8842_read16(adapter, 2, REG_MARH); ks8842_write16(adapter, 39, mac, REG_MACAR3); } } static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) { unsigned long flags; unsigned i; spin_lock_irqsave(&adapter->lock, flags); for (i = 0; i < ETH_ALEN; i++) { ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); if (!(adapter->conf_flags & MICREL_KS884X)) ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], REG_MACAR1 + i); } if (adapter->conf_flags & MICREL_KS884X) { /* the sequence of saving mac addr between MAC and Switch is different. 
*/ u16 mac; mac = ks8842_read16(adapter, 2, REG_MARL); ks8842_write16(adapter, 39, mac, REG_MACAR3); mac = ks8842_read16(adapter, 2, REG_MARM); ks8842_write16(adapter, 39, mac, REG_MACAR2); mac = ks8842_read16(adapter, 2, REG_MARH); ks8842_write16(adapter, 39, mac, REG_MACAR1); } spin_unlock_irqrestore(&adapter->lock, flags); } static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) { return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; } static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; u8 *buf = ctl->buf; if (ctl->adesc) { netdev_dbg(netdev, "%s: TX ongoing\n", __func__); /* transfer ongoing */ return NETDEV_TX_BUSY; } sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); /* copy data to the TX buffer */ /* the control word, enable IRQ, port 1 and the length */ *buf++ = 0x00; *buf++ = 0x01; /* Port 1 */ *buf++ = skb->len & 0xff; *buf++ = (skb->len >> 8) & 0xff; skb_copy_from_linear_data(skb, buf, skb->len); dma_sync_single_range_for_device(adapter->dev, sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), DMA_TO_DEVICE); /* make sure the length is a multiple of 4 */ if (sg_dma_len(&ctl->sg) % 4) sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!ctl->adesc) return NETDEV_TX_BUSY; ctl->adesc->callback_param = netdev; ctl->adesc->callback = ks8842_dma_tx_cb; ctl->adesc->tx_submit(ctl->adesc); netdev->stats.tx_bytes += skb->len; dev_kfree_skb(skb); return NETDEV_TX_OK; } static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); int len = skb->len; netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", __func__, skb->len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb)); /* check FIFO buffer 
space, we need space for CRC and command bits */ if (ks8842_tx_fifo_space(adapter) < len + 8) return NETDEV_TX_BUSY; if (adapter->conf_flags & KS884X_16BIT) { u16 *ptr16 = (u16 *)skb->data; ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); netdev->stats.tx_bytes += len; /* copy buffer */ while (len > 0) { iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); len -= sizeof(u32); } } else { u32 *ptr = (u32 *)skb->data; u32 ctrl; /* the control word, enable IRQ, port 1 and the length */ ctrl = 0x8000 | 0x100 | (len << 16); ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); netdev->stats.tx_bytes += len; /* copy buffer */ while (len > 0) { iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); len -= sizeof(u32); ptr++; } } /* enqueue packet */ ks8842_write16(adapter, 17, 1, REG_TXQCR); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) { netdev_dbg(netdev, "RX error, status: %x\n", status); netdev->stats.rx_errors++; if (status & RXSR_TOO_LONG) netdev->stats.rx_length_errors++; if (status & RXSR_CRC_ERROR) netdev->stats.rx_crc_errors++; if (status & RXSR_RUNT) netdev->stats.rx_frame_errors++; } static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, int len) { netdev_dbg(netdev, "RX packet, len: %d\n", len); netdev->stats.rx_packets++; netdev->stats.rx_bytes += len; if (status & RXSR_MULTICAST) netdev->stats.multicast++; } static int __ks8842_start_new_rx_dma(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; struct scatterlist *sg = &ctl->sg; int err; ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); if (ctl->skb) { sg_init_table(sg, 1); sg_dma_address(sg) = dma_map_single(adapter->dev, ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); err = 
dma_mapping_error(adapter->dev, sg_dma_address(sg)); if (unlikely(err)) { sg_dma_address(sg) = 0; goto out; } sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!ctl->adesc) goto out; ctl->adesc->callback_param = netdev; ctl->adesc->callback = ks8842_dma_rx_cb; ctl->adesc->tx_submit(ctl->adesc); } else { err = -ENOMEM; sg_dma_address(sg) = 0; goto out; } return err; out: if (sg_dma_address(sg)) dma_unmap_single(adapter->dev, sg_dma_address(sg), DMA_BUFFER_SIZE, DMA_FROM_DEVICE); sg_dma_address(sg) = 0; if (ctl->skb) dev_kfree_skb(ctl->skb); ctl->skb = NULL; printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); return err; } static void ks8842_rx_frame_dma_tasklet(unsigned long arg) { struct net_device *netdev = (struct net_device *)arg; struct ks8842_adapter *adapter = netdev_priv(netdev); struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; struct sk_buff *skb = ctl->skb; dma_addr_t addr = sg_dma_address(&ctl->sg); u32 status; ctl->adesc = NULL; /* kick next transfer going */ __ks8842_start_new_rx_dma(netdev); /* now handle the data we got */ dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); status = *((u32 *)skb->data); netdev_dbg(netdev, "%s - rx_data: status: %x\n", __func__, status & 0xffff); /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { int len = (status >> 16) & 0x7ff; ks8842_update_rx_counters(netdev, status, len); /* reserve 4 bytes which is the status word */ skb_reserve(skb, 4); skb_put(skb, len); skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); } else { ks8842_update_rx_err_counters(netdev, status); dev_kfree_skb(skb); } } static void ks8842_rx_frame(struct net_device *netdev, struct ks8842_adapter *adapter) { u32 status; int len; if (adapter->conf_flags & KS884X_16BIT) { status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); 
netdev_dbg(netdev, "%s - rx_data: status: %x\n", __func__, status); } else { status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); len = (status >> 16) & 0x7ff; status &= 0xffff; netdev_dbg(netdev, "%s - rx_data: status: %x\n", __func__, status); } /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3); if (skb) { ks8842_update_rx_counters(netdev, status, len); if (adapter->conf_flags & KS884X_16BIT) { u16 *data16 = (u16 *)skb_put(skb, len); ks8842_select_bank(adapter, 17); while (len > 0) { *data16++ = ioread16(adapter->hw_addr + REG_QMU_DATA_LO); *data16++ = ioread16(adapter->hw_addr + REG_QMU_DATA_HI); len -= sizeof(u32); } } else { u32 *data = (u32 *)skb_put(skb, len); ks8842_select_bank(adapter, 17); while (len > 0) { *data++ = ioread32(adapter->hw_addr + REG_QMU_DATA_LO); len -= sizeof(u32); } } skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); } else netdev->stats.rx_dropped++; } else ks8842_update_rx_err_counters(netdev, status); /* set high watermark to 3K */ ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); /* release the frame */ ks8842_write16(adapter, 17, 0x01, REG_RXQCR); /* set high watermark to 2K */ ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); } void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) { u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); while (rx_data) { ks8842_rx_frame(netdev, adapter); rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; } } void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) { u16 sr = ks8842_read16(adapter, 16, REG_TXSR); netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); netdev->stats.tx_packets++; if (netif_queue_stopped(netdev)) netif_wake_queue(netdev); } void ks8842_handle_rx_overrun(struct net_device *netdev, struct ks8842_adapter *adapter) { netdev_dbg(netdev, 
"%s: entry\n", __func__); netdev->stats.rx_errors++; netdev->stats.rx_fifo_errors++; } void ks8842_tasklet(unsigned long arg) { struct net_device *netdev = (struct net_device *)arg; struct ks8842_adapter *adapter = netdev_priv(netdev); u16 isr; unsigned long flags; u16 entry_bank; /* read current bank to be able to set it back */ spin_lock_irqsave(&adapter->lock, flags); entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); spin_unlock_irqrestore(&adapter->lock, flags); isr = ks8842_read16(adapter, 18, REG_ISR); netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); /* when running in DMA mode, do not ack RX interrupts, it is handled internally by timberdale, otherwise it's DMA FIFO:s would stop */ if (KS8842_USE_DMA(adapter)) isr &= ~IRQ_RX; /* Ack */ ks8842_write16(adapter, 18, isr, REG_ISR); if (!(adapter->conf_flags & MICREL_KS884X)) /* Ack in the timberdale IP as well */ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); if (!netif_running(netdev)) return; if (isr & IRQ_LINK_CHANGE) ks8842_update_link_status(netdev, adapter); /* should not get IRQ_RX when running DMA mode */ if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) ks8842_handle_rx(netdev, adapter); /* should only happen when in PIO mode */ if (isr & IRQ_TX) ks8842_handle_tx(netdev, adapter); if (isr & IRQ_RX_OVERRUN) ks8842_handle_rx_overrun(netdev, adapter); if (isr & IRQ_TX_STOPPED) { ks8842_disable_tx(adapter); ks8842_enable_tx(adapter); } if (isr & IRQ_RX_STOPPED) { ks8842_disable_rx(adapter); ks8842_enable_rx(adapter); } /* re-enable interrupts, put back the bank selection register */ spin_lock_irqsave(&adapter->lock, flags); if (KS8842_USE_DMA(adapter)) ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); else ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); /* Make sure timberdale continues DMA operations, they are stopped while we are handling the ks8842 because we might change bank */ if 
(KS8842_USE_DMA(adapter)) ks8842_resume_dma(adapter); spin_unlock_irqrestore(&adapter->lock, flags); } static irqreturn_t ks8842_irq(int irq, void *devid) { struct net_device *netdev = devid; struct ks8842_adapter *adapter = netdev_priv(netdev); u16 isr; u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); irqreturn_t ret = IRQ_NONE; isr = ks8842_read16(adapter, 18, REG_ISR); netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); if (isr) { if (KS8842_USE_DMA(adapter)) /* disable all but RX IRQ, since the FPGA relies on it*/ ks8842_write16(adapter, 18, IRQ_RX, REG_IER); else /* disable IRQ */ ks8842_write16(adapter, 18, 0x00, REG_IER); /* schedule tasklet */ tasklet_schedule(&adapter->tasklet); ret = IRQ_HANDLED; } iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); /* After an interrupt, tell timberdale to continue DMA operations. DMA is disabled while we are handling the ks8842 because we might change bank */ ks8842_resume_dma(adapter); return ret; } static void ks8842_dma_rx_cb(void *data) { struct net_device *netdev = data; struct ks8842_adapter *adapter = netdev_priv(netdev); netdev_dbg(netdev, "RX DMA finished\n"); /* schedule tasklet */ if (adapter->dma_rx.adesc) tasklet_schedule(&adapter->dma_rx.tasklet); } static void ks8842_dma_tx_cb(void *data) { struct net_device *netdev = data; struct ks8842_adapter *adapter = netdev_priv(netdev); struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; netdev_dbg(netdev, "TX DMA finished\n"); if (!ctl->adesc) return; netdev->stats.tx_packets++; ctl->adesc = NULL; if (netif_queue_stopped(netdev)) netif_wake_queue(netdev); } static void ks8842_stop_dma(struct ks8842_adapter *adapter) { struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; tx_ctl->adesc = NULL; if (tx_ctl->chan) tx_ctl->chan->device->device_control(tx_ctl->chan, DMA_TERMINATE_ALL, 0); rx_ctl->adesc = NULL; if (rx_ctl->chan) rx_ctl->chan->device->device_control(rx_ctl->chan, DMA_TERMINATE_ALL, 
0); if (sg_dma_address(&rx_ctl->sg)) dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), DMA_BUFFER_SIZE, DMA_FROM_DEVICE); sg_dma_address(&rx_ctl->sg) = 0; dev_kfree_skb(rx_ctl->skb); rx_ctl->skb = NULL; } static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) { struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; ks8842_stop_dma(adapter); if (tx_ctl->chan) dma_release_channel(tx_ctl->chan); tx_ctl->chan = NULL; if (rx_ctl->chan) dma_release_channel(rx_ctl->chan); rx_ctl->chan = NULL; tasklet_kill(&rx_ctl->tasklet); if (sg_dma_address(&tx_ctl->sg)) dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), DMA_BUFFER_SIZE, DMA_TO_DEVICE); sg_dma_address(&tx_ctl->sg) = 0; kfree(tx_ctl->buf); tx_ctl->buf = NULL; } static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) { return chan->chan_id == (long)filter_param; } static int ks8842_alloc_dma_bufs(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; int err; dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_PRIVATE, mask); sg_init_table(&tx_ctl->sg, 1); tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, (void *)(long)tx_ctl->channel); if (!tx_ctl->chan) { err = -ENODEV; goto err; } /* allocate DMA buffer */ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); if (!tx_ctl->buf) { err = -ENOMEM; goto err; } sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); err = dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg)); if (err) { sg_dma_address(&tx_ctl->sg) = 0; goto err; } rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, (void *)(long)rx_ctl->channel); if (!rx_ctl->chan) { err = -ENODEV; goto err; } tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, (unsigned 
long)netdev); return 0; err: ks8842_dealloc_dma_bufs(adapter); return err; } /* Netdevice operations */ static int ks8842_open(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); int err; netdev_dbg(netdev, "%s - entry\n", __func__); if (KS8842_USE_DMA(adapter)) { err = ks8842_alloc_dma_bufs(netdev); if (!err) { /* start RX dma */ err = __ks8842_start_new_rx_dma(netdev); if (err) ks8842_dealloc_dma_bufs(adapter); } if (err) { printk(KERN_WARNING DRV_NAME ": Failed to initiate DMA, running PIO\n"); ks8842_dealloc_dma_bufs(adapter); adapter->dma_rx.channel = -1; adapter->dma_tx.channel = -1; } } /* reset the HW */ ks8842_reset_hw(adapter); ks8842_write_mac_addr(adapter, netdev->dev_addr); ks8842_update_link_status(netdev, adapter); err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, netdev); if (err) { pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); return err; } return 0; } static int ks8842_close(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); netdev_dbg(netdev, "%s - entry\n", __func__); cancel_work_sync(&adapter->timeout_work); if (KS8842_USE_DMA(adapter)) ks8842_dealloc_dma_bufs(adapter); /* free the irq */ free_irq(adapter->irq, netdev); /* disable the switch */ ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); return 0; } static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { int ret; struct ks8842_adapter *adapter = netdev_priv(netdev); netdev_dbg(netdev, "%s: entry\n", __func__); if (KS8842_USE_DMA(adapter)) { unsigned long flags; ret = ks8842_tx_frame_dma(skb, netdev); /* for now only allow one transfer at the time */ spin_lock_irqsave(&adapter->lock, flags); if (adapter->dma_tx.adesc) netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->lock, flags); return ret; } ret = ks8842_tx_frame(skb, netdev); if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) netif_stop_queue(netdev); return ret; } static int 
ks8842_set_mac(struct net_device *netdev, void *p) { struct ks8842_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; char *mac = (u8 *)addr->sa_data; netdev_dbg(netdev, "%s: entry\n", __func__); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; netdev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(netdev->dev_addr, mac, netdev->addr_len); ks8842_write_mac_addr(adapter, mac); return 0; } static void ks8842_tx_timeout_work(struct work_struct *work) { struct ks8842_adapter *adapter = container_of(work, struct ks8842_adapter, timeout_work); struct net_device *netdev = adapter->netdev; unsigned long flags; netdev_dbg(netdev, "%s: entry\n", __func__); spin_lock_irqsave(&adapter->lock, flags); if (KS8842_USE_DMA(adapter)) ks8842_stop_dma(adapter); /* disable interrupts */ ks8842_write16(adapter, 18, 0, REG_IER); ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->lock, flags); ks8842_reset_hw(adapter); ks8842_write_mac_addr(adapter, netdev->dev_addr); ks8842_update_link_status(netdev, adapter); if (KS8842_USE_DMA(adapter)) __ks8842_start_new_rx_dma(netdev); } static void ks8842_tx_timeout(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); netdev_dbg(netdev, "%s: entry\n", __func__); schedule_work(&adapter->timeout_work); } static const struct net_device_ops ks8842_netdev_ops = { .ndo_open = ks8842_open, .ndo_stop = ks8842_close, .ndo_start_xmit = ks8842_xmit_frame, .ndo_set_mac_address = ks8842_set_mac, .ndo_tx_timeout = ks8842_tx_timeout, .ndo_validate_addr = eth_validate_addr }; static const struct ethtool_ops ks8842_ethtool_ops = { .get_link = ethtool_op_get_link, }; static int __devinit ks8842_probe(struct platform_device *pdev) { int err = -ENOMEM; struct resource *iomem; struct net_device *netdev; struct ks8842_adapter *adapter; struct ks8842_platform_data *pdata = pdev->dev.platform_data; u16 id; unsigned i; iomem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) goto err_mem_region; netdev = alloc_etherdev(sizeof(struct ks8842_adapter)); if (!netdev) goto err_alloc_etherdev; SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); adapter->conf_flags = iomem->flags; if (!adapter->hw_addr) goto err_ioremap; adapter->irq = platform_get_irq(pdev, 0); if (adapter->irq < 0) { err = adapter->irq; goto err_get_irq; } adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; /* DMA is only supported when accessed via timberdale */ if (!(adapter->conf_flags & MICREL_KS884X) && pdata && (pdata->tx_dma_channel != -1) && (pdata->rx_dma_channel != -1)) { adapter->dma_rx.channel = pdata->rx_dma_channel; adapter->dma_tx.channel = pdata->tx_dma_channel; } else { adapter->dma_rx.channel = -1; adapter->dma_tx.channel = -1; } tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); spin_lock_init(&adapter->lock); netdev->netdev_ops = &ks8842_netdev_ops; netdev->ethtool_ops = &ks8842_ethtool_ops; /* Check if a mac address was given */ i = netdev->addr_len; if (pdata) { for (i = 0; i < netdev->addr_len; i++) if (pdata->macaddr[i] != 0) break; if (i < netdev->addr_len) /* an address was passed, use it */ memcpy(netdev->dev_addr, pdata->macaddr, netdev->addr_len); } if (i == netdev->addr_len) { ks8842_read_mac_addr(adapter, netdev->dev_addr); if (!is_valid_ether_addr(netdev->dev_addr)) eth_hw_addr_random(netdev); } id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; platform_set_drvdata(pdev, netdev); pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); return 0; err_register: err_get_irq: iounmap(adapter->hw_addr); 
err_ioremap: free_netdev(netdev); err_alloc_etherdev: release_mem_region(iomem->start, resource_size(iomem)); err_mem_region: return err; } static int __devexit ks8842_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ks8842_adapter *adapter = netdev_priv(netdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); unregister_netdev(netdev); tasklet_kill(&adapter->tasklet); iounmap(adapter->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver ks8842_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ks8842_probe, .remove = ks8842_remove, }; module_platform_driver(ks8842_platform_driver); MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:ks8842");
gpl-2.0
Trinityhaxxor/platform_kernel_msm8x60_stock
drivers/macintosh/windfarm_smu_controls.c
4799
8267
/* * Windfarm PowerMac thermal control. SMU based controls * * (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/completion.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> #include <asm/smu.h> #include "windfarm.h" #define VERSION "0.4" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) do { } while(0) #endif static int smu_supports_new_fans_ops = 1; /* * SMU fans control object */ static LIST_HEAD(smu_fans); struct smu_fan_control { struct list_head link; int fan_type; /* 0 = rpm, 1 = pwm */ u32 reg; /* index in SMU */ s32 value; /* current value */ s32 min, max; /* min/max values */ struct wf_control ctrl; }; #define to_smu_fan(c) container_of(c, struct smu_fan_control, ctrl) static int smu_set_fan(int pwm, u8 id, u16 value) { struct smu_cmd cmd; u8 buffer[16]; DECLARE_COMPLETION_ONSTACK(comp); int rc; /* Fill SMU command structure */ cmd.cmd = SMU_CMD_FAN_COMMAND; /* The SMU has an "old" and a "new" way of setting the fan speed * Unfortunately, I found no reliable way to know which one works * on a given machine model. After some investigations it appears * that MacOS X just tries the new one, and if it fails fallbacks * to the old ones ... Ugh. */ retry: if (smu_supports_new_fans_ops) { buffer[0] = 0x30; buffer[1] = id; *((u16 *)(&buffer[2])) = value; cmd.data_len = 4; } else { if (id > 7) return -EINVAL; /* Fill argument buffer */ memset(buffer, 0, 16); buffer[0] = pwm ? 
0x10 : 0x00; buffer[1] = 0x01 << id; *((u16 *)&buffer[2 + id * 2]) = value; cmd.data_len = 14; } cmd.reply_len = 16; cmd.data_buf = cmd.reply_buf = buffer; cmd.status = 0; cmd.done = smu_done_complete; cmd.misc = &comp; rc = smu_queue_cmd(&cmd); if (rc) return rc; wait_for_completion(&comp); /* Handle fallback (see coment above) */ if (cmd.status != 0 && smu_supports_new_fans_ops) { printk(KERN_WARNING "windfarm: SMU failed new fan command " "falling back to old method\n"); smu_supports_new_fans_ops = 0; goto retry; } return cmd.status; } static void smu_fan_release(struct wf_control *ct) { struct smu_fan_control *fct = to_smu_fan(ct); kfree(fct); } static int smu_fan_set(struct wf_control *ct, s32 value) { struct smu_fan_control *fct = to_smu_fan(ct); if (value < fct->min) value = fct->min; if (value > fct->max) value = fct->max; fct->value = value; return smu_set_fan(fct->fan_type, fct->reg, value); } static int smu_fan_get(struct wf_control *ct, s32 *value) { struct smu_fan_control *fct = to_smu_fan(ct); *value = fct->value; /* todo: read from SMU */ return 0; } static s32 smu_fan_min(struct wf_control *ct) { struct smu_fan_control *fct = to_smu_fan(ct); return fct->min; } static s32 smu_fan_max(struct wf_control *ct) { struct smu_fan_control *fct = to_smu_fan(ct); return fct->max; } static struct wf_control_ops smu_fan_ops = { .set_value = smu_fan_set, .get_value = smu_fan_get, .get_min = smu_fan_min, .get_max = smu_fan_max, .release = smu_fan_release, .owner = THIS_MODULE, }; static struct smu_fan_control *smu_fan_create(struct device_node *node, int pwm_fan) { struct smu_fan_control *fct; const s32 *v; const u32 *reg; const char *l; fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL); if (fct == NULL) return NULL; fct->ctrl.ops = &smu_fan_ops; l = of_get_property(node, "location", NULL); if (l == NULL) goto fail; fct->fan_type = pwm_fan; fct->ctrl.type = pwm_fan ? 
WF_CONTROL_PWM_FAN : WF_CONTROL_RPM_FAN; sysfs_attr_init(&fct->ctrl.attr.attr); /* We use the name & location here the same way we do for SMU sensors, * see the comment in windfarm_smu_sensors.c. The locations are a bit * less consistent here between the iMac and the desktop models, but * that is good enough for our needs for now at least. * * One problem though is that Apple seem to be inconsistent with case * and the kernel doesn't have strcasecmp =P */ fct->ctrl.name = NULL; /* Names used on desktop models */ if (!strcmp(l, "Rear Fan 0") || !strcmp(l, "Rear Fan") || !strcmp(l, "Rear fan 0") || !strcmp(l, "Rear fan") || !strcmp(l, "CPU A EXHAUST")) fct->ctrl.name = "cpu-rear-fan-0"; else if (!strcmp(l, "Rear Fan 1") || !strcmp(l, "Rear fan 1") || !strcmp(l, "CPU B EXHAUST")) fct->ctrl.name = "cpu-rear-fan-1"; else if (!strcmp(l, "Front Fan 0") || !strcmp(l, "Front Fan") || !strcmp(l, "Front fan 0") || !strcmp(l, "Front fan") || !strcmp(l, "CPU A INTAKE")) fct->ctrl.name = "cpu-front-fan-0"; else if (!strcmp(l, "Front Fan 1") || !strcmp(l, "Front fan 1") || !strcmp(l, "CPU B INTAKE")) fct->ctrl.name = "cpu-front-fan-1"; else if (!strcmp(l, "CPU A PUMP")) fct->ctrl.name = "cpu-pump-0"; else if (!strcmp(l, "CPU B PUMP")) fct->ctrl.name = "cpu-pump-1"; else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || !strcmp(l, "EXPANSION SLOTS INTAKE")) fct->ctrl.name = "slots-fan"; else if (!strcmp(l, "Drive Bay") || !strcmp(l, "Drive bay") || !strcmp(l, "DRIVE BAY A INTAKE")) fct->ctrl.name = "drive-bay-fan"; else if (!strcmp(l, "BACKSIDE")) fct->ctrl.name = "backside-fan"; /* Names used on iMac models */ if (!strcmp(l, "System Fan") || !strcmp(l, "System fan")) fct->ctrl.name = "system-fan"; else if (!strcmp(l, "CPU Fan") || !strcmp(l, "CPU fan")) fct->ctrl.name = "cpu-fan"; else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive")) fct->ctrl.name = "drive-bay-fan"; else if (!strcmp(l, "HDD Fan")) /* seen on iMac G5 iSight */ fct->ctrl.name = "hard-drive-fan"; 
else if (!strcmp(l, "ODD Fan")) /* same */ fct->ctrl.name = "optical-drive-fan"; /* Unrecognized fan, bail out */ if (fct->ctrl.name == NULL) goto fail; /* Get min & max values*/ v = of_get_property(node, "min-value", NULL); if (v == NULL) goto fail; fct->min = *v; v = of_get_property(node, "max-value", NULL); if (v == NULL) goto fail; fct->max = *v; /* Get "reg" value */ reg = of_get_property(node, "reg", NULL); if (reg == NULL) goto fail; fct->reg = *reg; if (wf_register_control(&fct->ctrl)) goto fail; return fct; fail: kfree(fct); return NULL; } static int __init smu_controls_init(void) { struct device_node *smu, *fans, *fan; if (!smu_present()) return -ENODEV; smu = of_find_node_by_type(NULL, "smu"); if (smu == NULL) return -ENODEV; /* Look for RPM fans */ for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;) if (!strcmp(fans->name, "rpm-fans") || of_device_is_compatible(fans, "smu-rpm-fans")) break; for (fan = NULL; fans && (fan = of_get_next_child(fans, fan)) != NULL;) { struct smu_fan_control *fct; fct = smu_fan_create(fan, 0); if (fct == NULL) { printk(KERN_WARNING "windfarm: Failed to create SMU " "RPM fan %s\n", fan->name); continue; } list_add(&fct->link, &smu_fans); } of_node_put(fans); /* Look for PWM fans */ for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;) if (!strcmp(fans->name, "pwm-fans")) break; for (fan = NULL; fans && (fan = of_get_next_child(fans, fan)) != NULL;) { struct smu_fan_control *fct; fct = smu_fan_create(fan, 1); if (fct == NULL) { printk(KERN_WARNING "windfarm: Failed to create SMU " "PWM fan %s\n", fan->name); continue; } list_add(&fct->link, &smu_fans); } of_node_put(fans); of_node_put(smu); return 0; } static void __exit smu_controls_exit(void) { struct smu_fan_control *fct; while (!list_empty(&smu_fans)) { fct = list_entry(smu_fans.next, struct smu_fan_control, link); list_del(&fct->link); wf_unregister_control(&fct->ctrl); } } module_init(smu_controls_init); module_exit(smu_controls_exit); 
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("SMU control objects for PowerMacs thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
pershoot/vision-2632
drivers/watchdog/wdt285.c
5055
4888
/* * Intel 21285 watchdog driver * Copyright (c) Phil Blundell <pb@nexus.co.uk>, 1998 * * based on * * SoftDog 0.05: A Software Watchdog Device * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/hardware/dec21285.h> /* * Define this to stop the watchdog actually rebooting the machine. */ #undef ONLY_TESTING static unsigned int soft_margin = 60; /* in seconds */ static unsigned int reload; static unsigned long timer_alive; #ifdef ONLY_TESTING /* * If the timer expires.. */ static void watchdog_fire(int irq, void *dev_id) { printk(KERN_CRIT "Watchdog: Would Reboot.\n"); *CSR_TIMER4_CNTL = 0; *CSR_TIMER4_CLR = 0; } #endif /* * Refresh the timer. 
*/ static void watchdog_ping(void) { *CSR_TIMER4_LOAD = reload; } /* * Allow only one person to hold it open */ static int watchdog_open(struct inode *inode, struct file *file) { int ret; if (*CSR_SA110_CNTL & (1 << 13)) return -EBUSY; if (test_and_set_bit(1, &timer_alive)) return -EBUSY; reload = soft_margin * (mem_fclk_21285 / 256); *CSR_TIMER4_CLR = 0; watchdog_ping(); *CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV256; #ifdef ONLY_TESTING ret = request_irq(IRQ_TIMER4, watchdog_fire, 0, "watchdog", NULL); if (ret) { *CSR_TIMER4_CNTL = 0; clear_bit(1, &timer_alive); } #else /* * Setting this bit is irreversible; once enabled, there is * no way to disable the watchdog. */ *CSR_SA110_CNTL |= 1 << 13; ret = 0; #endif nonseekable_open(inode, file); return ret; } /* * Shut off the timer. * Note: if we really have enabled the watchdog, there * is no way to turn off. */ static int watchdog_release(struct inode *inode, struct file *file) { #ifdef ONLY_TESTING free_irq(IRQ_TIMER4, NULL); clear_bit(1, &timer_alive); #endif return 0; } static ssize_t watchdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* * Refresh the timer. 
*/ if (len) watchdog_ping(); return len; } static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT, .identity = "Footbridge Watchdog", }; static long watchdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int new_margin; int __user *int_arg = (int __user *)arg; int ret = -ENOTTY; switch (cmd) { case WDIOC_GETSUPPORT: ret = 0; if (copy_to_user((void __user *)arg, &ident, sizeof(ident))) ret = -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, int_arg); break; case WDIOC_KEEPALIVE: watchdog_ping(); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(new_margin, int_arg); if (ret) break; /* Arbitrary, can't find the card's limits */ if (new_margin < 0 || new_margin > 60) { ret = -EINVAL; break; } soft_margin = new_margin; reload = soft_margin * (mem_fclk_21285 / 256); watchdog_ping(); /* Fall */ case WDIOC_GETTIMEOUT: ret = put_user(soft_margin, int_arg); break; } return ret; } static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = watchdog_write, .unlocked_ioctl = watchdog_ioctl, .open = watchdog_open, .release = watchdog_release, }; static struct miscdevice watchdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &watchdog_fops, }; static int __init footbridge_watchdog_init(void) { int retval; if (machine_is_netwinder()) return -ENODEV; retval = misc_register(&watchdog_miscdev); if (retval < 0) return retval; printk(KERN_INFO "Footbridge Watchdog Timer: 0.01, timer margin: %d sec\n", soft_margin); if (machine_is_cats()) printk(KERN_WARNING "Warning: Watchdog reset may not work on this machine.\n"); return 0; } static void __exit footbridge_watchdog_exit(void) { misc_deregister(&watchdog_miscdev); } MODULE_AUTHOR("Phil Blundell <pb@nexus.co.uk>"); MODULE_DESCRIPTION("Footbridge watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, 
"Watchdog timeout in seconds"); module_init(footbridge_watchdog_init); module_exit(footbridge_watchdog_exit);
gpl-2.0
ShinySide/HispAsian_Kernel
drivers/char/lp.c
5055
26820
/* * Generic parallel printer driver * * Copyright (C) 1992 by Jim Weigand and Linus Torvalds * Copyright (C) 1992,1993 by Michael K. Johnson * - Thanks much to Gunter Windau for pointing out to me where the error * checking ought to be. * Copyright (C) 1993 by Nigel Gamble (added interrupt code) * Copyright (C) 1994 by Alan Cox (Modularised it) * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu * Statistics and support for slow printers by Rob Janssen, rob@knoware.nl * "lp=" command line parameters added by Grant Guenther, grant@torque.net * lp_read (Status readback) support added by Carsten Gross, * carsten@sol.wohnheim.uni-ulm.de * Support for parport by Philip Blundell <philb@gnu.org> * Parport sharing hacking by Andrea Arcangeli * Fixed kernel_(to/from)_user memory copy to check for errors * by Riccardo Facchetti <fizban@tin.it> * 22-JAN-1998 Added support for devfs Richard Gooch <rgooch@atnf.csiro.au> * Redesigned interrupt handling for handle printers with buggy handshake * by Andrea Arcangeli, 11 May 1998 * Full efficient handling of printer with buggy irq handshake (now I have * understood the meaning of the strange handshake). This is done sending new * characters if the interrupt is just happened, even if the printer say to * be still BUSY. This is needed at least with Epson Stylus Color. To enable * the new TRUST_IRQ mode read the `LP OPTIMIZATION' section below... * Fixed the irq on the rising edge of the strobe case. * Obsoleted the CAREFUL flag since a printer that doesn' t work with * CAREFUL will block a bit after in lp_check_status(). * Andrea Arcangeli, 15 Oct 1998 * Obsoleted and removed all the lowlevel stuff implemented in the last * month to use the IEEE1284 functions (that handle the _new_ compatibilty * mode fine). */ /* This driver should, in theory, work with any parallel port that has an * appropriate low-level driver; all I/O is done through the parport * abstraction layer. 
* * If this driver is built into the kernel, you can configure it using the * kernel command-line. For example: * * lp=parport1,none,parport2 (bind lp0 to parport1, disable lp1 and * bind lp2 to parport2) * * lp=auto (assign lp devices to all ports that * have printers attached, as determined * by the IEEE-1284 autoprobe) * * lp=reset (reset the printer during * initialisation) * * lp=off (disable the printer driver entirely) * * If the driver is loaded as a module, similar functionality is available * using module parameters. The equivalent of the above commands would be: * * # insmod lp.o parport=1,none,2 * * # insmod lp.o parport=auto * * # insmod lp.o reset=1 */ /* COMPATIBILITY WITH OLD KERNELS * * Under Linux 2.0 and previous versions, lp devices were bound to ports at * particular I/O addresses, as follows: * * lp0 0x3bc * lp1 0x378 * lp2 0x278 * * The new driver, by default, binds lp devices to parport devices as it * finds them. This means that if you only have one port, it will be bound * to lp0 regardless of its I/O address. If you need the old behaviour, you * can force it using the parameters described above. */ /* * The new interrupt handling code take care of the buggy handshake * of some HP and Epson printer: * ___ * ACK _______________ ___________ * |__| * ____ * BUSY _________ _______ * |____________| * * I discovered this using the printer scanner that you can find at: * * ftp://e-mind.com/pub/linux/pscan/ * * 11 May 98, Andrea Arcangeli * * My printer scanner run on an Epson Stylus Color show that such printer * generates the irq on the _rising_ edge of the STROBE. Now lp handle * this case fine too. * * 15 Oct 1998, Andrea Arcangeli * * The so called `buggy' handshake is really the well documented * compatibility mode IEEE1284 handshake. 
They changed the well known * Centronics handshake acking in the middle of busy expecting to not * break drivers or legacy application, while they broken linux lp * until I fixed it reverse engineering the protocol by hand some * month ago... * * 14 Dec 1998, Andrea Arcangeli * * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl) */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/poll.h> #include <linux/console.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/compat.h> #include <linux/parport.h> #undef LP_STATS #include <linux/lp.h> #include <asm/irq.h> #include <asm/uaccess.h> /* if you have more than 8 printers, remember to increase LP_NO */ #define LP_NO 8 static DEFINE_MUTEX(lp_mutex); static struct lp_struct lp_table[LP_NO]; static unsigned int lp_count = 0; static struct class *lp_class; #ifdef CONFIG_LP_CONSOLE static struct parport *console_registered; #endif /* CONFIG_LP_CONSOLE */ #undef LP_DEBUG /* Bits used to manage claiming the parport device */ #define LP_PREEMPT_REQUEST 1 #define LP_PARPORT_CLAIMED 2 /* --- low-level port access ----------------------------------- */ #define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port)) #define r_str(x) (parport_read_status(lp_table[(x)].dev->port)) #define w_ctr(x,y) do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0) #define w_dtr(x,y) do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0) /* Claim the parport or block trying unless we've already claimed it */ static void lp_claim_parport_or_block(struct lp_struct *this_lp) { if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { parport_claim_or_block (this_lp->dev); } } /* Claim the parport or block trying unless we've already claimed it */ static 
void lp_release_parport(struct lp_struct *this_lp) { if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { parport_release (this_lp->dev); } } static int lp_preempt(void *handle) { struct lp_struct *this_lp = (struct lp_struct *)handle; set_bit(LP_PREEMPT_REQUEST, &this_lp->bits); return (1); } /* * Try to negotiate to a new mode; if unsuccessful negotiate to * compatibility mode. Return the mode we ended up in. */ static int lp_negotiate(struct parport * port, int mode) { if (parport_negotiate (port, mode) != 0) { mode = IEEE1284_MODE_COMPAT; parport_negotiate (port, mode); } return (mode); } static int lp_reset(int minor) { int retval; lp_claim_parport_or_block (&lp_table[minor]); w_ctr(minor, LP_PSELECP); udelay (LP_DELAY); w_ctr(minor, LP_PSELECP | LP_PINITP); retval = r_str(minor); lp_release_parport (&lp_table[minor]); return retval; } static void lp_error (int minor) { DEFINE_WAIT(wait); int polling; if (LP_F(minor) & LP_ABORT) return; polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE; if (polling) lp_release_parport (&lp_table[minor]); prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(LP_TIMEOUT_POLLED); finish_wait(&lp_table[minor].waitq, &wait); if (polling) lp_claim_parport_or_block (&lp_table[minor]); else parport_yield_blocking (lp_table[minor].dev); } static int lp_check_status(int minor) { int error = 0; unsigned int last = lp_table[minor].last_error; unsigned char status = r_str(minor); if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL)) /* No error. 
*/ last = 0; else if ((status & LP_POUTPA)) { if (last != LP_POUTPA) { last = LP_POUTPA; printk(KERN_INFO "lp%d out of paper\n", minor); } error = -ENOSPC; } else if (!(status & LP_PSELECD)) { if (last != LP_PSELECD) { last = LP_PSELECD; printk(KERN_INFO "lp%d off-line\n", minor); } error = -EIO; } else if (!(status & LP_PERRORP)) { if (last != LP_PERRORP) { last = LP_PERRORP; printk(KERN_INFO "lp%d on fire\n", minor); } error = -EIO; } else { last = 0; /* Come here if LP_CAREFUL is set and no errors are reported. */ } lp_table[minor].last_error = last; if (last != 0) lp_error(minor); return error; } static int lp_wait_ready(int minor, int nonblock) { int error = 0; /* If we're not in compatibility mode, we're ready now! */ if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) { return (0); } do { error = lp_check_status (minor); if (error && (nonblock || (LP_F(minor) & LP_ABORT))) break; if (signal_pending (current)) { error = -EINTR; break; } } while (error); return error; } static ssize_t lp_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct parport *port = lp_table[minor].dev->port; char *kbuf = lp_table[minor].lp_buffer; ssize_t retv = 0; ssize_t written; size_t copy_size = count; int nonblock = ((file->f_flags & O_NONBLOCK) || (LP_F(minor) & LP_ABORT)); #ifdef LP_STATS if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor))) lp_table[minor].runchars = 0; lp_table[minor].lastcall = jiffies; #endif /* Need to copy the data from user-space. */ if (copy_size > LP_BUFFER_SIZE) copy_size = LP_BUFFER_SIZE; if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; if (copy_from_user (kbuf, buf, copy_size)) { retv = -EFAULT; goto out_unlock; } /* Claim Parport or sleep until it becomes available */ lp_claim_parport_or_block (&lp_table[minor]); /* Go to the proper mode. 
*/ lp_table[minor].current_mode = lp_negotiate (port, lp_table[minor].best_mode); parport_set_timeout (lp_table[minor].dev, (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK : lp_table[minor].timeout)); if ((retv = lp_wait_ready (minor, nonblock)) == 0) do { /* Write the data. */ written = parport_write (port, kbuf, copy_size); if (written > 0) { copy_size -= written; count -= written; buf += written; retv += written; } if (signal_pending (current)) { if (retv == 0) retv = -EINTR; break; } if (copy_size > 0) { /* incomplete write -> check error ! */ int error; parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; error = lp_wait_ready (minor, nonblock); if (error) { if (retv == 0) retv = error; break; } else if (nonblock) { if (retv == 0) retv = -EAGAIN; break; } parport_yield_blocking (lp_table[minor].dev); lp_table[minor].current_mode = lp_negotiate (port, lp_table[minor].best_mode); } else if (need_resched()) schedule (); if (count) { copy_size = count; if (copy_size > LP_BUFFER_SIZE) copy_size = LP_BUFFER_SIZE; if (copy_from_user(kbuf, buf, copy_size)) { if (retv == 0) retv = -EFAULT; break; } } } while (count > 0); if (test_and_clear_bit(LP_PREEMPT_REQUEST, &lp_table[minor].bits)) { printk(KERN_INFO "lp%d releasing parport\n", minor); parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; lp_release_parport (&lp_table[minor]); } out_unlock: mutex_unlock(&lp_table[minor].port_mutex); return retv; } #ifdef CONFIG_PARPORT_1284 /* Status readback conforming to ieee1284 */ static ssize_t lp_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { DEFINE_WAIT(wait); unsigned int minor=iminor(file->f_path.dentry->d_inode); struct parport *port = lp_table[minor].dev->port; ssize_t retval = 0; char *kbuf = lp_table[minor].lp_buffer; int nonblock = ((file->f_flags & O_NONBLOCK) || (LP_F(minor) & LP_ABORT)); if (count > 
LP_BUFFER_SIZE) count = LP_BUFFER_SIZE; if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; lp_claim_parport_or_block (&lp_table[minor]); parport_set_timeout (lp_table[minor].dev, (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK : lp_table[minor].timeout)); parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); if (parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } while (retval == 0) { retval = parport_read (port, kbuf, count); if (retval > 0) break; if (nonblock) { retval = -EAGAIN; break; } /* Wait for data. */ if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) { parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_error (minor); if (parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } } else { prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(LP_TIMEOUT_POLLED); finish_wait(&lp_table[minor].waitq, &wait); } if (signal_pending (current)) { retval = -ERESTARTSYS; break; } cond_resched (); } parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); out: lp_release_parport (&lp_table[minor]); if (retval > 0 && copy_to_user (buf, kbuf, retval)) retval = -EFAULT; mutex_unlock(&lp_table[minor].port_mutex); return retval; } #endif /* IEEE 1284 support */ static int lp_open(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); int ret = 0; mutex_lock(&lp_mutex); if (minor >= LP_NO) { ret = -ENXIO; goto out; } if ((LP_F(minor) & LP_EXIST) == 0) { ret = -ENXIO; goto out; } if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) { ret = -EBUSY; goto out; } /* If ABORTOPEN is set and the printer is offline or out of paper, we may still want to open it to perform ioctl()s. Therefore we have commandeered O_NONBLOCK, even though it is being used in a non-standard manner. 
This is strictly a Linux hack, and should most likely only ever be used by the tunelp application. */ if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) { int status; lp_claim_parport_or_block (&lp_table[minor]); status = r_str(minor); lp_release_parport (&lp_table[minor]); if (status & LP_POUTPA) { printk(KERN_INFO "lp%d out of paper\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -ENOSPC; goto out; } else if (!(status & LP_PSELECD)) { printk(KERN_INFO "lp%d off-line\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -EIO; goto out; } else if (!(status & LP_PERRORP)) { printk(KERN_ERR "lp%d printer error\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -EIO; goto out; } } lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); if (!lp_table[minor].lp_buffer) { LP_F(minor) &= ~LP_BUSY; ret = -ENOMEM; goto out; } /* Determine if the peripheral supports ECP mode */ lp_claim_parport_or_block (&lp_table[minor]); if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) && !parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_ECP)) { printk (KERN_INFO "lp%d: ECP mode\n", minor); lp_table[minor].best_mode = IEEE1284_MODE_ECP; } else { lp_table[minor].best_mode = IEEE1284_MODE_COMPAT; } /* Leave peripheral in compatibility mode */ parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_release_parport (&lp_table[minor]); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; out: mutex_unlock(&lp_mutex); return ret; } static int lp_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); lp_claim_parport_or_block (&lp_table[minor]); parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; lp_release_parport (&lp_table[minor]); kfree(lp_table[minor].lp_buffer); lp_table[minor].lp_buffer = NULL; LP_F(minor) &= ~LP_BUSY; return 0; } static int lp_do_ioctl(unsigned int minor, unsigned int cmd, unsigned long arg, void __user *argp) { int status; int 
retval = 0; #ifdef LP_DEBUG printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg); #endif if (minor >= LP_NO) return -ENODEV; if ((LP_F(minor) & LP_EXIST) == 0) return -ENODEV; switch ( cmd ) { case LPTIME: LP_TIME(minor) = arg * HZ/100; break; case LPCHAR: LP_CHAR(minor) = arg; break; case LPABORT: if (arg) LP_F(minor) |= LP_ABORT; else LP_F(minor) &= ~LP_ABORT; break; case LPABORTOPEN: if (arg) LP_F(minor) |= LP_ABORTOPEN; else LP_F(minor) &= ~LP_ABORTOPEN; break; case LPCAREFUL: if (arg) LP_F(minor) |= LP_CAREFUL; else LP_F(minor) &= ~LP_CAREFUL; break; case LPWAIT: LP_WAIT(minor) = arg; break; case LPSETIRQ: return -EINVAL; break; case LPGETIRQ: if (copy_to_user(argp, &LP_IRQ(minor), sizeof(int))) return -EFAULT; break; case LPGETSTATUS: lp_claim_parport_or_block (&lp_table[minor]); status = r_str(minor); lp_release_parport (&lp_table[minor]); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; break; case LPRESET: lp_reset(minor); break; #ifdef LP_STATS case LPGETSTATS: if (copy_to_user(argp, &LP_STAT(minor), sizeof(struct lp_stats))) return -EFAULT; if (capable(CAP_SYS_ADMIN)) memset(&LP_STAT(minor), 0, sizeof(struct lp_stats)); break; #endif case LPGETFLAGS: status = LP_F(minor); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; break; default: retval = -EINVAL; } return retval; } static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout) { long to_jiffies; /* Convert to jiffies, place in lp_table */ if ((par_timeout->tv_sec < 0) || (par_timeout->tv_usec < 0)) { return -EINVAL; } to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ); to_jiffies += par_timeout->tv_sec * (long) HZ; if (to_jiffies <= 0) { return -EINVAL; } lp_table[minor].timeout = to_jiffies; return 0; } static long lp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor; struct timeval par_timeout; int ret; minor = iminor(file->f_path.dentry->d_inode); mutex_lock(&lp_mutex); switch (cmd) { case 
LPSETTIMEOUT: if (copy_from_user(&par_timeout, (void __user *)arg, sizeof (struct timeval))) { ret = -EFAULT; break; } ret = lp_set_timeout(minor, &par_timeout); break; default: ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg); break; } mutex_unlock(&lp_mutex); return ret; } #ifdef CONFIG_COMPAT static long lp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor; struct timeval par_timeout; int ret; minor = iminor(file->f_path.dentry->d_inode); mutex_lock(&lp_mutex); switch (cmd) { case LPSETTIMEOUT: if (compat_get_timeval(&par_timeout, compat_ptr(arg))) { ret = -EFAULT; break; } ret = lp_set_timeout(minor, &par_timeout); break; #ifdef LP_STATS case LPGETSTATS: /* FIXME: add an implementation if you set LP_STATS */ ret = -EINVAL; break; #endif default: ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg)); break; } mutex_unlock(&lp_mutex); return ret; } #endif static const struct file_operations lp_fops = { .owner = THIS_MODULE, .write = lp_write, .unlocked_ioctl = lp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lp_compat_ioctl, #endif .open = lp_open, .release = lp_release, #ifdef CONFIG_PARPORT_1284 .read = lp_read, #endif .llseek = noop_llseek, }; /* --- support for console on the line printer ----------------- */ #ifdef CONFIG_LP_CONSOLE #define CONSOLE_LP 0 /* If the printer is out of paper, we can either lose the messages or * stall until the printer is happy again. Define CONSOLE_LP_STRICT * non-zero to get the latter behaviour. */ #define CONSOLE_LP_STRICT 1 /* The console must be locked when we get here. */ static void lp_console_write (struct console *co, const char *s, unsigned count) { struct pardevice *dev = lp_table[CONSOLE_LP].dev; struct parport *port = dev->port; ssize_t written; if (parport_claim (dev)) /* Nothing we can do. */ return; parport_set_timeout (dev, 0); /* Go to compatibility mode. */ parport_negotiate (port, IEEE1284_MODE_COMPAT); do { /* Write the data, converting LF->CRLF as we go. 
*/ ssize_t canwrite = count; char *lf = memchr (s, '\n', count); if (lf) canwrite = lf - s; if (canwrite > 0) { written = parport_write (port, s, canwrite); if (written <= 0) continue; s += written; count -= written; canwrite -= written; } if (lf && canwrite <= 0) { const char *crlf = "\r\n"; int i = 2; /* Dodge the original '\n', and put '\r\n' instead. */ s++; count--; do { written = parport_write (port, crlf, i); if (written > 0) i -= written, crlf += written; } while (i > 0 && (CONSOLE_LP_STRICT || written > 0)); } } while (count > 0 && (CONSOLE_LP_STRICT || written > 0)); parport_release (dev); } static struct console lpcons = { .name = "lp", .write = lp_console_write, .flags = CON_PRINTBUFFER, }; #endif /* console on line printer */ /* --- initialisation code ------------------------------------- */ static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC }; static char *parport[LP_NO]; static bool reset; module_param_array(parport, charp, NULL, 0); module_param(reset, bool, 0); #ifndef MODULE static int __init lp_setup (char *str) { static int parport_ptr; int x; if (get_option(&str, &x)) { if (x == 0) { /* disable driver on "lp=" or "lp=0" */ parport_nr[0] = LP_PARPORT_OFF; } else { printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x); return 0; } } else if (!strncmp(str, "parport", 7)) { int n = simple_strtoul(str+7, NULL, 10); if (parport_ptr < LP_NO) parport_nr[parport_ptr++] = n; else printk(KERN_INFO "lp: too many ports, %s ignored.\n", str); } else if (!strcmp(str, "auto")) { parport_nr[0] = LP_PARPORT_AUTO; } else if (!strcmp(str, "none")) { parport_nr[parport_ptr++] = LP_PARPORT_NONE; } else if (!strcmp(str, "reset")) { reset = 1; } return 1; } #endif static int lp_register(int nr, struct parport *port) { lp_table[nr].dev = parport_register_device(port, "lp", lp_preempt, NULL, NULL, 0, (void *) &lp_table[nr]); if (lp_table[nr].dev == NULL) return 1; lp_table[nr].flags |= LP_EXIST; if (reset) lp_reset(nr); 
	/* Continuation of lp_register(): create the device node and
	 * announce the port's operating mode. */
	device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
		      "lp%d", nr);

	printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
	       (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");

#ifdef CONFIG_LP_CONSOLE
	if (!nr) {
		/* Only lp0 may carry the console, and only when the port
		 * reports it is safe to drive from interrupt context. */
		if (port->modes & PARPORT_MODE_SAFEININT) {
			register_console(&lpcons);
			console_registered = port;
			printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
		} else
			printk (KERN_ERR "lp%d: cannot run console on %s\n",
				CONSOLE_LP, port->name);
	}
#endif

	return 0;
}

/*
 * parport attach callback: decide whether the new port should get an
 * lp slot, honouring either the auto/unspecified policy or an
 * explicit parport_nr[] assignment from lp_setup()/module params.
 */
static void lp_attach (struct parport *port)
{
	unsigned int i;

	switch (parport_nr[0]) {
	case LP_PARPORT_UNSPEC:
	case LP_PARPORT_AUTO:
		if (parport_nr[0] == LP_PARPORT_AUTO &&
		    port->probe_info[0].class != PARPORT_CLASS_PRINTER)
			return;
		if (lp_count == LP_NO) {
			printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
			return;
		}
		if (!lp_register(lp_count, port))
			lp_count++;
		break;

	default:
		/* An explicit port list was configured: match by number. */
		for (i = 0; i < LP_NO; i++) {
			if (port->number == parport_nr[i]) {
				if (!lp_register(i, port))
					lp_count++;
				break;
			}
		}
		break;
	}
}

/* parport detach callback. */
static void lp_detach (struct parport *port)
{
	/* Write this some day.
	 */
#ifdef CONFIG_LP_CONSOLE
	/* Drop the console if it was bound to the departing port. */
	if (console_registered == port) {
		unregister_console(&lpcons);
		console_registered = NULL;
	}
#endif /* CONFIG_LP_CONSOLE */
}

static struct parport_driver lp_driver = {
	.name = "lp",
	.attach = lp_attach,
	.detach = lp_detach,
};

/*
 * Driver initialisation: reset every lp_table slot to its defaults,
 * grab the LP_MAJOR character-device major, create the "printer"
 * class and register with parport (which invokes lp_attach() for each
 * existing port).  Failure paths unwind with gotos.
 */
static int __init lp_init (void)
{
	int i, err = 0;

	if (parport_nr[0] == LP_PARPORT_OFF)
		return 0;

	for (i = 0; i < LP_NO; i++) {
		lp_table[i].dev = NULL;
		lp_table[i].flags = 0;
		lp_table[i].chars = LP_INIT_CHAR;
		lp_table[i].time = LP_INIT_TIME;
		lp_table[i].wait = LP_INIT_WAIT;
		lp_table[i].lp_buffer = NULL;
#ifdef LP_STATS
		lp_table[i].lastcall = 0;
		lp_table[i].runchars = 0;
		memset (&lp_table[i].stats, 0, sizeof (struct lp_stats));
#endif
		lp_table[i].last_error = 0;
		init_waitqueue_head (&lp_table[i].waitq);
		init_waitqueue_head (&lp_table[i].dataq);
		mutex_init(&lp_table[i].port_mutex);
		lp_table[i].timeout = 10 * HZ;
	}

	if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) {
		printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
		return -EIO;
	}

	lp_class = class_create(THIS_MODULE, "printer");
	if (IS_ERR(lp_class)) {
		err = PTR_ERR(lp_class);
		goto out_reg;
	}

	if (parport_register_driver (&lp_driver)) {
		printk (KERN_ERR "lp: unable to register with parport\n");
		err = -EIO;
		goto out_class;
	}

	if (!lp_count) {
		printk (KERN_INFO "lp: driver loaded but no devices found\n");
#ifndef CONFIG_PARPORT_1284
		if (parport_nr[0] == LP_PARPORT_AUTO)
			printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
#endif
	}

	return 0;

out_class:
	class_destroy(lp_class);
out_reg:
	unregister_chrdev(LP_MAJOR, "lp");
	return err;
}

/*
 * Module entry point: translate the "parport=" module parameters into
 * parport_nr[] slot assignments, then run the common lp_init().
 */
static int __init lp_init_module (void)
{
	if (parport[0]) {
		/* The user gave some parameters.  Let's see what they were.
		 */
		if (!strncmp(parport[0], "auto", 4))
			parport_nr[0] = LP_PARPORT_AUTO;
		else {
			int n;
			for (n = 0; n < LP_NO && parport[n]; n++) {
				if (!strncmp(parport[n], "none", 4))
					parport_nr[n] = LP_PARPORT_NONE;
				else {
					char *ep;
					unsigned long r = simple_strtoul(parport[n], &ep, 0);
					/* Accept any numeric specifier; reject
					 * strings with no leading digits. */
					if (ep != parport[n])
						parport_nr[n] = r;
					else {
						printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
						return -ENODEV;
					}
				}
			}
		}
	}
	return lp_init();
}

/*
 * Module exit: unhook from parport, the console and the char-dev
 * major, then tear down every registered device and the class.
 */
static void lp_cleanup_module (void)
{
	unsigned int offset;

	parport_unregister_driver (&lp_driver);

#ifdef CONFIG_LP_CONSOLE
	unregister_console (&lpcons);
#endif

	unregister_chrdev(LP_MAJOR, "lp");

	for (offset = 0; offset < LP_NO; offset++) {
		if (lp_table[offset].dev == NULL)
			continue;
		parport_unregister_device(lp_table[offset].dev);
		device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
	}
	class_destroy(lp_class);
}

__setup("lp=", lp_setup);
module_init(lp_init_module);
module_exit(lp_cleanup_module);

MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
MODULE_LICENSE("GPL");
gpl-2.0
AttiJeong98/Solid_Kernel-Stock
net/decnet/dn_timer.c
8127
3096
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Socket Timer Functions * * Author: Steve Whitehouse <SteveW@ACM.org> * * * Changes: * Steve Whitehouse : Made keepalive timer part of the same * timer idea. * Steve Whitehouse : Added checks for sk->sock_readers * David S. Miller : New socket locking * Steve Whitehouse : Timer grabs socket ref. */ #include <linux/net.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <net/sock.h> #include <linux/atomic.h> #include <net/flow.h> #include <net/dn.h> /* * Slow timer is for everything else (n * 500mS) */ #define SLOW_INTERVAL (HZ/2) static void dn_slow_timer(unsigned long arg); void dn_start_slow_timer(struct sock *sk) { setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk); sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); } void dn_stop_slow_timer(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); } static void dn_slow_timer(unsigned long arg) { struct sock *sk = (struct sock *)arg; struct dn_scp *scp = DN_SK(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10); goto out; } /* * The persist timer is the standard slow timer used for retransmits * in both connection establishment and disconnection as well as * in the RUN state. The different states are catered for by changing * the function pointer in the socket. Setting the timer to a value * of zero turns it off. We allow the persist_fxn to turn the * timer off in a permant way by returning non-zero, so that * timer based routines may remove sockets. This is why we have a * sock_hold()/sock_put() around the timer to prevent the socket * going away in the middle. 
*/ if (scp->persist && scp->persist_fxn) { if (scp->persist <= SLOW_INTERVAL) { scp->persist = 0; if (scp->persist_fxn(sk)) goto out; } else { scp->persist -= SLOW_INTERVAL; } } /* * Check for keepalive timeout. After the other timer 'cos if * the previous timer caused a retransmit, we don't need to * do this. scp->stamp is the last time that we sent a packet. * The keepalive function sends a link service packet to the * other end. If it remains unacknowledged, the standard * socket timers will eventually shut the socket down. Each * time we do this, scp->stamp will be updated, thus * we won't try and send another until scp->keepalive has passed * since the last successful transmission. */ if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) { if ((jiffies - scp->stamp) >= scp->keepalive) scp->keepalive_fxn(sk); } sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); out: bh_unlock_sock(sk); sock_put(sk); }
gpl-2.0
crdroid-devices/android_kernel_motorola_msm8226
arch/alpha/kernel/smc37c669.c
8383
61682
/* * SMC 37C669 initialization code */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <asm/hwrpb.h> #include <asm/io.h> #include <asm/segment.h> #if 0 # define DBG_DEVS(args) printk args #else # define DBG_DEVS(args) #endif #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) #define SMC_DEBUG 0 /* File: smcc669_def.h * * Copyright (C) 1997 by * Digital Equipment Corporation, Maynard, Massachusetts. * All rights reserved. * * This software is furnished under a license and may be used and copied * only in accordance of the terms of such license and with the * inclusion of the above copyright notice. This software or any other * copies thereof may not be provided or otherwise made available to any * other person. No title to and ownership of the software is hereby * transferred. * * The information in this software is subject to change without notice * and should not be construed as a commitment by Digital Equipment * Corporation. * * Digital assumes no responsibility for the use or reliability of its * software on equipment which is not supplied by Digital. * * * Abstract: * * This file contains header definitions for the SMC37c669 * Super I/O controller. * * Author: * * Eric Rasmussen * * Modification History: * * er 28-Jan-1997 Initial Entry */ #ifndef __SMC37c669_H #define __SMC37c669_H /* ** Macros for handling device IRQs ** ** The mask acts as a flag used in mapping actual ISA IRQs (0 - 15) ** to device IRQs (A - H). */ #define SMC37c669_DEVICE_IRQ_MASK 0x80000000 #define SMC37c669_DEVICE_IRQ( __i ) \ ((SMC37c669_DEVICE_IRQ_MASK) | (__i)) #define SMC37c669_IS_DEVICE_IRQ(__i) \ (((__i) & (SMC37c669_DEVICE_IRQ_MASK)) == (SMC37c669_DEVICE_IRQ_MASK)) #define SMC37c669_RAW_DEVICE_IRQ(__i) \ ((__i) & ~(SMC37c669_DEVICE_IRQ_MASK)) /* ** Macros for handling device DRQs ** ** The mask acts as a flag used in mapping actual ISA DMA ** channels to device DMA channels (A - C). 
*/ #define SMC37c669_DEVICE_DRQ_MASK 0x80000000 #define SMC37c669_DEVICE_DRQ(__d) \ ((SMC37c669_DEVICE_DRQ_MASK) | (__d)) #define SMC37c669_IS_DEVICE_DRQ(__d) \ (((__d) & (SMC37c669_DEVICE_DRQ_MASK)) == (SMC37c669_DEVICE_DRQ_MASK)) #define SMC37c669_RAW_DEVICE_DRQ(__d) \ ((__d) & ~(SMC37c669_DEVICE_DRQ_MASK)) #define SMC37c669_DEVICE_ID 0x3 /* ** SMC37c669 Device Function Definitions */ #define SERIAL_0 0 #define SERIAL_1 1 #define PARALLEL_0 2 #define FLOPPY_0 3 #define IDE_0 4 #define NUM_FUNCS 5 /* ** Default Device Function Mappings */ #define COM1_BASE 0x3F8 #define COM1_IRQ 4 #define COM2_BASE 0x2F8 #define COM2_IRQ 3 #define PARP_BASE 0x3BC #define PARP_IRQ 7 #define PARP_DRQ 3 #define FDC_BASE 0x3F0 #define FDC_IRQ 6 #define FDC_DRQ 2 /* ** Configuration On/Off Key Definitions */ #define SMC37c669_CONFIG_ON_KEY 0x55 #define SMC37c669_CONFIG_OFF_KEY 0xAA /* ** SMC 37c669 Device IRQs */ #define SMC37c669_DEVICE_IRQ_A ( SMC37c669_DEVICE_IRQ( 0x01 ) ) #define SMC37c669_DEVICE_IRQ_B ( SMC37c669_DEVICE_IRQ( 0x02 ) ) #define SMC37c669_DEVICE_IRQ_C ( SMC37c669_DEVICE_IRQ( 0x03 ) ) #define SMC37c669_DEVICE_IRQ_D ( SMC37c669_DEVICE_IRQ( 0x04 ) ) #define SMC37c669_DEVICE_IRQ_E ( SMC37c669_DEVICE_IRQ( 0x05 ) ) #define SMC37c669_DEVICE_IRQ_F ( SMC37c669_DEVICE_IRQ( 0x06 ) ) /* SMC37c669_DEVICE_IRQ_G *** RESERVED ***/ #define SMC37c669_DEVICE_IRQ_H ( SMC37c669_DEVICE_IRQ( 0x08 ) ) /* ** SMC 37c669 Device DMA Channel Definitions */ #define SMC37c669_DEVICE_DRQ_A ( SMC37c669_DEVICE_DRQ( 0x01 ) ) #define SMC37c669_DEVICE_DRQ_B ( SMC37c669_DEVICE_DRQ( 0x02 ) ) #define SMC37c669_DEVICE_DRQ_C ( SMC37c669_DEVICE_DRQ( 0x03 ) ) /* ** Configuration Register Index Definitions */ #define SMC37c669_CR00_INDEX 0x00 #define SMC37c669_CR01_INDEX 0x01 #define SMC37c669_CR02_INDEX 0x02 #define SMC37c669_CR03_INDEX 0x03 #define SMC37c669_CR04_INDEX 0x04 #define SMC37c669_CR05_INDEX 0x05 #define SMC37c669_CR06_INDEX 0x06 #define SMC37c669_CR07_INDEX 0x07 #define SMC37c669_CR08_INDEX 0x08 
#define SMC37c669_CR09_INDEX 0x09 #define SMC37c669_CR0A_INDEX 0x0A #define SMC37c669_CR0B_INDEX 0x0B #define SMC37c669_CR0C_INDEX 0x0C #define SMC37c669_CR0D_INDEX 0x0D #define SMC37c669_CR0E_INDEX 0x0E #define SMC37c669_CR0F_INDEX 0x0F #define SMC37c669_CR10_INDEX 0x10 #define SMC37c669_CR11_INDEX 0x11 #define SMC37c669_CR12_INDEX 0x12 #define SMC37c669_CR13_INDEX 0x13 #define SMC37c669_CR14_INDEX 0x14 #define SMC37c669_CR15_INDEX 0x15 #define SMC37c669_CR16_INDEX 0x16 #define SMC37c669_CR17_INDEX 0x17 #define SMC37c669_CR18_INDEX 0x18 #define SMC37c669_CR19_INDEX 0x19 #define SMC37c669_CR1A_INDEX 0x1A #define SMC37c669_CR1B_INDEX 0x1B #define SMC37c669_CR1C_INDEX 0x1C #define SMC37c669_CR1D_INDEX 0x1D #define SMC37c669_CR1E_INDEX 0x1E #define SMC37c669_CR1F_INDEX 0x1F #define SMC37c669_CR20_INDEX 0x20 #define SMC37c669_CR21_INDEX 0x21 #define SMC37c669_CR22_INDEX 0x22 #define SMC37c669_CR23_INDEX 0x23 #define SMC37c669_CR24_INDEX 0x24 #define SMC37c669_CR25_INDEX 0x25 #define SMC37c669_CR26_INDEX 0x26 #define SMC37c669_CR27_INDEX 0x27 #define SMC37c669_CR28_INDEX 0x28 #define SMC37c669_CR29_INDEX 0x29 /* ** Configuration Register Alias Definitions */ #define SMC37c669_DEVICE_ID_INDEX SMC37c669_CR0D_INDEX #define SMC37c669_DEVICE_REVISION_INDEX SMC37c669_CR0E_INDEX #define SMC37c669_FDC_BASE_ADDRESS_INDEX SMC37c669_CR20_INDEX #define SMC37c669_IDE_BASE_ADDRESS_INDEX SMC37c669_CR21_INDEX #define SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX SMC37c669_CR22_INDEX #define SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX SMC37c669_CR23_INDEX #define SMC37c669_SERIAL0_BASE_ADDRESS_INDEX SMC37c669_CR24_INDEX #define SMC37c669_SERIAL1_BASE_ADDRESS_INDEX SMC37c669_CR25_INDEX #define SMC37c669_PARALLEL_FDC_DRQ_INDEX SMC37c669_CR26_INDEX #define SMC37c669_PARALLEL_FDC_IRQ_INDEX SMC37c669_CR27_INDEX #define SMC37c669_SERIAL_IRQ_INDEX SMC37c669_CR28_INDEX /* ** Configuration Register Definitions ** ** The INDEX (write only) and DATA (read/write) ports are effective ** only when the chip is in 
the Configuration State. */ typedef struct _SMC37c669_CONFIG_REGS { unsigned char index_port; unsigned char data_port; } SMC37c669_CONFIG_REGS; /* ** CR00 - default value 0x28 ** ** IDE_EN (CR00<1:0>): ** 0x - 30ua pull-ups on nIDEEN, nHDCS0, NHDCS1 ** 11 - IRQ_H available as IRQ output, ** IRRX2, IRTX2 available as alternate IR pins ** 10 - nIDEEN, nHDCS0, nHDCS1 used to control IDE ** ** VALID (CR00<7>): ** A high level on this software controlled bit can ** be used to indicate that a valid configuration ** cycle has occurred. The control software must ** take care to set this bit at the appropriate times. ** Set to zero after power up. This bit has no ** effect on any other hardware in the chip. ** */ typedef union _SMC37c669_CR00 { unsigned char as_uchar; struct { unsigned ide_en : 2; /* See note above */ unsigned reserved1 : 1; /* RAZ */ unsigned fdc_pwr : 1; /* 1 = supply power to FDC */ unsigned reserved2 : 3; /* Read as 010b */ unsigned valid : 1; /* See note above */ } by_field; } SMC37c669_CR00; /* ** CR01 - default value 0x9C */ typedef union _SMC37c669_CR01 { unsigned char as_uchar; struct { unsigned reserved1 : 2; /* RAZ */ unsigned ppt_pwr : 1; /* 1 = supply power to PPT */ unsigned ppt_mode : 1; /* 1 = Printer mode, 0 = EPP */ unsigned reserved2 : 1; /* Read as 1 */ unsigned reserved3 : 2; /* RAZ */ unsigned lock_crx: 1; /* Lock CR00 - CR18 */ } by_field; } SMC37c669_CR01; /* ** CR02 - default value 0x88 */ typedef union _SMC37c669_CR02 { unsigned char as_uchar; struct { unsigned reserved1 : 3; /* RAZ */ unsigned uart1_pwr : 1; /* 1 = supply power to UART1 */ unsigned reserved2 : 3; /* RAZ */ unsigned uart2_pwr : 1; /* 1 = supply power to UART2 */ } by_field; } SMC37c669_CR02; /* ** CR03 - default value 0x78 ** ** CR03<7> CR03<2> Pin 94 ** ------- ------- ------ ** 0 X DRV2 (input) ** 1 0 ADRX ** 1 1 IRQ_B ** ** CR03<6> CR03<5> Op Mode ** ------- ------- ------- ** 0 0 Model 30 ** 0 1 PS/2 ** 1 0 Reserved ** 1 1 AT Mode */ typedef union 
_SMC37c669_CR03 { unsigned char as_uchar; struct { unsigned pwrgd_gamecs : 1; /* 1 = PWRGD, 0 = GAMECS */ unsigned fdc_mode2 : 1; /* 1 = Enhanced Mode 2 */ unsigned pin94_0 : 1; /* See note above */ unsigned reserved1 : 1; /* RAZ */ unsigned drvden : 1; /* 1 = high, 0 - output */ unsigned op_mode : 2; /* See note above */ unsigned pin94_1 : 1; /* See note above */ } by_field; } SMC37c669_CR03; /* ** CR04 - default value 0x00 ** ** PP_EXT_MODE: ** If CR01<PP_MODE> = 0 and PP_EXT_MODE = ** 00 - Standard and Bidirectional ** 01 - EPP mode and SPP ** 10 - ECP mode ** In this mode, 2 drives can be supported ** directly, 3 or 4 drives must use external ** 4 drive support. SPP can be selected ** through the ECR register of ECP as mode 000. ** 11 - ECP mode and EPP mode ** In this mode, 2 drives can be supported ** directly, 3 or 4 drives must use external ** 4 drive support. SPP can be selected ** through the ECR register of ECP as mode 000. ** In this mode, EPP can be selected through ** the ECR register of ECP as mode 100. ** ** PP_FDC: ** 00 - Normal ** 01 - PPFD1 ** 10 - PPFD2 ** 11 - Reserved ** ** MIDI1: ** Serial Clock Select: ** A low level on this bit disables MIDI support, ** clock = divide by 13. A high level on this ** bit enables MIDI support, clock = divide by 12. ** ** MIDI operates at 31.25 Kbps which can be derived ** from 125 KHz (24 MHz / 12 = 2 MHz, 2 MHz / 16 = 125 KHz) ** ** ALT_IO: ** 0 - Use pins IRRX, IRTX ** 1 - Use pins IRRX2, IRTX2 ** ** If this bit is set, the IR receive and transmit ** functions will not be available on pins 25 and 26 ** unless CR00<IDE_EN> = 11. 
*/ typedef union _SMC37c669_CR04 { unsigned char as_uchar; struct { unsigned ppt_ext_mode : 2; /* See note above */ unsigned ppt_fdc : 2; /* See note above */ unsigned midi1 : 1; /* See note above */ unsigned midi2 : 1; /* See note above */ unsigned epp_type : 1; /* 0 = EPP 1.9, 1 = EPP 1.7 */ unsigned alt_io : 1; /* See note above */ } by_field; } SMC37c669_CR04; /* ** CR05 - default value 0x00 ** ** DEN_SEL: ** 00 - Densel output normal ** 01 - Reserved ** 10 - Densel output 1 ** 11 - Densel output 0 ** */ typedef union _SMC37c669_CR05 { unsigned char as_uchar; struct { unsigned reserved1 : 2; /* RAZ */ unsigned fdc_dma_mode : 1; /* 0 = burst, 1 = non-burst */ unsigned den_sel : 2; /* See note above */ unsigned swap_drv : 1; /* Swap the FDC motor selects */ unsigned extx4 : 1; /* 0 = 2 drive, 1 = external 4 drive decode */ unsigned reserved2 : 1; /* RAZ */ } by_field; } SMC37c669_CR05; /* ** CR06 - default value 0xFF */ typedef union _SMC37c669_CR06 { unsigned char as_uchar; struct { unsigned floppy_a : 2; /* Type of floppy drive A */ unsigned floppy_b : 2; /* Type of floppy drive B */ unsigned floppy_c : 2; /* Type of floppy drive C */ unsigned floppy_d : 2; /* Type of floppy drive D */ } by_field; } SMC37c669_CR06; /* ** CR07 - default value 0x00 ** ** Auto Power Management CR07<7:4>: ** 0 - Auto Powerdown disabled (default) ** 1 - Auto Powerdown enabled ** ** This bit is reset to the default state by POR or ** a hardware reset. 
** */ typedef union _SMC37c669_CR07 { unsigned char as_uchar; struct { unsigned floppy_boot : 2; /* 0 = A:, 1 = B: */ unsigned reserved1 : 2; /* RAZ */ unsigned ppt_en : 1; /* See note above */ unsigned uart1_en : 1; /* See note above */ unsigned uart2_en : 1; /* See note above */ unsigned fdc_en : 1; /* See note above */ } by_field; } SMC37c669_CR07; /* ** CR08 - default value 0x00 */ typedef union _SMC37c669_CR08 { unsigned char as_uchar; struct { unsigned zero : 4; /* 0 */ unsigned addrx7_4 : 4; /* ADR<7:3> for ADRx decode */ } by_field; } SMC37c669_CR08; /* ** CR09 - default value 0x00 ** ** ADRx_CONFIG: ** 00 - ADRx disabled ** 01 - 1 byte decode A<3:0> = 0000b ** 10 - 8 byte block decode A<3:0> = 0XXXb ** 11 - 16 byte block decode A<3:0> = XXXXb ** */ typedef union _SMC37c669_CR09 { unsigned char as_uchar; struct { unsigned adra8 : 3; /* ADR<10:8> for ADRx decode */ unsigned reserved1 : 3; unsigned adrx_config : 2; /* See note above */ } by_field; } SMC37c669_CR09; /* ** CR0A - default value 0x00 */ typedef union _SMC37c669_CR0A { unsigned char as_uchar; struct { unsigned ecp_fifo_threshold : 4; unsigned reserved1 : 4; } by_field; } SMC37c669_CR0A; /* ** CR0B - default value 0x00 */ typedef union _SMC37c669_CR0B { unsigned char as_uchar; struct { unsigned fdd0_drtx : 2; /* FDD0 Data Rate Table */ unsigned fdd1_drtx : 2; /* FDD1 Data Rate Table */ unsigned fdd2_drtx : 2; /* FDD2 Data Rate Table */ unsigned fdd3_drtx : 2; /* FDD3 Data Rate Table */ } by_field; } SMC37c669_CR0B; /* ** CR0C - default value 0x00 ** ** UART2_MODE: ** 000 - Standard (default) ** 001 - IrDA (HPSIR) ** 010 - Amplitude Shift Keyed IR @500 KHz ** 011 - Reserved ** 1xx - Reserved ** */ typedef union _SMC37c669_CR0C { unsigned char as_uchar; struct { unsigned uart2_rcv_polarity : 1; /* 1 = invert RX */ unsigned uart2_xmit_polarity : 1; /* 1 = invert TX */ unsigned uart2_duplex : 1; /* 1 = full, 0 = half */ unsigned uart2_mode : 3; /* See note above */ unsigned uart1_speed : 1; /* 1 = high 
speed enabled */ unsigned uart2_speed : 1; /* 1 = high speed enabled */ } by_field; } SMC37c669_CR0C; /* ** CR0D - default value 0x03 ** ** Device ID Register - read only */ typedef union _SMC37c669_CR0D { unsigned char as_uchar; struct { unsigned device_id : 8; /* Returns 0x3 in this field */ } by_field; } SMC37c669_CR0D; /* ** CR0E - default value 0x02 ** ** Device Revision Register - read only */ typedef union _SMC37c669_CR0E { unsigned char as_uchar; struct { unsigned device_rev : 8; /* Returns 0x2 in this field */ } by_field; } SMC37c669_CR0E; /* ** CR0F - default value 0x00 */ typedef union _SMC37c669_CR0F { unsigned char as_uchar; struct { unsigned test0 : 1; /* Reserved - set to 0 */ unsigned test1 : 1; /* Reserved - set to 0 */ unsigned test2 : 1; /* Reserved - set to 0 */ unsigned test3 : 1; /* Reserved - set t0 0 */ unsigned test4 : 1; /* Reserved - set to 0 */ unsigned test5 : 1; /* Reserved - set t0 0 */ unsigned test6 : 1; /* Reserved - set t0 0 */ unsigned test7 : 1; /* Reserved - set to 0 */ } by_field; } SMC37c669_CR0F; /* ** CR10 - default value 0x00 */ typedef union _SMC37c669_CR10 { unsigned char as_uchar; struct { unsigned reserved1 : 3; /* RAZ */ unsigned pll_gain : 1; /* 1 = 3V, 2 = 5V operation */ unsigned pll_stop : 1; /* 1 = stop PLLs */ unsigned ace_stop : 1; /* 1 = stop UART clocks */ unsigned pll_clock_ctrl : 1; /* 0 = 14.318 MHz, 1 = 24 MHz */ unsigned ir_test : 1; /* Enable IR test mode */ } by_field; } SMC37c669_CR10; /* ** CR11 - default value 0x00 */ typedef union _SMC37c669_CR11 { unsigned char as_uchar; struct { unsigned ir_loopback : 1; /* Internal IR loop back */ unsigned test_10ms : 1; /* Test 10ms autopowerdown FDC timeout */ unsigned reserved1 : 6; /* RAZ */ } by_field; } SMC37c669_CR11; /* ** CR12 - CR1D are reserved registers */ /* ** CR1E - default value 0x80 ** ** GAMECS: ** 00 - GAMECS disabled ** 01 - 1 byte decode ADR<3:0> = 0001b ** 10 - 8 byte block decode ADR<3:0> = 0XXXb ** 11 - 16 byte block decode ADR<3:0> = 
XXXXb ** */ typedef union _SMC37c66_CR1E { unsigned char as_uchar; struct { unsigned gamecs_config: 2; /* See note above */ unsigned gamecs_addr9_4 : 6; /* GAMECS Addr<9:4> */ } by_field; } SMC37c669_CR1E; /* ** CR1F - default value 0x00 ** ** DT0 DT1 DRVDEN0 DRVDEN1 Drive Type ** --- --- ------- ------- ---------- ** 0 0 DENSEL DRATE0 4/2/1 MB 3.5" ** 2/1 MB 5.25" ** 2/1.6/1 MB 3.5" (3-mode) ** 0 1 DRATE1 DRATE0 ** 1 0 nDENSEL DRATE0 PS/2 ** 1 1 DRATE0 DRATE1 ** ** Note: DENSEL, DRATE1, and DRATE0 map onto two output ** pins - DRVDEN0 and DRVDEN1. ** */ typedef union _SMC37c669_CR1F { unsigned char as_uchar; struct { unsigned fdd0_drive_type : 2; /* FDD0 drive type */ unsigned fdd1_drive_type : 2; /* FDD1 drive type */ unsigned fdd2_drive_type : 2; /* FDD2 drive type */ unsigned fdd3_drive_type : 2; /* FDD3 drive type */ } by_field; } SMC37c669_CR1F; /* ** CR20 - default value 0x3C ** ** FDC Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0XXXb to access. ** */ typedef union _SMC37c669_CR20 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* FDC Addr<9:4> */ } by_field; } SMC37c669_CR20; /* ** CR21 - default value 0x3C ** ** IDE Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0XXXb to access. ** */ typedef union _SMC37c669_CR21 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* IDE Addr<9:4> */ } by_field; } SMC37c669_CR21; /* ** CR22 - default value 0x3D ** ** IDE Alternate Status Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0110b to access. 
** */ typedef union _SMC37c669_CR22 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* IDE Alt Status Addr<9:4> */ } by_field; } SMC37c669_CR22; /* ** CR23 - default value 0x00 ** ** Parallel Port Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0 to access. ** - If EPP is enabled, A<2:0> = XXXb to access. ** If EPP is NOT enabled, A<1:0> = XXb to access ** */ typedef union _SMC37c669_CR23 { unsigned char as_uchar; struct { unsigned addr9_2 : 8; /* Parallel Port Addr<9:2> */ } by_field; } SMC37c669_CR23; /* ** CR24 - default value 0x00 ** ** UART1 Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<2:0> = XXXb to access. ** */ typedef union _SMC37c669_CR24 { unsigned char as_uchar; struct { unsigned zero : 1; /* 0 */ unsigned addr9_3 : 7; /* UART1 Addr<9:3> */ } by_field; } SMC37c669_CR24; /* ** CR25 - default value 0x00 ** ** UART2 Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<2:0> = XXXb to access. 
** */ typedef union _SMC37c669_CR25 { unsigned char as_uchar; struct { unsigned zero : 1; /* 0 */ unsigned addr9_3 : 7; /* UART2 Addr<9:3> */ } by_field; } SMC37c669_CR25; /* ** CR26 - default value 0x00 ** ** Parallel Port / FDC DMA Select Register ** ** D3 - D0 DMA ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 DMA_A ** 0010 DMA_B ** 0011 DMA_C ** */ typedef union _SMC37c669_CR26 { unsigned char as_uchar; struct { unsigned ppt_drq : 4; /* See note above */ unsigned fdc_drq : 4; /* See note above */ } by_field; } SMC37c669_CR26; /* ** CR27 - default value 0x00 ** ** Parallel Port / FDC IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** ** Any unselected IRQ REQ is in tristate ** */ typedef union _SMC37c669_CR27 { unsigned char as_uchar; struct { unsigned ppt_irq : 4; /* See note above */ unsigned fdc_irq : 4; /* See note above */ } by_field; } SMC37c669_CR27; /* ** CR28 - default value 0x00 ** ** UART IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** 1111 share with UART1 (only for UART2) ** ** Any unselected IRQ REQ is in tristate ** ** To share an IRQ between UART1 and UART2, set ** UART1 to use the desired IRQ and set UART2 to ** 0xF to enable sharing mechanism. 
** */ typedef union _SMC37c669_CR28 { unsigned char as_uchar; struct { unsigned uart2_irq : 4; /* See note above */ unsigned uart1_irq : 4; /* See note above */ } by_field; } SMC37c669_CR28; /* ** CR29 - default value 0x00 ** ** IRQIN IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** ** Any unselected IRQ REQ is in tristate ** */ typedef union _SMC37c669_CR29 { unsigned char as_uchar; struct { unsigned irqin_irq : 4; /* See note above */ unsigned reserved1 : 4; /* RAZ */ } by_field; } SMC37c669_CR29; /* ** Aliases of Configuration Register formats (should match ** the set of index aliases). ** ** Note that CR24 and CR25 have the same format and are the ** base address registers for UART1 and UART2. Because of ** this we only define 1 alias here - for CR24 - as the serial ** base address register. ** ** Note that CR21 and CR22 have the same format and are the ** base address and alternate status address registers for ** the IDE controller. Because of this we only define 1 alias ** here - for CR21 - as the IDE address register. 
** */ typedef SMC37c669_CR0D SMC37c669_DEVICE_ID_REGISTER; typedef SMC37c669_CR0E SMC37c669_DEVICE_REVISION_REGISTER; typedef SMC37c669_CR20 SMC37c669_FDC_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR21 SMC37c669_IDE_ADDRESS_REGISTER; typedef SMC37c669_CR23 SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR24 SMC37c669_SERIAL_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR26 SMC37c669_PARALLEL_FDC_DRQ_REGISTER; typedef SMC37c669_CR27 SMC37c669_PARALLEL_FDC_IRQ_REGISTER; typedef SMC37c669_CR28 SMC37c669_SERIAL_IRQ_REGISTER; /* ** ISA/Device IRQ Translation Table Entry Definition */ typedef struct _SMC37c669_IRQ_TRANSLATION_ENTRY { int device_irq; int isa_irq; } SMC37c669_IRQ_TRANSLATION_ENTRY; /* ** ISA/Device DMA Translation Table Entry Definition */ typedef struct _SMC37c669_DRQ_TRANSLATION_ENTRY { int device_drq; int isa_drq; } SMC37c669_DRQ_TRANSLATION_ENTRY; /* ** External Interface Function Prototype Declarations */ SMC37c669_CONFIG_REGS *SMC37c669_detect( int ); unsigned int SMC37c669_enable_device( unsigned int func ); unsigned int SMC37c669_disable_device( unsigned int func ); unsigned int SMC37c669_configure_device( unsigned int func, int port, int irq, int drq ); void SMC37c669_display_device_info( void ); #endif /* __SMC37c669_H */ /* file: smcc669.c * * Copyright (C) 1997 by * Digital Equipment Corporation, Maynard, Massachusetts. * All rights reserved. * * This software is furnished under a license and may be used and copied * only in accordance of the terms of such license and with the * inclusion of the above copyright notice. This software or any other * copies thereof may not be provided or otherwise made available to any * other person. No title to and ownership of the software is hereby * transferred. * * The information in this software is subject to change without notice * and should not be construed as a commitment by digital equipment * corporation. 
* * Digital assumes no responsibility for the use or reliability of its * software on equipment which is not supplied by digital. */ /* *++ * FACILITY: * * Alpha SRM Console Firmware * * MODULE DESCRIPTION: * * SMC37c669 Super I/O controller configuration routines. * * AUTHORS: * * Eric Rasmussen * * CREATION DATE: * * 28-Jan-1997 * * MODIFICATION HISTORY: * * er 01-May-1997 Fixed pointer conversion errors in * SMC37c669_get_device_config(). * er 28-Jan-1997 Initial version. * *-- */ #if 0 /* $INCLUDE_OPTIONS$ */ #include "cp$inc:platform_io.h" /* $INCLUDE_OPTIONS_END$ */ #include "cp$src:common.h" #include "cp$inc:prototypes.h" #include "cp$src:kernel_def.h" #include "cp$src:msg_def.h" #include "cp$src:smcc669_def.h" /* Platform-specific includes */ #include "cp$src:platform.h" #endif #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define wb( _x_, _y_ ) outb( _y_, (unsigned int)((unsigned long)_x_) ) #define rb( _x_ ) inb( (unsigned int)((unsigned long)_x_) ) /* ** Local storage for device configuration information. ** ** Since the SMC37c669 does not provide an explicit ** mechanism for enabling/disabling individual device ** functions, other than unmapping the device, local ** storage for device configuration information is ** allocated here for use in implementing our own ** function enable/disable scheme. */ static struct DEVICE_CONFIG { unsigned int port1; unsigned int port2; int irq; int drq; } local_config [NUM_FUNCS]; /* ** List of all possible addresses for the Super I/O chip */ static unsigned long SMC37c669_Addresses[] __initdata = { 0x3F0UL, /* Primary address */ 0x370UL, /* Secondary address */ 0UL /* End of list */ }; /* ** Global Pointer to the Super I/O device */ static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL; /* ** IRQ Translation Table ** ** The IRQ translation table is a list of SMC37c669 device ** and standard ISA IRQs. 
** */ static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata; /* ** The following definition is for the default IRQ ** translation table. */ static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_default_irq_table[] __initdata = { { SMC37c669_DEVICE_IRQ_A, -1 }, { SMC37c669_DEVICE_IRQ_B, -1 }, { SMC37c669_DEVICE_IRQ_C, 7 }, { SMC37c669_DEVICE_IRQ_D, 6 }, { SMC37c669_DEVICE_IRQ_E, 4 }, { SMC37c669_DEVICE_IRQ_F, 3 }, { SMC37c669_DEVICE_IRQ_H, -1 }, { -1, -1 } /* End of table */ }; /* ** The following definition is for the MONET (XP1000) IRQ ** translation table. */ static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_monet_irq_table[] __initdata = { { SMC37c669_DEVICE_IRQ_A, -1 }, { SMC37c669_DEVICE_IRQ_B, -1 }, { SMC37c669_DEVICE_IRQ_C, 6 }, { SMC37c669_DEVICE_IRQ_D, 7 }, { SMC37c669_DEVICE_IRQ_E, 4 }, { SMC37c669_DEVICE_IRQ_F, 3 }, { SMC37c669_DEVICE_IRQ_H, -1 }, { -1, -1 } /* End of table */ }; static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_tables[] __initdata = { SMC37c669_default_irq_table, SMC37c669_monet_irq_table }; /* ** DRQ Translation Table ** ** The DRQ translation table is a list of SMC37c669 device and ** ISA DMA channels. ** */ static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata; /* ** The following definition is the default DRQ ** translation table. 
*/
static SMC37c669_DRQ_TRANSLATION_ENTRY SMC37c669_default_drq_table[] __initdata =
{
    { SMC37c669_DEVICE_DRQ_A, 2 },
    { SMC37c669_DEVICE_DRQ_B, 3 },
    { SMC37c669_DEVICE_DRQ_C, -1 },
    { -1, -1 } /* End of table */
};

/*
** Local Function Prototype Declarations
*/
static unsigned int SMC37c669_is_device_enabled( unsigned int func );

#if 0
static unsigned int SMC37c669_get_device_config( unsigned int func, int *port, int *irq, int *drq );
#endif

static void SMC37c669_config_mode( unsigned int enable );
static unsigned char SMC37c669_read_config( unsigned char index );
static void SMC37c669_write_config( unsigned char index, unsigned char data );
static void SMC37c669_init_local_config( void );
static struct DEVICE_CONFIG *SMC37c669_get_config( unsigned int func );
static int SMC37c669_xlate_irq( int irq );
static int SMC37c669_xlate_drq( int drq );

/* Serializes the two back-to-back index-port writes that enter
   configuration mode (see SMC37c669_config_mode()). */
static __cacheline_aligned DEFINE_SPINLOCK(smc_lock);

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function detects the presence of an SMC37c669 Super I/O
**      controller.
**
**  FORMAL PARAMETERS:
**
**      index:
**          Selects the platform IRQ translation table
**          (index into SMC37c669_irq_tables[]).
**
**  RETURN VALUE:
**
**      Returns a pointer to the device if found, otherwise,
**      the NULL pointer is returned.
**
**  SIDE EFFECTS:
**
**      Sets the file-scope SMC37c669 pointer; on success also sets the
**      IRQ/DRQ translation table pointers and fills local_config[].
**
**--
*/
SMC37c669_CONFIG_REGS * __init SMC37c669_detect( int index )
{
    int i;
    SMC37c669_DEVICE_ID_REGISTER id;

    for ( i = 0;  SMC37c669_Addresses[i] != 0;  i++ ) {
/*
** Initialize the device pointer even though we don't yet know if
** the controller is at this address.  The support functions access
** the controller through this device pointer so we need to set it
** even when we are looking ...
*/
	SMC37c669 = ( SMC37c669_CONFIG_REGS * )SMC37c669_Addresses[i];
/*
** Enter configuration mode
*/
	SMC37c669_config_mode( TRUE );
/*
** Read the device id
*/
	id.as_uchar = SMC37c669_read_config( SMC37c669_DEVICE_ID_INDEX );
/*
** Exit configuration mode
*/
	SMC37c669_config_mode( FALSE );
/*
** Does the device id match?  If so, assume we have found an
** SMC37c669 controller at this address.
*/
	if ( id.by_field.device_id == SMC37c669_DEVICE_ID ) {
/*
** Initialize the IRQ and DRQ translation tables.
*/
    	    SMC37c669_irq_table = SMC37c669_irq_tables[ index ];
	    SMC37c669_drq_table = SMC37c669_default_drq_table;
/*
** erfix
**
** If the platform can't use the IRQ and DRQ defaults set up in this
** file, it should call a platform-specific external routine at this
** point to reset the IRQ and DRQ translation table pointers to point
** at the appropriate tables for the platform.  If the defaults are
** acceptable, then the external routine should do nothing.
*/

/*
** Put the chip back into configuration mode
*/
	    SMC37c669_config_mode( TRUE );
/*
** Initialize local storage for configuration information
*/
	    SMC37c669_init_local_config( );
/*
** Exit configuration mode
*/
	    SMC37c669_config_mode( FALSE );
/*
** SMC37c669 controller found, break out of search loop
*/
	    break;
	}
	else {
/*
** Otherwise, we did not find an SMC37c669 controller at this
** address so set the device pointer to NULL.
*/
	    SMC37c669 = NULL;
	}
    }
    return SMC37c669;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function enables an SMC37c669 device function.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which device function to enable
**
**  RETURN VALUE:
**
**      Returns TRUE if the device function was enabled, otherwise, FALSE
**
**  SIDE EFFECTS:
**
**      The controller is temporarily placed in configuration mode.
**
**  DESIGN:
**
**      Enabling a device function in the SMC37c669 controller involves
**      setting all of its mappings (port, irq, drq ...).  A local
**      "shadow" copy of the device configuration is kept so we can
**      just set each mapping to what the local copy says.
**
**      This function ALWAYS updates the local shadow configuration of
**      the device function being enabled, even if the device is already
**      enabled.  To avoid replication of code, functions such as
**      configure_device set up the local copy and then call this
**      function to update the real device.
**
**--
*/
unsigned int __init SMC37c669_enable_device ( unsigned int func )
{
    unsigned int ret_val = FALSE;
/*
** Put the device into configuration mode
*/
    SMC37c669_config_mode( TRUE );
    switch ( func ) {
	case SERIAL_0:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Enable the serial 1 IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
		irq.by_field.uart1_irq =
		    SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) );
		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Enable the serial 1 port base address mapping
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;
		SMC37c669_write_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case SERIAL_1:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Enable the serial 2 IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
		irq.by_field.uart2_irq =
		    SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) );
		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Enable the serial 2 port base address mapping
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;
		SMC37c669_write_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case PARALLEL_0:
	    {
		SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Enable the parallel port DMA channel mapping
*/
		drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
		drq.by_field.ppt_drq =
		    SMC37c669_RAW_DEVICE_DRQ( SMC37c669_xlate_drq( local_config[ func ].drq ) );
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar );
/*
** Enable the parallel port IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
		irq.by_field.ppt_irq =
		    SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) );
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar );
/*
** Enable the parallel port base address mapping
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_2 = local_config[ func ].port1 >> 2;
		SMC37c669_write_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case FLOPPY_0:
	    {
		SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Enable the floppy controller DMA channel mapping
*/
		drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
		drq.by_field.fdc_drq =
		    SMC37c669_RAW_DEVICE_DRQ( SMC37c669_xlate_drq( local_config[ func ].drq ) );
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar );
/*
** Enable the floppy controller IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
		irq.by_field.fdc_irq =
		    SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) );
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar );
/*
** Enable the floppy controller base address mapping
*/
		base_addr.as_uchar = 0;
		base_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
		SMC37c669_write_config( SMC37c669_FDC_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case IDE_0:
	    {
		SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
/*
** Enable the IDE alternate status base address mapping
*/
		ide_addr.as_uchar = 0;
		ide_addr.by_field.addr9_4 = local_config[ func ].port2 >> 4;
		SMC37c669_write_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, ide_addr.as_uchar );
/*
** Enable the IDE controller base address mapping
*/
		ide_addr.as_uchar = 0;
		ide_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
		SMC37c669_write_config( SMC37c669_IDE_BASE_ADDRESS_INDEX, ide_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
    }
/*
** Exit configuration mode and return
*/
    SMC37c669_config_mode( FALSE );

    return ret_val;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function disables a device function within the
**      SMC37c669 Super I/O controller.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which function to disable
**
**  RETURN VALUE:
**
**      Returns TRUE if the device function was disabled, otherwise, FALSE
**
**  SIDE EFFECTS:
**
**      The controller is temporarily placed in configuration mode.
**
**  DESIGN:
**
**      Disabling a function in the SMC37c669 device involves
**      disabling all the function's mappings (port, irq, drq ...).
**      A shadow copy of the device configuration is maintained
**      in local storage so we won't worry about saving the
**      current configuration information.
**
**--
*/
unsigned int __init SMC37c669_disable_device ( unsigned int func )
{
    unsigned int ret_val = FALSE;
/*
** Put the device into configuration mode
*/
    SMC37c669_config_mode( TRUE );
    switch ( func ) {
	case SERIAL_0:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Disable the serial 1 IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
		irq.by_field.uart1_irq = 0;
		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Disable the serial 1 port base address mapping
*/
		base_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case SERIAL_1:
	    {
		SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_SERIAL_IRQ_REGISTER irq;
/*
** Disable the serial 2 IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
		irq.by_field.uart2_irq = 0;
		SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
/*
** Disable the serial 2 port base address mapping
*/
		base_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case PARALLEL_0:
	    {
		SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Disable the parallel port DMA channel mapping
*/
		drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
		drq.by_field.ppt_drq = 0;
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar );
/*
** Disable the parallel port IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
		irq.by_field.ppt_irq = 0;
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar );
/*
** Disable the parallel port base address mapping
*/
		base_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case FLOPPY_0:
	    {
		SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
		SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
		SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
/*
** Disable the floppy controller DMA channel mapping
*/
		drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
		drq.by_field.fdc_drq = 0;
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar );
/*
** Disable the floppy controller IRQ mapping
*/
		irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
		irq.by_field.fdc_irq = 0;
		SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar );
/*
** Disable the floppy controller base address mapping
*/
		base_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_FDC_BASE_ADDRESS_INDEX, base_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
	case IDE_0:
	    {
		SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
/*
** Disable the IDE alternate status base address mapping
*/
		ide_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, ide_addr.as_uchar );
/*
** Disable the IDE controller base address mapping
*/
		ide_addr.as_uchar = 0;
		SMC37c669_write_config( SMC37c669_IDE_BASE_ADDRESS_INDEX, ide_addr.as_uchar );
		ret_val = TRUE;
		break;
	    }
    }
/*
** Exit configuration mode and return
*/
SMC37c669_config_mode( FALSE ); return ret_val; }

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function configures a device function within the
**      SMC37c669 Super I/O controller.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which device function
**
**      port:
**          I/O port for the function to use
**
**      irq:
**          IRQ for the device function to use
**
**      drq:
**          DMA channel for the device function to use
**
**  RETURN VALUE:
**
**      Returns TRUE if the device function was configured,
**      otherwise, FALSE.
**
**  SIDE EFFECTS:
**
**      On success the local shadow configuration is updated; if the
**      function is currently enabled the chip registers are rewritten
**      too (via SMC37c669_enable_device).
**
**  DESIGN:
**
**      If this function returns TRUE, the local shadow copy of
**      the configuration is also updated.  If the device function
**      is currently disabled, only the local shadow copy is
**      updated and the actual device function will be updated
**      if/when it is enabled.
**
**--
*/
unsigned int __init SMC37c669_configure_device (
    unsigned int func,
    int port,
    int irq,
    int drq )
{
    struct DEVICE_CONFIG *cp;

/*
** Check for a valid configuration
*/
    if ( ( cp = SMC37c669_get_config ( func ) ) != NULL ) {
/*
** Configuration is valid, update the local shadow copy.
**
** NOTE(review): the masks below mean any argument with bits set
** outside the low 8 (irq/drq) or low 16 (port) — including the -1
** value callers pass to mean "leave unchanged" — is silently skipped.
*/
	if ( ( drq & ~0xFF ) == 0 ) {
	    cp->drq = drq;
	}
	if ( ( irq & ~0xFF ) == 0 ) {
	    cp->irq = irq;
	}
	if ( ( port & ~0xFFFF ) == 0 ) {
	    cp->port1 = port;
	}
/*
** If the device function is enabled, update the actual
** device configuration.
*/
	if ( SMC37c669_is_device_enabled( func ) ) {
	    SMC37c669_enable_device( func );
	}
	return TRUE;
    }
    return FALSE;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function determines whether a device function
**      within the SMC37c669 controller is enabled.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which device function
**
**  RETURN VALUE:
**
**      Returns TRUE if the device function is enabled, otherwise, FALSE
**
**  SIDE EFFECTS:
**
**      The controller is temporarily placed in configuration mode.
**
**  DESIGN:
**
**      To check whether a device is enabled we will only look at
**      the port base address mapping.
According to the SMC37c669
**      specification, all of the port base address mappings are
**      disabled if the addr<9:8> (bits <7:6> of the register) are
**      zero.
**
**--
*/
static unsigned int __init SMC37c669_is_device_enabled ( unsigned int func )
{
    unsigned char base_addr = 0;
    unsigned int dev_ok = FALSE;
    unsigned int ret_val = FALSE;
/*
** Enter configuration mode
*/
    SMC37c669_config_mode( TRUE );
/*
** Read the base address register for the requested function;
** dev_ok stays FALSE for an unknown func value.
*/
    switch ( func ) {
	case SERIAL_0:
	    base_addr = SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
	    dev_ok = TRUE;
	    break;
	case SERIAL_1:
	    base_addr = SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
	    dev_ok = TRUE;
	    break;
	case PARALLEL_0:
	    base_addr = SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
	    dev_ok = TRUE;
	    break;
	case FLOPPY_0:
	    base_addr = SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
	    dev_ok = TRUE;
	    break;
	case IDE_0:
	    base_addr = SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
	    dev_ok = TRUE;
	    break;
    }
/*
** If we have a valid device, check base_addr<7:6> to see if the
** device is enabled (mapped).
*/
    if ( ( dev_ok ) && ( ( base_addr & 0xC0 ) != 0 ) ) {
/*
** The mapping is not disabled, so assume that the function is
** enabled.
*/
	ret_val = TRUE;
    }
/*
** Exit configuration mode
*/
    SMC37c669_config_mode( FALSE );

    return ret_val;
}

#if 0
/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function retrieves the configuration information of a
**      device function within the SMC37c669 Super I/O controller.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which device function
**
**      port:
**          I/O port returned
**
**      irq:
**          IRQ returned
**
**      drq:
**          DMA channel returned
**
**  RETURN VALUE:
**
**      Returns TRUE if the device configuration was successfully
**      retrieved, otherwise, FALSE.
**
**  SIDE EFFECTS:
**
**      The data pointed to by the port, irq, and drq parameters
**      may be modified even if the configuration is not successfully
**      retrieved.
**
**  DESIGN:
**
**      The device configuration is fetched from the local shadow
**      copy.
Any unused parameters will be set to -1.  Any
**      parameter which is not desired can specify the NULL
**      pointer.
**
**--
*/
static unsigned int __init SMC37c669_get_device_config (
    unsigned int func,
    int *port,
    int *irq,
    int *drq )
{
    struct DEVICE_CONFIG *cp;
    unsigned int ret_val = FALSE;
/*
** Check for a valid device configuration
*/
    if ( ( cp = SMC37c669_get_config( func ) ) != NULL ) {
	if ( drq != NULL ) {
	    *drq = cp->drq;
	    ret_val = TRUE;
	}
	if ( irq != NULL ) {
	    *irq = cp->irq;
	    ret_val = TRUE;
	}
	if ( port != NULL ) {
	    *port = cp->port1;
	    ret_val = TRUE;
	}
    }
    return ret_val;
}
#endif

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function displays the current state of the SMC37c669
**      Super I/O controller's device functions, using the local
**      shadow configuration for ports/IRQs/DRQs.
**
**  FORMAL PARAMETERS:
**
**      None
**
**  RETURN VALUE:
**
**      None
**
**  SIDE EFFECTS:
**
**      None
**
**--
*/
void __init SMC37c669_display_device_info ( void )
{
    if ( SMC37c669_is_device_enabled( SERIAL_0 ) ) {
	printk( " Serial 0: Enabled [ Port 0x%x, IRQ %d ]\n",
		local_config[ SERIAL_0 ].port1,
		local_config[ SERIAL_0 ].irq );
    }
    else {
	printk( " Serial 0: Disabled\n" );
    }

    if ( SMC37c669_is_device_enabled( SERIAL_1 ) ) {
	printk( " Serial 1: Enabled [ Port 0x%x, IRQ %d ]\n",
		local_config[ SERIAL_1 ].port1,
		local_config[ SERIAL_1 ].irq );
    }
    else {
	printk( " Serial 1: Disabled\n" );
    }

    if ( SMC37c669_is_device_enabled( PARALLEL_0 ) ) {
	printk( " Parallel: Enabled [ Port 0x%x, IRQ %d/%d ]\n",
		local_config[ PARALLEL_0 ].port1,
		local_config[ PARALLEL_0 ].irq,
		local_config[ PARALLEL_0 ].drq );
    }
    else {
	printk( " Parallel: Disabled\n" );
    }

    if ( SMC37c669_is_device_enabled( FLOPPY_0 ) ) {
	printk( " Floppy Ctrl: Enabled [ Port 0x%x, IRQ %d/%d ]\n",
		local_config[ FLOPPY_0 ].port1,
		local_config[ FLOPPY_0 ].irq,
		local_config[ FLOPPY_0 ].drq );
    }
    else {
	printk( " Floppy Ctrl: Disabled\n" );
    }

    if ( SMC37c669_is_device_enabled( IDE_0 ) ) {
	printk( " IDE 0: Enabled [ Port 0x%x, IRQ %d ]\n",
		local_config[ IDE_0 ].port1,
		local_config[ IDE_0 ].irq );
    }
    else {
	printk( " 
IDE 0: Disabled\n" ); } }

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function puts the SMC37c669 Super I/O controller into,
**      and takes it out of, configuration mode.
**
**  FORMAL PARAMETERS:
**
**      enable:
**          TRUE to enter configuration mode, FALSE to exit.
**
**  RETURN VALUE:
**
**      None
**
**  SIDE EFFECTS:
**
**      The SMC37c669 controller may be left in configuration mode.
**
**--
*/
static void __init SMC37c669_config_mode( unsigned int enable )
{
    if ( enable ) {
/*
** To enter configuration mode, two writes in succession to the index
** port are required.  If a write to another address or port occurs
** between these two writes, the chip does not enter configuration
** mode.  Therefore, a spinlock is placed around the two writes to
** guarantee that they complete uninterrupted.
*/
	spin_lock(&smc_lock);
	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
	spin_unlock(&smc_lock);
    }
    else {
	/* A single write of the OFF key is sufficient to exit. */
	wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY );
    }
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function reads an SMC37c669 Super I/O controller
**      configuration register.  This function assumes that the
**      device is already in configuration mode.
**
**  FORMAL PARAMETERS:
**
**      index:
**          Index value of configuration register to read
**
**  RETURN VALUE:
**
**      Data read from configuration register
**
**  SIDE EFFECTS:
**
**      None
**
**--
*/
static unsigned char __init SMC37c669_read_config( unsigned char index )
{
    unsigned char data;

    /* Select the register via the index port, then read its value
       back through the data port. */
    wb( &SMC37c669->index_port, index );
    data = rb( &SMC37c669->data_port );
    return data;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function writes an SMC37c669 Super I/O controller
**      configuration register.  This function assumes that the
**      device is already in configuration mode.
**
**  FORMAL PARAMETERS:
**
**      index:
**          Index of configuration register to write
**
**      data:
**          Data to be written
**
**  RETURN VALUE:
**
**      None
**
**  SIDE EFFECTS:
**
**      None
**
**--
*/
static void __init SMC37c669_write_config( unsigned char index, unsigned char data )
{
    /* Select the register via the index port, then write the value
       through the data port. */
    wb( &SMC37c669->index_port, index );
    wb( &SMC37c669->data_port, data );
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function initializes the local device
**      configuration storage.  This function assumes
**      that the device is already in configuration
**      mode.
**
**  FORMAL PARAMETERS:
**
**      None
**
**  RETURN VALUE:
**
**      None
**
**  SIDE EFFECTS:
**
**      Local storage for device configuration information
**      is initialized.
**
**--
*/
static void __init SMC37c669_init_local_config ( void )
{
    SMC37c669_SERIAL_BASE_ADDRESS_REGISTER uart_base;
    SMC37c669_SERIAL_IRQ_REGISTER uart_irqs;
    SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER ppt_base;
    SMC37c669_PARALLEL_FDC_IRQ_REGISTER ppt_fdc_irqs;
    SMC37c669_PARALLEL_FDC_DRQ_REGISTER ppt_fdc_drqs;
    SMC37c669_FDC_BASE_ADDRESS_REGISTER fdc_base;
    SMC37c669_IDE_ADDRESS_REGISTER ide_base;
    SMC37c669_IDE_ADDRESS_REGISTER ide_alt;

/*
** Get serial port 1 base address
*/
    uart_base.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
/*
** Get IRQs for serial ports 1 & 2
*/
    uart_irqs.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
/*
** Store local configuration information for serial port 1
*/
    local_config[SERIAL_0].port1 = uart_base.by_field.addr9_3 << 3;
    local_config[SERIAL_0].irq =
	SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart1_irq ) );
/*
** Get serial port 2 base address
*/
    uart_base.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
/*
** Store local configuration information for serial port 2
*/
    local_config[SERIAL_1].port1 = uart_base.by_field.addr9_3 << 3;
    local_config[SERIAL_1].irq =
	SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart2_irq ) );
/*
** Get parallel port base address
*/
    ppt_base.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
/*
** Get IRQs for parallel port and floppy controller
*/
    ppt_fdc_irqs.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
/*
** Get DRQs for parallel port and floppy controller
*/
    ppt_fdc_drqs.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
/*
** Store local configuration information for parallel port
*/
    local_config[PARALLEL_0].port1 = ppt_base.by_field.addr9_2 << 2;
    local_config[PARALLEL_0].irq =
	SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.ppt_irq ) );
    local_config[PARALLEL_0].drq =
	SMC37c669_xlate_drq( SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.ppt_drq ) );
/*
** Get floppy controller base address
*/
    fdc_base.as_uchar = SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
/*
** Store local configuration information for floppy controller
*/
    local_config[FLOPPY_0].port1 = fdc_base.by_field.addr9_4 << 4;
    local_config[FLOPPY_0].irq =
	SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.fdc_irq ) );
    local_config[FLOPPY_0].drq =
	SMC37c669_xlate_drq( SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.fdc_drq ) );
/*
** Get IDE controller base address
*/
    ide_base.as_uchar = SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
/*
** Get IDE alternate status base address
*/
    ide_alt.as_uchar = SMC37c669_read_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX );
/*
** Store local configuration information for IDE controller
*/
    local_config[IDE_0].port1 = ide_base.by_field.addr9_4 << 4;
    local_config[IDE_0].port2 = ide_alt.by_field.addr9_4 << 4;
    /* IRQ is hard-coded to 14 rather than read back from the chip. */
    local_config[IDE_0].irq = 14;
}

/*
**++
**  FUNCTIONAL DESCRIPTION:
**
**      This function returns a pointer to the local shadow
**      configuration of the requested device function.
**
**  FORMAL PARAMETERS:
**
**      func:
**          Which device function
**
**  RETURN VALUE:
**
**      Returns a pointer to the DEVICE_CONFIG structure for the
**      requested function, otherwise, NULL.
** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static struct DEVICE_CONFIG * __init SMC37c669_get_config( unsigned int func ) { struct DEVICE_CONFIG *cp = NULL; switch ( func ) { case SERIAL_0: cp = &local_config[ SERIAL_0 ]; break; case SERIAL_1: cp = &local_config[ SERIAL_1 ]; break; case PARALLEL_0: cp = &local_config[ PARALLEL_0 ]; break; case FLOPPY_0: cp = &local_config[ FLOPPY_0 ]; break; case IDE_0: cp = &local_config[ IDE_0 ]; break; } return cp; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function translates IRQs back and forth between ISA ** IRQs and SMC37c669 device IRQs. ** ** FORMAL PARAMETERS: ** ** irq: ** The IRQ to translate ** ** RETURN VALUE: ** ** Returns the translated IRQ, otherwise, returns -1. ** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static int __init SMC37c669_xlate_irq ( int irq ) { int i, translated_irq = -1; if ( SMC37c669_IS_DEVICE_IRQ( irq ) ) { /* ** We are translating a device IRQ to an ISA IRQ */ for ( i = 0; ( SMC37c669_irq_table[i].device_irq != -1 ) || ( SMC37c669_irq_table[i].isa_irq != -1 ); i++ ) { if ( irq == SMC37c669_irq_table[i].device_irq ) { translated_irq = SMC37c669_irq_table[i].isa_irq; break; } } } else { /* ** We are translating an ISA IRQ to a device IRQ */ for ( i = 0; ( SMC37c669_irq_table[i].isa_irq != -1 ) || ( SMC37c669_irq_table[i].device_irq != -1 ); i++ ) { if ( irq == SMC37c669_irq_table[i].isa_irq ) { translated_irq = SMC37c669_irq_table[i].device_irq; break; } } } return translated_irq; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function translates DMA channels back and forth between ** ISA DMA channels and SMC37c669 device DMA channels. 
** ** FORMAL PARAMETERS: ** ** drq: ** The DMA channel to translate ** ** RETURN VALUE: ** ** Returns the translated DMA channel, otherwise, returns -1 ** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static int __init SMC37c669_xlate_drq ( int drq ) { int i, translated_drq = -1; if ( SMC37c669_IS_DEVICE_DRQ( drq ) ) { /* ** We are translating a device DMA channel to an ISA DMA channel */ for ( i = 0; ( SMC37c669_drq_table[i].device_drq != -1 ) || ( SMC37c669_drq_table[i].isa_drq != -1 ); i++ ) { if ( drq == SMC37c669_drq_table[i].device_drq ) { translated_drq = SMC37c669_drq_table[i].isa_drq; break; } } } else { /* ** We are translating an ISA DMA channel to a device DMA channel */ for ( i = 0; ( SMC37c669_drq_table[i].isa_drq != -1 ) || ( SMC37c669_drq_table[i].device_drq != -1 ); i++ ) { if ( drq == SMC37c669_drq_table[i].isa_drq ) { translated_drq = SMC37c669_drq_table[i].device_drq; break; } } } return translated_drq; } #if 0 int __init smcc669_init ( void ) { struct INODE *ip; allocinode( smc_ddb.name, 1, &ip ); ip->dva = &smc_ddb; ip->attr = ATTR$M_WRITE | ATTR$M_READ; ip->len[0] = 0x30; ip->misc = 0; INODE_UNLOCK( ip ); return msg_success; } int __init smcc669_open( struct FILE *fp, char *info, char *next, char *mode ) { struct INODE *ip; /* ** Allow multiple readers but only one writer. 
ip->misc keeps track
** of the number of writers
*/
    ip = fp->ip;
    INODE_LOCK( ip );
    if ( fp->mode & ATTR$M_WRITE ) {
	if ( ip->misc ) {
	    INODE_UNLOCK( ip );
	    return msg_failure;	/* too many writers */
	}
	ip->misc++;
    }
/*
** Treat the information field as a byte offset
*/
    *fp->offset = xtoi( info );
    INODE_UNLOCK( ip );

    return msg_success;
}

int __init smcc669_close( struct FILE *fp )
{
    struct INODE *ip;

    ip = fp->ip;
    /* Drop the writer count taken in smcc669_open(). */
    if ( fp->mode & ATTR$M_WRITE ) {
	INODE_LOCK( ip );
	ip->misc--;
	INODE_UNLOCK( ip );
    }
    return msg_success;
}

/* Read 'size * number' configuration registers starting at the file
   offset; returns the number of bytes actually read. */
int __init smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf )
{
    int i;
    int length;
    int nbytes;
    struct INODE *ip;
/*
** Always access a byte at a time
*/
    ip = fp->ip;
    length = size * number;
    nbytes = 0;

    SMC37c669_config_mode( TRUE );
    for ( i = 0; i < length; i++ ) {
	if ( !inrange( *fp->offset, 0, ip->len[0] ) )
	    break;
	*buf++ = SMC37c669_read_config( *fp->offset );
	*fp->offset += 1;
	nbytes++;
    }
    SMC37c669_config_mode( FALSE );

    return nbytes;
}

/* Write 'size * number' configuration registers starting at the file
   offset; returns the number of bytes actually written. */
int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf )
{
    int i;
    int length;
    int nbytes;
    struct INODE *ip;
/*
** Always access a byte at a time
*/
    ip = fp->ip;
    length = size * number;
    nbytes = 0;

    SMC37c669_config_mode( TRUE );
    for ( i = 0; i < length; i++ ) {
	if ( !inrange( *fp->offset, 0, ip->len[0] ) )
	    break;
	SMC37c669_write_config( *fp->offset, *buf );
	*fp->offset += 1;
	buf++;
	nbytes++;
    }
    SMC37c669_config_mode( FALSE );

    return nbytes;
}
#endif

/* Debug aid: dump configuration registers CR00..CR29.  Assumes the
   chip is already in configuration mode. */
void __init SMC37c669_dump_registers(void)
{
    int i;
    for (i = 0; i <= 0x29; i++)
	printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i));
}

/*+
 * ============================================================================
 * = SMC_init - SMC37c669 Super I/O controller initialization                 =
 * ============================================================================
 *
 * OVERVIEW:
 *
 *      This routine configures and enables device functions on the
 *      SMC37c669 Super I/O controller.
*
* FORM OF CALL:
*
*      SMC_init( );
*
* RETURNS:
*
*      Nothing
*
* ARGUMENTS:
*
*      None
*
* SIDE EFFECTS:
*
*      None
*
*/
void __init SMC669_Init ( int index )
{
    SMC37c669_CONFIG_REGS *SMC_base;
    unsigned long flags;

    /* The whole probe/configure sequence toggles configuration mode
       repeatedly, so run it with local interrupts disabled. */
    local_irq_save(flags);
    if ( ( SMC_base = SMC37c669_detect( index ) ) != NULL ) {
#if SMC_DEBUG
	SMC37c669_config_mode( TRUE );
	SMC37c669_dump_registers( );
	SMC37c669_config_mode( FALSE );
	SMC37c669_display_device_info( );
#endif
	/* For each function: disable, load the desired shadow config
	   (-1 leaves the DRQ unchanged), then re-enable so the chip
	   registers are rewritten from the shadow copy. */
	SMC37c669_disable_device( SERIAL_0 );
	SMC37c669_configure_device( SERIAL_0, COM1_BASE, COM1_IRQ, -1 );
	SMC37c669_enable_device( SERIAL_0 );

	SMC37c669_disable_device( SERIAL_1 );
	SMC37c669_configure_device( SERIAL_1, COM2_BASE, COM2_IRQ, -1 );
	SMC37c669_enable_device( SERIAL_1 );

	SMC37c669_disable_device( PARALLEL_0 );
	SMC37c669_configure_device( PARALLEL_0, PARP_BASE, PARP_IRQ, PARP_DRQ );
	SMC37c669_enable_device( PARALLEL_0 );

	SMC37c669_disable_device( FLOPPY_0 );
	SMC37c669_configure_device( FLOPPY_0, FDC_BASE, FDC_IRQ, FDC_DRQ );
	SMC37c669_enable_device( FLOPPY_0 );

	/* Wake up sometimes forgotten floppy, especially on DP264. */
	outb(0xc, 0x3f2);

	/* The IDE function is left disabled. */
	SMC37c669_disable_device( IDE_0 );

#if SMC_DEBUG
	SMC37c669_config_mode( TRUE );
	SMC37c669_dump_registers( );
	SMC37c669_config_mode( FALSE );
	SMC37c669_display_device_info( );
#endif
	local_irq_restore(flags);
	printk( "SMC37c669 Super I/O Controller found @ 0x%p\n", SMC_base );
    }
    else {
	local_irq_restore(flags);
#if SMC_DEBUG
	printk( "No SMC37c669 Super I/O Controller found\n" );
#endif
    }
}
gpl-2.0
Rerito/linux-ubi
drivers/nubus/proc.c
10175
4602
/* drivers/nubus/proc.c: Proc FS interface for NuBus. By David Huggins-Daines <dhd@debian.org> Much code and many ideas from drivers/pci/proc.c: Copyright (c) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz> This is initially based on the Zorro and PCI interfaces. However, it works somewhat differently. The intent is to provide a structure in /proc analogous to the structure of the NuBus ROM resources. Therefore each NuBus device is in fact a directory, which may in turn contain subdirectories. The "files" correspond to NuBus resource records. For those types of records which we know how to convert to formats that are meaningful to userspace (mostly just icons) these files will provide "cooked" data. Otherwise they will simply provide raw access (read-only of course) to the ROM. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/nubus.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <asm/uaccess.h> #include <asm/byteorder.h> static int nubus_devices_proc_show(struct seq_file *m, void *v) { struct nubus_dev *dev = nubus_devices; while (dev) { seq_printf(m, "%x\t%04x %04x %04x %04x", dev->board->slot, dev->category, dev->type, dev->dr_sw, dev->dr_hw); seq_printf(m, "\t%08lx\n", dev->board->slot_addr); dev = dev->next; } return 0; } static int nubus_devices_proc_open(struct inode *inode, struct file *file) { return single_open(file, nubus_devices_proc_show, NULL); } static const struct file_operations nubus_devices_proc_fops = { .owner = THIS_MODULE, .open = nubus_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct proc_dir_entry *proc_bus_nubus_dir; static void nubus_proc_subdir(struct nubus_dev* dev, struct proc_dir_entry* parent, struct nubus_dir* dir) { struct nubus_dirent ent; /* Some of these are directories, others aren't */ while (nubus_readdir(dir, &ent) != -1) { char name[8]; struct proc_dir_entry* e; sprintf(name, 
"%x", ent.type); e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, parent); if (!e) return; } } /* Can't do this recursively since the root directory is structured somewhat differently from the subdirectories */ static void nubus_proc_populate(struct nubus_dev* dev, struct proc_dir_entry* parent, struct nubus_dir* root) { struct nubus_dirent ent; /* We know these are all directories (board resource + one or more functional resources) */ while (nubus_readdir(root, &ent) != -1) { char name[8]; struct proc_dir_entry* e; struct nubus_dir dir; sprintf(name, "%x", ent.type); e = proc_mkdir(name, parent); if (!e) return; /* And descend */ if (nubus_get_subdir(&ent, &dir) == -1) { /* This shouldn't happen */ printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n", dev->board->slot, ent.type); continue; } else { nubus_proc_subdir(dev, e, &dir); } } } int nubus_proc_attach_device(struct nubus_dev *dev) { struct proc_dir_entry *e; struct nubus_dir root; char name[8]; if (dev == NULL) { printk(KERN_ERR "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n"); return -1; } if (dev->board == NULL) { printk(KERN_ERR "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n"); printk("dev = %p, dev->board = %p\n", dev, dev->board); return -1; } /* Create a directory */ sprintf(name, "%x", dev->board->slot); e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir); if (!e) return -ENOMEM; /* Now recursively populate it with files */ nubus_get_root_dir(dev->board, &root); nubus_proc_populate(dev, e, &root); return 0; } EXPORT_SYMBOL(nubus_proc_attach_device); /* FIXME: this is certainly broken! 
*/ int nubus_proc_detach_device(struct nubus_dev *dev) { struct proc_dir_entry *e; if ((e = dev->procdir)) { if (atomic_read(&e->count)) return -EBUSY; remove_proc_entry(e->name, proc_bus_nubus_dir); dev->procdir = NULL; } return 0; } EXPORT_SYMBOL(nubus_proc_detach_device); void __init proc_bus_nubus_add_devices(void) { struct nubus_dev *dev; for(dev = nubus_devices; dev; dev = dev->next) nubus_proc_attach_device(dev); } void __init nubus_proc_init(void) { if (!MACH_IS_MAC) return; proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL); proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops); proc_bus_nubus_add_devices(); }
gpl-2.0
cubieboard/Cubieboard5-kernel-source
drivers/nubus/proc.c
10175
4602
/* drivers/nubus/proc.c: Proc FS interface for NuBus. By David Huggins-Daines <dhd@debian.org> Much code and many ideas from drivers/pci/proc.c: Copyright (c) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz> This is initially based on the Zorro and PCI interfaces. However, it works somewhat differently. The intent is to provide a structure in /proc analogous to the structure of the NuBus ROM resources. Therefore each NuBus device is in fact a directory, which may in turn contain subdirectories. The "files" correspond to NuBus resource records. For those types of records which we know how to convert to formats that are meaningful to userspace (mostly just icons) these files will provide "cooked" data. Otherwise they will simply provide raw access (read-only of course) to the ROM. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/nubus.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <asm/uaccess.h> #include <asm/byteorder.h> static int nubus_devices_proc_show(struct seq_file *m, void *v) { struct nubus_dev *dev = nubus_devices; while (dev) { seq_printf(m, "%x\t%04x %04x %04x %04x", dev->board->slot, dev->category, dev->type, dev->dr_sw, dev->dr_hw); seq_printf(m, "\t%08lx\n", dev->board->slot_addr); dev = dev->next; } return 0; } static int nubus_devices_proc_open(struct inode *inode, struct file *file) { return single_open(file, nubus_devices_proc_show, NULL); } static const struct file_operations nubus_devices_proc_fops = { .owner = THIS_MODULE, .open = nubus_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct proc_dir_entry *proc_bus_nubus_dir; static void nubus_proc_subdir(struct nubus_dev* dev, struct proc_dir_entry* parent, struct nubus_dir* dir) { struct nubus_dirent ent; /* Some of these are directories, others aren't */ while (nubus_readdir(dir, &ent) != -1) { char name[8]; struct proc_dir_entry* e; sprintf(name, 
"%x", ent.type); e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, parent); if (!e) return; } } /* Can't do this recursively since the root directory is structured somewhat differently from the subdirectories */ static void nubus_proc_populate(struct nubus_dev* dev, struct proc_dir_entry* parent, struct nubus_dir* root) { struct nubus_dirent ent; /* We know these are all directories (board resource + one or more functional resources) */ while (nubus_readdir(root, &ent) != -1) { char name[8]; struct proc_dir_entry* e; struct nubus_dir dir; sprintf(name, "%x", ent.type); e = proc_mkdir(name, parent); if (!e) return; /* And descend */ if (nubus_get_subdir(&ent, &dir) == -1) { /* This shouldn't happen */ printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n", dev->board->slot, ent.type); continue; } else { nubus_proc_subdir(dev, e, &dir); } } } int nubus_proc_attach_device(struct nubus_dev *dev) { struct proc_dir_entry *e; struct nubus_dir root; char name[8]; if (dev == NULL) { printk(KERN_ERR "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n"); return -1; } if (dev->board == NULL) { printk(KERN_ERR "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n"); printk("dev = %p, dev->board = %p\n", dev, dev->board); return -1; } /* Create a directory */ sprintf(name, "%x", dev->board->slot); e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir); if (!e) return -ENOMEM; /* Now recursively populate it with files */ nubus_get_root_dir(dev->board, &root); nubus_proc_populate(dev, e, &root); return 0; } EXPORT_SYMBOL(nubus_proc_attach_device); /* FIXME: this is certainly broken! 
*/ int nubus_proc_detach_device(struct nubus_dev *dev) { struct proc_dir_entry *e; if ((e = dev->procdir)) { if (atomic_read(&e->count)) return -EBUSY; remove_proc_entry(e->name, proc_bus_nubus_dir); dev->procdir = NULL; } return 0; } EXPORT_SYMBOL(nubus_proc_detach_device); void __init proc_bus_nubus_add_devices(void) { struct nubus_dev *dev; for(dev = nubus_devices; dev; dev = dev->next) nubus_proc_attach_device(dev); } void __init nubus_proc_init(void) { if (!MACH_IS_MAC) return; proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL); proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops); proc_bus_nubus_add_devices(); }
gpl-2.0
davidmueller13/TW_Kernel_LP
tools/power/cpupower/bench/main.c
10431
5421
/* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <errno.h> #include "config.h" #include "system.h" #include "benchmark.h" static struct option long_options[] = { {"output", 1, 0, 'o'}, {"sleep", 1, 0, 's'}, {"load", 1, 0, 'l'}, {"verbose", 0, 0, 'v'}, {"cpu", 1, 0, 'c'}, {"governor", 1, 0, 'g'}, {"prio", 1, 0, 'p'}, {"file", 1, 0, 'f'}, {"cycles", 1, 0, 'n'}, {"rounds", 1, 0, 'r'}, {"load-step", 1, 0, 'x'}, {"sleep-step", 1, 0, 'y'}, {"help", 0, 0, 'h'}, {0, 0, 0, 0} }; /******************************************************************* usage *******************************************************************/ void usage() { printf("usage: ./bench\n"); printf("Options:\n"); printf(" -l, --load=<long int>\t\tinitial load time in us\n"); printf(" -s, --sleep=<long int>\t\tinitial sleep time in us\n"); printf(" -x, --load-step=<long int>\ttime to be added to load time, in us\n"); printf(" -y, --sleep-step=<long int>\ttime to be added to sleep time, in us\n"); printf(" -c, --cpu=<cpu #>\t\t\tCPU Nr. 
to use, starting at 0\n"); printf(" -p, --prio=<priority>\t\t\tscheduler priority, HIGH, LOW or DEFAULT\n"); printf(" -g, --governor=<governor>\t\tcpufreq governor to test\n"); printf(" -n, --cycles=<int>\t\t\tload/sleep cycles\n"); printf(" -r, --rounds<int>\t\t\tload/sleep rounds\n"); printf(" -f, --file=<configfile>\t\tconfig file to use\n"); printf(" -o, --output=<dir>\t\t\toutput path. Filename will be OUTPUTPATH/benchmark_TIMESTAMP.log\n"); printf(" -v, --verbose\t\t\t\tverbose output on/off\n"); printf(" -h, --help\t\t\t\tPrint this help screen\n"); exit(1); } /******************************************************************* main *******************************************************************/ int main(int argc, char **argv) { int c; int option_index = 0; struct config *config = NULL; config = prepare_default_config(); if (config == NULL) return EXIT_FAILURE; while (1) { c = getopt_long (argc, argv, "hg:o:s:l:vc:p:f:n:r:x:y:", long_options, &option_index); if (c == -1) break; switch (c) { case 'o': if (config->output != NULL) fclose(config->output); config->output = prepare_output(optarg); if (config->output == NULL) return EXIT_FAILURE; dprintf("user output path -> %s\n", optarg); break; case 's': sscanf(optarg, "%li", &config->sleep); dprintf("user sleep time -> %s\n", optarg); break; case 'l': sscanf(optarg, "%li", &config->load); dprintf("user load time -> %s\n", optarg); break; case 'c': sscanf(optarg, "%u", &config->cpu); dprintf("user cpu -> %s\n", optarg); break; case 'g': strncpy(config->governor, optarg, 14); dprintf("user governor -> %s\n", optarg); break; case 'p': if (string_to_prio(optarg) != SCHED_ERR) { config->prio = string_to_prio(optarg); dprintf("user prio -> %s\n", optarg); } else { if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } break; case 'n': sscanf(optarg, "%u", &config->cycles); dprintf("user cycles -> %s\n", optarg); break; case 'r': sscanf(optarg, "%u", &config->rounds); 
dprintf("user rounds -> %s\n", optarg); break; case 'x': sscanf(optarg, "%li", &config->load_step); dprintf("user load_step -> %s\n", optarg); break; case 'y': sscanf(optarg, "%li", &config->sleep_step); dprintf("user sleep_step -> %s\n", optarg); break; case 'f': if (prepare_config(optarg, config)) return EXIT_FAILURE; break; case 'v': config->verbose = 1; dprintf("verbose output enabled\n"); break; case 'h': case '?': default: if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } } if (config->verbose) { printf("starting benchmark with parameters:\n"); printf("config:\n\t" "sleep=%li\n\t" "load=%li\n\t" "sleep_step=%li\n\t" "load_step=%li\n\t" "cpu=%u\n\t" "cycles=%u\n\t" "rounds=%u\n\t" "governor=%s\n\n", config->sleep, config->load, config->sleep_step, config->load_step, config->cpu, config->cycles, config->rounds, config->governor); } prepare_user(config); prepare_system(config); start_benchmark(config); if (config->output != stdout) fclose(config->output); free(config); return EXIT_SUCCESS; }
gpl-2.0
juston-li/flo
tools/power/cpupower/bench/main.c
10431
5421
/* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <errno.h> #include "config.h" #include "system.h" #include "benchmark.h" static struct option long_options[] = { {"output", 1, 0, 'o'}, {"sleep", 1, 0, 's'}, {"load", 1, 0, 'l'}, {"verbose", 0, 0, 'v'}, {"cpu", 1, 0, 'c'}, {"governor", 1, 0, 'g'}, {"prio", 1, 0, 'p'}, {"file", 1, 0, 'f'}, {"cycles", 1, 0, 'n'}, {"rounds", 1, 0, 'r'}, {"load-step", 1, 0, 'x'}, {"sleep-step", 1, 0, 'y'}, {"help", 0, 0, 'h'}, {0, 0, 0, 0} }; /******************************************************************* usage *******************************************************************/ void usage() { printf("usage: ./bench\n"); printf("Options:\n"); printf(" -l, --load=<long int>\t\tinitial load time in us\n"); printf(" -s, --sleep=<long int>\t\tinitial sleep time in us\n"); printf(" -x, --load-step=<long int>\ttime to be added to load time, in us\n"); printf(" -y, --sleep-step=<long int>\ttime to be added to sleep time, in us\n"); printf(" -c, --cpu=<cpu #>\t\t\tCPU Nr. 
to use, starting at 0\n"); printf(" -p, --prio=<priority>\t\t\tscheduler priority, HIGH, LOW or DEFAULT\n"); printf(" -g, --governor=<governor>\t\tcpufreq governor to test\n"); printf(" -n, --cycles=<int>\t\t\tload/sleep cycles\n"); printf(" -r, --rounds<int>\t\t\tload/sleep rounds\n"); printf(" -f, --file=<configfile>\t\tconfig file to use\n"); printf(" -o, --output=<dir>\t\t\toutput path. Filename will be OUTPUTPATH/benchmark_TIMESTAMP.log\n"); printf(" -v, --verbose\t\t\t\tverbose output on/off\n"); printf(" -h, --help\t\t\t\tPrint this help screen\n"); exit(1); } /******************************************************************* main *******************************************************************/ int main(int argc, char **argv) { int c; int option_index = 0; struct config *config = NULL; config = prepare_default_config(); if (config == NULL) return EXIT_FAILURE; while (1) { c = getopt_long (argc, argv, "hg:o:s:l:vc:p:f:n:r:x:y:", long_options, &option_index); if (c == -1) break; switch (c) { case 'o': if (config->output != NULL) fclose(config->output); config->output = prepare_output(optarg); if (config->output == NULL) return EXIT_FAILURE; dprintf("user output path -> %s\n", optarg); break; case 's': sscanf(optarg, "%li", &config->sleep); dprintf("user sleep time -> %s\n", optarg); break; case 'l': sscanf(optarg, "%li", &config->load); dprintf("user load time -> %s\n", optarg); break; case 'c': sscanf(optarg, "%u", &config->cpu); dprintf("user cpu -> %s\n", optarg); break; case 'g': strncpy(config->governor, optarg, 14); dprintf("user governor -> %s\n", optarg); break; case 'p': if (string_to_prio(optarg) != SCHED_ERR) { config->prio = string_to_prio(optarg); dprintf("user prio -> %s\n", optarg); } else { if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } break; case 'n': sscanf(optarg, "%u", &config->cycles); dprintf("user cycles -> %s\n", optarg); break; case 'r': sscanf(optarg, "%u", &config->rounds); 
dprintf("user rounds -> %s\n", optarg); break; case 'x': sscanf(optarg, "%li", &config->load_step); dprintf("user load_step -> %s\n", optarg); break; case 'y': sscanf(optarg, "%li", &config->sleep_step); dprintf("user sleep_step -> %s\n", optarg); break; case 'f': if (prepare_config(optarg, config)) return EXIT_FAILURE; break; case 'v': config->verbose = 1; dprintf("verbose output enabled\n"); break; case 'h': case '?': default: if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } } if (config->verbose) { printf("starting benchmark with parameters:\n"); printf("config:\n\t" "sleep=%li\n\t" "load=%li\n\t" "sleep_step=%li\n\t" "load_step=%li\n\t" "cpu=%u\n\t" "cycles=%u\n\t" "rounds=%u\n\t" "governor=%s\n\n", config->sleep, config->load, config->sleep_step, config->load_step, config->cpu, config->cycles, config->rounds, config->governor); } prepare_user(config); prepare_system(config); start_benchmark(config); if (config->output != stdout) fclose(config->output); free(config); return EXIT_SUCCESS; }
gpl-2.0
Tesla-M-Devices/android_kernel_motorola_msm8226
arch/blackfin/mach-bf527/dma.c
12223
1994
/* * This file contains the simple DMA Implementation for Blackfin * * Copyright 2007-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/dma.h> struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = { (struct dma_register *) DMA0_NEXT_DESC_PTR, (struct dma_register *) DMA1_NEXT_DESC_PTR, (struct dma_register *) DMA2_NEXT_DESC_PTR, (struct dma_register *) DMA3_NEXT_DESC_PTR, (struct dma_register *) DMA4_NEXT_DESC_PTR, (struct dma_register *) DMA5_NEXT_DESC_PTR, (struct dma_register *) DMA6_NEXT_DESC_PTR, (struct dma_register *) DMA7_NEXT_DESC_PTR, (struct dma_register *) DMA8_NEXT_DESC_PTR, (struct dma_register *) DMA9_NEXT_DESC_PTR, (struct dma_register *) DMA10_NEXT_DESC_PTR, (struct dma_register *) DMA11_NEXT_DESC_PTR, (struct dma_register *) MDMA_D0_NEXT_DESC_PTR, (struct dma_register *) MDMA_S0_NEXT_DESC_PTR, (struct dma_register *) MDMA_D1_NEXT_DESC_PTR, (struct dma_register *) MDMA_S1_NEXT_DESC_PTR, }; EXPORT_SYMBOL(dma_io_base_addr); int channel2irq(unsigned int channel) { int ret_irq = -1; switch (channel) { case CH_PPI: ret_irq = IRQ_PPI; break; case CH_EMAC_RX: ret_irq = IRQ_MAC_RX; break; case CH_EMAC_TX: ret_irq = IRQ_MAC_TX; break; case CH_UART1_RX: ret_irq = IRQ_UART1_RX; break; case CH_UART1_TX: ret_irq = IRQ_UART1_TX; break; case CH_SPORT0_RX: ret_irq = IRQ_SPORT0_RX; break; case CH_SPORT0_TX: ret_irq = IRQ_SPORT0_TX; break; case CH_SPORT1_RX: ret_irq = IRQ_SPORT1_RX; break; case CH_SPORT1_TX: ret_irq = IRQ_SPORT1_TX; break; case CH_SPI: ret_irq = IRQ_SPI; break; case CH_UART0_RX: ret_irq = IRQ_UART0_RX; break; case CH_UART0_TX: ret_irq = IRQ_UART0_TX; break; case CH_MEM_STREAM0_SRC: case CH_MEM_STREAM0_DEST: ret_irq = IRQ_MEM_DMA0; break; case CH_MEM_STREAM1_SRC: case CH_MEM_STREAM1_DEST: ret_irq = IRQ_MEM_DMA1; break; } return ret_irq; }
gpl-2.0
cphelps76/elite_kernel_grouper
sound/pci/echoaudio/indigo_dsp.c
12479
4043
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain); static int update_vmixer_level(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Indigo\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = FW_INDIGO_DSP; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int 
set_mixer_defaults(struct echoaudio *chip) { return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { return ECHO_CLOCK_BIT_INTERNAL; } /* The Indigo has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u32 control_reg; switch (rate) { case 96000: control_reg = MIA_96000; break; case 88200: control_reg = MIA_88200; break; case 48000: control_reg = MIA_48000; break; case 44100: control_reg = MIA_44100; break; case 32000: control_reg = MIA_32000; break; default: DE_ACT(("set_sample_rate: %d invalid!\n", rate)); return -EINVAL; } /* Set the control register if it has changed */ if (control_reg != le32_to_cpu(chip->comm_page->control_register)) { if (wait_handshake(chip)) return -EIO; chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */ chip->comm_page->control_register = cpu_to_le32(control_reg); chip->sample_rate = rate; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_CLOCKS); } return 0; } /* This function routes the sound from a virtual channel to a real output */ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain) { int index; if (snd_BUG_ON(pipe >= num_pipes_out(chip) || output >= num_busses_out(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->vmixer_gain[output][pipe] = gain; index = output * num_pipes_out(chip) + pipe; chip->comm_page->vmixer[index] = gain; DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain)); return 0; } /* Tell the DSP to read and update virtual mixer levels in comm page. */ static int update_vmixer_level(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_VMIXER_GAIN); }
gpl-2.0
MoKee/android_kernel_oppo_n3
sound/pci/echoaudio/indigo_dsp.c
12479
4043
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain); static int update_vmixer_level(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Indigo\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = FW_INDIGO_DSP; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int 
set_mixer_defaults(struct echoaudio *chip) { return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { return ECHO_CLOCK_BIT_INTERNAL; } /* The Indigo has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u32 control_reg; switch (rate) { case 96000: control_reg = MIA_96000; break; case 88200: control_reg = MIA_88200; break; case 48000: control_reg = MIA_48000; break; case 44100: control_reg = MIA_44100; break; case 32000: control_reg = MIA_32000; break; default: DE_ACT(("set_sample_rate: %d invalid!\n", rate)); return -EINVAL; } /* Set the control register if it has changed */ if (control_reg != le32_to_cpu(chip->comm_page->control_register)) { if (wait_handshake(chip)) return -EIO; chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */ chip->comm_page->control_register = cpu_to_le32(control_reg); chip->sample_rate = rate; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_CLOCKS); } return 0; } /* This function routes the sound from a virtual channel to a real output */ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain) { int index; if (snd_BUG_ON(pipe >= num_pipes_out(chip) || output >= num_busses_out(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->vmixer_gain[output][pipe] = gain; index = output * num_pipes_out(chip) + pipe; chip->comm_page->vmixer[index] = gain; DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain)); return 0; } /* Tell the DSP to read and update virtual mixer levels in comm page. */ static int update_vmixer_level(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_VMIXER_GAIN); }
gpl-2.0
KaijiHakaroku/kernel_acer_picasso
net/wimax/debugfs.c
14527
2283
/* * Linux WiMAX * Debugfs support * * * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/debugfs.h> #include <linux/wimax.h> #include "wimax-internal.h" #define D_SUBMODULE debugfs #include "debug-levels.h" #define __debugfs_register(prefix, name, parent) \ do { \ result = d_level_register_debugfs(prefix, name, parent); \ if (result < 0) \ goto error; \ } while (0) int wimax_debugfs_add(struct wimax_dev *wimax_dev) { int result; struct net_device *net_dev = wimax_dev->net_dev; struct device *dev = net_dev->dev.parent; struct dentry *dentry; char buf[128]; snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name); dentry = debugfs_create_dir(buf, NULL); result = PTR_ERR(dentry); if (IS_ERR(dentry)) { if (result == -ENODEV) result = 0; /* No debugfs support */ else dev_err(dev, "Can't create debugfs dentry: %d\n", result); goto out; } wimax_dev->debugfs_dentry = dentry; __debugfs_register("wimax_dl_", debugfs, dentry); __debugfs_register("wimax_dl_", id_table, dentry); __debugfs_register("wimax_dl_", op_msg, dentry); __debugfs_register("wimax_dl_", op_reset, dentry); __debugfs_register("wimax_dl_", op_rfkill, dentry); __debugfs_register("wimax_dl_", op_state_get, dentry); __debugfs_register("wimax_dl_", stack, dentry); result = 0; out: return 
result; error: debugfs_remove_recursive(wimax_dev->debugfs_dentry); return result; } void wimax_debugfs_rm(struct wimax_dev *wimax_dev) { debugfs_remove_recursive(wimax_dev->debugfs_dentry); }
gpl-2.0
gt945/git-ptp
merge-recursive.c
192
59074
/* * Recursive Merge algorithm stolen from git-merge-recursive.py by * Fredrik Kuivinen. * The thieves were Alex Riesen and Johannes Schindelin, in June/July 2006 */ #include "cache.h" #include "advice.h" #include "lockfile.h" #include "cache-tree.h" #include "commit.h" #include "blob.h" #include "builtin.h" #include "tree-walk.h" #include "diff.h" #include "diffcore.h" #include "tag.h" #include "unpack-trees.h" #include "string-list.h" #include "xdiff-interface.h" #include "ll-merge.h" #include "attr.h" #include "merge-recursive.h" #include "dir.h" #include "submodule.h" static struct tree *shift_tree_object(struct tree *one, struct tree *two, const char *subtree_shift) { unsigned char shifted[20]; if (!*subtree_shift) { shift_tree(one->object.sha1, two->object.sha1, shifted, 0); } else { shift_tree_by(one->object.sha1, two->object.sha1, shifted, subtree_shift); } if (!hashcmp(two->object.sha1, shifted)) return two; return lookup_tree(shifted); } static struct commit *make_virtual_commit(struct tree *tree, const char *comment) { struct commit *commit = alloc_commit_node(); struct merge_remote_desc *desc = xmalloc(sizeof(*desc)); desc->name = comment; desc->obj = (struct object *)commit; commit->tree = tree; commit->util = desc; commit->object.parsed = 1; return commit; } /* * Since we use get_tree_entry(), which does not put the read object into * the object pool, we cannot rely on a == b. 
*/ static int sha_eq(const unsigned char *a, const unsigned char *b) { if (!a && !b) return 2; return a && b && hashcmp(a, b) == 0; } enum rename_type { RENAME_NORMAL = 0, RENAME_DELETE, RENAME_ONE_FILE_TO_ONE, RENAME_ONE_FILE_TO_TWO, RENAME_TWO_FILES_TO_ONE }; struct rename_conflict_info { enum rename_type rename_type; struct diff_filepair *pair1; struct diff_filepair *pair2; const char *branch1; const char *branch2; struct stage_data *dst_entry1; struct stage_data *dst_entry2; struct diff_filespec ren1_other; struct diff_filespec ren2_other; }; /* * Since we want to write the index eventually, we cannot reuse the index * for these (temporary) data. */ struct stage_data { struct { unsigned mode; unsigned char sha[20]; } stages[4]; struct rename_conflict_info *rename_conflict_info; unsigned processed:1; }; static inline void setup_rename_conflict_info(enum rename_type rename_type, struct diff_filepair *pair1, struct diff_filepair *pair2, const char *branch1, const char *branch2, struct stage_data *dst_entry1, struct stage_data *dst_entry2, struct merge_options *o, struct stage_data *src_entry1, struct stage_data *src_entry2) { struct rename_conflict_info *ci = xcalloc(1, sizeof(struct rename_conflict_info)); ci->rename_type = rename_type; ci->pair1 = pair1; ci->branch1 = branch1; ci->branch2 = branch2; ci->dst_entry1 = dst_entry1; dst_entry1->rename_conflict_info = ci; dst_entry1->processed = 0; assert(!pair2 == !dst_entry2); if (dst_entry2) { ci->dst_entry2 = dst_entry2; ci->pair2 = pair2; dst_entry2->rename_conflict_info = ci; } if (rename_type == RENAME_TWO_FILES_TO_ONE) { /* * For each rename, there could have been * modifications on the side of history where that * file was not renamed. */ int ostage1 = o->branch1 == branch1 ? 
3 : 2; int ostage2 = ostage1 ^ 1; ci->ren1_other.path = pair1->one->path; hashcpy(ci->ren1_other.sha1, src_entry1->stages[ostage1].sha); ci->ren1_other.mode = src_entry1->stages[ostage1].mode; ci->ren2_other.path = pair2->one->path; hashcpy(ci->ren2_other.sha1, src_entry2->stages[ostage2].sha); ci->ren2_other.mode = src_entry2->stages[ostage2].mode; } } static int show(struct merge_options *o, int v) { return (!o->call_depth && o->verbosity >= v) || o->verbosity >= 5; } static void flush_output(struct merge_options *o) { if (o->obuf.len) { fputs(o->obuf.buf, stdout); strbuf_reset(&o->obuf); } } __attribute__((format (printf, 3, 4))) static void output(struct merge_options *o, int v, const char *fmt, ...) { va_list ap; if (!show(o, v)) return; strbuf_addchars(&o->obuf, ' ', o->call_depth * 2); va_start(ap, fmt); strbuf_vaddf(&o->obuf, fmt, ap); va_end(ap); strbuf_addch(&o->obuf, '\n'); if (!o->buffer_output) flush_output(o); } static void output_commit_title(struct merge_options *o, struct commit *commit) { int i; flush_output(o); for (i = o->call_depth; i--;) fputs(" ", stdout); if (commit->util) printf("virtual %s\n", merge_remote_util(commit)->name); else { printf("%s ", find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV)); if (parse_commit(commit) != 0) printf(_("(bad commit)\n")); else { const char *title; const char *msg = get_commit_buffer(commit, NULL); int len = find_commit_subject(msg, &title); if (len) printf("%.*s\n", len, title); unuse_commit_buffer(commit, msg); } } } static int add_cacheinfo(unsigned int mode, const unsigned char *sha1, const char *path, int stage, int refresh, int options) { struct cache_entry *ce; ce = make_cache_entry(mode, sha1 ? sha1 : null_sha1, path, stage, (refresh ? 
(CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING) : 0 )); if (!ce) return error(_("addinfo_cache failed for path '%s'"), path); return add_cache_entry(ce, options); } static void init_tree_desc_from_tree(struct tree_desc *desc, struct tree *tree) { parse_tree(tree); init_tree_desc(desc, tree->buffer, tree->size); } static int git_merge_trees(int index_only, struct tree *common, struct tree *head, struct tree *merge) { int rc; struct tree_desc t[3]; struct unpack_trees_options opts; memset(&opts, 0, sizeof(opts)); if (index_only) opts.index_only = 1; else opts.update = 1; opts.merge = 1; opts.head_idx = 2; opts.fn = threeway_merge; opts.src_index = &the_index; opts.dst_index = &the_index; setup_unpack_trees_porcelain(&opts, "merge"); init_tree_desc_from_tree(t+0, common); init_tree_desc_from_tree(t+1, head); init_tree_desc_from_tree(t+2, merge); rc = unpack_trees(3, t, &opts); cache_tree_free(&active_cache_tree); return rc; } struct tree *write_tree_from_memory(struct merge_options *o) { struct tree *result = NULL; if (unmerged_cache()) { int i; fprintf(stderr, "BUG: There are unmerged index entries:\n"); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; if (ce_stage(ce)) fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce), (int)ce_namelen(ce), ce->name); } die("Bug in merge-recursive.c"); } if (!active_cache_tree) active_cache_tree = cache_tree(); if (!cache_tree_fully_valid(active_cache_tree) && cache_tree_update(&the_index, 0) < 0) die(_("error building trees")); result = lookup_tree(active_cache_tree->sha1); return result; } static int save_files_dirs(const unsigned char *sha1, struct strbuf *base, const char *path, unsigned int mode, int stage, void *context) { int baselen = base->len; struct merge_options *o = context; strbuf_addstr(base, path); if (S_ISDIR(mode)) string_list_insert(&o->current_directory_set, base->buf); else string_list_insert(&o->current_file_set, base->buf); strbuf_setlen(base, baselen); return (S_ISDIR(mode) ? 
READ_TREE_RECURSIVE : 0);
}

/*
 * Populate o->current_file_set and o->current_directory_set from the
 * given tree (via save_files_dirs).  Returns the total number of
 * collected paths, or 0 when the tree walk fails.
 */
static int get_files_dirs(struct merge_options *o, struct tree *tree)
{
	int n;
	struct pathspec match_all;
	memset(&match_all, 0, sizeof(match_all));
	if (read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o))
		return 0;
	n = o->current_file_set.nr + o->current_directory_set.nr;
	return n;
}

/*
 * Returns an index_entry instance which doesn't have to correspond to
 * a real cache entry in Git's index.
 */
static struct stage_data *insert_stage_data(const char *path,
		struct tree *o, struct tree *a, struct tree *b,
		struct string_list *entries)
{
	struct string_list_item *item;
	struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
	/* stage 1 = merge base, stage 2 = HEAD side, stage 3 = other side */
	get_tree_entry(o->object.sha1, path,
			e->stages[1].sha, &e->stages[1].mode);
	get_tree_entry(a->object.sha1, path,
			e->stages[2].sha, &e->stages[2].mode);
	get_tree_entry(b->object.sha1, path,
			e->stages[3].sha, &e->stages[3].mode);
	item = string_list_insert(entries, path);
	item->util = e;
	return e;
}

/*
 * Create a dictionary mapping file names to stage_data objects. The
 * dictionary contains one entry for every path with a non-zero stage entry.
*/
static struct string_list *get_unmerged(void)
{
	struct string_list *unmerged = xcalloc(1, sizeof(struct string_list));
	int i;

	unmerged->strdup_strings = 1;

	for (i = 0; i < active_nr; i++) {
		struct string_list_item *item;
		struct stage_data *e;
		const struct cache_entry *ce = active_cache[i];
		if (!ce_stage(ce))
			continue; /* stage-0 entries are already merged */

		/* one stage_data per path, shared by all of its stages */
		item = string_list_lookup(unmerged, ce->name);
		if (!item) {
			item = string_list_insert(unmerged, ce->name);
			item->util = xcalloc(1, sizeof(struct stage_data));
		}
		e = item->util;
		e->stages[ce_stage(ce)].mode = ce->ce_mode;
		hashcpy(e->stages[ce_stage(ce)].sha, ce->sha1);
	}

	return unmerged;
}

/* qsort comparator used by record_df_conflict_files() below. */
static int string_list_df_name_compare(const void *a, const void *b)
{
	const struct string_list_item *one = a;
	const struct string_list_item *two = b;
	int onelen = strlen(one->string);
	int twolen = strlen(two->string);
	/*
	 * Here we only care that entries for D/F conflicts are
	 * adjacent, in particular with the file of the D/F conflict
	 * appearing before files below the corresponding directory.
	 * The order of the rest of the list is irrelevant for us.
	 *
	 * To achieve this, we sort with df_name_compare and provide
	 * the mode S_IFDIR so that D/F conflicts will sort correctly.
	 * We use the mode S_IFDIR for everything else for simplicity,
	 * since in other cases any changes in their order due to
	 * sorting cause no problems for us.
	 */
	int cmp = df_name_compare(one->string, onelen, S_IFDIR,
				  two->string, twolen, S_IFDIR);
	/*
	 * Now that 'foo' and 'foo/bar' compare equal, we have to make sure
	 * that 'foo' comes before 'foo/bar'.
	 */
	if (cmp)
		return cmp;
	return onelen - twolen;
}

static void record_df_conflict_files(struct merge_options *o,
				     struct string_list *entries)
{
	/* If there is a D/F conflict and the file for such a conflict
	 * currently exist in the working tree, we want to allow it to be
	 * removed to make room for the corresponding directory if needed.
* The files underneath the directories of such D/F conflicts will * be processed before the corresponding file involved in the D/F * conflict. If the D/F directory ends up being removed by the * merge, then we won't have to touch the D/F file. If the D/F * directory needs to be written to the working copy, then the D/F * file will simply be removed (in make_room_for_path()) to make * room for the necessary paths. Note that if both the directory * and the file need to be present, then the D/F file will be * reinstated with a new unique name at the time it is processed. */ struct string_list df_sorted_entries; const char *last_file = NULL; int last_len = 0; int i; /* * If we're merging merge-bases, we don't want to bother with * any working directory changes. */ if (o->call_depth) return; /* Ensure D/F conflicts are adjacent in the entries list. */ memset(&df_sorted_entries, 0, sizeof(struct string_list)); for (i = 0; i < entries->nr; i++) { struct string_list_item *next = &entries->items[i]; string_list_append(&df_sorted_entries, next->string)->util = next->util; } qsort(df_sorted_entries.items, entries->nr, sizeof(*entries->items), string_list_df_name_compare); string_list_clear(&o->df_conflict_file_set, 1); for (i = 0; i < df_sorted_entries.nr; i++) { const char *path = df_sorted_entries.items[i].string; int len = strlen(path); struct stage_data *e = df_sorted_entries.items[i].util; /* * Check if last_file & path correspond to a D/F conflict; * i.e. whether path is last_file+'/'+<something>. * If so, record that it's okay to remove last_file to make * room for path and friends if needed. */ if (last_file && len > last_len && memcmp(path, last_file, last_len) == 0 && path[last_len] == '/') { string_list_insert(&o->df_conflict_file_set, last_file); } /* * Determine whether path could exist as a file in the * working directory as a possible D/F conflict. This * will only occur when it exists in stage 2 as a * file. 
*/ if (S_ISREG(e->stages[2].mode) || S_ISLNK(e->stages[2].mode)) { last_file = path; last_len = len; } else { last_file = NULL; } } string_list_clear(&df_sorted_entries, 0); } struct rename { struct diff_filepair *pair; struct stage_data *src_entry; struct stage_data *dst_entry; unsigned processed:1; }; /* * Get information of all renames which occurred between 'o_tree' and * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and * 'b_tree') to be able to associate the correct cache entries with * the rename information. 'tree' is always equal to either a_tree or b_tree. */ static struct string_list *get_renames(struct merge_options *o, struct tree *tree, struct tree *o_tree, struct tree *a_tree, struct tree *b_tree, struct string_list *entries) { int i; struct string_list *renames; struct diff_options opts; renames = xcalloc(1, sizeof(struct string_list)); diff_setup(&opts); DIFF_OPT_SET(&opts, RECURSIVE); DIFF_OPT_CLR(&opts, RENAME_EMPTY); opts.detect_rename = DIFF_DETECT_RENAME; opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit : o->diff_rename_limit >= 0 ? 
o->diff_rename_limit : 1000; opts.rename_score = o->rename_score; opts.show_rename_progress = o->show_rename_progress; opts.output_format = DIFF_FORMAT_NO_OUTPUT; diff_setup_done(&opts); diff_tree_sha1(o_tree->object.sha1, tree->object.sha1, "", &opts); diffcore_std(&opts); if (opts.needed_rename_limit > o->needed_rename_limit) o->needed_rename_limit = opts.needed_rename_limit; for (i = 0; i < diff_queued_diff.nr; ++i) { struct string_list_item *item; struct rename *re; struct diff_filepair *pair = diff_queued_diff.queue[i]; if (pair->status != 'R') { diff_free_filepair(pair); continue; } re = xmalloc(sizeof(*re)); re->processed = 0; re->pair = pair; item = string_list_lookup(entries, re->pair->one->path); if (!item) re->src_entry = insert_stage_data(re->pair->one->path, o_tree, a_tree, b_tree, entries); else re->src_entry = item->util; item = string_list_lookup(entries, re->pair->two->path); if (!item) re->dst_entry = insert_stage_data(re->pair->two->path, o_tree, a_tree, b_tree, entries); else re->dst_entry = item->util; item = string_list_insert(renames, pair->one->path); item->util = re; } opts.output_format = DIFF_FORMAT_NO_OUTPUT; diff_queued_diff.nr = 0; diff_flush(&opts); return renames; } static int update_stages(const char *path, const struct diff_filespec *o, const struct diff_filespec *a, const struct diff_filespec *b) { /* * NOTE: It is usually a bad idea to call update_stages on a path * before calling update_file on that same path, since it can * sometimes lead to spurious "refusing to lose untracked file..." * messages from update_file (via make_room_for path via * would_lose_untracked). Instead, reverse the order of the calls * (executing update_file first and then update_stages). 
*/
	int clear = 1;
	int options = ADD_CACHE_OK_TO_ADD | ADD_CACHE_SKIP_DFCHECK;
	if (clear)
		if (remove_file_from_cache(path))
			return -1;
	if (o)
		if (add_cacheinfo(o->mode, o->sha1, path, 1, 0, options))
			return -1;
	if (a)
		if (add_cacheinfo(a->mode, a->sha1, path, 2, 0, options))
			return -1;
	if (b)
		if (add_cacheinfo(b->mode, b->sha1, path, 3, 0, options))
			return -1;
	return 0;
}

/*
 * Overwrite the three higher stages of 'entry' from the given
 * filespecs (base, side A, side B) and mark it as needing processing.
 */
static void update_entry(struct stage_data *entry,
			 struct diff_filespec *o,
			 struct diff_filespec *a,
			 struct diff_filespec *b)
{
	entry->processed = 0;
	entry->stages[1].mode = o->mode;
	entry->stages[2].mode = a->mode;
	entry->stages[3].mode = b->mode;
	hashcpy(entry->stages[1].sha, o->sha1);
	hashcpy(entry->stages[2].sha, a->sha1);
	hashcpy(entry->stages[3].sha, b->sha1);
}

/*
 * Remove 'path' from the index (during an inner merge, or when the
 * removal is clean) and/or from the working tree (outer merge only,
 * unless 'no_wd' suppresses it).  Returns 0 on success, -1 on failure.
 */
static int remove_file(struct merge_options *o, int clean,
		       const char *path, int no_wd)
{
	int update_cache = o->call_depth || clean;
	int update_working_directory = !o->call_depth && !no_wd;

	if (update_cache) {
		if (remove_file_from_cache(path))
			return -1;
	}
	if (update_working_directory) {
		if (ignore_case) {
			struct cache_entry *ce;
			ce = cache_file_exists(path, strlen(path), ignore_case);
			/*
			 * A case-insensitive stage-0 match means the file is
			 * tracked under a different case; leave it alone.
			 */
			if (ce && ce_stage(ce) == 0)
				return 0;
		}
		if (remove_path(path))
			return -1;
	}
	return 0;
}

/* add a string to a strbuf, but converting "/" to "_" */
static void add_flattened_path(struct strbuf *out, const char *s)
{
	size_t i = out->len;
	strbuf_addstr(out, s);
	for (; i < out->len; i++)
		if (out->buf[i] == '/')
			out->buf[i] = '_';
}

/*
 * Build a fresh path of the form "path~branch" (with '/' in the branch
 * name flattened to '_'), appending "_<n>" suffixes until the result
 * collides with nothing in the current file/directory sets or in the
 * working tree.  Caller owns the returned string.
 */
static char *unique_path(struct merge_options *o, const char *path, const char *branch)
{
	struct strbuf newpath = STRBUF_INIT;
	int suffix = 0;
	size_t base_len;

	strbuf_addf(&newpath, "%s~", path);
	add_flattened_path(&newpath, branch);

	base_len = newpath.len;
	while (string_list_has_string(&o->current_file_set, newpath.buf) ||
	       string_list_has_string(&o->current_directory_set, newpath.buf) ||
	       file_exists(newpath.buf)) {
		strbuf_setlen(&newpath, base_len);
		strbuf_addf(&newpath, "_%d", suffix++);
	}
string_list_insert(&o->current_file_set, newpath.buf);
	return strbuf_detach(&newpath, NULL);
}

/*
 * Is a directory named 'path' in the way?  Checks the index for any
 * entry under "path/", and optionally the working tree for an actual
 * directory at 'path'.  Returns 1 when something is in the way.
 */
static int dir_in_way(const char *path, int check_working_copy)
{
	int pos, pathlen = strlen(path);
	char *dirpath = xmalloc(pathlen + 2);
	struct stat st;

	strcpy(dirpath, path);
	dirpath[pathlen] = '/';
	dirpath[pathlen+1] = '\0';

	pos = cache_name_pos(dirpath, pathlen+1);

	if (pos < 0)
		pos = -1 - pos; /* not found; convert to insertion point */
	if (pos < active_nr &&
	    !strncmp(dirpath, active_cache[pos]->name, pathlen+1)) {
		free(dirpath);
		return 1;
	}

	free(dirpath);
	return check_working_copy && !lstat(path, &st) && S_ISDIR(st.st_mode);
}

/* Was 'path' tracked before this merge started (stage 0 or stage 2)? */
static int was_tracked(const char *path)
{
	int pos = cache_name_pos(path, strlen(path));

	if (pos < 0)
		pos = -1 - pos;
	while (pos < active_nr &&
	       !strcmp(path, active_cache[pos]->name)) {
		/*
		 * If stage #0, it is definitely tracked.
		 * If it has stage #2 then it was tracked
		 * before this merge started.  All other
		 * cases the path was not tracked.
		 */
		switch (ce_stage(active_cache[pos])) {
		case 0:
		case 2:
			return 1;
		}
		pos++;
	}
	return 0;
}

/* Would writing 'path' clobber an untracked file in the work tree? */
static int would_lose_untracked(const char *path)
{
	return !was_tracked(path) && file_exists(path);
}

/*
 * Make sure 'path' can be created as a file: unlink any D/F conflict
 * file occupying a leading directory, create the leading directories,
 * and remove a stale tracked file at 'path' itself.  Returns 0 on
 * success, -1 (after error()) on failure.
 */
static int make_room_for_path(struct merge_options *o, const char *path)
{
	int status, i;
	const char *msg = _("failed to create path '%s'%s");

	/* Unlink any D/F conflict files that are in the way */
	for (i = 0; i < o->df_conflict_file_set.nr; i++) {
		const char *df_path = o->df_conflict_file_set.items[i].string;
		size_t pathlen = strlen(path);
		size_t df_pathlen = strlen(df_path);
		if (df_pathlen < pathlen &&
		    path[df_pathlen] == '/' &&
		    strncmp(path, df_path, df_pathlen) == 0) {
			output(o, 3,
			       _("Removing %s to make room for subdirectory\n"),
			       df_path);
			unlink(df_path);
			unsorted_string_list_delete_item(&o->df_conflict_file_set,
							 i, 0);
			break;
		}
	}

	/* Make sure leading directories are created */
	status = safe_create_leading_directories_const(path);
	if (status) {
		if (status == SCLD_EXISTS) {
			/* something else exists */
			error(msg, path, _(": perhaps a D/F conflict?"));
return -1; } die(msg, path, ""); } /* * Do not unlink a file in the work tree if we are not * tracking it. */ if (would_lose_untracked(path)) return error(_("refusing to lose untracked file at '%s'"), path); /* Successful unlink is good.. */ if (!unlink(path)) return 0; /* .. and so is no existing file */ if (errno == ENOENT) return 0; /* .. but not some other error (who really cares what?) */ return error(msg, path, _(": perhaps a D/F conflict?")); } static void update_file_flags(struct merge_options *o, const unsigned char *sha, unsigned mode, const char *path, int update_cache, int update_wd) { if (o->call_depth) update_wd = 0; if (update_wd) { enum object_type type; void *buf; unsigned long size; if (S_ISGITLINK(mode)) { /* * We may later decide to recursively descend into * the submodule directory and update its index * and/or work tree, but we do not do that now. */ update_wd = 0; goto update_index; } buf = read_sha1_file(sha, &type, &size); if (!buf) die(_("cannot read object %s '%s'"), sha1_to_hex(sha), path); if (type != OBJ_BLOB) die(_("blob expected for %s '%s'"), sha1_to_hex(sha), path); if (S_ISREG(mode)) { struct strbuf strbuf = STRBUF_INIT; if (convert_to_working_tree(path, buf, size, &strbuf)) { free(buf); size = strbuf.len; buf = strbuf_detach(&strbuf, NULL); } } if (make_room_for_path(o, path) < 0) { update_wd = 0; free(buf); goto update_index; } if (S_ISREG(mode) || (!has_symlinks && S_ISLNK(mode))) { int fd; if (mode & 0100) mode = 0777; else mode = 0666; fd = open(path, O_WRONLY | O_TRUNC | O_CREAT, mode); if (fd < 0) die_errno(_("failed to open '%s'"), path); write_in_full(fd, buf, size); close(fd); } else if (S_ISLNK(mode)) { char *lnk = xmemdupz(buf, size); safe_create_leading_directories_const(path); unlink(path); if (symlink(lnk, path)) die_errno(_("failed to symlink '%s'"), path); free(lnk); } else die(_("do not know what to do with %06o %s '%s'"), mode, sha1_to_hex(sha), path); free(buf); } update_index: if (update_cache) 
add_cacheinfo(mode, sha, path, 0, update_wd, ADD_CACHE_OK_TO_ADD); } static void update_file(struct merge_options *o, int clean, const unsigned char *sha, unsigned mode, const char *path) { update_file_flags(o, sha, mode, path, o->call_depth || clean, !o->call_depth); } /* Low level file merging, update and removal */ struct merge_file_info { unsigned char sha[20]; unsigned mode; unsigned clean:1, merge:1; }; static int merge_3way(struct merge_options *o, mmbuffer_t *result_buf, const struct diff_filespec *one, const struct diff_filespec *a, const struct diff_filespec *b, const char *branch1, const char *branch2) { mmfile_t orig, src1, src2; struct ll_merge_options ll_opts = {0}; char *base_name, *name1, *name2; int merge_status; ll_opts.renormalize = o->renormalize; ll_opts.xdl_opts = o->xdl_opts; if (o->call_depth) { ll_opts.virtual_ancestor = 1; ll_opts.variant = 0; } else { switch (o->recursive_variant) { case MERGE_RECURSIVE_OURS: ll_opts.variant = XDL_MERGE_FAVOR_OURS; break; case MERGE_RECURSIVE_THEIRS: ll_opts.variant = XDL_MERGE_FAVOR_THEIRS; break; default: ll_opts.variant = 0; break; } } if (strcmp(a->path, b->path) || (o->ancestor != NULL && strcmp(a->path, one->path) != 0)) { base_name = o->ancestor == NULL ? NULL : mkpathdup("%s:%s", o->ancestor, one->path); name1 = mkpathdup("%s:%s", branch1, a->path); name2 = mkpathdup("%s:%s", branch2, b->path); } else { base_name = o->ancestor == NULL ? 
NULL : mkpathdup("%s", o->ancestor); name1 = mkpathdup("%s", branch1); name2 = mkpathdup("%s", branch2); } read_mmblob(&orig, one->sha1); read_mmblob(&src1, a->sha1); read_mmblob(&src2, b->sha1); merge_status = ll_merge(result_buf, a->path, &orig, base_name, &src1, name1, &src2, name2, &ll_opts); free(base_name); free(name1); free(name2); free(orig.ptr); free(src1.ptr); free(src2.ptr); return merge_status; } static struct merge_file_info merge_file_1(struct merge_options *o, const struct diff_filespec *one, const struct diff_filespec *a, const struct diff_filespec *b, const char *branch1, const char *branch2) { struct merge_file_info result; result.merge = 0; result.clean = 1; if ((S_IFMT & a->mode) != (S_IFMT & b->mode)) { result.clean = 0; if (S_ISREG(a->mode)) { result.mode = a->mode; hashcpy(result.sha, a->sha1); } else { result.mode = b->mode; hashcpy(result.sha, b->sha1); } } else { if (!sha_eq(a->sha1, one->sha1) && !sha_eq(b->sha1, one->sha1)) result.merge = 1; /* * Merge modes */ if (a->mode == b->mode || a->mode == one->mode) result.mode = b->mode; else { result.mode = a->mode; if (b->mode != one->mode) { result.clean = 0; result.merge = 1; } } if (sha_eq(a->sha1, b->sha1) || sha_eq(a->sha1, one->sha1)) hashcpy(result.sha, b->sha1); else if (sha_eq(b->sha1, one->sha1)) hashcpy(result.sha, a->sha1); else if (S_ISREG(a->mode)) { mmbuffer_t result_buf; int merge_status; merge_status = merge_3way(o, &result_buf, one, a, b, branch1, branch2); if ((merge_status < 0) || !result_buf.ptr) die(_("Failed to execute internal merge")); if (write_sha1_file(result_buf.ptr, result_buf.size, blob_type, result.sha)) die(_("Unable to add %s to database"), a->path); free(result_buf.ptr); result.clean = (merge_status == 0); } else if (S_ISGITLINK(a->mode)) { result.clean = merge_submodule(result.sha, one->path, one->sha1, a->sha1, b->sha1, !o->call_depth); } else if (S_ISLNK(a->mode)) { hashcpy(result.sha, a->sha1); if (!sha_eq(a->sha1, b->sha1)) result.clean = 0; } else { 
die(_("unsupported object type in the tree")); } } return result; } static struct merge_file_info merge_file_special_markers(struct merge_options *o, const struct diff_filespec *one, const struct diff_filespec *a, const struct diff_filespec *b, const char *branch1, const char *filename1, const char *branch2, const char *filename2) { char *side1 = NULL; char *side2 = NULL; struct merge_file_info mfi; if (filename1) side1 = xstrfmt("%s:%s", branch1, filename1); if (filename2) side2 = xstrfmt("%s:%s", branch2, filename2); mfi = merge_file_1(o, one, a, b, side1 ? side1 : branch1, side2 ? side2 : branch2); free(side1); free(side2); return mfi; } static struct merge_file_info merge_file_one(struct merge_options *o, const char *path, const unsigned char *o_sha, int o_mode, const unsigned char *a_sha, int a_mode, const unsigned char *b_sha, int b_mode, const char *branch1, const char *branch2) { struct diff_filespec one, a, b; one.path = a.path = b.path = (char *)path; hashcpy(one.sha1, o_sha); one.mode = o_mode; hashcpy(a.sha1, a_sha); a.mode = a_mode; hashcpy(b.sha1, b_sha); b.mode = b_mode; return merge_file_1(o, &one, &a, &b, branch1, branch2); } static void handle_change_delete(struct merge_options *o, const char *path, const unsigned char *o_sha, int o_mode, const unsigned char *a_sha, int a_mode, const unsigned char *b_sha, int b_mode, const char *change, const char *change_past) { char *renamed = NULL; if (dir_in_way(path, !o->call_depth)) { renamed = unique_path(o, path, a_sha ? o->branch1 : o->branch2); } if (o->call_depth) { /* * We cannot arbitrarily accept either a_sha or b_sha as * correct; since there is no true "middle point" between * them, simply reuse the base version for virtual merge base. */ remove_file_from_cache(path); update_file(o, 0, o_sha, o_mode, renamed ? renamed : path); } else if (!a_sha) { if (!renamed) { output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s " "and %s in %s. 
Version %s of %s left in tree."), change, path, o->branch1, change_past, o->branch2, o->branch2, path); update_file(o, 0, b_sha, b_mode, path); } else { output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s " "and %s in %s. Version %s of %s left in tree at %s."), change, path, o->branch1, change_past, o->branch2, o->branch2, path, renamed); update_file(o, 0, b_sha, b_mode, renamed); } } else { if (!renamed) { output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s " "and %s in %s. Version %s of %s left in tree."), change, path, o->branch2, change_past, o->branch1, o->branch1, path); } else { output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s " "and %s in %s. Version %s of %s left in tree at %s."), change, path, o->branch2, change_past, o->branch1, o->branch1, path, renamed); update_file(o, 0, a_sha, a_mode, renamed); } /* * No need to call update_file() on path when !renamed, since * that would needlessly touch path. We could call * update_file_flags() with update_cache=0 and update_wd=0, * but that's a no-op. */ } free(renamed); } static void conflict_rename_delete(struct merge_options *o, struct diff_filepair *pair, const char *rename_branch, const char *other_branch) { const struct diff_filespec *orig = pair->one; const struct diff_filespec *dest = pair->two; const unsigned char *a_sha = NULL; const unsigned char *b_sha = NULL; int a_mode = 0; int b_mode = 0; if (rename_branch == o->branch1) { a_sha = dest->sha1; a_mode = dest->mode; } else { b_sha = dest->sha1; b_mode = dest->mode; } handle_change_delete(o, o->call_depth ? orig->path : dest->path, orig->sha1, orig->mode, a_sha, a_mode, b_sha, b_mode, _("rename"), _("renamed")); if (o->call_depth) { remove_file_from_cache(dest->path); } else { update_stages(dest->path, NULL, rename_branch == o->branch1 ? dest : NULL, rename_branch == o->branch1 ? 
NULL : dest); } } static struct diff_filespec *filespec_from_entry(struct diff_filespec *target, struct stage_data *entry, int stage) { unsigned char *sha = entry->stages[stage].sha; unsigned mode = entry->stages[stage].mode; if (mode == 0 || is_null_sha1(sha)) return NULL; hashcpy(target->sha1, sha); target->mode = mode; return target; } static void handle_file(struct merge_options *o, struct diff_filespec *rename, int stage, struct rename_conflict_info *ci) { char *dst_name = rename->path; struct stage_data *dst_entry; const char *cur_branch, *other_branch; struct diff_filespec other; struct diff_filespec *add; if (stage == 2) { dst_entry = ci->dst_entry1; cur_branch = ci->branch1; other_branch = ci->branch2; } else { dst_entry = ci->dst_entry2; cur_branch = ci->branch2; other_branch = ci->branch1; } add = filespec_from_entry(&other, dst_entry, stage ^ 1); if (add) { char *add_name = unique_path(o, rename->path, other_branch); update_file(o, 0, add->sha1, add->mode, add_name); remove_file(o, 0, rename->path, 0); dst_name = unique_path(o, rename->path, cur_branch); } else { if (dir_in_way(rename->path, !o->call_depth)) { dst_name = unique_path(o, rename->path, cur_branch); output(o, 1, _("%s is a directory in %s adding as %s instead"), rename->path, other_branch, dst_name); } } update_file(o, 0, rename->sha1, rename->mode, dst_name); if (stage == 2) update_stages(rename->path, NULL, rename, add); else update_stages(rename->path, NULL, add, rename); if (dst_name != rename->path) free(dst_name); } static void conflict_rename_rename_1to2(struct merge_options *o, struct rename_conflict_info *ci) { /* One file was renamed in both branches, but to different names. 
*/ struct diff_filespec *one = ci->pair1->one; struct diff_filespec *a = ci->pair1->two; struct diff_filespec *b = ci->pair2->two; output(o, 1, _("CONFLICT (rename/rename): " "Rename \"%s\"->\"%s\" in branch \"%s\" " "rename \"%s\"->\"%s\" in \"%s\"%s"), one->path, a->path, ci->branch1, one->path, b->path, ci->branch2, o->call_depth ? _(" (left unresolved)") : ""); if (o->call_depth) { struct merge_file_info mfi; struct diff_filespec other; struct diff_filespec *add; mfi = merge_file_one(o, one->path, one->sha1, one->mode, a->sha1, a->mode, b->sha1, b->mode, ci->branch1, ci->branch2); /* * FIXME: For rename/add-source conflicts (if we could detect * such), this is wrong. We should instead find a unique * pathname and then either rename the add-source file to that * unique path, or use that unique path instead of src here. */ update_file(o, 0, mfi.sha, mfi.mode, one->path); /* * Above, we put the merged content at the merge-base's * path. Now we usually need to delete both a->path and * b->path. However, the rename on each side of the merge * could also be involved in a rename/add conflict. In * such cases, we should keep the added file around, * resolving the conflict at that path in its favor. */ add = filespec_from_entry(&other, ci->dst_entry1, 2 ^ 1); if (add) update_file(o, 0, add->sha1, add->mode, a->path); else remove_file_from_cache(a->path); add = filespec_from_entry(&other, ci->dst_entry2, 3 ^ 1); if (add) update_file(o, 0, add->sha1, add->mode, b->path); else remove_file_from_cache(b->path); } else { handle_file(o, a, 2, ci); handle_file(o, b, 3, ci); } } static void conflict_rename_rename_2to1(struct merge_options *o, struct rename_conflict_info *ci) { /* Two files, a & b, were renamed to the same thing, c. 
*/ struct diff_filespec *a = ci->pair1->one; struct diff_filespec *b = ci->pair2->one; struct diff_filespec *c1 = ci->pair1->two; struct diff_filespec *c2 = ci->pair2->two; char *path = c1->path; /* == c2->path */ struct merge_file_info mfi_c1; struct merge_file_info mfi_c2; output(o, 1, _("CONFLICT (rename/rename): " "Rename %s->%s in %s. " "Rename %s->%s in %s"), a->path, c1->path, ci->branch1, b->path, c2->path, ci->branch2); remove_file(o, 1, a->path, would_lose_untracked(a->path)); remove_file(o, 1, b->path, would_lose_untracked(b->path)); mfi_c1 = merge_file_special_markers(o, a, c1, &ci->ren1_other, o->branch1, c1->path, o->branch2, ci->ren1_other.path); mfi_c2 = merge_file_special_markers(o, b, &ci->ren2_other, c2, o->branch1, ci->ren2_other.path, o->branch2, c2->path); if (o->call_depth) { /* * If mfi_c1.clean && mfi_c2.clean, then it might make * sense to do a two-way merge of those results. But, I * think in all cases, it makes sense to have the virtual * merge base just undo the renames; they can be detected * again later for the non-recursive merge. 
*/ remove_file(o, 0, path, 0); update_file(o, 0, mfi_c1.sha, mfi_c1.mode, a->path); update_file(o, 0, mfi_c2.sha, mfi_c2.mode, b->path); } else { char *new_path1 = unique_path(o, path, ci->branch1); char *new_path2 = unique_path(o, path, ci->branch2); output(o, 1, _("Renaming %s to %s and %s to %s instead"), a->path, new_path1, b->path, new_path2); remove_file(o, 0, path, 0); update_file(o, 0, mfi_c1.sha, mfi_c1.mode, new_path1); update_file(o, 0, mfi_c2.sha, mfi_c2.mode, new_path2); free(new_path2); free(new_path1); } } static int process_renames(struct merge_options *o, struct string_list *a_renames, struct string_list *b_renames) { int clean_merge = 1, i, j; struct string_list a_by_dst = STRING_LIST_INIT_NODUP; struct string_list b_by_dst = STRING_LIST_INIT_NODUP; const struct rename *sre; for (i = 0; i < a_renames->nr; i++) { sre = a_renames->items[i].util; string_list_insert(&a_by_dst, sre->pair->two->path)->util = (void *)sre; } for (i = 0; i < b_renames->nr; i++) { sre = b_renames->items[i].util; string_list_insert(&b_by_dst, sre->pair->two->path)->util = (void *)sre; } for (i = 0, j = 0; i < a_renames->nr || j < b_renames->nr;) { struct string_list *renames1, *renames2Dst; struct rename *ren1 = NULL, *ren2 = NULL; const char *branch1, *branch2; const char *ren1_src, *ren1_dst; struct string_list_item *lookup; if (i >= a_renames->nr) { ren2 = b_renames->items[j++].util; } else if (j >= b_renames->nr) { ren1 = a_renames->items[i++].util; } else { int compare = strcmp(a_renames->items[i].string, b_renames->items[j].string); if (compare <= 0) ren1 = a_renames->items[i++].util; if (compare >= 0) ren2 = b_renames->items[j++].util; } /* TODO: refactor, so that 1/2 are not needed */ if (ren1) { renames1 = a_renames; renames2Dst = &b_by_dst; branch1 = o->branch1; branch2 = o->branch2; } else { struct rename *tmp; renames1 = b_renames; renames2Dst = &a_by_dst; branch1 = o->branch2; branch2 = o->branch1; tmp = ren2; ren2 = ren1; ren1 = tmp; } if (ren1->processed) 
continue; ren1->processed = 1; ren1->dst_entry->processed = 1; /* BUG: We should only mark src_entry as processed if we * are not dealing with a rename + add-source case. */ ren1->src_entry->processed = 1; ren1_src = ren1->pair->one->path; ren1_dst = ren1->pair->two->path; if (ren2) { /* One file renamed on both sides */ const char *ren2_src = ren2->pair->one->path; const char *ren2_dst = ren2->pair->two->path; enum rename_type rename_type; if (strcmp(ren1_src, ren2_src) != 0) die("ren1_src != ren2_src"); ren2->dst_entry->processed = 1; ren2->processed = 1; if (strcmp(ren1_dst, ren2_dst) != 0) { rename_type = RENAME_ONE_FILE_TO_TWO; clean_merge = 0; } else { rename_type = RENAME_ONE_FILE_TO_ONE; /* BUG: We should only remove ren1_src in * the base stage (think of rename + * add-source cases). */ remove_file(o, 1, ren1_src, 1); update_entry(ren1->dst_entry, ren1->pair->one, ren1->pair->two, ren2->pair->two); } setup_rename_conflict_info(rename_type, ren1->pair, ren2->pair, branch1, branch2, ren1->dst_entry, ren2->dst_entry, o, NULL, NULL); } else if ((lookup = string_list_lookup(renames2Dst, ren1_dst))) { /* Two different files renamed to the same thing */ char *ren2_dst; ren2 = lookup->util; ren2_dst = ren2->pair->two->path; if (strcmp(ren1_dst, ren2_dst) != 0) die("ren1_dst != ren2_dst"); clean_merge = 0; ren2->processed = 1; /* * BUG: We should only mark src_entry as processed * if we are not dealing with a rename + add-source * case. */ ren2->src_entry->processed = 1; setup_rename_conflict_info(RENAME_TWO_FILES_TO_ONE, ren1->pair, ren2->pair, branch1, branch2, ren1->dst_entry, ren2->dst_entry, o, ren1->src_entry, ren2->src_entry); } else { /* Renamed in 1, maybe changed in 2 */ /* we only use sha1 and mode of these */ struct diff_filespec src_other, dst_other; int try_merge; /* * unpack_trees loads entries from common-commit * into stage 1, from head-commit into stage 2, and * from merge-commit into stage 3. 
We keep track * of which side corresponds to the rename. */ int renamed_stage = a_renames == renames1 ? 2 : 3; int other_stage = a_renames == renames1 ? 3 : 2; /* BUG: We should only remove ren1_src in the base * stage and in other_stage (think of rename + * add-source case). */ remove_file(o, 1, ren1_src, renamed_stage == 2 || !was_tracked(ren1_src)); hashcpy(src_other.sha1, ren1->src_entry->stages[other_stage].sha); src_other.mode = ren1->src_entry->stages[other_stage].mode; hashcpy(dst_other.sha1, ren1->dst_entry->stages[other_stage].sha); dst_other.mode = ren1->dst_entry->stages[other_stage].mode; try_merge = 0; if (sha_eq(src_other.sha1, null_sha1)) { setup_rename_conflict_info(RENAME_DELETE, ren1->pair, NULL, branch1, branch2, ren1->dst_entry, NULL, o, NULL, NULL); } else if ((dst_other.mode == ren1->pair->two->mode) && sha_eq(dst_other.sha1, ren1->pair->two->sha1)) { /* * Added file on the other side identical to * the file being renamed: clean merge. * Also, there is no need to overwrite the * file already in the working copy, so call * update_file_flags() instead of * update_file(). */ update_file_flags(o, ren1->pair->two->sha1, ren1->pair->two->mode, ren1_dst, 1, /* update_cache */ 0 /* update_wd */); } else if (!sha_eq(dst_other.sha1, null_sha1)) { clean_merge = 0; try_merge = 1; output(o, 1, _("CONFLICT (rename/add): Rename %s->%s in %s. 
" "%s added in %s"), ren1_src, ren1_dst, branch1, ren1_dst, branch2); if (o->call_depth) { struct merge_file_info mfi; mfi = merge_file_one(o, ren1_dst, null_sha1, 0, ren1->pair->two->sha1, ren1->pair->two->mode, dst_other.sha1, dst_other.mode, branch1, branch2); output(o, 1, _("Adding merged %s"), ren1_dst); update_file(o, 0, mfi.sha, mfi.mode, ren1_dst); try_merge = 0; } else { char *new_path = unique_path(o, ren1_dst, branch2); output(o, 1, _("Adding as %s instead"), new_path); update_file(o, 0, dst_other.sha1, dst_other.mode, new_path); free(new_path); } } else try_merge = 1; if (try_merge) { struct diff_filespec *one, *a, *b; src_other.path = (char *)ren1_src; one = ren1->pair->one; if (a_renames == renames1) { a = ren1->pair->two; b = &src_other; } else { b = ren1->pair->two; a = &src_other; } update_entry(ren1->dst_entry, one, a, b); setup_rename_conflict_info(RENAME_NORMAL, ren1->pair, NULL, branch1, NULL, ren1->dst_entry, NULL, o, NULL, NULL); } } } string_list_clear(&a_by_dst, 0); string_list_clear(&b_by_dst, 0); return clean_merge; } static unsigned char *stage_sha(const unsigned char *sha, unsigned mode) { return (is_null_sha1(sha) || mode == 0) ? 
NULL: (unsigned char *)sha; } static int read_sha1_strbuf(const unsigned char *sha1, struct strbuf *dst) { void *buf; enum object_type type; unsigned long size; buf = read_sha1_file(sha1, &type, &size); if (!buf) return error(_("cannot read object %s"), sha1_to_hex(sha1)); if (type != OBJ_BLOB) { free(buf); return error(_("object %s is not a blob"), sha1_to_hex(sha1)); } strbuf_attach(dst, buf, size, size + 1); return 0; } static int blob_unchanged(const unsigned char *o_sha, const unsigned char *a_sha, int renormalize, const char *path) { struct strbuf o = STRBUF_INIT; struct strbuf a = STRBUF_INIT; int ret = 0; /* assume changed for safety */ if (sha_eq(o_sha, a_sha)) return 1; if (!renormalize) return 0; assert(o_sha && a_sha); if (read_sha1_strbuf(o_sha, &o) || read_sha1_strbuf(a_sha, &a)) goto error_return; /* * Note: binary | is used so that both renormalizations are * performed. Comparison can be skipped if both files are * unchanged since their sha1s have already been compared. */ if (renormalize_buffer(path, o.buf, o.len, &o) | renormalize_buffer(path, a.buf, a.len, &a)) ret = (o.len == a.len && !memcmp(o.buf, a.buf, o.len)); error_return: strbuf_release(&o); strbuf_release(&a); return ret; } static void handle_modify_delete(struct merge_options *o, const char *path, unsigned char *o_sha, int o_mode, unsigned char *a_sha, int a_mode, unsigned char *b_sha, int b_mode) { handle_change_delete(o, path, o_sha, o_mode, a_sha, a_mode, b_sha, b_mode, _("modify"), _("modified")); } static int merge_content(struct merge_options *o, const char *path, unsigned char *o_sha, int o_mode, unsigned char *a_sha, int a_mode, unsigned char *b_sha, int b_mode, struct rename_conflict_info *rename_conflict_info) { const char *reason = _("content"); const char *path1 = NULL, *path2 = NULL; struct merge_file_info mfi; struct diff_filespec one, a, b; unsigned df_conflict_remains = 0; if (!o_sha) { reason = _("add/add"); o_sha = (unsigned char *)null_sha1; } one.path = a.path = 
b.path = (char *)path; hashcpy(one.sha1, o_sha); one.mode = o_mode; hashcpy(a.sha1, a_sha); a.mode = a_mode; hashcpy(b.sha1, b_sha); b.mode = b_mode; if (rename_conflict_info) { struct diff_filepair *pair1 = rename_conflict_info->pair1; path1 = (o->branch1 == rename_conflict_info->branch1) ? pair1->two->path : pair1->one->path; /* If rename_conflict_info->pair2 != NULL, we are in * RENAME_ONE_FILE_TO_ONE case. Otherwise, we have a * normal rename. */ path2 = (rename_conflict_info->pair2 || o->branch2 == rename_conflict_info->branch1) ? pair1->two->path : pair1->one->path; if (dir_in_way(path, !o->call_depth)) df_conflict_remains = 1; } mfi = merge_file_special_markers(o, &one, &a, &b, o->branch1, path1, o->branch2, path2); if (mfi.clean && !df_conflict_remains && sha_eq(mfi.sha, a_sha) && mfi.mode == a_mode) { int path_renamed_outside_HEAD; output(o, 3, _("Skipped %s (merged same as existing)"), path); /* * The content merge resulted in the same file contents we * already had. We can return early if those file contents * are recorded at the correct path (which may not be true * if the merge involves a rename). */ path_renamed_outside_HEAD = !path2 || !strcmp(path, path2); if (!path_renamed_outside_HEAD) { add_cacheinfo(mfi.mode, mfi.sha, path, 0, (!o->call_depth), 0); return mfi.clean; } } else output(o, 2, _("Auto-merging %s"), path); if (!mfi.clean) { if (S_ISGITLINK(mfi.mode)) reason = _("submodule"); output(o, 1, _("CONFLICT (%s): Merge conflict in %s"), reason, path); if (rename_conflict_info && !df_conflict_remains) update_stages(path, &one, &a, &b); } if (df_conflict_remains) { char *new_path; if (o->call_depth) { remove_file_from_cache(path); } else { if (!mfi.clean) update_stages(path, &one, &a, &b); else { int file_from_stage2 = was_tracked(path); struct diff_filespec merged; hashcpy(merged.sha1, mfi.sha); merged.mode = mfi.mode; update_stages(path, NULL, file_from_stage2 ? &merged : NULL, file_from_stage2 ? 
NULL : &merged); } } new_path = unique_path(o, path, rename_conflict_info->branch1); output(o, 1, _("Adding as %s instead"), new_path); update_file(o, 0, mfi.sha, mfi.mode, new_path); free(new_path); mfi.clean = 0; } else { update_file(o, mfi.clean, mfi.sha, mfi.mode, path); } return mfi.clean; } /* Per entry merge function */ static int process_entry(struct merge_options *o, const char *path, struct stage_data *entry) { int clean_merge = 1; int normalize = o->renormalize; unsigned o_mode = entry->stages[1].mode; unsigned a_mode = entry->stages[2].mode; unsigned b_mode = entry->stages[3].mode; unsigned char *o_sha = stage_sha(entry->stages[1].sha, o_mode); unsigned char *a_sha = stage_sha(entry->stages[2].sha, a_mode); unsigned char *b_sha = stage_sha(entry->stages[3].sha, b_mode); entry->processed = 1; if (entry->rename_conflict_info) { struct rename_conflict_info *conflict_info = entry->rename_conflict_info; switch (conflict_info->rename_type) { case RENAME_NORMAL: case RENAME_ONE_FILE_TO_ONE: clean_merge = merge_content(o, path, o_sha, o_mode, a_sha, a_mode, b_sha, b_mode, conflict_info); break; case RENAME_DELETE: clean_merge = 0; conflict_rename_delete(o, conflict_info->pair1, conflict_info->branch1, conflict_info->branch2); break; case RENAME_ONE_FILE_TO_TWO: clean_merge = 0; conflict_rename_rename_1to2(o, conflict_info); break; case RENAME_TWO_FILES_TO_ONE: clean_merge = 0; conflict_rename_rename_2to1(o, conflict_info); break; default: entry->processed = 0; break; } } else if (o_sha && (!a_sha || !b_sha)) { /* Case A: Deleted in one */ if ((!a_sha && !b_sha) || (!b_sha && blob_unchanged(o_sha, a_sha, normalize, path)) || (!a_sha && blob_unchanged(o_sha, b_sha, normalize, path))) { /* Deleted in both or deleted in one and * unchanged in the other */ if (a_sha) output(o, 2, _("Removing %s"), path); /* do not touch working file if it did not exist */ remove_file(o, 1, path, !a_sha); } else { /* Modify/delete; deleted side may have put a directory in the way */ 
clean_merge = 0; handle_modify_delete(o, path, o_sha, o_mode, a_sha, a_mode, b_sha, b_mode); } } else if ((!o_sha && a_sha && !b_sha) || (!o_sha && !a_sha && b_sha)) { /* Case B: Added in one. */ /* [nothing|directory] -> ([nothing|directory], file) */ const char *add_branch; const char *other_branch; unsigned mode; const unsigned char *sha; const char *conf; if (a_sha) { add_branch = o->branch1; other_branch = o->branch2; mode = a_mode; sha = a_sha; conf = _("file/directory"); } else { add_branch = o->branch2; other_branch = o->branch1; mode = b_mode; sha = b_sha; conf = _("directory/file"); } if (dir_in_way(path, !o->call_depth)) { char *new_path = unique_path(o, path, add_branch); clean_merge = 0; output(o, 1, _("CONFLICT (%s): There is a directory with name %s in %s. " "Adding %s as %s"), conf, path, other_branch, path, new_path); if (o->call_depth) remove_file_from_cache(path); update_file(o, 0, sha, mode, new_path); if (o->call_depth) remove_file_from_cache(path); free(new_path); } else { output(o, 2, _("Adding %s"), path); /* do not overwrite file if already present */ update_file_flags(o, sha, mode, path, 1, !a_sha); } } else if (a_sha && b_sha) { /* Case C: Added in both (check for same permissions) and */ /* case D: Modified in both, but differently. */ clean_merge = merge_content(o, path, o_sha, o_mode, a_sha, a_mode, b_sha, b_mode, NULL); } else if (!o_sha && !a_sha && !b_sha) { /* * this entry was deleted altogether. a_mode == 0 means * we had that path and want to actively remove it. 
*/ remove_file(o, 1, path, !a_mode); } else die(_("Fatal merge failure, shouldn't happen.")); return clean_merge; } int merge_trees(struct merge_options *o, struct tree *head, struct tree *merge, struct tree *common, struct tree **result) { int code, clean; if (o->subtree_shift) { merge = shift_tree_object(head, merge, o->subtree_shift); common = shift_tree_object(head, common, o->subtree_shift); } if (sha_eq(common->object.sha1, merge->object.sha1)) { output(o, 0, _("Already up-to-date!")); *result = head; return 1; } code = git_merge_trees(o->call_depth, common, head, merge); if (code != 0) { if (show(o, 4) || o->call_depth) die(_("merging of trees %s and %s failed"), sha1_to_hex(head->object.sha1), sha1_to_hex(merge->object.sha1)); else exit(128); } if (unmerged_cache()) { struct string_list *entries, *re_head, *re_merge; int i; string_list_clear(&o->current_file_set, 1); string_list_clear(&o->current_directory_set, 1); get_files_dirs(o, head); get_files_dirs(o, merge); entries = get_unmerged(); record_df_conflict_files(o, entries); re_head = get_renames(o, head, common, head, merge, entries); re_merge = get_renames(o, merge, common, head, merge, entries); clean = process_renames(o, re_head, re_merge); for (i = entries->nr-1; 0 <= i; i--) { const char *path = entries->items[i].string; struct stage_data *e = entries->items[i].util; if (!e->processed && !process_entry(o, path, e)) clean = 0; } for (i = 0; i < entries->nr; i++) { struct stage_data *e = entries->items[i].util; if (!e->processed) die(_("Unprocessed path??? 
%s"), entries->items[i].string); } string_list_clear(re_merge, 0); string_list_clear(re_head, 0); string_list_clear(entries, 1); free(re_merge); free(re_head); free(entries); } else clean = 1; if (o->call_depth) *result = write_tree_from_memory(o); return clean; } static struct commit_list *reverse_commit_list(struct commit_list *list) { struct commit_list *next = NULL, *current, *backup; for (current = list; current; current = backup) { backup = current->next; current->next = next; next = current; } return next; } /* * Merge the commits h1 and h2, return the resulting virtual * commit object and a flag indicating the cleanness of the merge. */ int merge_recursive(struct merge_options *o, struct commit *h1, struct commit *h2, struct commit_list *ca, struct commit **result) { struct commit_list *iter; struct commit *merged_common_ancestors; struct tree *mrtree = mrtree; int clean; if (show(o, 4)) { output(o, 4, _("Merging:")); output_commit_title(o, h1); output_commit_title(o, h2); } if (!ca) { ca = get_merge_bases(h1, h2); ca = reverse_commit_list(ca); } if (show(o, 5)) { unsigned cnt = commit_list_count(ca); output(o, 5, Q_("found %u common ancestor:", "found %u common ancestors:", cnt), cnt); for (iter = ca; iter; iter = iter->next) output_commit_title(o, iter->item); } merged_common_ancestors = pop_commit(&ca); if (merged_common_ancestors == NULL) { /* if there is no common ancestor, use an empty tree */ struct tree *tree; tree = lookup_tree(EMPTY_TREE_SHA1_BIN); merged_common_ancestors = make_virtual_commit(tree, "ancestor"); } for (iter = ca; iter; iter = iter->next) { const char *saved_b1, *saved_b2; o->call_depth++; /* * When the merge fails, the result contains files * with conflict markers. The cleanness flag is * ignored, it was never actually used, as result of * merge_trees has always overwritten it: the committed * "conflicts" were already resolved. 
*/ discard_cache(); saved_b1 = o->branch1; saved_b2 = o->branch2; o->branch1 = "Temporary merge branch 1"; o->branch2 = "Temporary merge branch 2"; merge_recursive(o, merged_common_ancestors, iter->item, NULL, &merged_common_ancestors); o->branch1 = saved_b1; o->branch2 = saved_b2; o->call_depth--; if (!merged_common_ancestors) die(_("merge returned no commit")); } discard_cache(); if (!o->call_depth) read_cache(); o->ancestor = "merged common ancestors"; clean = merge_trees(o, h1->tree, h2->tree, merged_common_ancestors->tree, &mrtree); if (o->call_depth) { *result = make_virtual_commit(mrtree, "merged tree"); commit_list_insert(h1, &(*result)->parents); commit_list_insert(h2, &(*result)->parents->next); } flush_output(o); if (show(o, 2)) diff_warn_rename_limit("merge.renamelimit", o->needed_rename_limit, 0); return clean; } static struct commit *get_ref(const unsigned char *sha1, const char *name) { struct object *object; object = deref_tag(parse_object(sha1), name, strlen(name)); if (!object) return NULL; if (object->type == OBJ_TREE) return make_virtual_commit((struct tree*)object, name); if (object->type != OBJ_COMMIT) return NULL; if (parse_commit((struct commit *)object)) return NULL; return (struct commit *)object; } int merge_recursive_generic(struct merge_options *o, const unsigned char *head, const unsigned char *merge, int num_base_list, const unsigned char **base_list, struct commit **result) { int clean; struct lock_file *lock = xcalloc(1, sizeof(struct lock_file)); struct commit *head_commit = get_ref(head, o->branch1); struct commit *next_commit = get_ref(merge, o->branch2); struct commit_list *ca = NULL; if (base_list) { int i; for (i = 0; i < num_base_list; ++i) { struct commit *base; if (!(base = get_ref(base_list[i], sha1_to_hex(base_list[i])))) return error(_("Could not parse object '%s'"), sha1_to_hex(base_list[i])); commit_list_insert(base, &ca); } } hold_locked_index(lock, 1); clean = merge_recursive(o, head_commit, next_commit, ca, result); 
if (active_cache_changed && write_locked_index(&the_index, lock, COMMIT_LOCK)) return error(_("Unable to write index.")); return clean ? 0 : 1; } static void merge_recursive_config(struct merge_options *o) { git_config_get_int("merge.verbosity", &o->verbosity); git_config_get_int("diff.renamelimit", &o->diff_rename_limit); git_config_get_int("merge.renamelimit", &o->merge_rename_limit); git_config(git_xmerge_config, NULL); } void init_merge_options(struct merge_options *o) { memset(o, 0, sizeof(struct merge_options)); o->verbosity = 2; o->buffer_output = 1; o->diff_rename_limit = -1; o->merge_rename_limit = -1; o->renormalize = 0; merge_recursive_config(o); if (getenv("GIT_MERGE_VERBOSITY")) o->verbosity = strtol(getenv("GIT_MERGE_VERBOSITY"), NULL, 10); if (o->verbosity >= 5) o->buffer_output = 0; strbuf_init(&o->obuf, 0); string_list_init(&o->current_file_set, 1); string_list_init(&o->current_directory_set, 1); string_list_init(&o->df_conflict_file_set, 1); } int parse_merge_opt(struct merge_options *o, const char *s) { const char *arg; if (!s || !*s) return -1; if (!strcmp(s, "ours")) o->recursive_variant = MERGE_RECURSIVE_OURS; else if (!strcmp(s, "theirs")) o->recursive_variant = MERGE_RECURSIVE_THEIRS; else if (!strcmp(s, "subtree")) o->subtree_shift = ""; else if (skip_prefix(s, "subtree=", &arg)) o->subtree_shift = arg; else if (!strcmp(s, "patience")) o->xdl_opts = DIFF_WITH_ALG(o, PATIENCE_DIFF); else if (!strcmp(s, "histogram")) o->xdl_opts = DIFF_WITH_ALG(o, HISTOGRAM_DIFF); else if (skip_prefix(s, "diff-algorithm=", &arg)) { long value = parse_algorithm_value(arg); if (value < 0) return -1; /* clear out previous settings */ DIFF_XDL_CLR(o, NEED_MINIMAL); o->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK; o->xdl_opts |= value; } else if (!strcmp(s, "ignore-space-change")) o->xdl_opts |= XDF_IGNORE_WHITESPACE_CHANGE; else if (!strcmp(s, "ignore-all-space")) o->xdl_opts |= XDF_IGNORE_WHITESPACE; else if (!strcmp(s, "ignore-space-at-eol")) o->xdl_opts |= 
XDF_IGNORE_WHITESPACE_AT_EOL; else if (!strcmp(s, "renormalize")) o->renormalize = 1; else if (!strcmp(s, "no-renormalize")) o->renormalize = 0; else if (skip_prefix(s, "rename-threshold=", &arg)) { if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0) return -1; } else return -1; return 0; }
gpl-2.0
HCDRJacob/htc-kernel-wildfire-2.6.32
sound/soc/blackfin/bf5xx-ac97.c
448
10828
/* * bf5xx-ac97.c -- AC97 support for the ADI blackfin chip. * * Author: Roy Huang * Created: 11th. June 2007 * Copyright: Analog Device Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/irq.h> #include <asm/portmux.h> #include <linux/mutex.h> #include <linux/gpio.h> #include "bf5xx-sport.h" #include "bf5xx-ac97.h" /* Anomaly notes: * 05000250 - AD1980 is running in TDM mode and RFS/TFS are generated by SPORT * contrtoller. But, RFSDIV and TFSDIV are always set to 16*16-1, * while the max AC97 data size is 13*16. The DIV is always larger * than data size. AD73311 and ad2602 are not running in TDM mode. * AD1836 and AD73322 depend on external RFS/TFS only. So, this * anomaly does not affect blackfin sound drivers. 
*/ static int *cmd_count; static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM; #define SPORT_REQ(x) \ [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \ P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0} static u16 sport_req[][7] = { #ifdef SPORT0_TCR1 SPORT_REQ(0), #endif #ifdef SPORT1_TCR1 SPORT_REQ(1), #endif #ifdef SPORT2_TCR1 SPORT_REQ(2), #endif #ifdef SPORT3_TCR1 SPORT_REQ(3), #endif }; #define SPORT_PARAMS(x) \ [x] = { \ .dma_rx_chan = CH_SPORT##x##_RX, \ .dma_tx_chan = CH_SPORT##x##_TX, \ .err_irq = IRQ_SPORT##x##_ERROR, \ .regs = (struct sport_register *)SPORT##x##_TCR1, \ } static struct sport_param sport_params[4] = { #ifdef SPORT0_TCR1 SPORT_PARAMS(0), #endif #ifdef SPORT1_TCR1 SPORT_PARAMS(1), #endif #ifdef SPORT2_TCR1 SPORT_PARAMS(2), #endif #ifdef SPORT3_TCR1 SPORT_PARAMS(3), #endif }; void bf5xx_pcm_to_ac97(struct ac97_frame *dst, const __u16 *src, size_t count, unsigned int chan_mask) { while (count--) { dst->ac97_tag = TAG_VALID; if (chan_mask & SP_FL) { dst->ac97_pcm_r = *src++; dst->ac97_tag |= TAG_PCM_RIGHT; } if (chan_mask & SP_FR) { dst->ac97_pcm_l = *src++; dst->ac97_tag |= TAG_PCM_LEFT; } #if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT) if (chan_mask & SP_SR) { dst->ac97_sl = *src++; dst->ac97_tag |= TAG_PCM_SL; } if (chan_mask & SP_SL) { dst->ac97_sr = *src++; dst->ac97_tag |= TAG_PCM_SR; } if (chan_mask & SP_LFE) { dst->ac97_lfe = *src++; dst->ac97_tag |= TAG_PCM_LFE; } if (chan_mask & SP_FC) { dst->ac97_center = *src++; dst->ac97_tag |= TAG_PCM_CENTER; } #endif dst++; } } EXPORT_SYMBOL(bf5xx_pcm_to_ac97); void bf5xx_ac97_to_pcm(const struct ac97_frame *src, __u16 *dst, size_t count) { while (count--) { *(dst++) = src->ac97_pcm_l; *(dst++) = src->ac97_pcm_r; src++; } } EXPORT_SYMBOL(bf5xx_ac97_to_pcm); static unsigned int sport_tx_curr_frag(struct sport_device *sport) { return sport->tx_curr_frag = sport_curr_offset_tx(sport) / sport->tx_fragsize; } static void enqueue_cmd(struct snd_ac97 *ac97, __u16 addr, __u16 
data) { struct sport_device *sport = sport_handle; int nextfrag = sport_tx_curr_frag(sport); struct ac97_frame *nextwrite; sport_incfrag(sport, &nextfrag, 1); nextwrite = (struct ac97_frame *)(sport->tx_buf + nextfrag * sport->tx_fragsize); pr_debug("sport->tx_buf:%p, nextfrag:0x%x nextwrite:%p, cmd_count:%d\n", sport->tx_buf, nextfrag, nextwrite, cmd_count[nextfrag]); nextwrite[cmd_count[nextfrag]].ac97_tag |= TAG_CMD; nextwrite[cmd_count[nextfrag]].ac97_addr = addr; nextwrite[cmd_count[nextfrag]].ac97_data = data; ++cmd_count[nextfrag]; pr_debug("ac97_sport: Inserting %02x/%04x into fragment %d\n", addr >> 8, data, nextfrag); } static unsigned short bf5xx_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct ac97_frame out_frame[2], in_frame[2]; pr_debug("%s enter 0x%x\n", __func__, reg); /* When dma descriptor is enabled, the register should not be read */ if (sport_handle->tx_run || sport_handle->rx_run) { pr_err("Could you send a mail to cliff.cai@analog.com " "to report this?\n"); return -EFAULT; } memset(&out_frame, 0, 2 * sizeof(struct ac97_frame)); memset(&in_frame, 0, 2 * sizeof(struct ac97_frame)); out_frame[0].ac97_tag = TAG_VALID | TAG_CMD; out_frame[0].ac97_addr = ((reg << 8) | 0x8000); sport_send_and_recv(sport_handle, (unsigned char *)&out_frame, (unsigned char *)&in_frame, 2 * sizeof(struct ac97_frame)); return in_frame[1].ac97_data; } void bf5xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { pr_debug("%s enter 0x%x:0x%04x\n", __func__, reg, val); if (sport_handle->tx_run) { enqueue_cmd(ac97, (reg << 8), val); /* write */ enqueue_cmd(ac97, (reg << 8) | 0x8000, 0); /* read back */ } else { struct ac97_frame frame; memset(&frame, 0, sizeof(struct ac97_frame)); frame.ac97_tag = TAG_VALID | TAG_CMD; frame.ac97_addr = (reg << 8); frame.ac97_data = val; sport_send_and_recv(sport_handle, (unsigned char *)&frame, \ NULL, sizeof(struct ac97_frame)); } } static void bf5xx_ac97_warm_reset(struct snd_ac97 *ac97) { #if 
defined(CONFIG_BF54x) || defined(CONFIG_BF561) || \ (defined(BF537_FAMILY) && (CONFIG_SND_BF5XX_SPORT_NUM == 1)) #define CONCAT(a, b, c) a ## b ## c #define BFIN_SPORT_RFS(x) CONCAT(P_SPORT, x, _RFS) u16 per = BFIN_SPORT_RFS(CONFIG_SND_BF5XX_SPORT_NUM); u16 gpio = P_IDENT(BFIN_SPORT_RFS(CONFIG_SND_BF5XX_SPORT_NUM)); pr_debug("%s enter\n", __func__); peripheral_free(per); gpio_request(gpio, "bf5xx-ac97"); gpio_direction_output(gpio, 1); udelay(2); gpio_set_value(gpio, 0); udelay(1); gpio_free(gpio); peripheral_request(per, "soc-audio"); #else pr_info("%s: Not implemented\n", __func__); #endif } static void bf5xx_ac97_cold_reset(struct snd_ac97 *ac97) { #ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET pr_debug("%s enter\n", __func__); /* It is specified for bf548-ezkit */ gpio_set_value(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 0); /* Keep reset pin low for 1 ms */ mdelay(1); gpio_set_value(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 1); /* Wait for bit clock recover */ mdelay(1); #else pr_info("%s: Not implemented\n", __func__); #endif } struct snd_ac97_bus_ops soc_ac97_ops = { .read = bf5xx_ac97_read, .write = bf5xx_ac97_write, .warm_reset = bf5xx_ac97_warm_reset, .reset = bf5xx_ac97_cold_reset, }; EXPORT_SYMBOL_GPL(soc_ac97_ops); #ifdef CONFIG_PM static int bf5xx_ac97_suspend(struct snd_soc_dai *dai) { struct sport_device *sport = (struct sport_device *)dai->private_data; pr_debug("%s : sport %d\n", __func__, dai->id); if (!dai->active) return 0; if (dai->capture.active) sport_rx_stop(sport); if (dai->playback.active) sport_tx_stop(sport); return 0; } static int bf5xx_ac97_resume(struct snd_soc_dai *dai) { int ret; struct sport_device *sport = (struct sport_device *)dai->private_data; pr_debug("%s : sport %d\n", __func__, dai->id); if (!dai->active) return 0; #if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT) ret = sport_set_multichannel(sport, 16, 0x3FF, 1); #else ret = sport_set_multichannel(sport, 16, 0x1F, 1); #endif if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } ret = 
sport_config_rx(sport, IRFS, 0xF, 0, (16*16-1)); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } ret = sport_config_tx(sport, ITFS, 0xF, 0, (16*16-1)); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } return 0; } #else #define bf5xx_ac97_suspend NULL #define bf5xx_ac97_resume NULL #endif static int bf5xx_ac97_probe(struct platform_device *pdev, struct snd_soc_dai *dai) { int ret = 0; cmd_count = (int *)get_zeroed_page(GFP_KERNEL); if (cmd_count == NULL) return -ENOMEM; if (peripheral_request_list(sport_req[sport_num], "soc-audio")) { pr_err("Requesting Peripherals failed\n"); ret = -EFAULT; goto peripheral_err; } #ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET /* Request PB3 as reset pin */ if (gpio_request(CONFIG_SND_BF5XX_RESET_GPIO_NUM, "SND_AD198x RESET")) { pr_err("Failed to request GPIO_%d for reset\n", CONFIG_SND_BF5XX_RESET_GPIO_NUM); ret = -1; goto gpio_err; } gpio_direction_output(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 1); #endif sport_handle = sport_init(&sport_params[sport_num], 2, \ sizeof(struct ac97_frame), NULL); if (!sport_handle) { ret = -ENODEV; goto sport_err; } /*SPORT works in TDM mode to simulate AC97 transfers*/ #if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT) ret = sport_set_multichannel(sport_handle, 16, 0x3FF, 1); #else ret = sport_set_multichannel(sport_handle, 16, 0x1F, 1); #endif if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } ret = sport_config_rx(sport_handle, IRFS, 0xF, 0, (16*16-1)); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } ret = sport_config_tx(sport_handle, ITFS, 0xF, 0, (16*16-1)); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } return 0; sport_config_err: kfree(sport_handle); sport_err: #ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET gpio_free(CONFIG_SND_BF5XX_RESET_GPIO_NUM); gpio_err: #endif peripheral_free_list(sport_req[sport_num]); peripheral_err: free_page((unsigned long)cmd_count); cmd_count = NULL; return ret; } static void 
bf5xx_ac97_remove(struct platform_device *pdev, struct snd_soc_dai *dai) { free_page((unsigned long)cmd_count); cmd_count = NULL; peripheral_free_list(sport_req[sport_num]); #ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET gpio_free(CONFIG_SND_BF5XX_RESET_GPIO_NUM); #endif } struct snd_soc_dai bfin_ac97_dai = { .name = "bf5xx-ac97", .id = 0, .ac97_control = 1, .probe = bf5xx_ac97_probe, .remove = bf5xx_ac97_remove, .suspend = bf5xx_ac97_suspend, .resume = bf5xx_ac97_resume, .playback = { .stream_name = "AC97 Playback", .channels_min = 2, #if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT) .channels_max = 6, #else .channels_max = 2, #endif .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "AC97 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }; EXPORT_SYMBOL_GPL(bfin_ac97_dai); static int __init bfin_ac97_init(void) { return snd_soc_register_dai(&bfin_ac97_dai); } module_init(bfin_ac97_init); static void __exit bfin_ac97_exit(void) { snd_soc_unregister_dai(&bfin_ac97_dai); } module_exit(bfin_ac97_exit); MODULE_AUTHOR("Roy Huang"); MODULE_DESCRIPTION("AC97 driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
mingit/mstcp_v0.89.4
drivers/i2c/busses/i2c-viperboard.c
960
12171
/* * Nano River Technologies viperboard i2c master driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mfd/viperboard.h> struct vprbrd_i2c { struct i2c_adapter i2c; u8 bus_freq_param; }; /* i2c bus frequency module parameter */ static u8 i2c_bus_param; static unsigned int i2c_bus_freq = 100; module_param(i2c_bus_freq, int, 0); MODULE_PARM_DESC(i2c_bus_freq, "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000"); static int vprbrd_i2c_status(struct i2c_adapter *i2c, struct vprbrd_i2c_status *status, bool prev_error) { u16 bytes_xfer; int ret; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; /* check for protocol error */ bytes_xfer = sizeof(struct vprbrd_i2c_status); ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS); if (ret != bytes_xfer) prev_error = true; if (prev_error) { dev_err(&i2c->dev, "failure in usb communication\n"); return -EREMOTEIO; } dev_dbg(&i2c->dev, " status = %d\n", status->status); if (status->status != 0x00) { dev_err(&i2c->dev, "failure: i2c protocol error\n"); return -EPROTO; } return 0; } static int vprbrd_i2c_receive(struct usb_device *usb_dev, struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer) { int ret, bytes_actual; int error = 0; /* send the read request */ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), 
			rmsg, sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual,
			VPRBRD_USB_TIMEOUT_MS);

	/* header transfer must complete in full before the data phase */
	if ((ret < 0)
		|| (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) {
		dev_err(&usb_dev->dev, "failure transmitting usb\n");
		error = -EREMOTEIO;
	}

	/* read the actual data */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN),
			rmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
	if ((ret < 0) || (bytes_xfer != bytes_actual)) {
		dev_err(&usb_dev->dev, "failure receiving usb\n");
		error = -EREMOTEIO;
	}

	return error;
}

/*
 * Send an address/length setup message to the board over the bulk OUT
 * endpoint.  Returns 0 on success or -EREMOTEIO when the transfer failed
 * or was short.
 */
static int vprbrd_i2c_addr(struct usb_device *usb_dev,
	struct vprbrd_i2c_addr_msg *amsg)
{
	int ret, bytes_actual;

	ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT),
			amsg, sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual,
			VPRBRD_USB_TIMEOUT_MS);

	if ((ret < 0) ||
			(sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) {
		dev_err(&usb_dev->dev, "failure transmitting usb\n");
		return -EREMOTEIO;
	}
	return 0;
}

/*
 * Read an i2c message from the board.
 *
 * The firmware encodes the requested length in six one-byte fields
 * (len0..len5) whose sum is the total length; each field is capped at
 * 0xff and len1/len2/len4/len5 act as "saturation" markers for the
 * 255/510/512/767/1022/1024 byte break points.  Reads larger than 512
 * bytes are fetched with two USB bulk-in transfers of at most 512 bytes
 * each (len1/len2); anything above 1024 bytes loops.
 *
 * NOTE(review): in the >1024-byte branch 'start' is advanced *before*
 * the memcpy()s below, so for messages longer than 1024 bytes the data
 * appears to land 1024 bytes past its true offset and the first chunk
 * of msg->buf is never written — verify against firmware users before
 * relying on transfers > 1024 bytes.
 *
 * NOTE(review): the received payload is copied from the start of 'rmsg',
 * i.e. the header area is presumably reused as the data buffer by
 * vprbrd_i2c_receive() — TODO confirm against the receive path.
 */
static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
{
	int ret;
	u16 remain_len, len1, len2, start = 0x0000;
	struct vprbrd_i2c_read_msg *rmsg =
		(struct vprbrd_i2c_read_msg *)vb->buf;

	remain_len = msg->len;
	rmsg->header.cmd = VPRBRD_I2C_CMD_READ;
	while (remain_len > 0) {
		/* 0x4000 looks like the device-side buffer base address —
		 * TODO confirm against firmware documentation */
		rmsg->header.addr = cpu_to_le16(start + 0x4000);
		if (remain_len <= 255) {
			len1 = remain_len;
			len2 = 0x00;
			rmsg->header.len0 = remain_len;
			rmsg->header.len1 = 0x00;
			rmsg->header.len2 = 0x00;
			rmsg->header.len3 = 0x00;
			rmsg->header.len4 = 0x00;
			rmsg->header.len5 = 0x00;
			remain_len = 0;
		} else if (remain_len <= 510) {
			len1 = remain_len;
			len2 = 0x00;
			rmsg->header.len0 = remain_len - 255;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0x00;
			rmsg->header.len3 = 0x00;
			rmsg->header.len4 = 0x00;
			rmsg->header.len5 = 0x00;
			remain_len = 0;
		} else if (remain_len <= 512) {
			len1 = remain_len;
			len2 = 0x00;
			rmsg->header.len0 = remain_len - 510;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0xff;
			rmsg->header.len3 = 0x00;
			rmsg->header.len4 = 0x00;
			rmsg->header.len5 = 0x00;
			remain_len = 0;
		} else if (remain_len <= 767) {
			len1 = 512;
			len2 = remain_len - 512;
			rmsg->header.len0 = 0x02;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0xff;
			rmsg->header.len3 = remain_len - 512;
			rmsg->header.len4 = 0x00;
			rmsg->header.len5 = 0x00;
			remain_len = 0;
		} else if (remain_len <= 1022) {
			len1 = 512;
			len2 = remain_len - 512;
			rmsg->header.len0 = 0x02;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0xff;
			rmsg->header.len3 = remain_len - 767;
			rmsg->header.len4 = 0xff;
			rmsg->header.len5 = 0x00;
			remain_len = 0;
		} else if (remain_len <= 1024) {
			len1 = 512;
			len2 = remain_len - 512;
			rmsg->header.len0 = 0x02;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0xff;
			rmsg->header.len3 = remain_len - 1022;
			rmsg->header.len4 = 0xff;
			rmsg->header.len5 = 0xff;
			remain_len = 0;
		} else {
			/* full 1024-byte window; loop for the remainder */
			len1 = 512;
			len2 = 512;
			rmsg->header.len0 = 0x02;
			rmsg->header.len1 = 0xff;
			rmsg->header.len2 = 0xff;
			rmsg->header.len3 = 0x02;
			rmsg->header.len4 = 0xff;
			rmsg->header.len5 = 0xff;
			remain_len -= 1024;
			start += 1024;	/* NOTE(review): advanced before the copies below */
		}
		rmsg->header.tf1 = cpu_to_le16(len1);
		rmsg->header.tf2 = cpu_to_le16(len2);

		/* first read transfer */
		ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1);
		if (ret < 0)
			return ret;
		/* copy the received data */
		memcpy(msg->buf + start, rmsg, len1);
		/* second read transfer if necessary */
		if (len2 > 0) {
			ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
			if (ret < 0)
				return ret;
			/* copy the received data */
			memcpy(msg->buf + start + 512, rmsg, len2);
		}
	}
	return 0;
}

/*
 * Write an i2c message to the board in chunks of at most 503 payload
 * bytes per bulk-out transfer (len1/len2 encode the chunk size with the
 * same saturating scheme as reads: len1 caps at 0xff, len2 carries the
 * excess over 255).
 *
 * NOTE(review): as in vprbrd_i2c_read(), the >503-byte branch advances
 * 'start' before the memcpy() below, so multi-chunk writes appear to
 * source each chunk from the *next* chunk's offset — verify before
 * relying on writes > 503 bytes.
 */
static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg)
{
	int ret, bytes_actual;
	u16 remain_len, bytes_xfer, start = 0x0000;
	struct vprbrd_i2c_write_msg *wmsg =
		(struct vprbrd_i2c_write_msg *)vb->buf;

	remain_len = msg->len;
	wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE;
	wmsg->header.last = 0x00;
	wmsg->header.chan = 0x00;
	wmsg->header.spi = 0x0000;
	while (remain_len > 0) {
		wmsg->header.addr = cpu_to_le16(start + 0x4000);
		if (remain_len > 503) {
			wmsg->header.len1 = 0xff;
			wmsg->header.len2 = 0xf8;
			remain_len -= 503;
			bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr);
			start += 503;	/* NOTE(review): advanced before the memcpy below */
		} else if (remain_len > 255) {
			wmsg->header.len1 = 0xff;
			wmsg->header.len2 = (remain_len - 255);
			bytes_xfer = remain_len +
				sizeof(struct vprbrd_i2c_write_hdr);
			remain_len = 0;
		} else {
			wmsg->header.len1 = remain_len;
			wmsg->header.len2 = 0x00;
			bytes_xfer = remain_len +
				sizeof(struct vprbrd_i2c_write_hdr);
			remain_len = 0;
		}
		memcpy(wmsg->data, msg->buf + start,
			bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr));

		ret = usb_bulk_msg(vb->usb_dev,
			usb_sndbulkpipe(vb->usb_dev, VPRBRD_EP_OUT),
			wmsg, bytes_xfer, &bytes_actual,
			VPRBRD_USB_TIMEOUT_MS);
		if ((ret < 0) || (bytes_xfer != bytes_actual))
			return -EREMOTEIO;
	}
	return 0;
}

/*
 * i2c_algorithm.master_xfer implementation.
 *
 * For each message: take the adapter lock, send an address/length setup
 * message, move the payload (read: setup first, then fetch; write: push
 * the data first, then the setup), and query the transfer status from
 * the board.  'amsg' and 'smsg' alias the same per-device USB buffer.
 * Returns 0 on success or the first negative error encountered for a
 * message.
 */
static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
		int num)
{
	struct i2c_msg *pmsg;
	int i, ret,
		error = 0;
	struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
	struct vprbrd_i2c_addr_msg *amsg =
		(struct vprbrd_i2c_addr_msg *)vb->buf;
	struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf;

	dev_dbg(&i2c->dev, "master xfer %d messages:\n", num);

	for (i = 0 ; i < num ; i++) {
		pmsg = &msgs[i];

		dev_dbg(&i2c->dev,
			" %d: %s (flags %d) %d bytes to 0x%02x\n",
			i, pmsg->flags & I2C_M_RD ? "read" : "write",
			pmsg->flags, pmsg->len, pmsg->addr);

		/* msgs longer than 2048 bytes are not supported by adapter */
		if (pmsg->len > 2048)
			return -EINVAL;

		mutex_lock(&vb->lock);
		/* directly send the message */
		if (pmsg->flags & I2C_M_RD) {
			/* read data */
			amsg->cmd = VPRBRD_I2C_CMD_ADDR;
			amsg->unknown2 = 0x00;
			amsg->unknown3 = 0x00;
			amsg->addr = pmsg->addr;
			amsg->unknown1 = 0x01;	/* presumably a read/write flag — TODO confirm */
			amsg->len = cpu_to_le16(pmsg->len);
			/* send the addr and len; data is expected from the board */
			ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
			if (ret < 0)
				error = ret;
			ret = vprbrd_i2c_read(vb, pmsg);
			if (ret < 0)
				error = ret;
			ret = vprbrd_i2c_status(i2c, smsg, error);
			if (ret < 0)
				error = ret;
			/* in case of protocol error, return the error */
			if (error < 0)
				goto error;
		} else {
			/* write data */
			ret = vprbrd_i2c_write(vb, pmsg);
			/* NOTE(review): 'ret' from vprbrd_i2c_write() is
			 * overwritten below without being checked */

			amsg->cmd = VPRBRD_I2C_CMD_ADDR;
			amsg->unknown2 = 0x00;
			amsg->unknown3 = 0x00;
			amsg->addr = pmsg->addr;
			amsg->unknown1 = 0x00;
			amsg->len = cpu_to_le16(pmsg->len);
			/* send the addr; the staged data goes to the board */
			ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
			if (ret < 0)
				error = ret;
			ret = vprbrd_i2c_status(i2c, smsg, error);
			if (ret < 0)
				error = ret;
			if (error < 0)
				goto error;
		}
		mutex_unlock(&vb->lock);
	}
	return 0;
error:
	mutex_unlock(&vb->lock);
	return error;
}

/* Advertise plain i2c plus emulated SMBus support. */
static u32 vprbrd_i2c_func(struct i2c_adapter *i2c)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

/* This is the actual algorithm we define */
static const struct i2c_algorithm vprbrd_algorithm = {
	.master_xfer	= vprbrd_i2c_xfer,
	.functionality	= vprbrd_i2c_func,
};

/*
 * Bind the i2c adapter to the viperboard MFD cell: allocate the adapter,
 * push the module-parameter bus frequency to the device over EP0 and
 * register with the i2c core.
 */
static int vprbrd_i2c_probe(struct platform_device *pdev)
{
	struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
	struct vprbrd_i2c *vb_i2c;
	int ret;
	int pipe;

	vb_i2c = devm_kzalloc(&pdev->dev, sizeof(*vb_i2c), GFP_KERNEL);
	if (vb_i2c == NULL)
		return -ENOMEM;

	/* setup i2c adapter description */
	vb_i2c->i2c.owner = THIS_MODULE;
	vb_i2c->i2c.class = I2C_CLASS_HWMON;
	vb_i2c->i2c.algo = &vprbrd_algorithm;
	vb_i2c->i2c.algo_data = vb;
	/* save the param in USB-capable (DMA-able) memory */
	vb_i2c->bus_freq_param = i2c_bus_param;

	snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name),
		 "viperboard at bus %03d device %03d",
		 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);

	/* setting the bus frequency; note: the VPRBRD_I2C_FREQ_* encoding
	 * presumably makes 6MHZ the smallest and 10KHZ the largest value,
	 * so this range check is not inverted — TODO confirm */
	if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ)
		&& (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) {
		pipe = usb_sndctrlpipe(vb->usb_dev, 0);
		ret = usb_control_msg(vb->usb_dev, pipe,
			VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
			0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
			VPRBRD_USB_TIMEOUT_MS);
		if (ret != 1) {
			dev_err(&pdev->dev,
				"failure setting i2c_bus_freq to %d\n",
				i2c_bus_freq);
			return -EIO;
		}
	} else {
		dev_err(&pdev->dev,
			"invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
		return -EIO;
	}

	vb_i2c->i2c.dev.parent = &pdev->dev;

	/* attach to i2c layer */
	i2c_add_adapter(&vb_i2c->i2c);

	platform_set_drvdata(pdev, vb_i2c);

	return 0;
}

/* Unregister the adapter on MFD cell removal. */
static int vprbrd_i2c_remove(struct platform_device *pdev)
{
	struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);

	i2c_del_adapter(&vb_i2c->i2c);

	return 0;
}

static struct platform_driver vprbrd_i2c_driver = {
	.driver.name	= "viperboard-i2c",
	.driver.owner	= THIS_MODULE,
	.probe		= vprbrd_i2c_probe,
	.remove		= vprbrd_i2c_remove,
};

/*
 * Translate the i2c_bus_freq module parameter (kHz) into the device's
 * frequency code before registering the platform driver; unknown values
 * fall back to 100 kHz with a warning.
 */
static int __init vprbrd_i2c_init(void)
{
	switch (i2c_bus_freq) {
	case 6000:
		i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ;
		break;
	case 3000:
		i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ;
		break;
	case 1000:
		i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ;
		break;
	case 400:
		i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ;
		break;
	case 200:
		i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ;
		break;
	case 100:
		i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
		break;
	case 10:
		i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ;
		break;
	default:
		pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq);
		i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
	}
	return platform_driver_register(&vprbrd_i2c_driver);
}
subsys_initcall(vprbrd_i2c_init);

static void __exit vprbrd_i2c_exit(void)
{
	platform_driver_unregister(&vprbrd_i2c_driver);
}
module_exit(vprbrd_i2c_exit);

MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:viperboard-i2c");
gpl-2.0
goodwinos/linux-latest
drivers/clk/clk-moxart.c
960
2416
/*
 * MOXA ART SoCs clock driver.
 *
 * Copyright (C) 2013 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clkdev.h>

/*
 * Register the MOXA ART PLL output as a fixed-factor clock.
 *
 * The multiplier is read once from bits [8:3] of the register at offset
 * 0x30 of the node's register window; the reference clock (DT clock 0)
 * is only probed for availability.
 *
 * Fixes vs. original: 'base' is no longer a needless function-local
 * 'static', and the reference taken by of_clk_get() is released instead
 * of being leaked (clk_register_fixed_factor() looks the parent up by
 * name, so holding the reference is not required).
 */
void __init moxart_of_pll_clk_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk, *ref_clk;
	unsigned int mul;
	const char *name = node->name;
	const char *parent_name;

	/* allow DT to override the clock name */
	of_property_read_string(node, "clock-output-names", &name);
	parent_name = of_clk_get_parent_name(node, 0);

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s: of_iomap failed\n", node->full_name);
		return;
	}

	/* PLL multiplier: bits [8:3] */
	mul = readl(base + 0x30) >> 3 & 0x3f;
	iounmap(base);

	/* verify the reference clock is available before registering */
	ref_clk = of_clk_get(node, 0);
	if (IS_ERR(ref_clk)) {
		pr_err("%s: of_clk_get failed\n", node->full_name);
		return;
	}
	/* only used as an availability check; drop the reference */
	clk_put(ref_clk);

	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock\n", node->full_name);
		return;
	}

	clk_register_clkdev(clk, NULL, name);
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
	       moxart_of_pll_clk_init);

/*
 * Register the MOXA ART APB bus clock as a fixed divider of the PLL.
 *
 * The divider select is read from bits [6:4] of the register at offset
 * 0xc and mapped through a small table; out-of-range selects (5..7)
 * fall back to the first table entry.  Same fixes as the PLL init:
 * non-static 'base', static const divider table, and the probing
 * of_clk_get() reference is released.
 */
void __init moxart_of_apb_clk_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk, *pll_clk;
	unsigned int div, val;
	static const unsigned int div_idx[] = { 2, 3, 4, 6, 8 };
	const char *name = node->name;
	const char *parent_name;

	of_property_read_string(node, "clock-output-names", &name);
	parent_name = of_clk_get_parent_name(node, 0);

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s: of_iomap failed\n", node->full_name);
		return;
	}

	/* divider select: bits [6:4] */
	val = readl(base + 0xc) >> 4 & 0x7;
	iounmap(base);

	/* selects 5..7 are outside the table; clamp to index 0 */
	if (val > 4)
		val = 0;
	div = div_idx[val] * 2;

	/* verify the PLL clock is available before registering */
	pll_clk = of_clk_get(node, 0);
	if (IS_ERR(pll_clk)) {
		pr_err("%s: of_clk_get failed\n", node->full_name);
		return;
	}
	clk_put(pll_clk);

	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock\n", node->full_name);
		return;
	}

	clk_register_clkdev(clk, NULL, name);
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
	       moxart_of_apb_clk_init);
gpl-2.0
bachtk/linux
drivers/bcma/driver_pcie2.c
1216
5699
/*
 * Broadcom specific AMBA
 * PCIe Gen 2 Core
 *
 * Copyright 2014, Broadcom Corporation
 * Copyright 2014, Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/pci.h>

/**************************************************
 * R/W ops.
 *
 * The PCIe Gen2 core exposes an indirect config space: write the target
 * register offset to CONFIGINDADDR, then read/write CONFIGINDDATA.
 **************************************************/

#if 0
static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	/* dummy read-back flushes the address write before the data read */
	pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
}
#endif

/* Indirect write: latch the offset, then store the value. */
static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 addr,
				      u32 val)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, val);
}

/**************************************************
 * Init.
 **************************************************/

/*
 * Workaround toggle: when enabled, clear the PERST# delay and suppress
 * SPROM loading in the clock control register; when disabled, restore
 * the defaults.  Returns the (flushed) register value.
 */
static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
						bool enable)
{
	u32 val;

	/* restore back to default */
	val = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
	val |= PCIE2_CLKC_DLYPERST;
	val &= ~PCIE2_CLKC_DISSPROMLD;
	if (enable) {
		val &= ~PCIE2_CLKC_DLYPERST;
		val |= PCIE2_CLKC_DISSPROMLD;
	}
	pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), val);
	/* flush */
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
}

/*
 * Program the three LTR (Latency Tolerance Reporting) latency registers
 * with Broadcom's required values via the indirect config interface.
 */
static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
{
	/* LTR0 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x844);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x883c883c);
	/* LTR1 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x848);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x88648864);
	/* LTR2 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x84C);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x90039003);
}

/*
 * LTR hardware workaround: on affected core revisions (2..13 except 10),
 * if the host enabled LTR, rewrite the latency values and pulse the LTR
 * state machine through active -> sleep.
 */
static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	u32 devstsctr2;

	if (core_rev < 2 || core_rev == 10 || core_rev > 13)
		return;

	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
	devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
	if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
		/* force the right LTR values */
		bcma_core_pcie2_set_ltr_vals(pcie2);

		/* TODO:
		si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */

		/* enable the LTR */
		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
			      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);

		/* set the LTR state to be active */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_ACTIVE);
		usleep_range(1000, 2000);

		/* set the LTR state to be sleep */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_SLEEP);
		usleep_range(1000, 2000);
	}
}

/*
 * Revision-gated workarounds (Broadcom "crwlpciegen2").  The actual
 * register pokes are still TODO upstream; only the gating logic exists.
 */
static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	bool pciewar160, pciewar162;

	pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
	pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
		     core_rev == 9 || core_rev == 11;

	if (!pciewar160 && !pciewar162)
		return;

	/* TODO */
#if 0
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
		    PCIE_DISABLE_L1CLK_GATING);
#if 0
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIEGEN2_COE_PVT_TL_CTRL_0);
	pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
		     ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
#endif
#endif
}

/* Workaround 180: set the low 5 bits of the PMCR refup register. */
static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
}

/* Workaround 182: write bit 0 of the sideband mailbox register. */
static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
}

/*
 * Program the PM clock period register from the ALP clock rate:
 * pm_value = 2us expressed in ALP clock ticks (core rev <= 13 only).
 */
static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
	u8 core_rev = pcie2->core->id.rev;
	u32 alp_khz, pm_value;

	if (core_rev <= 13) {
		alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
		pm_value = (1000000 * 2) / alp_khz;
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
			      PCIE2_PVT_REG_PM_CLK_PERIOD);
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
	}
}

/*
 * Core init: apply SPROM-conditional config tweak, pick the max read
 * request size per chip, and run the revision-gated workarounds.
 */
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 tmp;

	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
	/* SPROM word 54 bits [3:1] == 2 selects this config tweak;
	 * exact meaning of register 0x4e0 is undocumented here */
	if ((tmp & 0xe) >> 1 == 2)
		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);

	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4360:
	case BCMA_CHIP_ID_BCM4352:
		pcie2->reqsize = 1024;
		break;
	default:
		pcie2->reqsize = 128;
		break;
	}

	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
	bcma_core_pcie2_hw_ltr_war(pcie2);
	pciedev_crwlpciegen2(pcie2);
	pciedev_reg_pm_clk_period(pcie2);
	pciedev_crwlpciegen2_180(pcie2);
	pciedev_crwlpciegen2_182(pcie2);
}

/**************************************************
 * Runtime ops.
 **************************************************/

/* Apply the chip-specific max read request size to the host PCI device. */
void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct pci_dev *dev = bus->host_pci;
	int err;

	err = pcie_set_readrq(dev, pcie2->reqsize);
	if (err)
		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
}
gpl-2.0
Psycho666/Simplicity_trlte_kernel
sound/soc/codecs/adau1373.c
2240
47291
/* * Analog Devices ADAU1373 Audio Codec drive * * Copyright 2011 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/gcd.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/adau1373.h> #include "adau1373.h" struct adau1373_dai { unsigned int clk_src; unsigned int sysclk; bool enable_src; bool master; }; struct adau1373 { struct adau1373_dai dais[3]; }; #define ADAU1373_INPUT_MODE 0x00 #define ADAU1373_AINL_CTRL(x) (0x01 + (x) * 2) #define ADAU1373_AINR_CTRL(x) (0x02 + (x) * 2) #define ADAU1373_LLINE_OUT(x) (0x9 + (x) * 2) #define ADAU1373_RLINE_OUT(x) (0xa + (x) * 2) #define ADAU1373_LSPK_OUT 0x0d #define ADAU1373_RSPK_OUT 0x0e #define ADAU1373_LHP_OUT 0x0f #define ADAU1373_RHP_OUT 0x10 #define ADAU1373_ADC_GAIN 0x11 #define ADAU1373_LADC_MIXER 0x12 #define ADAU1373_RADC_MIXER 0x13 #define ADAU1373_LLINE1_MIX 0x14 #define ADAU1373_RLINE1_MIX 0x15 #define ADAU1373_LLINE2_MIX 0x16 #define ADAU1373_RLINE2_MIX 0x17 #define ADAU1373_LSPK_MIX 0x18 #define ADAU1373_RSPK_MIX 0x19 #define ADAU1373_LHP_MIX 0x1a #define ADAU1373_RHP_MIX 0x1b #define ADAU1373_EP_MIX 0x1c #define ADAU1373_HP_CTRL 0x1d #define ADAU1373_HP_CTRL2 0x1e #define ADAU1373_LS_CTRL 0x1f #define ADAU1373_EP_CTRL 0x21 #define ADAU1373_MICBIAS_CTRL1 0x22 #define ADAU1373_MICBIAS_CTRL2 0x23 #define ADAU1373_OUTPUT_CTRL 0x24 #define ADAU1373_PWDN_CTRL1 0x25 #define ADAU1373_PWDN_CTRL2 0x26 #define ADAU1373_PWDN_CTRL3 0x27 #define ADAU1373_DPLL_CTRL(x) (0x28 + (x) * 7) #define ADAU1373_PLL_CTRL1(x) (0x29 + (x) * 7) #define ADAU1373_PLL_CTRL2(x) (0x2a + (x) * 7) #define ADAU1373_PLL_CTRL3(x) (0x2b + (x) * 7) #define ADAU1373_PLL_CTRL4(x) (0x2c + (x) * 7) #define ADAU1373_PLL_CTRL5(x) (0x2d + (x) * 7) #define 
ADAU1373_PLL_CTRL6(x) (0x2e + (x) * 7) #define ADAU1373_PLL_CTRL7(x) (0x2f + (x) * 7) #define ADAU1373_HEADDECT 0x36 #define ADAU1373_ADC_DAC_STATUS 0x37 #define ADAU1373_ADC_CTRL 0x3c #define ADAU1373_DAI(x) (0x44 + (x)) #define ADAU1373_CLK_SRC_DIV(x) (0x40 + (x) * 2) #define ADAU1373_BCLKDIV(x) (0x47 + (x)) #define ADAU1373_SRC_RATIOA(x) (0x4a + (x) * 2) #define ADAU1373_SRC_RATIOB(x) (0x4b + (x) * 2) #define ADAU1373_DEEMP_CTRL 0x50 #define ADAU1373_SRC_DAI_CTRL(x) (0x51 + (x)) #define ADAU1373_DIN_MIX_CTRL(x) (0x56 + (x)) #define ADAU1373_DOUT_MIX_CTRL(x) (0x5b + (x)) #define ADAU1373_DAI_PBL_VOL(x) (0x62 + (x) * 2) #define ADAU1373_DAI_PBR_VOL(x) (0x63 + (x) * 2) #define ADAU1373_DAI_RECL_VOL(x) (0x68 + (x) * 2) #define ADAU1373_DAI_RECR_VOL(x) (0x69 + (x) * 2) #define ADAU1373_DAC1_PBL_VOL 0x6e #define ADAU1373_DAC1_PBR_VOL 0x6f #define ADAU1373_DAC2_PBL_VOL 0x70 #define ADAU1373_DAC2_PBR_VOL 0x71 #define ADAU1373_ADC_RECL_VOL 0x72 #define ADAU1373_ADC_RECR_VOL 0x73 #define ADAU1373_DMIC_RECL_VOL 0x74 #define ADAU1373_DMIC_RECR_VOL 0x75 #define ADAU1373_VOL_GAIN1 0x76 #define ADAU1373_VOL_GAIN2 0x77 #define ADAU1373_VOL_GAIN3 0x78 #define ADAU1373_HPF_CTRL 0x7d #define ADAU1373_BASS1 0x7e #define ADAU1373_BASS2 0x7f #define ADAU1373_DRC(x) (0x80 + (x) * 0x10) #define ADAU1373_3D_CTRL1 0xc0 #define ADAU1373_3D_CTRL2 0xc1 #define ADAU1373_FDSP_SEL1 0xdc #define ADAU1373_FDSP_SEL2 0xdd #define ADAU1373_FDSP_SEL3 0xde #define ADAU1373_FDSP_SEL4 0xdf #define ADAU1373_DIGMICCTRL 0xe2 #define ADAU1373_DIGEN 0xeb #define ADAU1373_SOFT_RESET 0xff #define ADAU1373_PLL_CTRL6_DPLL_BYPASS BIT(1) #define ADAU1373_PLL_CTRL6_PLL_EN BIT(0) #define ADAU1373_DAI_INVERT_BCLK BIT(7) #define ADAU1373_DAI_MASTER BIT(6) #define ADAU1373_DAI_INVERT_LRCLK BIT(4) #define ADAU1373_DAI_WLEN_16 0x0 #define ADAU1373_DAI_WLEN_20 0x4 #define ADAU1373_DAI_WLEN_24 0x8 #define ADAU1373_DAI_WLEN_32 0xc #define ADAU1373_DAI_WLEN_MASK 0xc #define ADAU1373_DAI_FORMAT_RIGHT_J 0x0 #define 
ADAU1373_DAI_FORMAT_LEFT_J 0x1 #define ADAU1373_DAI_FORMAT_I2S 0x2 #define ADAU1373_DAI_FORMAT_DSP 0x3 #define ADAU1373_BCLKDIV_SOURCE BIT(5) #define ADAU1373_BCLKDIV_SR_MASK (0x07 << 2) #define ADAU1373_BCLKDIV_BCLK_MASK 0x03 #define ADAU1373_BCLKDIV_32 0x03 #define ADAU1373_BCLKDIV_64 0x02 #define ADAU1373_BCLKDIV_128 0x01 #define ADAU1373_BCLKDIV_256 0x00 #define ADAU1373_ADC_CTRL_PEAK_DETECT BIT(0) #define ADAU1373_ADC_CTRL_RESET BIT(1) #define ADAU1373_ADC_CTRL_RESET_FORCE BIT(2) #define ADAU1373_OUTPUT_CTRL_LDIFF BIT(3) #define ADAU1373_OUTPUT_CTRL_LNFBEN BIT(2) #define ADAU1373_PWDN_CTRL3_PWR_EN BIT(0) #define ADAU1373_EP_CTRL_MICBIAS1_OFFSET 4 #define ADAU1373_EP_CTRL_MICBIAS2_OFFSET 2 static const uint8_t adau1373_default_regs[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* 0x30 */ 0x00, 0x00, 0x00, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x80 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x90 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0xa0 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ 0xff, 0xff, 
0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, /* 0xe0 */ 0x00, 0x1f, 0x0f, 0x00, 0x00, }; static const unsigned int adau1373_out_tlv[] = { TLV_DB_RANGE_HEAD(4), 0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1), 8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0), 16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0), 24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0), }; static const DECLARE_TLV_DB_MINMAX(adau1373_digital_tlv, -9563, 0); static const DECLARE_TLV_DB_SCALE(adau1373_in_pga_tlv, -1300, 100, 1); static const DECLARE_TLV_DB_SCALE(adau1373_ep_tlv, -600, 600, 1); static const DECLARE_TLV_DB_SCALE(adau1373_input_boost_tlv, 0, 2000, 0); static const DECLARE_TLV_DB_SCALE(adau1373_gain_boost_tlv, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(adau1373_speaker_boost_tlv, 1200, 600, 0); static const char *adau1373_fdsp_sel_text[] = { "None", "Channel 1", "Channel 2", "Channel 3", "Channel 4", "Channel 5", }; static const SOC_ENUM_SINGLE_DECL(adau1373_drc1_channel_enum, ADAU1373_FDSP_SEL1, 4, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_drc2_channel_enum, ADAU1373_FDSP_SEL1, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_drc3_channel_enum, ADAU1373_FDSP_SEL2, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_hpf_channel_enum, ADAU1373_FDSP_SEL3, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_bass_channel_enum, ADAU1373_FDSP_SEL4, 4, adau1373_fdsp_sel_text); static const char *adau1373_hpf_cutoff_text[] = { "3.7Hz", "50Hz", "100Hz", "150Hz", "200Hz", "250Hz", "300Hz", "350Hz", "400Hz", "450Hz", "500Hz", "550Hz", "600Hz", "650Hz", "700Hz", "750Hz", "800Hz", }; static const SOC_ENUM_SINGLE_DECL(adau1373_hpf_cutoff_enum, ADAU1373_HPF_CTRL, 3, adau1373_hpf_cutoff_text); static 
const char *adau1373_bass_lpf_cutoff_text[] = { "801Hz", "1001Hz", }; static const char *adau1373_bass_clip_level_text[] = { "0.125", "0.250", "0.370", "0.500", "0.625", "0.750", "0.875", }; static const unsigned int adau1373_bass_clip_level_values[] = { 1, 2, 3, 4, 5, 6, 7, }; static const char *adau1373_bass_hpf_cutoff_text[] = { "158Hz", "232Hz", "347Hz", "520Hz", }; static const unsigned int adau1373_bass_tlv[] = { TLV_DB_RANGE_HEAD(3), 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), }; static const SOC_ENUM_SINGLE_DECL(adau1373_bass_lpf_cutoff_enum, ADAU1373_BASS1, 5, adau1373_bass_lpf_cutoff_text); static const SOC_VALUE_ENUM_SINGLE_DECL(adau1373_bass_clip_level_enum, ADAU1373_BASS1, 2, 7, adau1373_bass_clip_level_text, adau1373_bass_clip_level_values); static const SOC_ENUM_SINGLE_DECL(adau1373_bass_hpf_cutoff_enum, ADAU1373_BASS1, 0, adau1373_bass_hpf_cutoff_text); static const char *adau1373_3d_level_text[] = { "0%", "6.67%", "13.33%", "20%", "26.67%", "33.33%", "40%", "46.67%", "53.33%", "60%", "66.67%", "73.33%", "80%", "86.67", "99.33%", "100%" }; static const char *adau1373_3d_cutoff_text[] = { "No 3D", "0.03125 fs", "0.04583 fs", "0.075 fs", "0.11458 fs", "0.16875 fs", "0.27083 fs" }; static const SOC_ENUM_SINGLE_DECL(adau1373_3d_level_enum, ADAU1373_3D_CTRL1, 4, adau1373_3d_level_text); static const SOC_ENUM_SINGLE_DECL(adau1373_3d_cutoff_enum, ADAU1373_3D_CTRL1, 0, adau1373_3d_cutoff_text); static const unsigned int adau1373_3d_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 1, 7, TLV_DB_LINEAR_ITEM(-1800, -120), }; static const char *adau1373_lr_mux_text[] = { "Mute", "Right Channel (L+R)", "Left Channel (L+R)", "Stereo", }; static const SOC_ENUM_SINGLE_DECL(adau1373_lineout1_lr_mux_enum, ADAU1373_OUTPUT_CTRL, 4, adau1373_lr_mux_text); static const SOC_ENUM_SINGLE_DECL(adau1373_lineout2_lr_mux_enum, ADAU1373_OUTPUT_CTRL, 6, adau1373_lr_mux_text); static const 
SOC_ENUM_SINGLE_DECL(adau1373_speaker_lr_mux_enum, ADAU1373_LS_CTRL, 4, adau1373_lr_mux_text); static const struct snd_kcontrol_new adau1373_controls[] = { SOC_DOUBLE_R_TLV("AIF1 Capture Volume", ADAU1373_DAI_RECL_VOL(0), ADAU1373_DAI_RECR_VOL(0), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF2 Capture Volume", ADAU1373_DAI_RECL_VOL(1), ADAU1373_DAI_RECR_VOL(1), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF3 Capture Volume", ADAU1373_DAI_RECL_VOL(2), ADAU1373_DAI_RECR_VOL(2), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("ADC Capture Volume", ADAU1373_ADC_RECL_VOL, ADAU1373_ADC_RECR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DMIC Capture Volume", ADAU1373_DMIC_RECL_VOL, ADAU1373_DMIC_RECR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF1 Playback Volume", ADAU1373_DAI_PBL_VOL(0), ADAU1373_DAI_PBR_VOL(0), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF2 Playback Volume", ADAU1373_DAI_PBL_VOL(1), ADAU1373_DAI_PBR_VOL(1), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF3 Playback Volume", ADAU1373_DAI_PBL_VOL(2), ADAU1373_DAI_PBR_VOL(2), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DAC1 Playback Volume", ADAU1373_DAC1_PBL_VOL, ADAU1373_DAC1_PBR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DAC2 Playback Volume", ADAU1373_DAC2_PBL_VOL, ADAU1373_DAC2_PBR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("Lineout1 Playback Volume", ADAU1373_LLINE_OUT(0), ADAU1373_RLINE_OUT(0), 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Speaker Playback Volume", ADAU1373_LSPK_OUT, ADAU1373_RSPK_OUT, 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Headphone Playback Volume", ADAU1373_LHP_OUT, ADAU1373_RHP_OUT, 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Input 1 Capture Volume", ADAU1373_AINL_CTRL(0), ADAU1373_AINR_CTRL(0), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_DOUBLE_R_TLV("Input 2 Capture Volume", ADAU1373_AINL_CTRL(1), ADAU1373_AINR_CTRL(1), 0, 0x1f, 0, adau1373_in_pga_tlv), 
SOC_DOUBLE_R_TLV("Input 3 Capture Volume", ADAU1373_AINL_CTRL(2), ADAU1373_AINR_CTRL(2), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_DOUBLE_R_TLV("Input 4 Capture Volume", ADAU1373_AINL_CTRL(3), ADAU1373_AINR_CTRL(3), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_SINGLE_TLV("Earpiece Playback Volume", ADAU1373_EP_CTRL, 0, 3, 0, adau1373_ep_tlv), SOC_DOUBLE_TLV("AIF3 Boost Playback Volume", ADAU1373_VOL_GAIN1, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF2 Boost Playback Volume", ADAU1373_VOL_GAIN1, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF1 Boost Playback Volume", ADAU1373_VOL_GAIN1, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF3 Boost Capture Volume", ADAU1373_VOL_GAIN2, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF2 Boost Capture Volume", ADAU1373_VOL_GAIN2, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF1 Boost Capture Volume", ADAU1373_VOL_GAIN2, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DMIC Boost Capture Volume", ADAU1373_VOL_GAIN3, 6, 7, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("ADC Boost Capture Volume", ADAU1373_VOL_GAIN3, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DAC2 Boost Playback Volume", ADAU1373_VOL_GAIN3, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DAC1 Boost Playback Volume", ADAU1373_VOL_GAIN3, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("Input 1 Boost Capture Volume", ADAU1373_ADC_GAIN, 0, 4, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 2 Boost Capture Volume", ADAU1373_ADC_GAIN, 1, 5, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 3 Boost Capture Volume", ADAU1373_ADC_GAIN, 2, 6, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 4 Boost Capture Volume", ADAU1373_ADC_GAIN, 3, 7, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Speaker Boost Playback Volume", ADAU1373_LS_CTRL, 2, 3, 1, 0, adau1373_speaker_boost_tlv), SOC_ENUM("Lineout1 LR Mux", adau1373_lineout1_lr_mux_enum), SOC_ENUM("Speaker LR Mux", 
adau1373_speaker_lr_mux_enum), SOC_ENUM("HPF Cutoff", adau1373_hpf_cutoff_enum), SOC_DOUBLE("HPF Switch", ADAU1373_HPF_CTRL, 1, 0, 1, 0), SOC_ENUM("HPF Channel", adau1373_hpf_channel_enum), SOC_ENUM("Bass HPF Cutoff", adau1373_bass_hpf_cutoff_enum), SOC_VALUE_ENUM("Bass Clip Level Threshold", adau1373_bass_clip_level_enum), SOC_ENUM("Bass LPF Cutoff", adau1373_bass_lpf_cutoff_enum), SOC_DOUBLE("Bass Playback Switch", ADAU1373_BASS2, 0, 1, 1, 0), SOC_SINGLE_TLV("Bass Playback Volume", ADAU1373_BASS2, 2, 7, 0, adau1373_bass_tlv), SOC_ENUM("Bass Channel", adau1373_bass_channel_enum), SOC_ENUM("3D Freq", adau1373_3d_cutoff_enum), SOC_ENUM("3D Level", adau1373_3d_level_enum), SOC_SINGLE("3D Playback Switch", ADAU1373_3D_CTRL2, 0, 1, 0), SOC_SINGLE_TLV("3D Playback Volume", ADAU1373_3D_CTRL2, 2, 7, 0, adau1373_3d_tlv), SOC_ENUM("3D Channel", adau1373_bass_channel_enum), SOC_SINGLE("Zero Cross Switch", ADAU1373_PWDN_CTRL3, 7, 1, 0), }; static const struct snd_kcontrol_new adau1373_lineout2_controls[] = { SOC_DOUBLE_R_TLV("Lineout2 Playback Volume", ADAU1373_LLINE_OUT(1), ADAU1373_RLINE_OUT(1), 0, 0x1f, 0, adau1373_out_tlv), SOC_ENUM("Lineout2 LR Mux", adau1373_lineout2_lr_mux_enum), }; static const struct snd_kcontrol_new adau1373_drc_controls[] = { SOC_ENUM("DRC1 Channel", adau1373_drc1_channel_enum), SOC_ENUM("DRC2 Channel", adau1373_drc2_channel_enum), SOC_ENUM("DRC3 Channel", adau1373_drc3_channel_enum), }; static int adau1373_pll_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; unsigned int pll_id = w->name[3] - '1'; unsigned int val; if (SND_SOC_DAPM_EVENT_ON(event)) val = ADAU1373_PLL_CTRL6_PLL_EN; else val = 0; snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id), ADAU1373_PLL_CTRL6_PLL_EN, val); if (SND_SOC_DAPM_EVENT_ON(event)) mdelay(5); return 0; } static const char *adau1373_decimator_text[] = { "ADC", "DMIC1", }; static const struct soc_enum adau1373_decimator_enum = SOC_ENUM_SINGLE(0, 
0, 2, adau1373_decimator_text); static const struct snd_kcontrol_new adau1373_decimator_mux = SOC_DAPM_ENUM_VIRT("Decimator Mux", adau1373_decimator_enum); static const struct snd_kcontrol_new adau1373_left_adc_mixer_controls[] = { SOC_DAPM_SINGLE("DAC1 Switch", ADAU1373_LADC_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Switch", ADAU1373_LADC_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Switch", ADAU1373_LADC_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Switch", ADAU1373_LADC_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Switch", ADAU1373_LADC_MIXER, 0, 1, 0), }; static const struct snd_kcontrol_new adau1373_right_adc_mixer_controls[] = { SOC_DAPM_SINGLE("DAC1 Switch", ADAU1373_RADC_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Switch", ADAU1373_RADC_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Switch", ADAU1373_RADC_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Switch", ADAU1373_RADC_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Switch", ADAU1373_RADC_MIXER, 0, 1, 0), }; #define DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("Left DAC2 Switch", _reg, 7, 1, 0), \ SOC_DAPM_SINGLE("Right DAC2 Switch", _reg, 6, 1, 0), \ SOC_DAPM_SINGLE("Left DAC1 Switch", _reg, 5, 1, 0), \ SOC_DAPM_SINGLE("Right DAC1 Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("Input 4 Bypass Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("Input 3 Bypass Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("Input 2 Bypass Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("Input 1 Bypass Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_line1_mixer_controls, ADAU1373_LLINE1_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_line1_mixer_controls, ADAU1373_RLINE1_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_line2_mixer_controls, ADAU1373_LLINE2_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_line2_mixer_controls, ADAU1373_RLINE2_MIX); static 
DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_spk_mixer_controls, ADAU1373_LSPK_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_spk_mixer_controls, ADAU1373_RSPK_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_ep_mixer_controls, ADAU1373_EP_MIX); static const struct snd_kcontrol_new adau1373_left_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", ADAU1373_LHP_MIX, 5, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", ADAU1373_LHP_MIX, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Bypass Switch", ADAU1373_LHP_MIX, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Bypass Switch", ADAU1373_LHP_MIX, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Bypass Switch", ADAU1373_LHP_MIX, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Bypass Switch", ADAU1373_LHP_MIX, 0, 1, 0), }; static const struct snd_kcontrol_new adau1373_right_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Right DAC1 Switch", ADAU1373_RHP_MIX, 5, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", ADAU1373_RHP_MIX, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Bypass Switch", ADAU1373_RHP_MIX, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Bypass Switch", ADAU1373_RHP_MIX, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Bypass Switch", ADAU1373_RHP_MIX, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Bypass Switch", ADAU1373_RHP_MIX, 0, 1, 0), }; #define DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("DMIC2 Swapped Switch", _reg, 6, 1, 0), \ SOC_DAPM_SINGLE("DMIC2 Switch", _reg, 5, 1, 0), \ SOC_DAPM_SINGLE("ADC/DMIC1 Swapped Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("ADC/DMIC1 Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("AIF3 Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("AIF2 Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("AIF1 Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel1_mixer_controls, ADAU1373_DIN_MIX_CTRL(0)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel2_mixer_controls, ADAU1373_DIN_MIX_CTRL(1)); static 
DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel3_mixer_controls, ADAU1373_DIN_MIX_CTRL(2)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel4_mixer_controls, ADAU1373_DIN_MIX_CTRL(3)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel5_mixer_controls, ADAU1373_DIN_MIX_CTRL(4)); #define DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("DSP Channel5 Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel4 Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel3 Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel2 Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel1 Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif1_mixer_controls, ADAU1373_DOUT_MIX_CTRL(0)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif2_mixer_controls, ADAU1373_DOUT_MIX_CTRL(1)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif3_mixer_controls, ADAU1373_DOUT_MIX_CTRL(2)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_dac1_mixer_controls, ADAU1373_DOUT_MIX_CTRL(3)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_dac2_mixer_controls, ADAU1373_DOUT_MIX_CTRL(4)); static const struct snd_soc_dapm_widget adau1373_dapm_widgets[] = { /* Datasheet claims Left ADC is bit 6 and Right ADC is bit 7, but that * doesn't seem to be the case. 
*/ SND_SOC_DAPM_ADC("Left ADC", NULL, ADAU1373_PWDN_CTRL1, 7, 0), SND_SOC_DAPM_ADC("Right ADC", NULL, ADAU1373_PWDN_CTRL1, 6, 0), SND_SOC_DAPM_ADC("DMIC1", NULL, ADAU1373_DIGMICCTRL, 0, 0), SND_SOC_DAPM_ADC("DMIC2", NULL, ADAU1373_DIGMICCTRL, 2, 0), SND_SOC_DAPM_VIRT_MUX("Decimator Mux", SND_SOC_NOPM, 0, 0, &adau1373_decimator_mux), SND_SOC_DAPM_SUPPLY("MICBIAS2", ADAU1373_PWDN_CTRL1, 5, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS1", ADAU1373_PWDN_CTRL1, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("IN4PGA", ADAU1373_PWDN_CTRL1, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("IN3PGA", ADAU1373_PWDN_CTRL1, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("IN2PGA", ADAU1373_PWDN_CTRL1, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("IN1PGA", ADAU1373_PWDN_CTRL1, 0, 0, NULL, 0), SND_SOC_DAPM_DAC("Left DAC2", NULL, ADAU1373_PWDN_CTRL2, 7, 0), SND_SOC_DAPM_DAC("Right DAC2", NULL, ADAU1373_PWDN_CTRL2, 6, 0), SND_SOC_DAPM_DAC("Left DAC1", NULL, ADAU1373_PWDN_CTRL2, 5, 0), SND_SOC_DAPM_DAC("Right DAC1", NULL, ADAU1373_PWDN_CTRL2, 4, 0), SOC_MIXER_ARRAY("Left ADC Mixer", SND_SOC_NOPM, 0, 0, adau1373_left_adc_mixer_controls), SOC_MIXER_ARRAY("Right ADC Mixer", SND_SOC_NOPM, 0, 0, adau1373_right_adc_mixer_controls), SOC_MIXER_ARRAY("Left Lineout2 Mixer", ADAU1373_PWDN_CTRL2, 3, 0, adau1373_left_line2_mixer_controls), SOC_MIXER_ARRAY("Right Lineout2 Mixer", ADAU1373_PWDN_CTRL2, 2, 0, adau1373_right_line2_mixer_controls), SOC_MIXER_ARRAY("Left Lineout1 Mixer", ADAU1373_PWDN_CTRL2, 1, 0, adau1373_left_line1_mixer_controls), SOC_MIXER_ARRAY("Right Lineout1 Mixer", ADAU1373_PWDN_CTRL2, 0, 0, adau1373_right_line1_mixer_controls), SOC_MIXER_ARRAY("Earpiece Mixer", ADAU1373_PWDN_CTRL3, 4, 0, adau1373_ep_mixer_controls), SOC_MIXER_ARRAY("Left Speaker Mixer", ADAU1373_PWDN_CTRL3, 3, 0, adau1373_left_spk_mixer_controls), SOC_MIXER_ARRAY("Right Speaker Mixer", ADAU1373_PWDN_CTRL3, 2, 0, adau1373_right_spk_mixer_controls), SOC_MIXER_ARRAY("Left Headphone Mixer", SND_SOC_NOPM, 0, 0, adau1373_left_hp_mixer_controls), SOC_MIXER_ARRAY("Right 
Headphone Mixer", SND_SOC_NOPM, 0, 0, adau1373_right_hp_mixer_controls), SND_SOC_DAPM_SUPPLY("Headphone Enable", ADAU1373_PWDN_CTRL3, 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 CLK", ADAU1373_SRC_DAI_CTRL(0), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 CLK", ADAU1373_SRC_DAI_CTRL(1), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 CLK", ADAU1373_SRC_DAI_CTRL(2), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 IN SRC", ADAU1373_SRC_DAI_CTRL(0), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 OUT SRC", ADAU1373_SRC_DAI_CTRL(0), 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 IN SRC", ADAU1373_SRC_DAI_CTRL(1), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 OUT SRC", ADAU1373_SRC_DAI_CTRL(1), 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 IN SRC", ADAU1373_SRC_DAI_CTRL(2), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 OUT SRC", ADAU1373_SRC_DAI_CTRL(2), 1, 0, NULL, 0), SND_SOC_DAPM_AIF_IN("AIF1 IN", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF1 OUT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF2 IN", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF2 OUT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF3 IN", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF3 OUT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0), SOC_MIXER_ARRAY("DSP Channel1 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel1_mixer_controls), SOC_MIXER_ARRAY("DSP Channel2 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel2_mixer_controls), SOC_MIXER_ARRAY("DSP Channel3 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel3_mixer_controls), SOC_MIXER_ARRAY("DSP Channel4 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel4_mixer_controls), SOC_MIXER_ARRAY("DSP Channel5 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel5_mixer_controls), SOC_MIXER_ARRAY("AIF1 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif1_mixer_controls), SOC_MIXER_ARRAY("AIF2 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif2_mixer_controls), SOC_MIXER_ARRAY("AIF3 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif3_mixer_controls), 
SOC_MIXER_ARRAY("DAC1 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dac1_mixer_controls), SOC_MIXER_ARRAY("DAC2 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dac2_mixer_controls), SND_SOC_DAPM_SUPPLY("DSP", ADAU1373_DIGEN, 4, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Recording Engine B", ADAU1373_DIGEN, 3, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Recording Engine A", ADAU1373_DIGEN, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Playback Engine B", ADAU1373_DIGEN, 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Playback Engine A", ADAU1373_DIGEN, 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("PLL1", SND_SOC_NOPM, 0, 0, adau1373_pll_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SUPPLY("PLL2", SND_SOC_NOPM, 0, 0, adau1373_pll_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SUPPLY("SYSCLK1", ADAU1373_CLK_SRC_DIV(0), 7, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("SYSCLK2", ADAU1373_CLK_SRC_DIV(1), 7, 0, NULL, 0), SND_SOC_DAPM_INPUT("AIN1L"), SND_SOC_DAPM_INPUT("AIN1R"), SND_SOC_DAPM_INPUT("AIN2L"), SND_SOC_DAPM_INPUT("AIN2R"), SND_SOC_DAPM_INPUT("AIN3L"), SND_SOC_DAPM_INPUT("AIN3R"), SND_SOC_DAPM_INPUT("AIN4L"), SND_SOC_DAPM_INPUT("AIN4R"), SND_SOC_DAPM_INPUT("DMIC1DAT"), SND_SOC_DAPM_INPUT("DMIC2DAT"), SND_SOC_DAPM_OUTPUT("LOUT1L"), SND_SOC_DAPM_OUTPUT("LOUT1R"), SND_SOC_DAPM_OUTPUT("LOUT2L"), SND_SOC_DAPM_OUTPUT("LOUT2R"), SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), SND_SOC_DAPM_OUTPUT("SPKL"), SND_SOC_DAPM_OUTPUT("SPKR"), SND_SOC_DAPM_OUTPUT("EP"), }; static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_codec *codec = source->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); unsigned int dai; const char *clk; dai = sink->name[3] - '1'; if (!adau1373->dais[dai].master) return 0; if (adau1373->dais[dai].clk_src == ADAU1373_CLK_SRC_PLL1) clk = "SYSCLK1"; else clk = "SYSCLK2"; return strcmp(source->name, clk) == 0; } static int adau1373_check_src(struct snd_soc_dapm_widget *source, struct 
snd_soc_dapm_widget *sink) { struct snd_soc_codec *codec = source->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); unsigned int dai; dai = sink->name[3] - '1'; return adau1373->dais[dai].enable_src; } #define DSP_CHANNEL_MIXER_ROUTES(_sink) \ { _sink, "DMIC2 Swapped Switch", "DMIC2" }, \ { _sink, "DMIC2 Switch", "DMIC2" }, \ { _sink, "ADC/DMIC1 Swapped Switch", "Decimator Mux" }, \ { _sink, "ADC/DMIC1 Switch", "Decimator Mux" }, \ { _sink, "AIF1 Switch", "AIF1 IN" }, \ { _sink, "AIF2 Switch", "AIF2 IN" }, \ { _sink, "AIF3 Switch", "AIF3 IN" } #define DSP_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "DSP Channel1 Switch", "DSP Channel1 Mixer" }, \ { _sink, "DSP Channel2 Switch", "DSP Channel2 Mixer" }, \ { _sink, "DSP Channel3 Switch", "DSP Channel3 Mixer" }, \ { _sink, "DSP Channel4 Switch", "DSP Channel4 Mixer" }, \ { _sink, "DSP Channel5 Switch", "DSP Channel5 Mixer" } #define LEFT_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "Right DAC2 Switch", "Right DAC2" }, \ { _sink, "Left DAC2 Switch", "Left DAC2" }, \ { _sink, "Right DAC1 Switch", "Right DAC1" }, \ { _sink, "Left DAC1 Switch", "Left DAC1" }, \ { _sink, "Input 1 Bypass Switch", "IN1PGA" }, \ { _sink, "Input 2 Bypass Switch", "IN2PGA" }, \ { _sink, "Input 3 Bypass Switch", "IN3PGA" }, \ { _sink, "Input 4 Bypass Switch", "IN4PGA" } #define RIGHT_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "Right DAC2 Switch", "Right DAC2" }, \ { _sink, "Left DAC2 Switch", "Left DAC2" }, \ { _sink, "Right DAC1 Switch", "Right DAC1" }, \ { _sink, "Left DAC1 Switch", "Left DAC1" }, \ { _sink, "Input 1 Bypass Switch", "IN1PGA" }, \ { _sink, "Input 2 Bypass Switch", "IN2PGA" }, \ { _sink, "Input 3 Bypass Switch", "IN3PGA" }, \ { _sink, "Input 4 Bypass Switch", "IN4PGA" } static const struct snd_soc_dapm_route adau1373_dapm_routes[] = { { "Left ADC Mixer", "DAC1 Switch", "Left DAC1" }, { "Left ADC Mixer", "Input 1 Switch", "IN1PGA" }, { "Left ADC Mixer", "Input 2 Switch", "IN2PGA" }, { "Left ADC Mixer", "Input 3 Switch", "IN3PGA" 
}, { "Left ADC Mixer", "Input 4 Switch", "IN4PGA" }, { "Right ADC Mixer", "DAC1 Switch", "Right DAC1" }, { "Right ADC Mixer", "Input 1 Switch", "IN1PGA" }, { "Right ADC Mixer", "Input 2 Switch", "IN2PGA" }, { "Right ADC Mixer", "Input 3 Switch", "IN3PGA" }, { "Right ADC Mixer", "Input 4 Switch", "IN4PGA" }, { "Left ADC", NULL, "Left ADC Mixer" }, { "Right ADC", NULL, "Right ADC Mixer" }, { "Decimator Mux", "ADC", "Left ADC" }, { "Decimator Mux", "ADC", "Right ADC" }, { "Decimator Mux", "DMIC1", "DMIC1" }, DSP_CHANNEL_MIXER_ROUTES("DSP Channel1 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel2 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel3 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel4 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel5 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF1 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF2 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF3 Mixer"), DSP_OUTPUT_MIXER_ROUTES("DAC1 Mixer"), DSP_OUTPUT_MIXER_ROUTES("DAC2 Mixer"), { "AIF1 OUT", NULL, "AIF1 Mixer" }, { "AIF2 OUT", NULL, "AIF2 Mixer" }, { "AIF3 OUT", NULL, "AIF3 Mixer" }, { "Left DAC1", NULL, "DAC1 Mixer" }, { "Right DAC1", NULL, "DAC1 Mixer" }, { "Left DAC2", NULL, "DAC2 Mixer" }, { "Right DAC2", NULL, "DAC2 Mixer" }, LEFT_OUTPUT_MIXER_ROUTES("Left Lineout1 Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Lineout1 Mixer"), LEFT_OUTPUT_MIXER_ROUTES("Left Lineout2 Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Lineout2 Mixer"), LEFT_OUTPUT_MIXER_ROUTES("Left Speaker Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Speaker Mixer"), { "Left Headphone Mixer", "Left DAC2 Switch", "Left DAC2" }, { "Left Headphone Mixer", "Left DAC1 Switch", "Left DAC1" }, { "Left Headphone Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Left Headphone Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Left Headphone Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Left Headphone Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "Right Headphone Mixer", "Right DAC2 Switch", "Right DAC2" }, { "Right Headphone Mixer", "Right DAC1 Switch", "Right 
DAC1" }, { "Right Headphone Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Right Headphone Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Right Headphone Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Right Headphone Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "Left Headphone Mixer", NULL, "Headphone Enable" }, { "Right Headphone Mixer", NULL, "Headphone Enable" }, { "Earpiece Mixer", "Right DAC2 Switch", "Right DAC2" }, { "Earpiece Mixer", "Left DAC2 Switch", "Left DAC2" }, { "Earpiece Mixer", "Right DAC1 Switch", "Right DAC1" }, { "Earpiece Mixer", "Left DAC1 Switch", "Left DAC1" }, { "Earpiece Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Earpiece Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Earpiece Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Earpiece Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "LOUT1L", NULL, "Left Lineout1 Mixer" }, { "LOUT1R", NULL, "Right Lineout1 Mixer" }, { "LOUT2L", NULL, "Left Lineout2 Mixer" }, { "LOUT2R", NULL, "Right Lineout2 Mixer" }, { "SPKL", NULL, "Left Speaker Mixer" }, { "SPKR", NULL, "Right Speaker Mixer" }, { "HPL", NULL, "Left Headphone Mixer" }, { "HPR", NULL, "Right Headphone Mixer" }, { "EP", NULL, "Earpiece Mixer" }, { "IN1PGA", NULL, "AIN1L" }, { "IN2PGA", NULL, "AIN2L" }, { "IN3PGA", NULL, "AIN3L" }, { "IN4PGA", NULL, "AIN4L" }, { "IN1PGA", NULL, "AIN1R" }, { "IN2PGA", NULL, "AIN2R" }, { "IN3PGA", NULL, "AIN3R" }, { "IN4PGA", NULL, "AIN4R" }, { "SYSCLK1", NULL, "PLL1" }, { "SYSCLK2", NULL, "PLL2" }, { "Left DAC1", NULL, "SYSCLK1" }, { "Right DAC1", NULL, "SYSCLK1" }, { "Left DAC2", NULL, "SYSCLK1" }, { "Right DAC2", NULL, "SYSCLK1" }, { "Left ADC", NULL, "SYSCLK1" }, { "Right ADC", NULL, "SYSCLK1" }, { "DSP", NULL, "SYSCLK1" }, { "AIF1 Mixer", NULL, "DSP" }, { "AIF2 Mixer", NULL, "DSP" }, { "AIF3 Mixer", NULL, "DSP" }, { "DAC1 Mixer", NULL, "DSP" }, { "DAC2 Mixer", NULL, "DSP" }, { "DAC1 Mixer", NULL, "Playback Engine A" }, { "DAC2 Mixer", NULL, "Playback Engine B" }, { "Left ADC Mixer", NULL, 
"Recording Engine A" }, { "Right ADC Mixer", NULL, "Recording Engine A" }, { "AIF1 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF2 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF3 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF1 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF2 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF3 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF1 IN", NULL, "AIF1 CLK" }, { "AIF1 OUT", NULL, "AIF1 CLK" }, { "AIF2 IN", NULL, "AIF2 CLK" }, { "AIF2 OUT", NULL, "AIF2 CLK" }, { "AIF3 IN", NULL, "AIF3 CLK" }, { "AIF3 OUT", NULL, "AIF3 CLK" }, { "AIF1 IN", NULL, "AIF1 IN SRC", adau1373_check_src }, { "AIF1 OUT", NULL, "AIF1 OUT SRC", adau1373_check_src }, { "AIF2 IN", NULL, "AIF2 IN SRC", adau1373_check_src }, { "AIF2 OUT", NULL, "AIF2 OUT SRC", adau1373_check_src }, { "AIF3 IN", NULL, "AIF3 IN SRC", adau1373_check_src }, { "AIF3 OUT", NULL, "AIF3 OUT SRC", adau1373_check_src }, { "DMIC1", NULL, "DMIC1DAT" }, { "DMIC1", NULL, "SYSCLK1" }, { "DMIC1", NULL, "Recording Engine A" }, { "DMIC2", NULL, "DMIC2DAT" }, { "DMIC2", NULL, "SYSCLK1" }, { "DMIC2", NULL, "Recording Engine B" }, }; static int adau1373_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; unsigned int div; unsigned int freq; unsigned int ctrl; freq = adau1373_dai->sysclk; if (freq % params_rate(params) != 0) return -EINVAL; switch (freq / params_rate(params)) { case 1024: /* sysclk / 256 */ div = 0; break; case 1536: /* 2/3 sysclk / 256 */ div = 1; break; case 2048: /* 1/2 sysclk / 256 */ div = 2; break; case 3072: /* 1/3 sysclk / 256 */ div = 3; break; case 4096: /* 1/4 sysclk / 256 */ div = 4; break; case 6144: /* 1/6 sysclk / 256 */ div = 5; break; case 5632: /* 2/11 sysclk / 256 */ div = 6; break; default: return 
-EINVAL; } adau1373_dai->enable_src = (div != 0); snd_soc_update_bits(codec, ADAU1373_BCLKDIV(dai->id), ADAU1373_BCLKDIV_SR_MASK | ADAU1373_BCLKDIV_BCLK_MASK, (div << 2) | ADAU1373_BCLKDIV_64); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: ctrl = ADAU1373_DAI_WLEN_16; break; case SNDRV_PCM_FORMAT_S20_3LE: ctrl = ADAU1373_DAI_WLEN_20; break; case SNDRV_PCM_FORMAT_S24_LE: ctrl = ADAU1373_DAI_WLEN_24; break; case SNDRV_PCM_FORMAT_S32_LE: ctrl = ADAU1373_DAI_WLEN_32; break; default: return -EINVAL; } return snd_soc_update_bits(codec, ADAU1373_DAI(dai->id), ADAU1373_DAI_WLEN_MASK, ctrl); } static int adau1373_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; unsigned int ctrl; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: ctrl = ADAU1373_DAI_MASTER; adau1373_dai->master = true; break; case SND_SOC_DAIFMT_CBS_CFS: ctrl = 0; adau1373_dai->master = false; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ctrl |= ADAU1373_DAI_FORMAT_I2S; break; case SND_SOC_DAIFMT_LEFT_J: ctrl |= ADAU1373_DAI_FORMAT_LEFT_J; break; case SND_SOC_DAIFMT_RIGHT_J: ctrl |= ADAU1373_DAI_FORMAT_RIGHT_J; break; case SND_SOC_DAIFMT_DSP_B: ctrl |= ADAU1373_DAI_FORMAT_DSP; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: ctrl |= ADAU1373_DAI_INVERT_BCLK; break; case SND_SOC_DAIFMT_NB_IF: ctrl |= ADAU1373_DAI_INVERT_LRCLK; break; case SND_SOC_DAIFMT_IB_IF: ctrl |= ADAU1373_DAI_INVERT_LRCLK | ADAU1373_DAI_INVERT_BCLK; break; default: return -EINVAL; } snd_soc_update_bits(codec, ADAU1373_DAI(dai->id), ~ADAU1373_DAI_WLEN_MASK, ctrl); return 0; } static int adau1373_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct 
adau1373 *adau1373 = snd_soc_codec_get_drvdata(dai->codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; switch (clk_id) { case ADAU1373_CLK_SRC_PLL1: case ADAU1373_CLK_SRC_PLL2: break; default: return -EINVAL; } adau1373_dai->sysclk = freq; adau1373_dai->clk_src = clk_id; snd_soc_update_bits(dai->codec, ADAU1373_BCLKDIV(dai->id), ADAU1373_BCLKDIV_SOURCE, clk_id << 5); return 0; } static const struct snd_soc_dai_ops adau1373_dai_ops = { .hw_params = adau1373_hw_params, .set_sysclk = adau1373_set_dai_sysclk, .set_fmt = adau1373_set_dai_fmt, }; #define ADAU1373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver adau1373_dai_driver[] = { { .id = 0, .name = "adau1373-aif1", .playback = { .stream_name = "AIF1 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF1 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, { .id = 1, .name = "adau1373-aif2", .playback = { .stream_name = "AIF2 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF2 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, { .id = 2, .name = "adau1373-aif3", .playback = { .stream_name = "AIF3 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF3 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, }; static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id, int 
source, unsigned int freq_in, unsigned int freq_out) { unsigned int dpll_div = 0; unsigned int x, r, n, m, i, j, mode; switch (pll_id) { case ADAU1373_PLL1: case ADAU1373_PLL2: break; default: return -EINVAL; } switch (source) { case ADAU1373_PLL_SRC_BCLK1: case ADAU1373_PLL_SRC_BCLK2: case ADAU1373_PLL_SRC_BCLK3: case ADAU1373_PLL_SRC_LRCLK1: case ADAU1373_PLL_SRC_LRCLK2: case ADAU1373_PLL_SRC_LRCLK3: case ADAU1373_PLL_SRC_MCLK1: case ADAU1373_PLL_SRC_MCLK2: case ADAU1373_PLL_SRC_GPIO1: case ADAU1373_PLL_SRC_GPIO2: case ADAU1373_PLL_SRC_GPIO3: case ADAU1373_PLL_SRC_GPIO4: break; default: return -EINVAL; } if (freq_in < 7813 || freq_in > 27000000) return -EINVAL; if (freq_out < 45158000 || freq_out > 49152000) return -EINVAL; /* APLL input needs to be >= 8Mhz, so in case freq_in is less we use the * DPLL to get it there. DPLL_out = (DPLL_in / div) * 1024 */ while (freq_in < 8000000) { freq_in *= 2; dpll_div++; } if (freq_out % freq_in != 0) { /* fout = fin * (r + (n/m)) / x */ x = DIV_ROUND_UP(freq_in, 13500000); freq_in /= x; r = freq_out / freq_in; i = freq_out % freq_in; j = gcd(i, freq_in); n = i / j; m = freq_in / j; x--; mode = 1; } else { /* fout = fin / r */ r = freq_out / freq_in; n = 0; m = 0; x = 0; mode = 0; } if (r < 2 || r > 8 || x > 3 || m > 0xffff || n > 0xffff) return -EINVAL; if (dpll_div) { dpll_div = 11 - dpll_div; snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id), ADAU1373_PLL_CTRL6_DPLL_BYPASS, 0); } else { snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id), ADAU1373_PLL_CTRL6_DPLL_BYPASS, ADAU1373_PLL_CTRL6_DPLL_BYPASS); } snd_soc_write(codec, ADAU1373_DPLL_CTRL(pll_id), (source << 4) | dpll_div); snd_soc_write(codec, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL2(pll_id), m & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL4(pll_id), n & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL5(pll_id), (r << 3) | (x << 1) | mode); /* Set 
sysclk to pll_rate / 4 */ snd_soc_update_bits(codec, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09); return 0; } static void adau1373_load_drc_settings(struct snd_soc_codec *codec, unsigned int nr, uint8_t *drc) { unsigned int i; for (i = 0; i < ADAU1373_DRC_SIZE; ++i) snd_soc_write(codec, ADAU1373_DRC(nr) + i, drc[i]); } static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias) { switch (micbias) { case ADAU1373_MICBIAS_2_9V: case ADAU1373_MICBIAS_2_2V: case ADAU1373_MICBIAS_2_6V: case ADAU1373_MICBIAS_1_8V: return true; default: break; } return false; } static int adau1373_probe(struct snd_soc_codec *codec) { struct adau1373_platform_data *pdata = codec->dev->platform_data; bool lineout_differential = false; unsigned int val; int ret; int i; ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C); if (ret) { dev_err(codec->dev, "failed to set cache I/O: %d\n", ret); return ret; } if (pdata) { if (pdata->num_drc > ARRAY_SIZE(pdata->drc_setting)) return -EINVAL; if (!adau1373_valid_micbias(pdata->micbias1) || !adau1373_valid_micbias(pdata->micbias2)) return -EINVAL; for (i = 0; i < pdata->num_drc; ++i) { adau1373_load_drc_settings(codec, i, pdata->drc_setting[i]); } snd_soc_add_codec_controls(codec, adau1373_drc_controls, pdata->num_drc); val = 0; for (i = 0; i < 4; ++i) { if (pdata->input_differential[i]) val |= BIT(i); } snd_soc_write(codec, ADAU1373_INPUT_MODE, val); val = 0; if (pdata->lineout_differential) val |= ADAU1373_OUTPUT_CTRL_LDIFF; if (pdata->lineout_ground_sense) val |= ADAU1373_OUTPUT_CTRL_LNFBEN; snd_soc_write(codec, ADAU1373_OUTPUT_CTRL, val); lineout_differential = pdata->lineout_differential; snd_soc_write(codec, ADAU1373_EP_CTRL, (pdata->micbias1 << ADAU1373_EP_CTRL_MICBIAS1_OFFSET) | (pdata->micbias2 << ADAU1373_EP_CTRL_MICBIAS2_OFFSET)); } if (!lineout_differential) { snd_soc_add_codec_controls(codec, adau1373_lineout2_controls, ARRAY_SIZE(adau1373_lineout2_controls)); } snd_soc_write(codec, ADAU1373_ADC_CTRL, 
ADAU1373_ADC_CTRL_RESET_FORCE | ADAU1373_ADC_CTRL_PEAK_DETECT); return 0; } static int adau1373_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3, ADAU1373_PWDN_CTRL3_PWR_EN, ADAU1373_PWDN_CTRL3_PWR_EN); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3, ADAU1373_PWDN_CTRL3_PWR_EN, 0); break; } codec->dapm.bias_level = level; return 0; } static int adau1373_remove(struct snd_soc_codec *codec) { adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int adau1373_suspend(struct snd_soc_codec *codec) { return adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF); } static int adau1373_resume(struct snd_soc_codec *codec) { adau1373_set_bias_level(codec, SND_SOC_BIAS_STANDBY); snd_soc_cache_sync(codec); return 0; } static struct snd_soc_codec_driver adau1373_codec_driver = { .probe = adau1373_probe, .remove = adau1373_remove, .suspend = adau1373_suspend, .resume = adau1373_resume, .set_bias_level = adau1373_set_bias_level, .idle_bias_off = true, .reg_cache_size = ARRAY_SIZE(adau1373_default_regs), .reg_cache_default = adau1373_default_regs, .reg_word_size = sizeof(uint8_t), .set_pll = adau1373_set_pll, .controls = adau1373_controls, .num_controls = ARRAY_SIZE(adau1373_controls), .dapm_widgets = adau1373_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(adau1373_dapm_widgets), .dapm_routes = adau1373_dapm_routes, .num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes), }; static int adau1373_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adau1373 *adau1373; int ret; adau1373 = devm_kzalloc(&client->dev, sizeof(*adau1373), GFP_KERNEL); if (!adau1373) return -ENOMEM; dev_set_drvdata(&client->dev, adau1373); ret = snd_soc_register_codec(&client->dev, &adau1373_codec_driver, adau1373_dai_driver, ARRAY_SIZE(adau1373_dai_driver)); return ret; 
} static int adau1373_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id adau1373_i2c_id[] = { { "adau1373", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adau1373_i2c_id); static struct i2c_driver adau1373_i2c_driver = { .driver = { .name = "adau1373", .owner = THIS_MODULE, }, .probe = adau1373_i2c_probe, .remove = adau1373_i2c_remove, .id_table = adau1373_i2c_id, }; module_i2c_driver(adau1373_i2c_driver); MODULE_DESCRIPTION("ASoC ADAU1373 driver"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_LICENSE("GPL");
gpl-2.0
satgass/android_kernel_lenovo_msm8916
sound/pci/ad1889.c
2240
26777
/* Analog Devices 1889 audio driver * * This is a driver for the AD1889 PCI audio chipset found * on the HP PA-RISC [BCJ]-xxx0 workstations. * * Copyright (C) 2004-2005, Kyle McMartin <kyle@parisc-linux.org> * Copyright (C) 2005, Thibaut Varene <varenet@parisc-linux.org> * Based on the OSS AD1889 driver by Randolph Chung <tausq@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO: * Do we need to take care of CCS register? * Maybe we could use finer grained locking (separate locks for pb/cap)? * Wishlist: * Control Interface (mixer) support * Better AC97 support (VSR...)? 
* PM support * MIDI support * Game Port support * SG DMA support (this will need *a lot* of work) */ #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include <asm/io.h> #include "ad1889.h" #include "ac97/ac97_id.h" #define AD1889_DRVVER "Version: 1.7" MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>"); MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the AD1889 soundcard."); static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the AD1889 soundcard."); static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable AD1889 soundcard."); static char *ac97_quirk[SNDRV_CARDS]; module_param_array(ac97_quirk, charp, NULL, 0444); MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware."); #define DEVNAME "ad1889" #define PFX DEVNAME ": " /* let's use the global sound debug interfaces */ #define ad1889_debug(fmt, arg...) 
snd_printd(KERN_DEBUG fmt, ## arg) /* keep track of some hw registers */ struct ad1889_register_state { u16 reg; /* reg setup */ u32 addr; /* dma base address */ unsigned long size; /* DMA buffer size */ }; struct snd_ad1889 { struct snd_card *card; struct pci_dev *pci; int irq; unsigned long bar; void __iomem *iobase; struct snd_ac97 *ac97; struct snd_ac97_bus *ac97_bus; struct snd_pcm *pcm; struct snd_info_entry *proc; struct snd_pcm_substream *psubs; struct snd_pcm_substream *csubs; /* playback register state */ struct ad1889_register_state wave; struct ad1889_register_state ramc; spinlock_t lock; }; static inline u16 ad1889_readw(struct snd_ad1889 *chip, unsigned reg) { return readw(chip->iobase + reg); } static inline void ad1889_writew(struct snd_ad1889 *chip, unsigned reg, u16 val) { writew(val, chip->iobase + reg); } static inline u32 ad1889_readl(struct snd_ad1889 *chip, unsigned reg) { return readl(chip->iobase + reg); } static inline void ad1889_writel(struct snd_ad1889 *chip, unsigned reg, u32 val) { writel(val, chip->iobase + reg); } static inline void ad1889_unmute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) & ~(AD_DS_WADA_RWAM | AD_DS_WADA_LWAM); ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_mute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) | AD_DS_WADA_RWAM | AD_DS_WADA_LWAM; ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_load_adc_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_ADCBA, address); ad1889_writel(chip, AD_DMA_ADCCA, address); } static inline void ad1889_load_adc_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCBC, count); ad1889_writel(chip, AD_DMA_ADCCC, count); } static inline void ad1889_load_adc_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCIB, count); ad1889_writel(chip, 
AD_DMA_ADCIC, count); } static inline void ad1889_load_wave_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_WAVBA, address); ad1889_writel(chip, AD_DMA_WAVCA, address); } static inline void ad1889_load_wave_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVBC, count); ad1889_writel(chip, AD_DMA_WAVCC, count); } static inline void ad1889_load_wave_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVIB, count); ad1889_writel(chip, AD_DMA_WAVIC, count); } static void ad1889_channel_reset(struct snd_ad1889 *chip, unsigned int channel) { u16 reg; if (channel & AD_CHAN_WAV) { /* Disable wave channel */ reg = ad1889_readw(chip, AD_DS_WSMC) & ~AD_DS_WSMC_WAEN; ad1889_writew(chip, AD_DS_WSMC, reg); chip->wave.reg = reg; /* disable IRQs */ reg = ad1889_readw(chip, AD_DMA_WAV); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_WAV, reg); /* clear IRQ and address counters and pointers */ ad1889_load_wave_buffer_address(chip, 0x0); ad1889_load_wave_buffer_count(chip, 0x0); ad1889_load_wave_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_WAV); } if (channel & AD_CHAN_ADC) { /* Disable ADC channel */ reg = ad1889_readw(chip, AD_DS_RAMC) & ~AD_DS_RAMC_ADEN; ad1889_writew(chip, AD_DS_RAMC, reg); chip->ramc.reg = reg; reg = ad1889_readw(chip, AD_DMA_ADC); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_ADC, reg); ad1889_load_adc_buffer_address(chip, 0x0); ad1889_load_adc_buffer_count(chip, 0x0); ad1889_load_adc_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_ADC); } } static u16 snd_ad1889_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_ad1889 *chip = ac97->private_data; return ad1889_readw(chip, AD_AC97_BASE + reg); } static void snd_ad1889_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_ad1889 *chip = ac97->private_data; ad1889_writew(chip, AD_AC97_BASE 
+ reg, val); } static int snd_ad1889_ac97_ready(struct snd_ad1889 *chip) { int retry = 400; /* average needs 352 msec */ while (!(ad1889_readw(chip, AD_AC97_ACIC) & AD_AC97_ACIC_ACRDY) && --retry) mdelay(1); if (!retry) { snd_printk(KERN_ERR PFX "[%s] Link is not ready.\n", __func__); return -EIO; } ad1889_debug("[%s] ready after %d ms\n", __func__, 400 - retry); return 0; } static int snd_ad1889_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ad1889_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_hardware snd_ad1889_playback_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, /* docs say 7000, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static struct snd_pcm_hardware snd_ad1889_capture_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, /* docs say we could to VSR, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static int snd_ad1889_playback_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->psubs = ss; 
rt->hw = snd_ad1889_playback_hw; return 0; } static int snd_ad1889_capture_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->csubs = ss; rt->hw = snd_ad1889_capture_hw; return 0; } static int snd_ad1889_playback_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->psubs = NULL; return 0; } static int snd_ad1889_capture_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->csubs = NULL; return 0; } static int snd_ad1889_playback_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_WAV); reg = ad1889_readw(chip, AD_DS_WSMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_WSMC_WA16 | AD_DS_WSMC_WAST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_WSMC_WA16; if (rt->channels > 1) reg |= AD_DS_WSMC_WAST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->wave.size = size; chip->wave.reg = reg; chip->wave.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_WSMC, chip->wave.reg); /* Set sample rates on the codec */ ad1889_writew(chip, AD_DS_WAS, rt->rate); /* Set up DMA */ ad1889_load_wave_buffer_address(chip, chip->wave.addr); ad1889_load_wave_buffer_count(chip, size); ad1889_load_wave_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_WSMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare playback: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->wave.addr, count, size, reg, rt->rate); return 0; } static int snd_ad1889_capture_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = 
snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_ADC); reg = ad1889_readw(chip, AD_DS_RAMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_RAMC_AD16 | AD_DS_RAMC_ADST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_RAMC_AD16; if (rt->channels > 1) reg |= AD_DS_RAMC_ADST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->ramc.size = size; chip->ramc.reg = reg; chip->ramc.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_RAMC, chip->ramc.reg); /* Set up DMA */ ad1889_load_adc_buffer_address(chip, chip->ramc.addr); ad1889_load_adc_buffer_count(chip, size); ad1889_load_adc_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_RAMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare capture: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->ramc.addr, count, size, reg, rt->rate); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. 
The WSMC "WAEN" bit triggers DMA Wave On/Off */ static int snd_ad1889_playback_trigger(struct snd_pcm_substream *ss, int cmd) { u16 wsmc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); wsmc = ad1889_readw(chip, AD_DS_WSMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_WAV, AD_DMA_LOOP | AD_DMA_IM_CNT); wsmc |= AD_DS_WSMC_WAEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_WAVS); ad1889_unmute(chip); break; case SNDRV_PCM_TRIGGER_STOP: ad1889_mute(chip); wsmc &= ~AD_DS_WSMC_WAEN; break; default: snd_BUG(); return -EINVAL; } chip->wave.reg = wsmc; ad1889_writew(chip, AD_DS_WSMC, wsmc); ad1889_readw(chip, AD_DS_WSMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_WAV); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. 
The RAMC "ADEN" bit triggers DMA ADC On/Off */ static int snd_ad1889_capture_trigger(struct snd_pcm_substream *ss, int cmd) { u16 ramc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); ramc = ad1889_readw(chip, AD_DS_RAMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_ADC, AD_DMA_LOOP | AD_DMA_IM_CNT); ramc |= AD_DS_RAMC_ADEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_ADCS); break; case SNDRV_PCM_TRIGGER_STOP: ramc &= ~AD_DS_RAMC_ADEN; break; default: return -EINVAL; } chip->ramc.reg = ramc; ad1889_writew(chip, AD_DS_RAMC, ramc); ad1889_readw(chip, AD_DS_RAMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_ADC); return 0; } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_playback_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->wave.reg & AD_DS_WSMC_WAEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_WAVCA); ptr -= chip->wave.addr; if (snd_BUG_ON(ptr >= chip->wave.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_capture_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->ramc.reg & AD_DS_RAMC_ADEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_ADCCA); ptr -= chip->ramc.addr; if (snd_BUG_ON(ptr >= chip->ramc.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } static struct snd_pcm_ops snd_ad1889_playback_ops = { .open = snd_ad1889_playback_open, .close = snd_ad1889_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_playback_prepare, .trigger = snd_ad1889_playback_trigger, .pointer = snd_ad1889_playback_pointer, 
}; static struct snd_pcm_ops snd_ad1889_capture_ops = { .open = snd_ad1889_capture_open, .close = snd_ad1889_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_capture_prepare, .trigger = snd_ad1889_capture_trigger, .pointer = snd_ad1889_capture_pointer, }; static irqreturn_t snd_ad1889_interrupt(int irq, void *dev_id) { unsigned long st; struct snd_ad1889 *chip = dev_id; st = ad1889_readl(chip, AD_DMA_DISR); /* clear ISR */ ad1889_writel(chip, AD_DMA_DISR, st); st &= AD_INTR_MASK; if (unlikely(!st)) return IRQ_NONE; if (st & (AD_DMA_DISR_PMAI|AD_DMA_DISR_PTAI)) ad1889_debug("Unexpected master or target abort interrupt!\n"); if ((st & AD_DMA_DISR_WAVI) && chip->psubs) snd_pcm_period_elapsed(chip->psubs); if ((st & AD_DMA_DISR_ADCI) && chip->csubs) snd_pcm_period_elapsed(chip->csubs); return IRQ_HANDLED; } static int snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm) { int err; struct snd_pcm *pcm; if (rpcm) *rpcm = NULL; err = snd_pcm_new(chip->card, chip->card->driver, device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ad1889_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ad1889_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->psubs = NULL; chip->csubs = NULL; err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), BUFFER_BYTES_MAX / 2, BUFFER_BYTES_MAX); if (err < 0) { snd_printk(KERN_ERR PFX "buffer allocation error: %d\n", err); return err; } if (rpcm) *rpcm = pcm; return 0; } static void snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ad1889 *chip = entry->private_data; u16 reg; int tmp; reg = ad1889_readw(chip, AD_DS_WSMC); snd_iprintf(buffer, "Wave output: %s\n", (reg & AD_DS_WSMC_WAEN) ? 
"enabled" : "disabled"); snd_iprintf(buffer, "Wave Channels: %s\n", (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Wave Quality: %d-bit linear\n", (reg & AD_DS_WSMC_WA16) ? 16 : 8); /* WARQ is at offset 12 */ tmp = (reg & AD_DS_WSMC_WARQ) ? (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Synthesis output: %s\n", reg & AD_DS_WSMC_SYEN ? "enabled" : "disabled"); /* SYRQ is at offset 4 */ tmp = (reg & AD_DS_WSMC_SYRQ) ? (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); reg = ad1889_readw(chip, AD_DS_RAMC); snd_iprintf(buffer, "ADC input: %s\n", (reg & AD_DS_RAMC_ADEN) ? "enabled" : "disabled"); snd_iprintf(buffer, "ADC Channels: %s\n", (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "ADC Quality: %d-bit linear\n", (reg & AD_DS_RAMC_AD16) ? 16 : 8); /* ACRQ is at offset 4 */ tmp = (reg & AD_DS_RAMC_ACRQ) ? (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp, (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "Resampler input: %s\n", reg & AD_DS_RAMC_REEN ? "enabled" : "disabled"); /* RERQ is at offset 12 */ tmp = (reg & AD_DS_RAMC_RERQ) ? (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); /* doc says LSB represents -1.5dB, but the max value (-94.5dB) suggests that LSB is -3dB, which is more coherent with the logarithmic nature of the dB scale */ reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Left: %s, -%d dB\n", (reg & AD_DS_WADA_LWAM) ? 
"mute" : "unmute", ((reg & AD_DS_WADA_LWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Right: %s, -%d dB\n", (reg & AD_DS_WADA_RWAM) ? "mute" : "unmute", ((reg & AD_DS_WADA_RWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WAS); snd_iprintf(buffer, "Wave samplerate: %u Hz\n", reg); reg = ad1889_readw(chip, AD_DS_RES); snd_iprintf(buffer, "Resampler samplerate: %u Hz\n", reg); } static void snd_ad1889_proc_init(struct snd_ad1889 *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, chip->card->driver, &entry)) snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read); } static struct ac97_quirk ac97_quirks[] = { { .subvendor = 0x11d4, /* AD */ .subdevice = 0x1889, /* AD1889 */ .codec_id = AC97_ID_AD1819, .name = "AD1889", .type = AC97_TUNE_HP_ONLY }, { } /* terminator */ }; static void snd_ad1889_ac97_xinit(struct snd_ad1889 *chip) { u16 reg; reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ACRD; /* Reset Disable */ ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ udelay(10); /* Interface Enable */ reg |= AD_AC97_ACIC_ACIE; ad1889_writew(chip, AD_AC97_ACIC, reg); snd_ad1889_ac97_ready(chip); /* Audio Stream Output | Variable Sample Rate Mode */ reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ASOE | AD_AC97_ACIC_VSRM; ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ } static void snd_ad1889_ac97_bus_free(struct snd_ac97_bus *bus) { struct snd_ad1889 *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_ad1889_ac97_free(struct snd_ac97 *ac97) { struct snd_ad1889 *chip = ac97->private_data; chip->ac97 = NULL; } static int snd_ad1889_ac97_init(struct snd_ad1889 *chip, const char *quirk_override) { int err; struct snd_ac97_template ac97; static struct snd_ac97_bus_ops ops = { .write = snd_ad1889_ac97_write, .read = snd_ad1889_ac97_read, }; /* doing that here, it works. 
*/ snd_ad1889_ac97_xinit(chip); err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus); if (err < 0) return err; chip->ac97_bus->private_free = snd_ad1889_ac97_bus_free; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_ad1889_ac97_free; ac97.pci = chip->pci; err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97); if (err < 0) return err; snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override); return 0; } static int snd_ad1889_free(struct snd_ad1889 *chip) { if (chip->irq < 0) goto skip_hw; spin_lock_irq(&chip->lock); ad1889_mute(chip); /* Turn off interrupt on count and zero DMA registers */ ad1889_channel_reset(chip, AD_CHAN_WAV | AD_CHAN_ADC); /* clear DISR. If we don't, we'd better jump off the Eiffel Tower */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PTAI | AD_DMA_DISR_PMAI); ad1889_readl(chip, AD_DMA_DISR); /* flush, dammit! */ spin_unlock_irq(&chip->lock); if (chip->irq >= 0) free_irq(chip->irq, chip); skip_hw: if (chip->iobase) iounmap(chip->iobase); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_ad1889_dev_free(struct snd_device *device) { struct snd_ad1889 *chip = device->device_data; return snd_ad1889_free(chip); } static int snd_ad1889_init(struct snd_ad1889 *chip) { ad1889_writew(chip, AD_DS_CCS, AD_DS_CCS_CLKEN); /* turn on clock */ ad1889_readw(chip, AD_DS_CCS); /* flush posted write */ mdelay(10); /* enable Master and Target abort interrupts */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PMAE | AD_DMA_DISR_PTAE); return 0; } static int snd_ad1889_create(struct snd_card *card, struct pci_dev *pci, struct snd_ad1889 **rchip) { int err; struct snd_ad1889 *chip; static struct snd_device_ops ops = { .dev_free = snd_ad1889_dev_free, }; *rchip = NULL; if ((err = pci_enable_device(pci)) < 0) return err; /* check PCI availability (32bit DMA) */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) 
{ printk(KERN_ERR PFX "error setting 32-bit DMA mask.\n"); pci_disable_device(pci); return -ENXIO; } /* allocate chip specific data with zero-filled memory */ if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; card->private_data = chip; chip->pci = pci; chip->irq = -1; /* (1) PCI resource allocation */ if ((err = pci_request_regions(pci, card->driver)) < 0) goto free_and_ret; chip->bar = pci_resource_start(pci, 0); chip->iobase = pci_ioremap_bar(pci, 0); if (chip->iobase == NULL) { printk(KERN_ERR PFX "unable to reserve region.\n"); err = -EBUSY; goto free_and_ret; } pci_set_master(pci); spin_lock_init(&chip->lock); /* only now can we call ad1889_free */ if (request_irq(pci->irq, snd_ad1889_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { printk(KERN_ERR PFX "cannot obtain IRQ %d\n", pci->irq); snd_ad1889_free(chip); return -EBUSY; } chip->irq = pci->irq; synchronize_irq(chip->irq); /* (2) initialization of the chip hardware */ if ((err = snd_ad1889_init(chip)) < 0) { snd_ad1889_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ad1889_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; free_and_ret: kfree(chip); pci_disable_device(pci); return err; } static int snd_ad1889_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { int err; static int devno; struct snd_card *card; struct snd_ad1889 *chip; /* (1) */ if (devno >= SNDRV_CARDS) return -ENODEV; if (!enable[devno]) { devno++; return -ENOENT; } /* (2) */ err = snd_card_create(index[devno], id[devno], THIS_MODULE, 0, &card); /* XXX REVISIT: we can probably allocate chip in this call */ if (err < 0) return err; strcpy(card->driver, "AD1889"); strcpy(card->shortname, "Analog Devices AD1889"); /* (3) */ err = snd_ad1889_create(card, pci, &chip); if (err < 0) goto free_and_ret; /* (4) */ sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, 
chip->bar, chip->irq); /* (5) */ /* register AC97 mixer */ err = snd_ad1889_ac97_init(chip, ac97_quirk[devno]); if (err < 0) goto free_and_ret; err = snd_ad1889_pcm_init(chip, 0, NULL); if (err < 0) goto free_and_ret; /* register proc interface */ snd_ad1889_proc_init(chip); /* (6) */ err = snd_card_register(card); if (err < 0) goto free_and_ret; /* (7) */ pci_set_drvdata(pci, card); devno++; return 0; free_and_ret: snd_card_free(card); return err; } static void snd_ad1889_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static DEFINE_PCI_DEVICE_TABLE(snd_ad1889_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, snd_ad1889_ids); static struct pci_driver ad1889_pci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ad1889_ids, .probe = snd_ad1889_probe, .remove = snd_ad1889_remove, }; module_pci_driver(ad1889_pci_driver);
gpl-2.0
ayushtyagi28/android_kernel_cyanogen_msm8994
drivers/macintosh/smu.c
2240
31022
/* * PowerMac G5 SMU driver * * Copyright 2004 J. Mayer <l_indien@magic.fr> * Copyright 2005 Benjamin Herrenschmidt, IBM Corp. * * Released under the term of the GNU GPL v2. */ /* * TODO: * - maybe add timeout to commands ? * - blocking version of time functions * - polling version of i2c commands (including timer that works with * interrupts off) * - maybe avoid some data copies with i2c by directly using the smu cmd * buffer and a lower level internal interface * - understand SMU -> CPU events and implement reception of them via * the userland interface */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/dmapool.h> #include <linux/bootmem.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/rtc.h> #include <linux/completion.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/smu.h> #include <asm/sections.h> #include <asm/uaccess.h> #define VERSION "0.7" #define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." #undef DEBUG_SMU #ifdef DEBUG_SMU #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) #else #define DPRINTK(fmt, args...) 
do { } while (0) #endif /* * This is the command buffer passed to the SMU hardware */ #define SMU_MAX_DATA 254 struct smu_cmd_buf { u8 cmd; u8 length; u8 data[SMU_MAX_DATA]; }; struct smu_device { spinlock_t lock; struct device_node *of_node; struct platform_device *of_dev; int doorbell; /* doorbell gpio */ u32 __iomem *db_buf; /* doorbell buffer */ struct device_node *db_node; unsigned int db_irq; int msg; struct device_node *msg_node; unsigned int msg_irq; struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ u32 cmd_buf_abs; /* command buffer absolute */ struct list_head cmd_list; struct smu_cmd *cmd_cur; /* pending command */ int broken_nap; struct list_head cmd_i2c_list; struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */ struct timer_list i2c_timer; }; /* * I don't think there will ever be more than one SMU, so * for now, just hard code that */ static DEFINE_MUTEX(smu_mutex); static struct smu_device *smu; static DEFINE_MUTEX(smu_part_access); static int smu_irq_inited; static void smu_i2c_retry(unsigned long data); /* * SMU driver low level stuff */ static void smu_start_cmd(void) { unsigned long faddr, fend; struct smu_cmd *cmd; if (list_empty(&smu->cmd_list)) return; /* Fetch first command in queue */ cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link); smu->cmd_cur = cmd; list_del(&cmd->link); DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd, cmd->data_len); DPRINTK("SMU: data buffer: %8ph\n", cmd->data_buf); /* Fill the SMU command buffer */ smu->cmd_buf->cmd = cmd->cmd; smu->cmd_buf->length = cmd->data_len; memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len); /* Flush command and data to RAM */ faddr = (unsigned long)smu->cmd_buf; fend = faddr + smu->cmd_buf->length + 2; flush_inval_dcache_range(faddr, fend); /* We also disable NAP mode for the duration of the command * on U3 based machines. * This is slightly racy as it can be written back to 1 by a sysctl * but that never happens in practice. 
There seem to be an issue with * U3 based machines such as the iMac G5 where napping for the * whole duration of the command prevents the SMU from fetching it * from memory. This might be related to the strange i2c based * mechanism the SMU uses to access memory. */ if (smu->broken_nap) powersave_nap = 0; /* This isn't exactly a DMA mapping here, I suspect * the SMU is actually communicating with us via i2c to the * northbridge or the CPU to access RAM. */ writel(smu->cmd_buf_abs, smu->db_buf); /* Ring the SMU doorbell */ pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4); } static irqreturn_t smu_db_intr(int irq, void *arg) { unsigned long flags; struct smu_cmd *cmd; void (*done)(struct smu_cmd *cmd, void *misc) = NULL; void *misc = NULL; u8 gpio; int rc = 0; /* SMU completed the command, well, we hope, let's make sure * of it */ spin_lock_irqsave(&smu->lock, flags); gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); if ((gpio & 7) != 7) { spin_unlock_irqrestore(&smu->lock, flags); return IRQ_HANDLED; } cmd = smu->cmd_cur; smu->cmd_cur = NULL; if (cmd == NULL) goto bail; if (rc == 0) { unsigned long faddr; int reply_len; u8 ack; /* CPU might have brought back the cache line, so we need * to flush again before peeking at the SMU response. We * flush the entire buffer for now as we haven't read the * reply length (it's only 2 cache lines anyway) */ faddr = (unsigned long)smu->cmd_buf; flush_inval_dcache_range(faddr, faddr + 256); /* Now check ack */ ack = (~cmd->cmd) & 0xff; if (ack != smu->cmd_buf->cmd) { DPRINTK("SMU: incorrect ack, want %x got %x\n", ack, smu->cmd_buf->cmd); rc = -EIO; } reply_len = rc == 0 ? 
smu->cmd_buf->length : 0; DPRINTK("SMU: reply len: %d\n", reply_len); if (reply_len > cmd->reply_len) { printk(KERN_WARNING "SMU: reply buffer too small," "got %d bytes for a %d bytes buffer\n", reply_len, cmd->reply_len); reply_len = cmd->reply_len; } cmd->reply_len = reply_len; if (cmd->reply_buf && reply_len) memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len); } /* Now complete the command. Write status last in order as we lost * ownership of the command structure as soon as it's no longer -1 */ done = cmd->done; misc = cmd->misc; mb(); cmd->status = rc; /* Re-enable NAP mode */ if (smu->broken_nap) powersave_nap = 1; bail: /* Start next command if any */ smu_start_cmd(); spin_unlock_irqrestore(&smu->lock, flags); /* Call command completion handler if any */ if (done) done(cmd, misc); /* It's an edge interrupt, nothing to do */ return IRQ_HANDLED; } static irqreturn_t smu_msg_intr(int irq, void *arg) { /* I don't quite know what to do with this one, we seem to never * receive it, so I suspect we have to arm it someway in the SMU * to start getting events that way. */ printk(KERN_INFO "SMU: message interrupt !\n"); /* It's an edge interrupt, nothing to do */ return IRQ_HANDLED; } /* * Queued command management. * */ int smu_queue_cmd(struct smu_cmd *cmd) { unsigned long flags; if (smu == NULL) return -ENODEV; if (cmd->data_len > SMU_MAX_DATA || cmd->reply_len > SMU_MAX_DATA) return -EINVAL; cmd->status = 1; spin_lock_irqsave(&smu->lock, flags); list_add_tail(&cmd->link, &smu->cmd_list); if (smu->cmd_cur == NULL) smu_start_cmd(); spin_unlock_irqrestore(&smu->lock, flags); /* Workaround for early calls when irq isn't available */ if (!smu_irq_inited || smu->db_irq == NO_IRQ) smu_spinwait_cmd(cmd); return 0; } EXPORT_SYMBOL(smu_queue_cmd); int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command, unsigned int data_len, void (*done)(struct smu_cmd *cmd, void *misc), void *misc, ...) 
{ struct smu_cmd *cmd = &scmd->cmd; va_list list; int i; if (data_len > sizeof(scmd->buffer)) return -EINVAL; memset(scmd, 0, sizeof(*scmd)); cmd->cmd = command; cmd->data_len = data_len; cmd->data_buf = scmd->buffer; cmd->reply_len = sizeof(scmd->buffer); cmd->reply_buf = scmd->buffer; cmd->done = done; cmd->misc = misc; va_start(list, misc); for (i = 0; i < data_len; ++i) scmd->buffer[i] = (u8)va_arg(list, int); va_end(list); return smu_queue_cmd(cmd); } EXPORT_SYMBOL(smu_queue_simple); void smu_poll(void) { u8 gpio; if (smu == NULL) return; gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); if ((gpio & 7) == 7) smu_db_intr(smu->db_irq, smu); } EXPORT_SYMBOL(smu_poll); void smu_done_complete(struct smu_cmd *cmd, void *misc) { struct completion *comp = misc; complete(comp); } EXPORT_SYMBOL(smu_done_complete); void smu_spinwait_cmd(struct smu_cmd *cmd) { while(cmd->status == 1) smu_poll(); } EXPORT_SYMBOL(smu_spinwait_cmd); /* RTC low level commands */ static inline int bcd2hex (int n) { return (((n & 0xf0) >> 4) * 10) + (n & 0xf); } static inline int hex2bcd (int n) { return ((n / 10) << 4) + (n % 10); } static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, struct rtc_time *time) { cmd_buf->cmd = 0x8e; cmd_buf->length = 8; cmd_buf->data[0] = 0x80; cmd_buf->data[1] = hex2bcd(time->tm_sec); cmd_buf->data[2] = hex2bcd(time->tm_min); cmd_buf->data[3] = hex2bcd(time->tm_hour); cmd_buf->data[4] = time->tm_wday; cmd_buf->data[5] = hex2bcd(time->tm_mday); cmd_buf->data[6] = hex2bcd(time->tm_mon) + 1; cmd_buf->data[7] = hex2bcd(time->tm_year - 100); } int smu_get_rtc_time(struct rtc_time *time, int spinwait) { struct smu_simple_cmd cmd; int rc; if (smu == NULL) return -ENODEV; memset(time, 0, sizeof(struct rtc_time)); rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL, SMU_CMD_RTC_GET_DATETIME); if (rc) return rc; smu_spinwait_simple(&cmd); time->tm_sec = bcd2hex(cmd.buffer[0]); time->tm_min = bcd2hex(cmd.buffer[1]); 
time->tm_hour = bcd2hex(cmd.buffer[2]); time->tm_wday = bcd2hex(cmd.buffer[3]); time->tm_mday = bcd2hex(cmd.buffer[4]); time->tm_mon = bcd2hex(cmd.buffer[5]) - 1; time->tm_year = bcd2hex(cmd.buffer[6]) + 100; return 0; } int smu_set_rtc_time(struct rtc_time *time, int spinwait) { struct smu_simple_cmd cmd; int rc; if (smu == NULL) return -ENODEV; rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL, SMU_CMD_RTC_SET_DATETIME, hex2bcd(time->tm_sec), hex2bcd(time->tm_min), hex2bcd(time->tm_hour), time->tm_wday, hex2bcd(time->tm_mday), hex2bcd(time->tm_mon) + 1, hex2bcd(time->tm_year - 100)); if (rc) return rc; smu_spinwait_simple(&cmd); return 0; } void smu_shutdown(void) { struct smu_simple_cmd cmd; if (smu == NULL) return; if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL, 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0)) return; smu_spinwait_simple(&cmd); for (;;) ; } void smu_restart(void) { struct smu_simple_cmd cmd; if (smu == NULL) return; if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL, 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0)) return; smu_spinwait_simple(&cmd); for (;;) ; } int smu_present(void) { return smu != NULL; } EXPORT_SYMBOL(smu_present); int __init smu_init (void) { struct device_node *np; const u32 *data; int ret = 0; np = of_find_node_by_type(NULL, "smu"); if (np == NULL) return -ENODEV; printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR); if (smu_cmdbuf_abs == 0) { printk(KERN_ERR "SMU: Command buffer not allocated !\n"); ret = -EINVAL; goto fail_np; } smu = alloc_bootmem(sizeof(struct smu_device)); spin_lock_init(&smu->lock); INIT_LIST_HEAD(&smu->cmd_list); INIT_LIST_HEAD(&smu->cmd_i2c_list); smu->of_node = np; smu->db_irq = NO_IRQ; smu->msg_irq = NO_IRQ; /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a * 32 bits value safely */ smu->cmd_buf_abs = (u32)smu_cmdbuf_abs; smu->cmd_buf = __va(smu_cmdbuf_abs); smu->db_node = of_find_node_by_name(NULL, "smu-doorbell"); if (smu->db_node == NULL) { 
printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n"); ret = -ENXIO; goto fail_bootmem; } data = of_get_property(smu->db_node, "reg", NULL); if (data == NULL) { printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); ret = -ENXIO; goto fail_db_node; } /* Current setup has one doorbell GPIO that does both doorbell * and ack. GPIOs are at 0x50, best would be to find that out * in the device-tree though. */ smu->doorbell = *data; if (smu->doorbell < 0x50) smu->doorbell += 0x50; /* Now look for the smu-interrupt GPIO */ do { smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt"); if (smu->msg_node == NULL) break; data = of_get_property(smu->msg_node, "reg", NULL); if (data == NULL) { of_node_put(smu->msg_node); smu->msg_node = NULL; break; } smu->msg = *data; if (smu->msg < 0x50) smu->msg += 0x50; } while(0); /* Doorbell buffer is currently hard-coded, I didn't find a proper * device-tree entry giving the address. Best would probably to use * an offset for K2 base though, but let's do it that way for now. 
*/ smu->db_buf = ioremap(0x8000860c, 0x1000); if (smu->db_buf == NULL) { printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n"); ret = -ENXIO; goto fail_msg_node; } /* U3 has an issue with NAP mode when issuing SMU commands */ smu->broken_nap = pmac_get_uninorth_variant() < 4; if (smu->broken_nap) printk(KERN_INFO "SMU: using NAP mode workaround\n"); sys_ctrler = SYS_CTRLER_SMU; return 0; fail_msg_node: if (smu->msg_node) of_node_put(smu->msg_node); fail_db_node: of_node_put(smu->db_node); fail_bootmem: free_bootmem(__pa(smu), sizeof(struct smu_device)); smu = NULL; fail_np: of_node_put(np); return ret; } static int smu_late_init(void) { if (!smu) return 0; init_timer(&smu->i2c_timer); smu->i2c_timer.function = smu_i2c_retry; smu->i2c_timer.data = (unsigned long)smu; if (smu->db_node) { smu->db_irq = irq_of_parse_and_map(smu->db_node, 0); if (smu->db_irq == NO_IRQ) printk(KERN_ERR "smu: failed to map irq for node %s\n", smu->db_node->full_name); } if (smu->msg_node) { smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0); if (smu->msg_irq == NO_IRQ) printk(KERN_ERR "smu: failed to map irq for node %s\n", smu->msg_node->full_name); } /* * Try to request the interrupts */ if (smu->db_irq != NO_IRQ) { if (request_irq(smu->db_irq, smu_db_intr, IRQF_SHARED, "SMU doorbell", smu) < 0) { printk(KERN_WARNING "SMU: can't " "request interrupt %d\n", smu->db_irq); smu->db_irq = NO_IRQ; } } if (smu->msg_irq != NO_IRQ) { if (request_irq(smu->msg_irq, smu_msg_intr, IRQF_SHARED, "SMU message", smu) < 0) { printk(KERN_WARNING "SMU: can't " "request interrupt %d\n", smu->msg_irq); smu->msg_irq = NO_IRQ; } } smu_irq_inited = 1; return 0; } /* This has to be before arch_initcall as the low i2c stuff relies on the * above having been done before we reach arch_initcalls */ core_initcall(smu_late_init); /* * sysfs visibility */ static void smu_expose_childs(struct work_struct *unused) { struct device_node *np; for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) if 
(of_device_is_compatible(np, "smu-sensors")) of_platform_device_create(np, "smu-sensors", &smu->of_dev->dev); } static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); static int smu_platform_probe(struct platform_device* dev) { if (!smu) return -ENODEV; smu->of_dev = dev; /* * Ok, we are matched, now expose all i2c busses. We have to defer * that unfortunately or it would deadlock inside the device model */ schedule_work(&smu_expose_childs_work); return 0; } static const struct of_device_id smu_platform_match[] = { { .type = "smu", }, {}, }; static struct platform_driver smu_of_platform_driver = { .driver = { .name = "smu", .owner = THIS_MODULE, .of_match_table = smu_platform_match, }, .probe = smu_platform_probe, }; static int __init smu_init_sysfs(void) { /* * For now, we don't power manage machines with an SMU chip, * I'm a bit too far from figuring out how that works with those * new chipsets, but that will come back and bite us */ platform_driver_register(&smu_of_platform_driver); return 0; } device_initcall(smu_init_sysfs); struct platform_device *smu_get_ofdev(void) { if (!smu) return NULL; return smu->of_dev; } EXPORT_SYMBOL_GPL(smu_get_ofdev); /* * i2c interface */ static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail) { void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done; void *misc = cmd->misc; unsigned long flags; /* Check for read case */ if (!fail && cmd->read) { if (cmd->pdata[0] < 1) fail = 1; else memcpy(cmd->info.data, &cmd->pdata[1], cmd->info.datalen); } DPRINTK("SMU: completing, success: %d\n", !fail); /* Update status and mark no pending i2c command with lock * held so nobody comes in while we dequeue an eventual * pending next i2c command */ spin_lock_irqsave(&smu->lock, flags); smu->cmd_i2c_cur = NULL; wmb(); cmd->status = fail ? -EIO : 0; /* Is there another i2c command waiting ? 
*/ if (!list_empty(&smu->cmd_i2c_list)) { struct smu_i2c_cmd *newcmd; /* Fetch it, new current, remove from list */ newcmd = list_entry(smu->cmd_i2c_list.next, struct smu_i2c_cmd, link); smu->cmd_i2c_cur = newcmd; list_del(&cmd->link); /* Queue with low level smu */ list_add_tail(&cmd->scmd.link, &smu->cmd_list); if (smu->cmd_cur == NULL) smu_start_cmd(); } spin_unlock_irqrestore(&smu->lock, flags); /* Call command completion handler if any */ if (done) done(cmd, misc); } static void smu_i2c_retry(unsigned long data) { struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur; DPRINTK("SMU: i2c failure, requeuing...\n"); /* requeue command simply by resetting reply_len */ cmd->pdata[0] = 0xff; cmd->scmd.reply_len = sizeof(cmd->pdata); smu_queue_cmd(&cmd->scmd); } static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc) { struct smu_i2c_cmd *cmd = misc; int fail = 0; DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n", cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len); /* Check for possible status */ if (scmd->status < 0) fail = 1; else if (cmd->read) { if (cmd->stage == 0) fail = cmd->pdata[0] != 0; else fail = cmd->pdata[0] >= 0x80; } else { fail = cmd->pdata[0] != 0; } /* Handle failures by requeuing command, after 5ms interval */ if (fail && --cmd->retries > 0) { DPRINTK("SMU: i2c failure, starting timer...\n"); BUG_ON(cmd != smu->cmd_i2c_cur); if (!smu_irq_inited) { mdelay(5); smu_i2c_retry(0); return; } mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5)); return; } /* If failure or stage 1, command is complete */ if (fail || cmd->stage != 0) { smu_i2c_complete_command(cmd, fail); return; } DPRINTK("SMU: going to stage 1\n"); /* Ok, initial command complete, now poll status */ scmd->reply_buf = cmd->pdata; scmd->reply_len = sizeof(cmd->pdata); scmd->data_buf = cmd->pdata; scmd->data_len = 1; cmd->pdata[0] = 0; cmd->stage = 1; cmd->retries = 20; smu_queue_cmd(scmd); } int smu_queue_i2c(struct smu_i2c_cmd *cmd) { unsigned long flags; if 
(smu == NULL) return -ENODEV; /* Fill most fields of scmd */ cmd->scmd.cmd = SMU_CMD_I2C_COMMAND; cmd->scmd.done = smu_i2c_low_completion; cmd->scmd.misc = cmd; cmd->scmd.reply_buf = cmd->pdata; cmd->scmd.reply_len = sizeof(cmd->pdata); cmd->scmd.data_buf = (u8 *)(char *)&cmd->info; cmd->scmd.status = 1; cmd->stage = 0; cmd->pdata[0] = 0xff; cmd->retries = 20; cmd->status = 1; /* Check transfer type, sanitize some "info" fields * based on transfer type and do more checking */ cmd->info.caddr = cmd->info.devaddr; cmd->read = cmd->info.devaddr & 0x01; switch(cmd->info.type) { case SMU_I2C_TRANSFER_SIMPLE: memset(&cmd->info.sublen, 0, 4); break; case SMU_I2C_TRANSFER_COMBINED: cmd->info.devaddr &= 0xfe; case SMU_I2C_TRANSFER_STDSUB: if (cmd->info.sublen > 3) return -EINVAL; break; default: return -EINVAL; } /* Finish setting up command based on transfer direction */ if (cmd->read) { if (cmd->info.datalen > SMU_I2C_READ_MAX) return -EINVAL; memset(cmd->info.data, 0xff, cmd->info.datalen); cmd->scmd.data_len = 9; } else { if (cmd->info.datalen > SMU_I2C_WRITE_MAX) return -EINVAL; cmd->scmd.data_len = 9 + cmd->info.datalen; } DPRINTK("SMU: i2c enqueuing command\n"); DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n", cmd->read ? "read" : "write", cmd->info.datalen, cmd->info.bus, cmd->info.caddr, cmd->info.subaddr[0], cmd->info.type); /* Enqueue command in i2c list, and if empty, enqueue also in * main command list */ spin_lock_irqsave(&smu->lock, flags); if (smu->cmd_i2c_cur == NULL) { smu->cmd_i2c_cur = cmd; list_add_tail(&cmd->scmd.link, &smu->cmd_list); if (smu->cmd_cur == NULL) smu_start_cmd(); } else list_add_tail(&cmd->link, &smu->cmd_i2c_list); spin_unlock_irqrestore(&smu->lock, flags); return 0; } /* * Handling of "partitions" */ static int smu_read_datablock(u8 *dest, unsigned int addr, unsigned int len) { DECLARE_COMPLETION_ONSTACK(comp); unsigned int chunk; struct smu_cmd cmd; int rc; u8 params[8]; /* We currently use a chunk size of 0xe. 
We could check the * SMU firmware version and use bigger sizes though */ chunk = 0xe; while (len) { unsigned int clen = min(len, chunk); cmd.cmd = SMU_CMD_MISC_ee_COMMAND; cmd.data_len = 7; cmd.data_buf = params; cmd.reply_len = chunk; cmd.reply_buf = dest; cmd.done = smu_done_complete; cmd.misc = &comp; params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC; params[1] = 0x4; *((u32 *)&params[2]) = addr; params[6] = clen; rc = smu_queue_cmd(&cmd); if (rc) return rc; wait_for_completion(&comp); if (cmd.status != 0) return rc; if (cmd.reply_len != clen) { printk(KERN_DEBUG "SMU: short read in " "smu_read_datablock, got: %d, want: %d\n", cmd.reply_len, clen); return -EIO; } len -= clen; addr += clen; dest += clen; } return 0; } static struct smu_sdbp_header *smu_create_sdb_partition(int id) { DECLARE_COMPLETION_ONSTACK(comp); struct smu_simple_cmd cmd; unsigned int addr, len, tlen; struct smu_sdbp_header *hdr; struct property *prop; /* First query the partition info */ DPRINTK("SMU: Query partition infos ... 
(irq=%d)\n", smu->db_irq); smu_queue_simple(&cmd, SMU_CMD_PARTITION_COMMAND, 2, smu_done_complete, &comp, SMU_CMD_PARTITION_LATEST, id); wait_for_completion(&comp); DPRINTK("SMU: done, status: %d, reply_len: %d\n", cmd.cmd.status, cmd.cmd.reply_len); /* Partition doesn't exist (or other error) */ if (cmd.cmd.status != 0 || cmd.cmd.reply_len != 6) return NULL; /* Fetch address and length from reply */ addr = *((u16 *)cmd.buffer); len = cmd.buffer[3] << 2; /* Calucluate total length to allocate, including the 17 bytes * for "sdb-partition-XX" that we append at the end of the buffer */ tlen = sizeof(struct property) + len + 18; prop = kzalloc(tlen, GFP_KERNEL); if (prop == NULL) return NULL; hdr = (struct smu_sdbp_header *)(prop + 1); prop->name = ((char *)prop) + tlen - 18; sprintf(prop->name, "sdb-partition-%02x", id); prop->length = len; prop->value = hdr; prop->next = NULL; /* Read the datablock */ if (smu_read_datablock((u8 *)hdr, addr, len)) { printk(KERN_DEBUG "SMU: datablock read failed while reading " "partition %02x !\n", id); goto failure; } /* Got it, check a few things and create the property */ if (hdr->id != id) { printk(KERN_DEBUG "SMU: Reading partition %02x and got " "%02x !\n", id, hdr->id); goto failure; } if (of_add_property(smu->of_node, prop)) { printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x " "property !\n", id); goto failure; } return hdr; failure: kfree(prop); return NULL; } /* Note: Only allowed to return error code in pointers (using ERR_PTR) * when interruptible is 1 */ const struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, int interruptible) { char pname[32]; const struct smu_sdbp_header *part; if (!smu) return NULL; sprintf(pname, "sdb-partition-%02x", id); DPRINTK("smu_get_sdb_partition(%02x)\n", id); if (interruptible) { int rc; rc = mutex_lock_interruptible(&smu_part_access); if (rc) return ERR_PTR(rc); } else mutex_lock(&smu_part_access); part = of_get_property(smu->of_node, pname, size); if 
(part == NULL) { DPRINTK("trying to extract from SMU ...\n"); part = smu_create_sdb_partition(id); if (part != NULL && size) *size = part->len << 2; } mutex_unlock(&smu_part_access); return part; } const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size) { return __smu_get_sdb_partition(id, size, 0); } EXPORT_SYMBOL(smu_get_sdb_partition); /* * Userland driver interface */ static LIST_HEAD(smu_clist); static DEFINE_SPINLOCK(smu_clist_lock); enum smu_file_mode { smu_file_commands, smu_file_events, smu_file_closing }; struct smu_private { struct list_head list; enum smu_file_mode mode; int busy; struct smu_cmd cmd; spinlock_t lock; wait_queue_head_t wait; u8 buffer[SMU_MAX_DATA]; }; static int smu_open(struct inode *inode, struct file *file) { struct smu_private *pp; unsigned long flags; pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); if (pp == 0) return -ENOMEM; spin_lock_init(&pp->lock); pp->mode = smu_file_commands; init_waitqueue_head(&pp->wait); mutex_lock(&smu_mutex); spin_lock_irqsave(&smu_clist_lock, flags); list_add(&pp->list, &smu_clist); spin_unlock_irqrestore(&smu_clist_lock, flags); file->private_data = pp; mutex_unlock(&smu_mutex); return 0; } static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc) { struct smu_private *pp = misc; wake_up_all(&pp->wait); } static ssize_t smu_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct smu_private *pp = file->private_data; unsigned long flags; struct smu_user_cmd_hdr hdr; int rc = 0; if (pp->busy) return -EBUSY; else if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) { pp->mode = smu_file_events; return 0; } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) { const struct smu_sdbp_header *part; part = __smu_get_sdb_partition(hdr.cmd, NULL, 1); if (part == NULL) return -EINVAL; else if (IS_ERR(part)) return PTR_ERR(part); return 0; } else if (hdr.cmdtype != SMU_CMDTYPE_SMU) return 
-EINVAL; else if (pp->mode != smu_file_commands) return -EBADFD; else if (hdr.data_len > SMU_MAX_DATA) return -EINVAL; spin_lock_irqsave(&pp->lock, flags); if (pp->busy) { spin_unlock_irqrestore(&pp->lock, flags); return -EBUSY; } pp->busy = 1; pp->cmd.status = 1; spin_unlock_irqrestore(&pp->lock, flags); if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { pp->busy = 0; return -EFAULT; } pp->cmd.cmd = hdr.cmd; pp->cmd.data_len = hdr.data_len; pp->cmd.reply_len = SMU_MAX_DATA; pp->cmd.data_buf = pp->buffer; pp->cmd.reply_buf = pp->buffer; pp->cmd.done = smu_user_cmd_done; pp->cmd.misc = pp; rc = smu_queue_cmd(&pp->cmd); if (rc < 0) return rc; return count; } static ssize_t smu_read_command(struct file *file, struct smu_private *pp, char __user *buf, size_t count) { DECLARE_WAITQUEUE(wait, current); struct smu_user_reply_hdr hdr; unsigned long flags; int size, rc = 0; if (!pp->busy) return 0; if (count < sizeof(struct smu_user_reply_hdr)) return -EOVERFLOW; spin_lock_irqsave(&pp->lock, flags); if (pp->cmd.status == 1) { if (file->f_flags & O_NONBLOCK) { spin_unlock_irqrestore(&pp->lock, flags); return -EAGAIN; } add_wait_queue(&pp->wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); rc = 0; if (pp->cmd.status != 1) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; spin_unlock_irqrestore(&pp->lock, flags); schedule(); spin_lock_irqsave(&pp->lock, flags); } set_current_state(TASK_RUNNING); remove_wait_queue(&pp->wait, &wait); } spin_unlock_irqrestore(&pp->lock, flags); if (rc) return rc; if (pp->cmd.status != 0) pp->cmd.reply_len = 0; size = sizeof(hdr) + pp->cmd.reply_len; if (count < size) size = count; rc = size; hdr.status = pp->cmd.status; hdr.reply_len = pp->cmd.reply_len; if (copy_to_user(buf, &hdr, sizeof(hdr))) return -EFAULT; size -= sizeof(hdr); if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size)) return -EFAULT; pp->busy = 0; return rc; } static ssize_t smu_read_events(struct file *file, struct smu_private 
*pp, char __user *buf, size_t count) { /* Not implemented */ msleep_interruptible(1000); return 0; } static ssize_t smu_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct smu_private *pp = file->private_data; if (pp->mode == smu_file_commands) return smu_read_command(file, pp, buf, count); if (pp->mode == smu_file_events) return smu_read_events(file, pp, buf, count); return -EBADFD; } static unsigned int smu_fpoll(struct file *file, poll_table *wait) { struct smu_private *pp = file->private_data; unsigned int mask = 0; unsigned long flags; if (pp == 0) return 0; if (pp->mode == smu_file_commands) { poll_wait(file, &pp->wait, wait); spin_lock_irqsave(&pp->lock, flags); if (pp->busy && pp->cmd.status != 1) mask |= POLLIN; spin_unlock_irqrestore(&pp->lock, flags); } if (pp->mode == smu_file_events) { /* Not yet implemented */ } return mask; } static int smu_release(struct inode *inode, struct file *file) { struct smu_private *pp = file->private_data; unsigned long flags; unsigned int busy; if (pp == 0) return 0; file->private_data = NULL; /* Mark file as closing to avoid races with new request */ spin_lock_irqsave(&pp->lock, flags); pp->mode = smu_file_closing; busy = pp->busy; /* Wait for any pending request to complete */ if (busy && pp->cmd.status == 1) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(&pp->wait, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (pp->cmd.status != 1) break; spin_unlock_irqrestore(&pp->lock, flags); schedule(); spin_lock_irqsave(&pp->lock, flags); } set_current_state(TASK_RUNNING); remove_wait_queue(&pp->wait, &wait); } spin_unlock_irqrestore(&pp->lock, flags); spin_lock_irqsave(&smu_clist_lock, flags); list_del(&pp->list); spin_unlock_irqrestore(&smu_clist_lock, flags); kfree(pp); return 0; } static const struct file_operations smu_device_fops = { .llseek = no_llseek, .read = smu_read, .write = smu_write, .poll = smu_fpoll, .open = smu_open, .release = smu_release, }; static struct 
miscdevice pmu_device = { MISC_DYNAMIC_MINOR, "smu", &smu_device_fops }; static int smu_device_init(void) { if (!smu) return -ENODEV; if (misc_register(&pmu_device) < 0) printk(KERN_ERR "via-pmu: cannot register misc device.\n"); return 0; } device_initcall(smu_device_init);
gpl-2.0
cnexus/kernel_tw_43
sound/soc/msm/msm7x30.c
3520
26779
/* Copyright (c) 2008-2011, The Linux Foundation. All rights reserved. * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/tlv.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include <linux/dma-mapping.h> #include <linux/msm_audio.h> #include "msm7kv2-pcm.h" #include <asm/mach-types.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/debug_mm.h> #include <mach/qdsp5v2/afe.h> static struct platform_device *msm_audio_snd_device; struct audio_locks the_locks; EXPORT_SYMBOL(the_locks); struct msm_volume msm_vol_ctl; EXPORT_SYMBOL(msm_vol_ctl); static struct snd_kcontrol_new snd_msm_controls[]; char snddev_name[AUDIO_DEV_CTL_MAX_DEV][44]; #define MSM_MAX_VOLUME 0x2000 #define MSM_VOLUME_STEP ((MSM_MAX_VOLUME+17)/100) /* 17 added to avoid more deviation */ #define LOOPBACK_ENABLE 0x1 #define LOOPBACK_DISABLE 0x0 static int device_index; /* Count of Device controls */ static int simple_control; /* Count of simple controls*/ static int src_dev; static int dst_dev; static int loopback_status; static int 
msm_scontrol_count_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; return 0; } static int msm_scontrol_count_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = simple_control; return 0; } static int msm_v_call_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int msm_v_call_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int msm_v_call_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int start = ucontrol->value.integer.value[0]; if (start) broadcast_event(AUDDEV_EVT_START_VOICE, DEVICE_IGNORE, SESSION_IGNORE); else broadcast_event(AUDDEV_EVT_END_VOICE, DEVICE_IGNORE, SESSION_IGNORE); return 0; } static int msm_v_mute_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 2; return 0; } static int msm_v_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int msm_v_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int dir = ucontrol->value.integer.value[0]; int mute = ucontrol->value.integer.value[1]; return msm_set_voice_mute(dir, mute); } static int msm_v_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; /* Volume */ uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int msm_v_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; 
} static int msm_v_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int dir = ucontrol->value.integer.value[0]; int volume = ucontrol->value.integer.value[1]; return msm_set_voice_vol(dir, volume); } static int msm_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 3; /* Volume and 10-base multiply factor*/ uinfo->value.integer.min = 0; /* limit the muliply factor to 4 decimal digit */ uinfo->value.integer.max = 1000000; return 0; } static int msm_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int msm_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int session_id = ucontrol->value.integer.value[0]; int volume = ucontrol->value.integer.value[1]; int factor = ucontrol->value.integer.value[2]; u32 session_mask = 0; if (factor > 10000) return -EINVAL; if ((volume < 0) || (volume/factor > 100)) return -EINVAL; volume = (MSM_VOLUME_STEP * volume); /* Convert back to original decimal point by removing the 10-base factor * and discard the fractional portion */ volume = volume/factor; if (volume > MSM_MAX_VOLUME) volume = MSM_MAX_VOLUME; /* Only Decoder volume control supported */ session_mask = (0x1 << (session_id) << (8 * ((int)AUDDEV_CLNT_DEC-1))); msm_vol_ctl.volume = volume; MM_DBG("session_id %d, volume %d", session_id, volume); broadcast_event(AUDDEV_EVT_STREAM_VOL_CHG, DEVICE_IGNORE, session_mask); return ret; } static int msm_voice_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 3; /* Device */ uinfo->value.integer.min = 0; uinfo->value.integer.max = msm_snddev_devcount(); return 0; } static int msm_voice_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int rc = 0; uint32_t rx_dev_id; uint32_t tx_dev_id; struct 
msm_snddev_info *rx_dev_info; struct msm_snddev_info *tx_dev_info; int set = ucontrol->value.integer.value[2]; u32 session_mask; if (!set) return -EPERM; /* Rx Device Routing */ rx_dev_id = ucontrol->value.integer.value[0]; rx_dev_info = audio_dev_ctrl_find_dev(rx_dev_id); if (IS_ERR(rx_dev_info)) { MM_ERR("pass invalid dev_id\n"); rc = PTR_ERR(rx_dev_info); return rc; } if (!(rx_dev_info->capability & SNDDEV_CAP_RX)) { MM_ERR("First Dev is supposed to be RX\n"); return -EFAULT; } MM_DBG("route cfg %d STREAM_VOICE_RX type\n", rx_dev_id); msm_set_voc_route(rx_dev_info, AUDIO_ROUTE_STREAM_VOICE_RX, rx_dev_id); session_mask = 0x1 << (8 * ((int)AUDDEV_CLNT_VOC-1)); broadcast_event(AUDDEV_EVT_DEV_CHG_VOICE, rx_dev_id, session_mask); /* Tx Device Routing */ tx_dev_id = ucontrol->value.integer.value[1]; tx_dev_info = audio_dev_ctrl_find_dev(tx_dev_id); if (IS_ERR(tx_dev_info)) { MM_ERR("pass invalid dev_id\n"); rc = PTR_ERR(tx_dev_info); return rc; } if (!(tx_dev_info->capability & SNDDEV_CAP_TX)) { MM_ERR("Second Dev is supposed to be Tx\n"); return -EFAULT; } MM_DBG("route cfg %d %d type\n", tx_dev_id, AUDIO_ROUTE_STREAM_VOICE_TX); msm_set_voc_route(tx_dev_info, AUDIO_ROUTE_STREAM_VOICE_TX, tx_dev_id); broadcast_event(AUDDEV_EVT_DEV_CHG_VOICE, tx_dev_id, session_mask); if (rx_dev_info->opened) broadcast_event(AUDDEV_EVT_DEV_RDY, rx_dev_id, session_mask); if (tx_dev_info->opened) broadcast_event(AUDDEV_EVT_DEV_RDY, tx_dev_id, session_mask); return rc; } static int msm_voice_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; /* TODO: query Device list */ return 0; } static int msm_device_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; /* Device */ uinfo->value.integer.min = 0; uinfo->value.integer.max = msm_snddev_devcount(); return 0; } static int msm_device_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) 
{ int rc = 0; int set = 0; struct msm_audio_route_config route_cfg; struct msm_snddev_info *dev_info; struct msm_snddev_info *dst_dev_info; struct msm_snddev_info *src_dev_info; int tx_freq = 0; int rx_freq = 0; u32 set_freq = 0; set = ucontrol->value.integer.value[0]; route_cfg.dev_id = ucontrol->id.numid - device_index; dev_info = audio_dev_ctrl_find_dev(route_cfg.dev_id); if (IS_ERR(dev_info)) { MM_ERR("pass invalid dev_id\n"); rc = PTR_ERR(dev_info); return rc; } MM_INFO("device %s set %d\n", dev_info->name, set); if (set) { if (!dev_info->opened) { set_freq = dev_info->sample_rate; if (!msm_device_is_voice(route_cfg.dev_id)) { msm_get_voc_freq(&tx_freq, &rx_freq); if (dev_info->capability & SNDDEV_CAP_TX) set_freq = tx_freq; if (set_freq == 0) set_freq = dev_info->sample_rate; } else set_freq = dev_info->sample_rate; MM_ERR("device freq =%d\n", set_freq); rc = dev_info->dev_ops.set_freq(dev_info, set_freq); if (rc < 0) { MM_ERR("device freq failed!\n"); return rc; } dev_info->set_sample_rate = rc; rc = 0; rc = dev_info->dev_ops.open(dev_info); if (rc < 0) { MM_ERR("Enabling %s failed", dev_info->name); return rc; } dev_info->opened = 1; broadcast_event(AUDDEV_EVT_DEV_RDY, route_cfg.dev_id, SESSION_IGNORE); /* Event to notify client for device info */ broadcast_event(AUDDEV_EVT_DEVICE_INFO, route_cfg.dev_id, SESSION_IGNORE); if ((route_cfg.dev_id == src_dev) || (route_cfg.dev_id == dst_dev)) { dst_dev_info = audio_dev_ctrl_find_dev( dst_dev); if (IS_ERR(dst_dev_info)) { pr_err("dst_dev:%s:pass invalid" "dev_id\n", __func__); rc = PTR_ERR(dst_dev_info); return rc; } src_dev_info = audio_dev_ctrl_find_dev( src_dev); if (IS_ERR(src_dev_info)) { pr_err("src_dev:%s:pass invalid" "dev_id\n", __func__); rc = PTR_ERR(src_dev_info); return rc; } if ((dst_dev_info->opened) && (src_dev_info->opened)) { pr_debug("%d: Enable afe_loopback\n", __LINE__); afe_ext_loopback(LOOPBACK_ENABLE, dst_dev_info->copp_id, src_dev_info->copp_id); loopback_status = 1; } } } } else { if 
(dev_info->opened) { broadcast_event(AUDDEV_EVT_REL_PENDING, route_cfg.dev_id, SESSION_IGNORE); rc = dev_info->dev_ops.close(dev_info); if (rc < 0) { MM_ERR("Snd device failed close!\n"); return rc; } else { dev_info->opened = 0; broadcast_event(AUDDEV_EVT_DEV_RLS, route_cfg.dev_id, SESSION_IGNORE); } if (loopback_status == 1) { if ((route_cfg.dev_id == src_dev) || (route_cfg.dev_id == dst_dev)) { dst_dev_info = audio_dev_ctrl_find_dev( dst_dev); if (IS_ERR(dst_dev_info)) { pr_err("dst_dev:%s:pass invalid" "dev_id\n", __func__); rc = PTR_ERR(dst_dev_info); return rc; } src_dev_info = audio_dev_ctrl_find_dev( src_dev); if (IS_ERR(src_dev_info)) { pr_err("dst_dev:%s:pass invalid" "dev_id\n", __func__); rc = PTR_ERR(src_dev_info); return rc; } pr_debug("%d: Disable afe_loopback\n", __LINE__); afe_ext_loopback(LOOPBACK_DISABLE, dst_dev_info->copp_id, src_dev_info->copp_id); loopback_status = 0; } } } } return rc; } static int msm_device_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int rc = 0; struct msm_audio_route_config route_cfg; struct msm_snddev_info *dev_info; route_cfg.dev_id = ucontrol->id.numid - device_index; dev_info = audio_dev_ctrl_find_dev(route_cfg.dev_id); if (IS_ERR(dev_info)) { MM_ERR("pass invalid dev_id\n"); rc = PTR_ERR(dev_info); return rc; } ucontrol->value.integer.value[0] = dev_info->copp_id; ucontrol->value.integer.value[1] = dev_info->capability; return 0; } static int msm_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 3; /* Device */ uinfo->value.integer.min = 0; uinfo->value.integer.max = msm_snddev_devcount(); return 0; } static int msm_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; /* TODO: query Device list */ return 0; } static int msm_route_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int rc = 0; int enc_freq = 0; int 
requested_freq = 0; struct msm_audio_route_config route_cfg; struct msm_snddev_info *dev_info; int session_id = ucontrol->value.integer.value[0]; int set = ucontrol->value.integer.value[2]; u32 session_mask = 0; route_cfg.dev_id = ucontrol->value.integer.value[1]; if (ucontrol->id.numid == 2) route_cfg.stream_type = AUDIO_ROUTE_STREAM_PLAYBACK; else route_cfg.stream_type = AUDIO_ROUTE_STREAM_REC; MM_DBG("route cfg %d %d type for popp %d set value %d\n", route_cfg.dev_id, route_cfg.stream_type, session_id, set); dev_info = audio_dev_ctrl_find_dev(route_cfg.dev_id); if (IS_ERR(dev_info)) { MM_ERR("pass invalid dev_id\n"); rc = PTR_ERR(dev_info); return rc; } if (route_cfg.stream_type == AUDIO_ROUTE_STREAM_PLAYBACK) { rc = msm_snddev_set_dec(session_id, dev_info->copp_id, set); session_mask = (0x1 << (session_id) << (8 * ((int)AUDDEV_CLNT_DEC-1))); if (!set) { if (dev_info->opened) { broadcast_event(AUDDEV_EVT_REL_PENDING, route_cfg.dev_id, session_mask); broadcast_event(AUDDEV_EVT_DEV_RLS, route_cfg.dev_id, session_mask); } dev_info->sessions &= ~(session_mask); } else { dev_info->sessions = dev_info->sessions | session_mask; if (dev_info->opened) { broadcast_event(AUDDEV_EVT_DEV_RDY, route_cfg.dev_id, session_mask); /* Event to notify client for device info */ broadcast_event(AUDDEV_EVT_DEVICE_INFO, route_cfg.dev_id, session_mask); } } } else { rc = msm_snddev_set_enc(session_id, dev_info->copp_id, set); session_mask = (0x1 << (session_id)) << (8 * ((int)AUDDEV_CLNT_ENC-1)); if (!set) { if (dev_info->opened) broadcast_event(AUDDEV_EVT_DEV_RLS, route_cfg.dev_id, session_mask); dev_info->sessions &= ~(session_mask); } else { dev_info->sessions = dev_info->sessions | session_mask; enc_freq = msm_snddev_get_enc_freq(session_id); requested_freq = enc_freq; if (enc_freq > 0) { rc = msm_snddev_request_freq(&enc_freq, session_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("sample rate configured %d" "sample rate requested %d\n", enc_freq, requested_freq); if ((rc <= 0) || 
(enc_freq != requested_freq)) { MM_DBG("msm_snddev_withdraw_freq\n"); rc = msm_snddev_withdraw_freq (session_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); broadcast_event(AUDDEV_EVT_FREQ_CHG, route_cfg.dev_id, SESSION_IGNORE); } } if (dev_info->opened) { broadcast_event(AUDDEV_EVT_DEV_RDY, route_cfg.dev_id, session_mask); /* Event to notify client for device info */ broadcast_event(AUDDEV_EVT_DEVICE_INFO, route_cfg.dev_id, session_mask); } } } if (rc < 0) { MM_ERR("device could not be assigned!\n"); return -EFAULT; } return rc; } static int msm_device_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int msm_device_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct msm_snddev_info *dev_info; int dev_id = ucontrol->value.integer.value[0]; dev_info = audio_dev_ctrl_find_dev(dev_id); ucontrol->value.integer.value[0] = dev_info->dev_volume; return 0; } static int msm_device_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int rc = -EPERM; struct msm_snddev_info *dev_info; int dev_id = ucontrol->value.integer.value[0]; int volume = ucontrol->value.integer.value[1]; MM_DBG("dev_id = %d, volume = %d\n", dev_id, volume); dev_info = audio_dev_ctrl_find_dev(dev_id); if (IS_ERR(dev_info)) { rc = PTR_ERR(dev_info); MM_ERR("audio_dev_ctrl_find_dev failed. 
%ld\n", PTR_ERR(dev_info)); return rc; } MM_DBG("dev_name = %s dev_id = %d, volume = %d\n", dev_info->name, dev_id, volume); if (dev_info->dev_ops.set_device_volume) rc = dev_info->dev_ops.set_device_volume(dev_info, volume); else { MM_INFO("device %s does not support device volume " "control.", dev_info->name); return -EPERM; } return rc; } static int msm_reset_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 0; return 0; } static int msm_reset_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int msm_reset_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { MM_DBG("Resetting all devices\n"); return msm_reset_all_device(); } static int msm_dual_mic_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /*Max value is decided based on MAX ENC sessions*/ uinfo->value.integer.max = MAX_AUDREC_SESSIONS - 1; return 0; } static int msm_dual_mic_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int enc_session_id = ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = msm_get_dual_mic_config(enc_session_id); MM_DBG("session id = %d, config = %ld\n", enc_session_id, ucontrol->value.integer.value[1]); return 0; } static int msm_dual_mic_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int enc_session_id = ucontrol->value.integer.value[0]; int dual_mic_config = ucontrol->value.integer.value[1]; MM_DBG("session id = %d, config = %d\n", enc_session_id, dual_mic_config); return msm_set_dual_mic_config(enc_session_id, dual_mic_config); } static int msm_device_mute_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = 
SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = msm_snddev_devcount();
	return 0;
}
/* Get callback for "Device_Mute": write-only control, nothing to report. */
static int msm_device_mute_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	return 0;
}
/*
 * Put callback for "Device_Mute": mutes/unmutes a TX device by driving its
 * AFE path volume (0 = muted, 0x4000 = unity). RX devices are rejected.
 */
static int msm_device_mute_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	int dev_id = ucontrol->value.integer.value[0];
	int mute = ucontrol->value.integer.value[1];
	struct msm_snddev_info *dev_info;
	int afe_dev_id = 0;
	int volume = 0x4000;

	dev_info = audio_dev_ctrl_find_dev(dev_id);
	if (IS_ERR(dev_info)) {
		MM_ERR("pass invalid dev_id %d\n", dev_id);
		return PTR_ERR(dev_info);
	}
	/* Only capture (TX) devices can be muted through this control. */
	if (dev_info->capability & SNDDEV_CAP_RX)
		return -EPERM;

	MM_DBG("Muting device id %d(%s)\n", dev_id, dev_info->name);

	/* Map the COPP id to the matching AFE hardware TX path. */
	if (dev_info->copp_id == 0)
		afe_dev_id = AFE_HW_PATH_CODEC_TX;
	if (dev_info->copp_id == 1)
		afe_dev_id = AFE_HW_PATH_AUXPCM_TX;
	if (dev_info->copp_id == 2)
		afe_dev_id = AFE_HW_PATH_MI2S_TX;
	if (mute)
		volume = 0;
	afe_device_volume_ctrl(afe_dev_id, volume);

	return 0;
}
/*
 * Info callback for "Sound Device Loopback": three integers
 * (RX device id, TX device id, enable flag).
 */
static int msm_loopback_info(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 3;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = msm_snddev_devcount();
	return 0;
}
/* Get callback: loopback state is not read back, always returns 0. */
static int msm_loopback_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = 0;
	return 0;
}
/*
 * Put callback: enables/disables an AFE external loopback from a TX
 * (source) device to an RX (destination) device.
 */
static int msm_loopback_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	int rc = 0;
	struct msm_snddev_info *src_dev_info = NULL; /* TX device */
	struct msm_snddev_info *dst_dev_info = NULL; /* RX device */
	int dst_dev_id = ucontrol->value.integer.value[0];
	int src_dev_id = ucontrol->value.integer.value[1];
	int set = ucontrol->value.integer.value[2];

	pr_debug("%s: set=%d\n", __func__, set);

	dst_dev_info = audio_dev_ctrl_find_dev(dst_dev_id);
	if (IS_ERR(dst_dev_info)) {
		pr_err("dst_dev:%s:pass invalid dev_id\n", __func__);
		rc = 
PTR_ERR(dst_dev_info);
		return rc;
	}
	/* Destination must be a playback (RX) device. */
	if (!(dst_dev_info->capability & SNDDEV_CAP_RX)) {
		pr_err("Destination device %d is not RX device\n",
			dst_dev_id);
		return -EFAULT;
	}

	src_dev_info = audio_dev_ctrl_find_dev(src_dev_id);
	if (IS_ERR(src_dev_info)) {
		pr_err("src_dev:%s:pass invalid dev_id\n", __func__);
		rc = PTR_ERR(src_dev_info);
		return rc;
	}
	/* Source must be a capture (TX) device. */
	if (!(src_dev_info->capability & SNDDEV_CAP_TX)) {
		pr_err("Source device %d is not TX device\n", src_dev_id);
		return -EFAULT;
	}

	if (set) {
		pr_debug("%s:%d:Enabling AFE_Loopback\n",
			__func__, __LINE__);
		/* Remember the pair so the loopback can be re-armed when
		 * both devices are open. */
		src_dev = src_dev_id;
		dst_dev = dst_dev_id;
		loopback_status = 1;
		if ((dst_dev_info->opened) && (src_dev_info->opened))
			afe_ext_loopback(LOOPBACK_ENABLE,
					dst_dev_info->copp_id,
					src_dev_info->copp_id);
	} else {
		pr_debug("%s:%d:Disabling AFE_Loopback\n",
			__func__, __LINE__);
		src_dev = DEVICE_IGNORE;
		dst_dev = DEVICE_IGNORE;
		loopback_status = 0;
		afe_ext_loopback(LOOPBACK_DISABLE,
				dst_dev_info->copp_id,
				src_dev_info->copp_id);
	}
	return 0;
}
/* One per-device kcontrol slot, filled in by snd_dev_ctl_index(). */
static struct snd_kcontrol_new snd_dev_controls[AUDIO_DEV_CTL_MAX_DEV];

/*
 * Populates snd_dev_controls[idx] with a mixer control named after device
 * idx, wired to the common msm_device_info/get/put callbacks.
 * Returns 0 on success or PTR_ERR for an invalid device index.
 */
static int snd_dev_ctl_index(int idx)
{
	struct msm_snddev_info *dev_info;

	dev_info = audio_dev_ctrl_find_dev(idx);
	if (IS_ERR(dev_info)) {
		MM_ERR("pass invalid dev_id\n");
		return PTR_ERR(dev_info);
	}
	/* NOTE(review): sizeof(dev_info->name) looks like the size of a
	 * pointer/array field, not strlen of the name — confirm the intent
	 * of this 44-byte guard against the snddev_name row width. */
	if (sizeof(dev_info->name) <= 44)
		sprintf(&snddev_name[idx][0] , "%s", dev_info->name);

	snd_dev_controls[idx].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	snd_dev_controls[idx].access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
	snd_dev_controls[idx].name = &snddev_name[idx][0];
	snd_dev_controls[idx].index = idx;
	snd_dev_controls[idx].info = msm_device_info;
	snd_dev_controls[idx].get = msm_device_get;
	snd_dev_controls[idx].put = msm_device_put;
	snd_dev_controls[idx].private_value = 0;
	return 0;
}
/* Shorthand initializer for a read/write MIXER-iface kcontrol. */
#define MSM_EXT(xname, fp_info, fp_get, fp_put, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
  .name = xname, \
  .info = fp_info,\
  .get = fp_get, .put = fp_put, \
  .private_value = addr, \
}

static struct 
snd_kcontrol_new snd_msm_controls[] = {
	/* Fixed set of global mixer controls exposed by this driver. */
	MSM_EXT("Count", msm_scontrol_count_info, msm_scontrol_count_get, \
						NULL, 0),
	MSM_EXT("Stream", msm_route_info, msm_route_get, \
						msm_route_put, 0),
	MSM_EXT("Record", msm_route_info, msm_route_get, \
						msm_route_put, 0),
	MSM_EXT("Voice", msm_voice_info, msm_voice_get, \
						msm_voice_put, 0),
	MSM_EXT("Volume", msm_volume_info, msm_volume_get, \
						msm_volume_put, 0),
	MSM_EXT("VoiceVolume", msm_v_volume_info, msm_v_volume_get, \
						msm_v_volume_put, 0),
	MSM_EXT("VoiceMute", msm_v_mute_info, msm_v_mute_get, \
						msm_v_mute_put, 0),
	MSM_EXT("Voice Call", msm_v_call_info, msm_v_call_get, \
						msm_v_call_put, 0),
	MSM_EXT("Device_Volume", msm_device_volume_info,
			msm_device_volume_get, msm_device_volume_put, 0),
	MSM_EXT("Reset", msm_reset_info,
			msm_reset_get, msm_reset_put, 0),
	MSM_EXT("DualMic Switch", msm_dual_mic_info,
			msm_dual_mic_get, msm_dual_mic_put, 0),
	MSM_EXT("Device_Mute", msm_device_mute_info,
			msm_device_mute_get, msm_device_mute_put, 0),
	MSM_EXT("Sound Device Loopback", msm_loopback_info,
			msm_loopback_get, msm_loopback_put, 0),
};

/*
 * Registers the fixed controls above plus one per-device control with the
 * sound card. Called once from the DAI init hook.
 */
static int msm_new_mixer(struct snd_soc_codec *codec)
{
	unsigned int idx;
	int err;
	int dev_cnt;

	strcpy(codec->card->snd_card->mixername, "MSM Mixer");
	for (idx = 0; idx < ARRAY_SIZE(snd_msm_controls); idx++) {
		err = snd_ctl_add(codec->card->snd_card,
				snd_ctl_new1(&snd_msm_controls[idx], NULL));
		if (err < 0)
			MM_ERR("ERR adding ctl\n");
	}
	/* NOTE(review): idx is unsigned while dev_cnt is int; harmless as
	 * long as msm_snddev_devcount() is non-negative. */
	dev_cnt = msm_snddev_devcount();

	for (idx = 0; idx < dev_cnt; idx++) {
		if (!snd_dev_ctl_index(idx)) {
			err = snd_ctl_add(codec->card->snd_card,
				snd_ctl_new1(&snd_dev_controls[idx], NULL));
			if (err < 0)
				MM_ERR("ERR adding ctl\n");
		} else
			/* A failed index lookup aborts the remaining
			 * per-device controls (original behavior kept). */
			return 0;
	}
	simple_control = ARRAY_SIZE(snd_msm_controls);
	device_index = simple_control + 1;
	return 0;
}

/* DAI-link init hook: creates the mixer and initializes shared locks. */
static int msm_soc_dai_init(
	struct snd_soc_pcm_runtime *rtd)
{
	int ret = 0;
	struct snd_soc_codec *codec = rtd->codec;

	ret = msm_new_mixer(codec);
	if (ret < 0)
		MM_ERR("msm_soc: ALSA MSM Mixer Fail\n");

	mutex_init(&the_locks.lock);
mutex_init(&the_locks.write_lock);
	mutex_init(&the_locks.read_lock);
	spin_lock_init(&the_locks.read_dsp_lock);
	spin_lock_init(&the_locks.write_dsp_lock);
	spin_lock_init(&the_locks.mixer_lock);
	init_waitqueue_head(&the_locks.enable_wait);
	init_waitqueue_head(&the_locks.eos_wait);
	init_waitqueue_head(&the_locks.write_wait);
	init_waitqueue_head(&the_locks.read_wait);
	/* No loopback pair selected until userspace configures one. */
	src_dev = DEVICE_IGNORE;
	dst_dev = DEVICE_IGNORE;

	return ret;
}

/* DAI links: primary I2S always, VoIP (MVS) link only when configured. */
static struct snd_soc_dai_link msm_dai[] = {
	{
		.name = "MSM Primary I2S",
		.stream_name = "DSP 1",
		.cpu_dai_name = "msm-cpu-dai.0",
		.platform_name = "msm-dsp-audio.0",
		.codec_name = "msm-codec-dai.0",
		.codec_dai_name = "msm-codec-dai",
		.init = &msm_soc_dai_init,
	},
#ifdef CONFIG_SND_MVS_SOC
	{
		.name = "MSM Primary Voip",
		.stream_name = "MVS",
		.cpu_dai_name = "mvs-cpu-dai.0",
		.platform_name = "msm-mvs-audio.0",
		.codec_name = "mvs-codec-dai.0",
		.codec_dai_name = "mvs-codec-dai",
	},
#endif
};

static struct snd_soc_card snd_soc_card_msm = {
	.name = "msm-audio",
	.dai_link = msm_dai,
	.num_links = ARRAY_SIZE(msm_dai),
};

/* Module entry: registers the "soc-audio" platform device for this card. */
static int __init msm_audio_init(void)
{
	int ret;

	msm_audio_snd_device = platform_device_alloc("soc-audio", -1);
	if (!msm_audio_snd_device)
		return -ENOMEM;

	platform_set_drvdata(msm_audio_snd_device, &snd_soc_card_msm);
	ret = platform_device_add(msm_audio_snd_device);
	if (ret) {
		/* Drop the reference taken by platform_device_alloc(). */
		platform_device_put(msm_audio_snd_device);
		return ret;
	}

	return ret;
}

/* Module exit: unregister (and thereby free) the platform device. */
static void __exit msm_audio_exit(void)
{
	platform_device_unregister(msm_audio_snd_device);
}

module_init(msm_audio_init);
module_exit(msm_audio_exit);

MODULE_DESCRIPTION("PCM module");
MODULE_LICENSE("GPL v2");
gpl-2.0
DirtyUnicorns/android_kernel_motorola_msm8226
drivers/input/touchscreen/atmel_maxtouch.c
3776
60619
/* * Atmel maXTouch Touchscreen Controller Driver * * * Copyright (C) 2010 Atmel Corporation * Copyright (C) 2010 Ulf Samuelsson (ulf@atmel.com) * Copyright (C) 2009 Raphael Derosso Pereira <raphaelpereira@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * * Driver for Atmel maXTouch family of touch controllers. * */ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/debugfs.h> #include <linux/cdev.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <asm/uaccess.h> #include <linux/atmel_maxtouch.h> #if defined(CONFIG_HAS_EARLYSUSPEND) #include <linux/earlysuspend.h> /* Early-suspend level */ #define MXT_SUSPEND_LEVEL 1 #endif #define DRIVER_VERSION "0.91a_mod" static int debug = DEBUG_INFO; static int comms = 0; module_param(debug, int, 0644); module_param(comms, int, 0644); MODULE_PARM_DESC(debug, "Activate debugging output"); MODULE_PARM_DESC(comms, "Select communications mode"); #define T7_DATA_SIZE 3 /* Device Info descriptor */ /* Parsed from maXTouch "Id information" inside device */ struct mxt_device_info { u8 family_id; u8 variant_id; u8 major; u8 minor; u8 build; u8 num_objs; u8 x_size; u8 y_size; char family_name[16]; /* Family name */ char variant_name[16]; /* Variant name */ u16 num_nodes; /* Number of sensor nodes */ }; /* object descriptor table, parsed 
from maXTouch "object table" */ struct mxt_object { u16 chip_addr; u8 type; u8 size; u8 instances; u8 num_report_ids; }; /* Mapping from report id to object type and instance */ struct report_id_map { u8 object; u8 instance; /* * This is the first report ID belonging to object. It enables us to * find out easily the touch number: each touch has different report * ID (which are assigned to touches in increasing order). By * subtracting the first report ID from current, we get the touch * number. */ u8 first_rid; }; /* Driver datastructure */ struct mxt_data { struct i2c_client *client; struct input_dev *input; char phys_name[32]; int irq; u16 last_read_addr; bool new_msgs; u8 *last_message; int valid_irq_counter; int invalid_irq_counter; int irq_counter; int message_counter; int read_fail_counter; int bytes_to_read; struct delayed_work dwork; u8 xpos_format; u8 ypos_format; u8 numtouch; struct mxt_device_info device_info; u32 info_block_crc; u32 configuration_crc; u16 report_id_count; struct report_id_map *rid_map; struct mxt_object *object_table; u16 msg_proc_addr; u8 message_size; u16 min_x_val; u16 min_y_val; u16 max_x_val; u16 max_y_val; int (*init_hw)(struct i2c_client *client); int (*exit_hw)(struct i2c_client *client); int (*power_on)(bool on); u8 (*valid_interrupt)(void); u8 (*read_chg)(void); /* debugfs variables */ struct dentry *debug_dir; int current_debug_datap; struct mutex debug_mutex; u16 *debug_data; /* Character device variables */ struct cdev cdev; struct cdev cdev_messages; /* 2nd Char dev for messages */ dev_t dev_num; struct class *mxt_class; u16 address_pointer; bool valid_ap; /* Message buffer & pointers */ char *messages; int msg_buffer_startp, msg_buffer_endp; /* Put only non-touch messages to buffer if this is set */ char nontouch_msg_only; struct mutex msg_mutex; #if defined(CONFIG_HAS_EARLYSUSPEND) struct early_suspend early_suspend; #endif u8 t7_data[T7_DATA_SIZE]; bool is_suspended; }; /*default value, enough to read versioning*/ 
#define CONFIG_DATA_SIZE 6 static u16 t38_size = CONFIG_DATA_SIZE; static int mxt_read_block(struct i2c_client *client, u16 addr, u16 length, u8 *value); static int mxt_write_byte(struct i2c_client *client, u16 addr, u8 value); static int mxt_write_block(struct i2c_client *client, u16 addr, u16 length, u8 *value); static u8 mxt_valid_interrupt_dummy(void) { return 1; } #define I2C_RETRY_COUNT 5 #define I2C_PAYLOAD_SIZE 254 /* Returns the start address of object in mXT memory. */ #define MXT_BASE_ADDR(object_type, mxt) \ get_object_address(object_type, 0, mxt->object_table, \ mxt->device_info.num_objs) /* Maps a report ID to an object type (object type number). */ #define REPORT_ID_TO_OBJECT(rid, mxt) \ (((rid) == 0xff) ? 0 : mxt->rid_map[rid].object) /* Maps a report ID to an object type (string). */ #define REPORT_ID_TO_OBJECT_NAME(rid, mxt) \ object_type_name[REPORT_ID_TO_OBJECT(rid, mxt)] /* Returns non-zero if given object is a touch object */ #define IS_TOUCH_OBJECT(object) \ ((object == MXT_TOUCH_MULTITOUCHSCREEN_T9) || \ (object == MXT_TOUCH_KEYARRAY_T15) || \ (object == MXT_TOUCH_PROXIMITY_T23) || \ (object == MXT_TOUCH_SINGLETOUCHSCREEN_T10) || \ (object == MXT_TOUCH_XSLIDER_T11) || \ (object == MXT_TOUCH_YSLIDER_T12) || \ (object == MXT_TOUCH_XWHEEL_T13) || \ (object == MXT_TOUCH_YWHEEL_T14) || \ (object == MXT_TOUCH_KEYSET_T31) || \ (object == MXT_TOUCH_XSLIDERSET_T32) ? 1 : 0) #define mxt_debug(level, ...) \ do { \ if (debug >= (level)) \ pr_debug(__VA_ARGS__); \ } while (0) /* * Check whether we have multi-touch enabled kernel; if not, report just the * first touch (on mXT224, the maximum is 10 simultaneous touches). * Because just the 1st one is reported, it might seem that the screen is not * responding to touch if the first touch is removed while the screen is being * touched by another finger, so beware. 
* */ #ifdef ABS_MT_TRACKING_ID static inline void report_mt(int touch_number, int size, int x, int y, struct mxt_data *mxt) { input_report_abs(mxt->input, ABS_MT_TRACKING_ID, touch_number); input_report_abs(mxt->input, ABS_MT_TOUCH_MAJOR, size); input_report_abs(mxt->input, ABS_MT_POSITION_X, x); input_report_abs(mxt->input, ABS_MT_POSITION_Y, y); input_mt_sync(mxt->input); } #else static inline void report_mt(int touch_number, int size, int x, int y, struct mxt_data *mxt) { if (touch_number == 0) { input_report_abs(mxt->input, ABS_TOOL_WIDTH, size); input_report_abs(mxt->input, ABS_X, x); input_report_abs(mxt->input, ABS_Y, y); } } #endif static inline void report_gesture(int data, struct mxt_data *mxt) { input_event(mxt->input, EV_MSC, MSC_GESTURE, data); } static const u8 *object_type_name[] = { [0] = "Reserved", [5] = "GEN_MESSAGEPROCESSOR_T5", [6] = "GEN_COMMANDPROCESSOR_T6", [7] = "GEN_POWERCONFIG_T7", [8] = "GEN_ACQUIRECONFIG_T8", [9] = "TOUCH_MULTITOUCHSCREEN_T9", [15] = "TOUCH_KEYARRAY_T15", [17] = "SPT_COMMSCONFIG_T18", [19] = "SPT_GPIOPWM_T19", [20] = "PROCI_GRIPFACESUPPRESSION_T20", [22] = "PROCG_NOISESUPPRESSION_T22", [23] = "TOUCH_PROXIMITY_T23", [24] = "PROCI_ONETOUCHGESTUREPROCESSOR_T24", [25] = "SPT_SELFTEST_T25", [27] = "PROCI_TWOTOUCHGESTUREPROCESSOR_T27", [28] = "SPT_CTECONFIG_T28", [37] = "DEBUG_DIAGNOSTICS_T37", [38] = "SPT_USER_DATA_T38", [40] = "PROCI_GRIPSUPPRESSION_T40", [41] = "PROCI_PALMSUPPRESSION_T41", [42] = "PROCI_FACESUPPRESSION_T42", [43] = "SPT_DIGITIZER_T43", [44] = "SPT_MESSAGECOUNT_T44", }; static u16 get_object_address(uint8_t object_type, uint8_t instance, struct mxt_object *object_table, int max_objs); int mxt_write_ap(struct mxt_data *mxt, u16 ap); static int mxt_read_block_wo_addr(struct i2c_client *client, u16 length, u8 *value); ssize_t debug_data_read(struct mxt_data *mxt, char *buf, size_t count, loff_t *ppos, u8 debug_command){ int i; u16 *data; u16 diagnostics_reg; int offset = 0; int size; int read_size; int error; 
char *buf_start; u16 debug_data_addr; u16 page_address; u8 page; u8 debug_command_reg; data = mxt->debug_data; if (data == NULL) return -EIO; /* If first read after open, read all data to buffer. */ if (mxt->current_debug_datap == 0){ diagnostics_reg = MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) + MXT_ADR_T6_DIAGNOSTIC; if (count > (mxt->device_info.num_nodes * 2)) count = mxt->device_info.num_nodes; debug_data_addr = MXT_BASE_ADDR(MXT_DEBUG_DIAGNOSTIC_T37, mxt)+ MXT_ADR_T37_DATA; page_address = MXT_BASE_ADDR(MXT_DEBUG_DIAGNOSTIC_T37, mxt) + MXT_ADR_T37_PAGE; error = mxt_read_block(mxt->client, page_address, 1, &page); if (error < 0) return error; mxt_debug(DEBUG_TRACE, "debug data page = %d\n", page); while (page != 0) { error = mxt_write_byte(mxt->client, diagnostics_reg, MXT_CMD_T6_PAGE_DOWN); if (error < 0) return error; /* Wait for command to be handled; when it has, the register will be cleared. */ debug_command_reg = 1; while (debug_command_reg != 0) { error = mxt_read_block(mxt->client, diagnostics_reg, 1, &debug_command_reg); if (error < 0) return error; mxt_debug(DEBUG_TRACE, "Waiting for debug diag command " "to propagate...\n"); } error = mxt_read_block(mxt->client, page_address, 1, &page); if (error < 0) return error; mxt_debug(DEBUG_TRACE, "debug data page = %d\n", page); } /* * Lock mutex to prevent writing some unwanted data to debug * command register. User can still write through the char * device interface though. TODO: fix? */ mutex_lock(&mxt->debug_mutex); /* Configure Debug Diagnostics object to show deltas/refs */ error = mxt_write_byte(mxt->client, diagnostics_reg, debug_command); /* Wait for command to be handled; when it has, the * register will be cleared. 
*/ debug_command_reg = 1; while (debug_command_reg != 0) { error = mxt_read_block(mxt->client, diagnostics_reg, 1, &debug_command_reg); if (error < 0) return error; mxt_debug(DEBUG_TRACE, "Waiting for debug diag command " "to propagate...\n"); } if (error < 0) { printk (KERN_WARNING "Error writing to maXTouch device!\n"); return error; } size = mxt->device_info.num_nodes * sizeof(u16); while (size > 0) { read_size = size > 128 ? 128 : size; mxt_debug(DEBUG_TRACE, "Debug data read loop, reading %d bytes...\n", read_size); error = mxt_read_block(mxt->client, debug_data_addr, read_size, (u8 *) &data[offset]); if (error < 0) { printk(KERN_WARNING "Error reading debug data\n"); goto error; } offset += read_size/2; size -= read_size; /* Select next page */ error = mxt_write_byte(mxt->client, diagnostics_reg, MXT_CMD_T6_PAGE_UP); if (error < 0) { printk(KERN_WARNING "Error writing to maXTouch device!\n"); goto error; } } mutex_unlock(&mxt->debug_mutex); } buf_start = buf; i = mxt->current_debug_datap; while (((buf- buf_start) < (count - 6)) && (i < mxt->device_info.num_nodes)){ mxt->current_debug_datap++; if (debug_command == MXT_CMD_T6_REFERENCES_MODE) buf += sprintf(buf, "%d: %5d\n", i, (u16) le16_to_cpu(data[i])); else if (debug_command == MXT_CMD_T6_DELTAS_MODE) buf += sprintf(buf, "%d: %5d\n", i, (s16) le16_to_cpu(data[i])); i++; } return (buf - buf_start); error: mutex_unlock(&mxt->debug_mutex); return error; } ssize_t deltas_read(struct file *file, char *buf, size_t count, loff_t *ppos) { return debug_data_read(file->private_data, buf, count, ppos, MXT_CMD_T6_DELTAS_MODE); } ssize_t refs_read(struct file *file, char *buf, size_t count, loff_t *ppos) { return debug_data_read(file->private_data, buf, count, ppos, MXT_CMD_T6_REFERENCES_MODE); } int debug_data_open(struct inode *inode, struct file *file) { struct mxt_data *mxt; int i; mxt = inode->i_private; if (mxt == NULL) return -EIO; mxt->current_debug_datap = 0; mxt->debug_data = kmalloc(mxt->device_info.num_nodes 
* sizeof(u16), GFP_KERNEL); if (mxt->debug_data == NULL) return -ENOMEM; for (i = 0; i < mxt->device_info.num_nodes; i++) mxt->debug_data[i] = 7777; file->private_data = mxt; return 0; } int debug_data_release(struct inode *inode, struct file *file) { struct mxt_data *mxt; mxt = file->private_data; kfree(mxt->debug_data); return 0; } static struct file_operations delta_fops = { .owner = THIS_MODULE, .open = debug_data_open, .release = debug_data_release, .read = deltas_read, }; static struct file_operations refs_fops = { .owner = THIS_MODULE, .open = debug_data_open, .release = debug_data_release, .read = refs_read, }; int mxt_memory_open(struct inode *inode, struct file *file) { struct mxt_data *mxt; mxt = container_of(inode->i_cdev, struct mxt_data, cdev); if (mxt == NULL) return -EIO; file->private_data = mxt; return 0; } int mxt_message_open(struct inode *inode, struct file *file) { struct mxt_data *mxt; mxt = container_of(inode->i_cdev, struct mxt_data, cdev_messages); if (mxt == NULL) return -EIO; file->private_data = mxt; return 0; } ssize_t mxt_memory_read(struct file *file, char *buf, size_t count, loff_t *ppos) { int i; struct mxt_data *mxt; mxt = file->private_data; if (mxt->valid_ap){ mxt_debug(DEBUG_TRACE, "Reading %d bytes from current ap\n", (int) count); i = mxt_read_block_wo_addr(mxt->client, count, (u8 *) buf); } else { mxt_debug(DEBUG_TRACE, "Address pointer changed since set;" "writing AP (%d) before reading %d bytes", mxt->address_pointer, (int) count); i = mxt_read_block(mxt->client, mxt->address_pointer, count, buf); } return i; } ssize_t mxt_memory_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { int i; int whole_blocks; int last_block_size; struct mxt_data *mxt; u16 address; mxt = file->private_data; address = mxt->address_pointer; mxt_debug(DEBUG_TRACE, "mxt_memory_write entered\n"); whole_blocks = count / I2C_PAYLOAD_SIZE; last_block_size = count % I2C_PAYLOAD_SIZE; for (i = 0; i < whole_blocks; i++) { 
mxt_debug(DEBUG_TRACE, "About to write to %d...", address); mxt_write_block(mxt->client, address, I2C_PAYLOAD_SIZE, (u8 *) buf); address += I2C_PAYLOAD_SIZE; buf += I2C_PAYLOAD_SIZE; } mxt_write_block(mxt->client, address, last_block_size, (u8 *) buf); return count; } static long mxt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int retval; struct mxt_data *mxt; retval = 0; mxt = file->private_data; switch (cmd) { case MXT_SET_ADDRESS_IOCTL: retval = mxt_write_ap(mxt, (u16) arg); if (retval >= 0) { mxt->address_pointer = (u16) arg; mxt->valid_ap = 1; } break; case MXT_RESET_IOCTL: retval = mxt_write_byte(mxt->client, MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) + MXT_ADR_T6_RESET, 1); break; case MXT_CALIBRATE_IOCTL: retval = mxt_write_byte(mxt->client, MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) + MXT_ADR_T6_CALIBRATE, 1); break; case MXT_BACKUP_IOCTL: retval = mxt_write_byte(mxt->client, MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) + MXT_ADR_T6_BACKUPNV, MXT_CMD_T6_BACKUP); break; case MXT_NONTOUCH_MSG_IOCTL: mxt->nontouch_msg_only = 1; break; case MXT_ALL_MSG_IOCTL: mxt->nontouch_msg_only = 0; break; default: return -EIO; } return retval; } /* * Copies messages from buffer to user space. * * NOTE: if less than (mxt->message_size * 5 + 1) bytes requested, * this will return 0! 
* */ ssize_t mxt_message_read(struct file *file, char *buf, size_t count, loff_t *ppos) { int i; struct mxt_data *mxt; char *buf_start; mxt = file->private_data; if (mxt == NULL) return -EIO; buf_start = buf; mutex_lock(&mxt->msg_mutex); /* Copy messages until buffer empty, or 'count' bytes written */ while ((mxt->msg_buffer_startp != mxt->msg_buffer_endp) && ((buf - buf_start) < (count - (5 * mxt->message_size) - 1))){ for (i = 0; i < mxt->message_size; i++){ buf += sprintf(buf, "[%2X] ", *(mxt->messages + mxt->msg_buffer_endp * mxt->message_size + i)); } buf += sprintf(buf, "\n"); if (mxt->msg_buffer_endp < MXT_MESSAGE_BUFFER_SIZE) mxt->msg_buffer_endp++; else mxt->msg_buffer_endp = 0; } mutex_unlock(&mxt->msg_mutex); return (buf - buf_start); } static struct file_operations mxt_message_fops = { .owner = THIS_MODULE, .open = mxt_message_open, .read = mxt_message_read, }; static struct file_operations mxt_memory_fops = { .owner = THIS_MODULE, .open = mxt_memory_open, .read = mxt_memory_read, .write = mxt_memory_write, .unlocked_ioctl = mxt_ioctl, }; /* Writes the address pointer (to set up following reads). */ int mxt_write_ap(struct mxt_data *mxt, u16 ap) { struct i2c_client *client; __le16 le_ap = cpu_to_le16(ap); client = mxt->client; if (mxt != NULL) mxt->last_read_addr = -1; if (i2c_master_send(client, (u8 *) &le_ap, 2) == 2) { mxt_debug(DEBUG_TRACE, "Address pointer set to %d\n", ap); return 0; } else { mxt_debug(DEBUG_INFO, "Error writing address pointer!\n"); return -EIO; } } /* Calculates the 24-bit CRC sum. 
*/
/* One step of the 24-bit CRC over a little-endian byte pair. */
static u32 CRC_24(u32 crc, u8 byte1, u8 byte2)
{
	static const u32 crcpoly = 0x80001B;
	u32 result;
	u32 data_word;

	data_word = ((((u16) byte2) << 8u) | byte1);
	result = ((crc << 1u) ^ data_word);
	/* Reduce modulo the 24-bit polynomial when bit 24 is set. */
	if (result & 0x1000000)
		result ^= crcpoly;
	return result;
}

/* Returns object address in mXT chip, or zero if object is not found */
static u16 get_object_address(uint8_t object_type,
			      uint8_t instance,
			      struct mxt_object *object_table, int max_objs)
{
	uint8_t object_table_index = 0;
	uint8_t address_found = 0;
	uint16_t address = 0;
	struct mxt_object *obj;

	/* Linear scan of the object table for the first matching type. */
	while ((object_table_index < max_objs) && !address_found) {
		obj = &object_table[object_table_index];
		if (obj->type == object_type) {
			address_found = 1;
			/* Are there enough instances defined in the FW? */
			if (obj->instances >= instance) {
				/* Each instance occupies (size + 1) bytes. */
				address = obj->chip_addr +
					  (obj->size + 1) * instance;
			} else {
				return 0;
			}
		}
		object_table_index++;
	}
	return address;
}

/*
 * Reads a block of bytes from given address from mXT chip. If we are
 * reading from message window, and previous read was from message window,
 * there's no need to write the address pointer: the mXT chip will
 * automatically set the address pointer back to message window start. 
*/
static int mxt_read_block(struct i2c_client *client,
			  u16 addr, u16 length, u8 *value)
{
	struct i2c_adapter *adapter = client->adapter;
	struct i2c_msg msg[2];
	__le16 le_addr;
	struct mxt_data *mxt;

	mxt = i2c_get_clientdata(client);

	if (mxt != NULL) {
		/* Repeated read of the message window: skip the address
		 * write, the chip auto-rewinds to the window start. */
		if ((mxt->last_read_addr == addr) &&
		    (addr == mxt->msg_proc_addr)) {
			if (i2c_master_recv(client, value, length) == length)
				return length;
			else
				return -EIO;
		} else {
			mxt->last_read_addr = addr;
		}
	}

	mxt_debug(DEBUG_TRACE, "Writing address pointer & reading %d bytes "
		  "in on i2c transaction...\n", length);

	/* Combined transaction: write 16-bit address, then read the block. */
	le_addr = cpu_to_le16(addr);
	msg[0].addr = client->addr;
	msg[0].flags = 0x00;
	msg[0].len = 2;
	msg[0].buf = (u8 *) &le_addr;

	msg[1].addr = client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].len = length;
	msg[1].buf = (u8 *) value;
	if (i2c_transfer(adapter, msg, 2) == 2)
		return length;
	else
		return -EIO;
}

/* Reads a block of bytes from current address from mXT chip. */
static int mxt_read_block_wo_addr(struct i2c_client *client,
				  u16 length, u8 *value)
{
	if (i2c_master_recv(client, value, length) == length) {
		mxt_debug(DEBUG_TRACE, "I2C block read ok\n");
		return length;
	} else {
		mxt_debug(DEBUG_INFO, "I2C block read failed\n");
		return -EIO;
	}
}

/* Writes one byte to given address in mXT chip. */
static int mxt_write_byte(struct i2c_client *client, u16 addr, u8 value)
{
	/* Single i2c payload: 16-bit address followed by the data byte. */
	struct {
		__le16 le_addr;
		u8 data;
	} i2c_byte_transfer;

	struct mxt_data *mxt;

	mxt = i2c_get_clientdata(client);
	/* Any write moves the chip's address pointer; drop the cache. */
	if (mxt != NULL)
		mxt->last_read_addr = -1;

	i2c_byte_transfer.le_addr = cpu_to_le16(addr);
	i2c_byte_transfer.data = value;
	if (i2c_master_send(client, (u8 *) &i2c_byte_transfer, 3) == 3)
		return 0;
	else
		return -EIO;
}

/* Writes a block of bytes (max 256) to given address in mXT chip. 
*/
static int mxt_write_block(struct i2c_client *client,
			   u16 addr, u16 length, u8 *value)
{
	int i;
	/* Payload: 16-bit address followed by up to 256 data bytes. */
	struct {
		__le16 le_addr;
		u8 data[256];
	} i2c_block_transfer;

	struct mxt_data *mxt;

	mxt_debug(DEBUG_TRACE, "Writing %d bytes to %d...", length, addr);
	if (length > 256)
		return -EINVAL;
	mxt = i2c_get_clientdata(client);
	/* Any write moves the chip's address pointer; drop the cache. */
	if (mxt != NULL)
		mxt->last_read_addr = -1;
	for (i = 0; i < length; i++)
		i2c_block_transfer.data[i] = *value++;
	i2c_block_transfer.le_addr = cpu_to_le16(addr);
	i = i2c_master_send(client, (u8 *) &i2c_block_transfer, length + 2);
	if (i == (length + 2))
		return length;
	else
		return -EIO;
}

/* Calculates the CRC value for mXT infoblock. */
int calculate_infoblock_crc(u32 *crc_result, u8 *data, int crc_area_size)
{
	u32 crc = 0;
	int i;

	/* Feed the data two bytes at a time into the 24-bit CRC. */
	for (i = 0; i < (crc_area_size - 1); i = i + 2)
		crc = CRC_24(crc, *(data + i), *(data + i + 1));

	/* If uneven size, pad with zero */
	if (crc_area_size & 0x0001)
		crc = CRC_24(crc, *(data + i), 0);

	/* Return only 24 bits of CRC. */
	*crc_result = (crc & 0x00FFFFFF);
	return 0;
}

/* Processes a touchscreen message. */
void process_T9_message(u8 *message, struct mxt_data *mxt, int last_touch)
{
	struct input_dev *input;
	u8 status;
	u16 xpos = 0xFFFF;
	u16 ypos = 0xFFFF;
	u8 touch_size = 255;
	u8 touch_number;
	u8 amplitude;
	u8 report_id;
	/* Per-slot touch state, kept across calls (up to 10 fingers). */
	static int stored_size[10];
	static int stored_x[10];
	static int stored_y[10];
	int i;
	int active_touches = 0;
	/*
	 * If the 'last_touch' flag is set, we have received all the touch
	 * messages
	 * there are available in this cycle, so send the events for touches
	 * that are
	 * active. 
*/ if (last_touch){ /* TODO: For compatibility with single-touch systems, send ABS_X & * ABS_Y */ /* if (stored_size[0]){ input_report_abs(mxt->input, ABS_X, stored_x[0]); input_report_abs(mxt->input, ABS_Y, stored_y[0]); }*/ for (i = 0; i < 10; i++){ if (stored_size[i]){ active_touches++; input_report_abs(mxt->input, ABS_MT_TRACKING_ID, i); input_report_abs(mxt->input, ABS_MT_TOUCH_MAJOR, stored_size[i]); input_report_abs(mxt->input, ABS_MT_POSITION_X, stored_x[i]); input_report_abs(mxt->input, ABS_MT_POSITION_Y, stored_y[i]); input_mt_sync(mxt->input); } } input_report_key(mxt->input, BTN_TOUCH, !!active_touches); if (active_touches == 0) input_mt_sync(mxt->input); input_sync(mxt->input); }else{ input = mxt->input; status = message[MXT_MSG_T9_STATUS]; report_id = message[0]; if (status & MXT_MSGB_T9_SUPPRESS) { /* Touch has been suppressed by grip/face */ /* detection */ mxt_debug(DEBUG_TRACE, "SUPRESS"); } else { /* Put together the 10-/12-bit coordinate values. */ xpos = message[MXT_MSG_T9_XPOSMSB] * 16 + ((message[MXT_MSG_T9_XYPOSLSB] >> 4) & 0xF); ypos = message[MXT_MSG_T9_YPOSMSB] * 16 + ((message[MXT_MSG_T9_XYPOSLSB] >> 0) & 0xF); if (mxt->max_x_val < 1024) xpos >>= 2; if (mxt->max_y_val < 1024) ypos >>= 2; touch_number = message[MXT_MSG_REPORTID] - mxt->rid_map[report_id].first_rid; stored_x[touch_number] = xpos; stored_y[touch_number] = ypos; if (status & MXT_MSGB_T9_DETECT) { /* * mXT224 reports the number of touched nodes, * so the exact value for touch ellipse major * axis length in nodes would be 2*sqrt(touch_size/pi) * (assuming round touch shape), which would then need * to be scaled using information about how many sensor * lines we do have. So this is very much simplified, * but sufficient for most if not all apps? 
*/ touch_size = message[MXT_MSG_T9_TCHAREA]; touch_size = touch_size >> 2; if (!touch_size) touch_size = 1; /* * report_mt(touch_number, touch_size, xpos, ypos, mxt); */ stored_size[touch_number] = touch_size; if (status & MXT_MSGB_T9_AMP) /* Amplitude of touch has changed */ amplitude = message[MXT_MSG_T9_TCHAMPLITUDE]; } if (status & MXT_MSGB_T9_RELEASE) { /* The previously reported touch has been removed.*/ /* report_mt(touch_number, 0, xpos, ypos, mxt); */ stored_size[touch_number] = 0; } /* input_sync(input); */ } if (status & MXT_MSGB_T9_SUPPRESS) { mxt_debug(DEBUG_TRACE, "SUPRESS"); } else { if (status & MXT_MSGB_T9_DETECT) { mxt_debug(DEBUG_TRACE, "DETECT:%s%s%s%s", ((status & MXT_MSGB_T9_PRESS) ? " PRESS" : ""), ((status & MXT_MSGB_T9_MOVE) ? " MOVE" : ""), ((status & MXT_MSGB_T9_AMP) ? " AMP" : ""), ((status & MXT_MSGB_T9_VECTOR) ? " VECT" : "")); } else if (status & MXT_MSGB_T9_RELEASE) { mxt_debug(DEBUG_TRACE, "RELEASE"); } } mxt_debug(DEBUG_TRACE, "X=%d, Y=%d, TOUCHSIZE=%d", xpos, ypos, touch_size); } return; } int process_message(u8 *message, u8 object, struct mxt_data *mxt) { struct i2c_client *client; u8 status; u16 xpos = 0xFFFF; u16 ypos = 0xFFFF; u8 event; u8 direction; u16 distance; u8 length; u8 report_id; static u8 error_cond = 0; client = mxt->client; length = mxt->message_size; report_id = message[0]; if ((mxt->nontouch_msg_only == 0) || (!IS_TOUCH_OBJECT(object))){ mutex_lock(&mxt->msg_mutex); /* Copy the message to buffer */ if (mxt->msg_buffer_startp < MXT_MESSAGE_BUFFER_SIZE) { mxt->msg_buffer_startp++; } else { mxt->msg_buffer_startp = 0; } if (mxt->msg_buffer_startp == mxt->msg_buffer_endp) { mxt_debug(DEBUG_TRACE, "Message buf full, discarding last entry.\n"); if (mxt->msg_buffer_endp < MXT_MESSAGE_BUFFER_SIZE) { mxt->msg_buffer_endp++; } else { mxt->msg_buffer_endp = 0; } } memcpy((mxt->messages + mxt->msg_buffer_startp * length), message, length); mutex_unlock(&mxt->msg_mutex); } switch (object) { case MXT_GEN_COMMANDPROCESSOR_T6: 
status = message[1]; if (status & MXT_MSGB_T6_COMSERR) { if ((!error_cond) & MXT_MSGB_T6_COMSERR){ dev_err(&client->dev, "maXTouch checksum error\n"); error_cond |= MXT_MSGB_T6_COMSERR; } } if (status & MXT_MSGB_T6_CFGERR) { /* * Configuration error. A proper configuration * needs to be written to chip and backed up. */ if ((!error_cond) & MXT_MSGB_T6_CFGERR){ dev_err(&client->dev, "maXTouch configuration error\n"); error_cond |= MXT_MSGB_T6_CFGERR; } } if (status & MXT_MSGB_T6_CAL) { /* Calibration in action, no need to react */ dev_dbg(&client->dev, "maXTouch calibration in progress\n"); } if (status & MXT_MSGB_T6_SIGERR) { /* * Signal acquisition error, something is seriously * wrong, not much we can in the driver to correct * this */ if ((!error_cond) & MXT_MSGB_T6_SIGERR){ dev_err(&client->dev, "maXTouch acquisition error\n"); error_cond |= MXT_MSGB_T6_SIGERR; } } if (status & MXT_MSGB_T6_OFL) { /* * Cycle overflow, the acquisition interval is too * short. */ dev_err(&client->dev, "maXTouch cycle overflow\n"); } if (status & MXT_MSGB_T6_RESET) { /* Chip has reseted, no need to react. */ dev_dbg(&client->dev, "maXTouch chip reset\n"); } if (status == 0) { /* Chip status back to normal. 
*/ dev_dbg(&client->dev, "maXTouch status normal\n"); error_cond = 0; } break; case MXT_TOUCH_MULTITOUCHSCREEN_T9: process_T9_message(message, mxt, 0); break; case MXT_SPT_GPIOPWM_T19: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving GPIO message\n"); break; case MXT_PROCI_GRIPFACESUPPRESSION_T20: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving face suppression msg\n"); break; case MXT_PROCG_NOISESUPPRESSION_T22: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving noise suppression msg\n"); status = message[MXT_MSG_T22_STATUS]; if (status & MXT_MSGB_T22_FHCHG) { if (debug >= DEBUG_TRACE) dev_info(&client->dev, "maXTouch: Freq changed\n"); } if (status & MXT_MSGB_T22_GCAFERR) { if (debug >= DEBUG_TRACE) dev_info(&client->dev, "maXTouch: High noise " "level\n"); } if (status & MXT_MSGB_T22_FHERR) { if (debug >= DEBUG_TRACE) dev_info(&client->dev, "maXTouch: Freq changed - " "Noise level too high\n"); } break; case MXT_PROCI_ONETOUCHGESTUREPROCESSOR_T24: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving one-touch gesture msg\n"); event = message[MXT_MSG_T24_STATUS] & 0x0F; xpos = message[MXT_MSG_T24_XPOSMSB] * 16 + ((message[MXT_MSG_T24_XYPOSLSB] >> 4) & 0x0F); ypos = message[MXT_MSG_T24_YPOSMSB] * 16 + ((message[MXT_MSG_T24_XYPOSLSB] >> 0) & 0x0F); if (mxt->max_x_val < 1024) xpos >>= 2; if (mxt->max_y_val < 1024) ypos >>= 2; direction = message[MXT_MSG_T24_DIR]; distance = message[MXT_MSG_T24_DIST] + (message[MXT_MSG_T24_DIST + 1] << 16); report_gesture((event << 24) | (direction << 16) | distance, mxt); report_gesture((xpos << 16) | ypos, mxt); break; case MXT_SPT_SELFTEST_T25: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving Self-Test msg\n"); if (message[MXT_MSG_T25_STATUS] == MXT_MSGR_T25_OK) { if (debug >= DEBUG_TRACE) dev_info(&client->dev, "maXTouch: Self-Test OK\n"); } else { dev_err(&client->dev, "maXTouch: Self-Test Failed [%02x]:" "{%02x,%02x,%02x,%02x,%02x}\n", message[MXT_MSG_T25_STATUS], 
message[MXT_MSG_T25_STATUS + 0], message[MXT_MSG_T25_STATUS + 1], message[MXT_MSG_T25_STATUS + 2], message[MXT_MSG_T25_STATUS + 3], message[MXT_MSG_T25_STATUS + 4] ); } break; case MXT_PROCI_TWOTOUCHGESTUREPROCESSOR_T27: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving 2-touch gesture message\n"); event = message[MXT_MSG_T27_STATUS] & 0xF0; xpos = message[MXT_MSG_T27_XPOSMSB] * 16 + ((message[MXT_MSG_T27_XYPOSLSB] >> 4) & 0x0F); ypos = message[MXT_MSG_T27_YPOSMSB] * 16 + ((message[MXT_MSG_T27_XYPOSLSB] >> 0) & 0x0F); if (mxt->max_x_val < 1024) xpos >>= 2; if (mxt->max_y_val < 1024) ypos >>= 2; direction = message[MXT_MSG_T27_ANGLE]; distance = message[MXT_MSG_T27_SEPARATION] + (message[MXT_MSG_T27_SEPARATION + 1] << 16); report_gesture((event << 24) | (direction << 16) | distance, mxt); report_gesture((xpos << 16) | ypos, mxt); break; case MXT_SPT_CTECONFIG_T28: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Receiving CTE message...\n"); status = message[MXT_MSG_T28_STATUS]; if (status & MXT_MSGB_T28_CHKERR) dev_err(&client->dev, "maXTouch: Power-Up CRC failure\n"); break; default: if (debug >= DEBUG_TRACE) dev_info(&client->dev, "maXTouch: Unknown message!\n"); break; } return 0; } /* * Processes messages when the interrupt line (CHG) is asserted. Keeps * reading messages until a message with report ID 0xFF is received, * which indicates that there is no more new messages. 
* */ static void mxt_worker(struct work_struct *work) { struct mxt_data *mxt; struct i2c_client *client; u8 *message; u16 message_length; u16 message_addr; u8 report_id; u8 object; int error; int i; char *message_string; char *message_start; message = NULL; mxt = container_of(work, struct mxt_data, dwork.work); client = mxt->client; message_addr = mxt->msg_proc_addr; message_length = mxt->message_size; if (message_length < 256) { message = kmalloc(message_length, GFP_KERNEL); if (message == NULL) { dev_err(&client->dev, "Error allocating memory\n"); goto fail_worker; } } else { dev_err(&client->dev, "Message length larger than 256 bytes not supported\n"); goto fail_worker; } mxt_debug(DEBUG_TRACE, "maXTouch worker active:\n"); do { /* Read next message, reread on failure. */ /* TODO: message length, CRC included? */ mxt->message_counter++; for (i = 1; i < I2C_RETRY_COUNT; i++) { error = mxt_read_block(client, message_addr, message_length - 1, message); if (error >= 0) break; mxt->read_fail_counter++; dev_err(&client->dev, "Failure reading maxTouch device\n"); } if (error < 0) { kfree(message); goto fail_worker; } if (mxt->address_pointer != message_addr) mxt->valid_ap = 0; report_id = message[0]; if (debug >= DEBUG_RAW) { mxt_debug(DEBUG_RAW, "%s message [msg count: %08x]:", REPORT_ID_TO_OBJECT_NAME(report_id, mxt), mxt->message_counter ); /* 5 characters per one byte */ message_string = kmalloc(message_length * 5, GFP_KERNEL); if (message_string == NULL) { dev_err(&client->dev, "Error allocating memory\n"); kfree(message); goto fail_worker; } message_start = message_string; for (i = 0; i < message_length; i++) { message_string += sprintf(message_string, "0x%02X ", message[i]); } mxt_debug(DEBUG_RAW, "%s", message_start); kfree(message_start); } if ((report_id != MXT_END_OF_MESSAGES) && (report_id != 0)) { memcpy(mxt->last_message, message, message_length); mxt->new_msgs = 1; smp_wmb(); /* Get type of object and process the message */ object = 
mxt->rid_map[report_id].object; process_message(message, object, mxt); } mxt_debug(DEBUG_TRACE, "chgline: %d\n", mxt->read_chg()); } while (comms ? (mxt->read_chg() == 0) : ((report_id != MXT_END_OF_MESSAGES) && (report_id != 0))); /* All messages processed, send the events) */ process_T9_message(NULL, mxt, 1); kfree(message); fail_worker: /* Make sure we just didn't miss a interrupt. */ if (mxt->read_chg() == 0){ schedule_delayed_work(&mxt->dwork, 0); } else enable_irq(mxt->irq); } /* * The maXTouch device will signal the host about a new message by asserting * the CHG line. This ISR schedules a worker routine to read the message when * that happens. */ static irqreturn_t mxt_irq_handler(int irq, void *_mxt) { struct mxt_data *mxt = _mxt; mxt->irq_counter++; if (mxt->valid_interrupt()) { /* Send the signal only if falling edge generated the irq. */ disable_irq_nosync(mxt->irq); schedule_delayed_work(&mxt->dwork, 0); mxt->valid_irq_counter++; } else { mxt->invalid_irq_counter++; return IRQ_NONE; } return IRQ_HANDLED; } /******************************************************************************/ /* Initialization of driver */ /******************************************************************************/ static int __devinit mxt_identify(struct i2c_client *client, struct mxt_data *mxt, u8 *id_block_data) { u8 buf[MXT_ID_BLOCK_SIZE]; int error; int identified; identified = 0; /* Read Device info to check if chip is valid */ error = mxt_read_block(client, MXT_ADDR_INFO_BLOCK, MXT_ID_BLOCK_SIZE, (u8 *) buf); if (error < 0) { mxt->read_fail_counter++; dev_err(&client->dev, "Failure accessing maXTouch device\n"); return -EIO; } memcpy(id_block_data, buf, MXT_ID_BLOCK_SIZE); mxt->device_info.family_id = buf[0]; mxt->device_info.variant_id = buf[1]; mxt->device_info.major = ((buf[2] >> 4) & 0x0F); mxt->device_info.minor = (buf[2] & 0x0F); mxt->device_info.build = buf[3]; mxt->device_info.x_size = buf[4]; mxt->device_info.y_size = buf[5]; mxt->device_info.num_objs = 
buf[6]; mxt->device_info.num_nodes = mxt->device_info.x_size * mxt->device_info.y_size; /* * Check Family & Variant Info; warn if not recognized but * still continue. */ /* MXT224 */ if (mxt->device_info.family_id == MXT224_FAMILYID) { strcpy(mxt->device_info.family_name, "mXT224"); if (mxt->device_info.variant_id == MXT224_CAL_VARIANTID) { strcpy(mxt->device_info.variant_name, "Calibrated"); } else if (mxt->device_info.variant_id == MXT224_UNCAL_VARIANTID) { strcpy(mxt->device_info.variant_name, "Uncalibrated"); } else { dev_err(&client->dev, "Warning: maXTouch Variant ID [%d] not " "supported\n", mxt->device_info.variant_id); strcpy(mxt->device_info.variant_name, "UNKNOWN"); /* identified = -ENXIO; */ } /* MXT1386 */ } else if (mxt->device_info.family_id == MXT1386_FAMILYID) { strcpy(mxt->device_info.family_name, "mXT1386"); if (mxt->device_info.variant_id == MXT1386_CAL_VARIANTID) { strcpy(mxt->device_info.variant_name, "Calibrated"); } else { dev_err(&client->dev, "Warning: maXTouch Variant ID [%d] not " "supported\n", mxt->device_info.variant_id); strcpy(mxt->device_info.variant_name, "UNKNOWN"); /* identified = -ENXIO; */ } /* Unknown family ID! */ } else { dev_err(&client->dev, "Warning: maXTouch Family ID [%d] not supported\n", mxt->device_info.family_id); strcpy(mxt->device_info.family_name, "UNKNOWN"); strcpy(mxt->device_info.variant_name, "UNKNOWN"); /* identified = -ENXIO; */ } dev_info( &client->dev, "Atmel maXTouch (Family %s (%X), Variant %s (%X)) Firmware " "version [%d.%d] Build %d\n", mxt->device_info.family_name, mxt->device_info.family_id, mxt->device_info.variant_name, mxt->device_info.variant_id, mxt->device_info.major, mxt->device_info.minor, mxt->device_info.build ); dev_dbg( &client->dev, "Atmel maXTouch Configuration " "[X: %d] x [Y: %d]\n", mxt->device_info.x_size, mxt->device_info.y_size ); return identified; } /* * Reads the object table from maXTouch chip to get object data like * address, size, report id. 
For Info Block CRC calculation, already read
 * id data is passed to this function too (Info Block consists of the ID
 * block and object table).
 *
 */
static int __devinit mxt_read_object_table(struct i2c_client *client,
					   struct mxt_data *mxt,
					   u8 *raw_id_data)
{
	u16 report_id_count;
	u8 buf[MXT_OBJECT_TABLE_ELEMENT_SIZE];
	u8 *raw_ib_data;
	u8 object_type;
	u16 object_address;
	u16 object_size;
	u8 object_instances;
	u8 object_report_ids;
	u16 object_info_address;
	u32 crc;
	u32 calculated_crc;
	int i;
	int error;
	u8 object_instance;
	u8 object_report_id;
	u8 report_id;
	int first_report_id;
	int ib_pointer;
	struct mxt_object *object_table;

	mxt_debug(DEBUG_TRACE, "maXTouch driver reading configuration\n");

	object_table = kzalloc(sizeof(struct mxt_object) *
			       mxt->device_info.num_objs, GFP_KERNEL);
	if (object_table == NULL) {
		printk(KERN_WARNING "maXTouch: Memory allocation failed!\n");
		error = -ENOMEM;
		goto err_object_table_alloc;
	}

	raw_ib_data = kmalloc(MXT_OBJECT_TABLE_ELEMENT_SIZE *
			      mxt->device_info.num_objs + MXT_ID_BLOCK_SIZE,
			      GFP_KERNEL);
	if (raw_ib_data == NULL) {
		printk(KERN_WARNING "maXTouch: Memory allocation failed!\n");
		error = -ENOMEM;
		goto err_ib_alloc;
	}

	/* Copy the ID data for CRC calculation. */
	memcpy(raw_ib_data, raw_id_data, MXT_ID_BLOCK_SIZE);
	ib_pointer = MXT_ID_BLOCK_SIZE;

	mxt->object_table = object_table;

	mxt_debug(DEBUG_TRACE, "maXTouch driver Memory allocated\n");

	object_info_address = MXT_ADDR_OBJECT_TABLE;

	report_id_count = 0;
	for (i = 0; i < mxt->device_info.num_objs; i++) {
		mxt_debug(DEBUG_TRACE, "Reading maXTouch at [0x%04x]: ",
			  object_info_address);

		error = mxt_read_block(client, object_info_address,
				       MXT_OBJECT_TABLE_ELEMENT_SIZE, buf);
		if (error < 0) {
			mxt->read_fail_counter++;
			dev_err(&client->dev,
				"maXTouch Object %d could not be read\n", i);
			error = -EIO;
			goto err_object_read;
		}

		memcpy(raw_ib_data + ib_pointer, buf,
		       MXT_OBJECT_TABLE_ELEMENT_SIZE);
		ib_pointer += MXT_OBJECT_TABLE_ELEMENT_SIZE;

		object_type	  = buf[0];
		object_address	  = (buf[2] << 8) + buf[1];
		object_size	  = buf[3] + 1;
		object_instances  = buf[4] + 1;
		object_report_ids = buf[5];
		mxt_debug(DEBUG_TRACE, "Type=%03d, Address=0x%04x, "
			  "Size=0x%02x, %d instances, %d report id's\n",
			  object_type,
			  object_address,
			  object_size,
			  object_instances,
			  object_report_ids);

		/* T38 (user data object) size is needed later in probe. */
		if (object_type == 38)
			t38_size = object_size;
		/* TODO: check whether object is known and supported? */

		/* Save frequently needed info. */
		if (object_type == MXT_GEN_MESSAGEPROCESSOR_T5) {
			mxt->msg_proc_addr = object_address;
			mxt->message_size = object_size;
		}

		object_table[i].type	       = object_type;
		object_table[i].chip_addr      = object_address;
		object_table[i].size	       = object_size;
		object_table[i].instances      = object_instances;
		object_table[i].num_report_ids = object_report_ids;
		report_id_count += object_instances * object_report_ids;

		object_info_address += MXT_OBJECT_TABLE_ELEMENT_SIZE;
	}

	mxt->rid_map = kzalloc(sizeof(struct report_id_map) *
			       (report_id_count + 1),
			       /* allocate for report_id 0, even if not used */
			       GFP_KERNEL);
	if (mxt->rid_map == NULL) {
		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
		error = -ENOMEM;
		goto err_rid_map_alloc;
	}

	mxt->messages = kzalloc(mxt->message_size * MXT_MESSAGE_BUFFER_SIZE,
				GFP_KERNEL);
	if (mxt->messages == NULL) {
		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
		error = -ENOMEM;
		goto err_msg_alloc;
	}

	mxt->last_message = kzalloc(mxt->message_size, GFP_KERNEL);
	if (mxt->last_message == NULL) {
		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
		error = -ENOMEM;
		/*
		 * BUGFIX: this path previously jumped to err_msg_alloc,
		 * skipping the mxt->messages cleanup and leaking it.
		 */
		goto err_last_msg_alloc;
	}

	mxt->report_id_count = report_id_count;
	if (report_id_count > 254) {	/* 0 & 255 are reserved */
		dev_err(&client->dev,
			"Too many maXTouch report id's [%d]\n",
			report_id_count);
		error = -ENXIO;
		goto err_max_rid;
	}

	/* Create a mapping from report id to object type */
	report_id = 1;	/* Start from 1, 0 is reserved. */

	/* Create table associating report id's with objects & instances */
	for (i = 0; i < mxt->device_info.num_objs; i++) {
		for (object_instance = 0;
		     object_instance < object_table[i].instances;
		     object_instance++) {
			first_report_id = report_id;
			for (object_report_id = 0;
			     object_report_id <
				object_table[i].num_report_ids;
			     object_report_id++) {
				mxt->rid_map[report_id].object =
					object_table[i].type;
				mxt->rid_map[report_id].instance =
					object_instance;
				mxt->rid_map[report_id].first_rid =
					first_report_id;
				report_id++;
			}
		}
	}

	/* Read 3 byte CRC */
	error = mxt_read_block(client, object_info_address, 3, buf);
	if (error < 0) {
		mxt->read_fail_counter++;
		dev_err(&client->dev, "Error reading CRC\n");
		/*
		 * NOTE(review): on read failure buf is stale, so the CRC
		 * comparison below is meaningless; kept non-fatal to
		 * preserve existing behavior - confirm intent.
		 */
	}

	crc = (buf[2] << 16) | (buf[1] << 8) | buf[0];

	if (calculate_infoblock_crc(&calculated_crc, raw_ib_data,
				    ib_pointer)) {
		printk(KERN_WARNING "Error while calculating CRC!\n");
		calculated_crc = 0;
	}
	kfree(raw_ib_data);

	mxt_debug(DEBUG_TRACE, "\nReported info block CRC = 0x%6X\n", crc);
	mxt_debug(DEBUG_TRACE, "Calculated info block CRC = 0x%6X\n\n",
		  calculated_crc);

	if (crc == calculated_crc) {
		mxt->info_block_crc = crc;
	} else {
		mxt->info_block_crc = 0;
		printk(KERN_ALERT "maXTouch: Info block CRC invalid!\n");
	}

	if (debug >= DEBUG_VERBOSE) {
		dev_info(&client->dev, "maXTouch: %d Objects\n",
			 mxt->device_info.num_objs);

		for (i = 0; i < mxt->device_info.num_objs; i++) {
			dev_info(&client->dev, "Type:\t\t\t[%d]: %s\n",
				 object_table[i].type,
				 object_type_name[object_table[i].type]);
			dev_info(&client->dev, "\tAddress:\t0x%04X\n",
				 object_table[i].chip_addr);
			dev_info(&client->dev, "\tSize:\t\t%d Bytes\n",
				 object_table[i].size);
			dev_info(&client->dev, "\tInstances:\t%d\n",
				 object_table[i].instances);
			dev_info(&client->dev, "\tReport Id's:\t%d\n",
				 object_table[i].num_report_ids);
		}
	}

	return 0;

err_max_rid:
	kfree(mxt->last_message);
err_last_msg_alloc:
	/* BUGFIX: mxt->messages was leaked on the err_max_rid path. */
	kfree(mxt->messages);
err_msg_alloc:
	kfree(mxt->rid_map);
err_rid_map_alloc:
err_object_read:
	kfree(raw_ib_data);
err_ib_alloc:
	kfree(object_table);
err_object_table_alloc:
	return error;
}
#if defined(CONFIG_PM) static int mxt_suspend(struct device *dev) { struct mxt_data *mxt = dev_get_drvdata(dev); int error, i; u8 t7_deepsl_data[T7_DATA_SIZE]; u16 t7_addr; if (device_may_wakeup(dev)) { enable_irq_wake(mxt->irq); return 0; } disable_irq(mxt->irq); flush_delayed_work_sync(&mxt->dwork); for (i = 0; i < T7_DATA_SIZE; i++) t7_deepsl_data[i] = 0; t7_addr = MXT_BASE_ADDR(MXT_GEN_POWERCONFIG_T7, mxt); /* save current power state values */ error = mxt_read_block(mxt->client, t7_addr, ARRAY_SIZE(mxt->t7_data), mxt->t7_data); if (error < 0) goto err_enable_irq; /* configure deep sleep mode */ error = mxt_write_block(mxt->client, t7_addr, ARRAY_SIZE(t7_deepsl_data), t7_deepsl_data); if (error < 0) goto err_enable_irq; /* power off the device */ if (mxt->power_on) { error = mxt->power_on(false); if (error) { dev_err(dev, "power off failed"); goto err_write_block; } } mxt->is_suspended = true; return 0; err_write_block: mxt_write_block(mxt->client, t7_addr, ARRAY_SIZE(mxt->t7_data), mxt->t7_data); err_enable_irq: enable_irq(mxt->irq); return error; } static int mxt_resume(struct device *dev) { struct mxt_data *mxt = dev_get_drvdata(dev); int error; u16 t7_addr; if (device_may_wakeup(dev)) { disable_irq_wake(mxt->irq); return 0; } if (!mxt->is_suspended) return 0; /* power on the device */ if (mxt->power_on) { error = mxt->power_on(true); if (error) { dev_err(dev, "power on failed"); return error; } } t7_addr = MXT_BASE_ADDR(MXT_GEN_POWERCONFIG_T7, mxt); /* restore the old power state values */ error = mxt_write_block(mxt->client, t7_addr, ARRAY_SIZE(mxt->t7_data), mxt->t7_data); if (error < 0) goto err_write_block; /* Make sure we just didn't miss a interrupt. 
*/ if (mxt->read_chg() == 0) schedule_delayed_work(&mxt->dwork, 0); else enable_irq(mxt->irq); mxt->is_suspended = false; return 0; err_write_block: if (mxt->power_on) mxt->power_on(false); return error; } #if defined(CONFIG_HAS_EARLYSUSPEND) static void mxt_early_suspend(struct early_suspend *h) { struct mxt_data *mxt = container_of(h, struct mxt_data, early_suspend); mxt_suspend(&mxt->client->dev); } static void mxt_late_resume(struct early_suspend *h) { struct mxt_data *mxt = container_of(h, struct mxt_data, early_suspend); mxt_resume(&mxt->client->dev); } #endif static const struct dev_pm_ops mxt_pm_ops = { #ifndef CONFIG_HAS_EARLYSUSPEND .suspend = mxt_suspend, .resume = mxt_resume, #endif }; #endif static int __devinit mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mxt_data *mxt; struct maxtouch_platform_data *pdata; struct input_dev *input; u8 *id_data; u8 *t38_data; u16 t38_addr; int error; mxt_debug(DEBUG_INFO, "mXT224: mxt_probe\n"); if (client == NULL) { pr_debug("maXTouch: client == NULL\n"); return -EINVAL; } else if (client->adapter == NULL) { pr_debug("maXTouch: client->adapter == NULL\n"); return -EINVAL; } else if (&client->dev == NULL) { pr_debug("maXTouch: client->dev == NULL\n"); return -EINVAL; } else if (&client->adapter->dev == NULL) { pr_debug("maXTouch: client->adapter->dev == NULL\n"); return -EINVAL; } else if (id == NULL) { pr_debug("maXTouch: id == NULL\n"); return -EINVAL; } /* Enable runtime PM ops, start in ACTIVE mode */ error = pm_runtime_set_active(&client->dev); if (error < 0) dev_dbg(&client->dev, "unable to set runtime pm state\n"); pm_runtime_enable(&client->dev); mxt_debug(DEBUG_INFO, "maXTouch driver v. 
%s\n", DRIVER_VERSION); mxt_debug(DEBUG_INFO, "\t \"%s\"\n", client->name); mxt_debug(DEBUG_INFO, "\taddr:\t0x%04x\n", client->addr); mxt_debug(DEBUG_INFO, "\tirq:\t%d\n", client->irq); mxt_debug(DEBUG_INFO, "\tflags:\t0x%04x\n", client->flags); mxt_debug(DEBUG_INFO, "\tadapter:\"%s\"\n", client->adapter->name); mxt_debug(DEBUG_INFO, "\tdevice:\t\"%s\"\n", client->dev.init_name); mxt_debug(DEBUG_TRACE, "maXTouch driver functionality OK\n"); /* Allocate structure - we need it to identify device */ mxt = kzalloc(sizeof(struct mxt_data), GFP_KERNEL); if (mxt == NULL) { dev_err(&client->dev, "insufficient memory\n"); error = -ENOMEM; goto err_mxt_alloc; } id_data = kmalloc(MXT_ID_BLOCK_SIZE, GFP_KERNEL); if (id_data == NULL) { dev_err(&client->dev, "insufficient memory\n"); error = -ENOMEM; goto err_id_alloc; } input = input_allocate_device(); if (!input) { dev_err(&client->dev, "error allocating input device\n"); error = -ENOMEM; goto err_input_dev_alloc; } /* Initialize Platform data */ pdata = client->dev.platform_data; if (pdata == NULL) { dev_err(&client->dev, "platform data is required!\n"); error = -EINVAL; goto err_pdata; } if (debug >= DEBUG_TRACE) printk(KERN_INFO "Platform OK: pdata = 0x%08x\n", (unsigned int) pdata); mxt->is_suspended = false; mxt->read_fail_counter = 0; mxt->message_counter = 0; if (pdata->min_x) mxt->min_x_val = pdata->min_x; else mxt->min_x_val = 0; if (pdata->min_y) mxt->min_y_val = pdata->min_y; else mxt->min_y_val = 0; mxt->max_x_val = pdata->max_x; mxt->max_y_val = pdata->max_y; /* Get data that is defined in board specific code. 
*/ mxt->init_hw = pdata->init_platform_hw; mxt->exit_hw = pdata->exit_platform_hw; mxt->power_on = pdata->power_on; mxt->read_chg = pdata->read_chg; if (pdata->valid_interrupt != NULL) mxt->valid_interrupt = pdata->valid_interrupt; else mxt->valid_interrupt = mxt_valid_interrupt_dummy; if (mxt->init_hw) { error = mxt->init_hw(client); if (error) { dev_err(&client->dev, "hw init failed"); goto err_init_hw; } } /* power on the device */ if (mxt->power_on) { error = mxt->power_on(true); if (error) { dev_err(&client->dev, "power on failed"); goto err_pwr_on; } } if (debug >= DEBUG_TRACE) printk(KERN_INFO "maXTouch driver identifying chip\n"); if (mxt_identify(client, mxt, id_data) < 0) { dev_err(&client->dev, "Chip could not be identified\n"); error = -ENODEV; goto err_identify; } /* Chip is valid and active. */ if (debug >= DEBUG_TRACE) printk(KERN_INFO "maXTouch driver allocating input device\n"); mxt->client = client; mxt->input = input; INIT_DELAYED_WORK(&mxt->dwork, mxt_worker); mutex_init(&mxt->debug_mutex); mutex_init(&mxt->msg_mutex); mxt_debug(DEBUG_TRACE, "maXTouch driver creating device name\n"); snprintf( mxt->phys_name, sizeof(mxt->phys_name), "%s/input0", dev_name(&client->dev) ); input->name = "Atmel maXTouch Touchscreen controller"; input->phys = mxt->phys_name; input->id.bustype = BUS_I2C; input->dev.parent = &client->dev; mxt_debug(DEBUG_INFO, "maXTouch name: \"%s\"\n", input->name); mxt_debug(DEBUG_INFO, "maXTouch phys: \"%s\"\n", input->phys); mxt_debug(DEBUG_INFO, "maXTouch driver setting abs parameters\n"); set_bit(BTN_TOUCH, input->keybit); set_bit(INPUT_PROP_DIRECT, input->propbit); /* Single touch */ input_set_abs_params(input, ABS_X, mxt->min_x_val, mxt->max_x_val, 0, 0); input_set_abs_params(input, ABS_Y, mxt->min_y_val, mxt->max_y_val, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, MXT_MAX_REPORTED_PRESSURE, 0, 0); input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MXT_MAX_REPORTED_WIDTH, 0, 0); /* Multitouch */ input_set_abs_params(input, 
ABS_MT_POSITION_X, mxt->min_x_val, mxt->max_x_val, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, mxt->min_y_val, mxt->max_y_val, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MXT_MAX_TOUCH_SIZE, 0, 0); input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, MXT_MAX_NUM_TOUCHES, 0, 0); __set_bit(EV_ABS, input->evbit); __set_bit(EV_SYN, input->evbit); __set_bit(EV_KEY, input->evbit); __set_bit(EV_MSC, input->evbit); input->mscbit[0] = BIT_MASK(MSC_GESTURE); mxt_debug(DEBUG_TRACE, "maXTouch driver setting client data\n"); i2c_set_clientdata(client, mxt); mxt_debug(DEBUG_TRACE, "maXTouch driver setting drv data\n"); input_set_drvdata(input, mxt); mxt_debug(DEBUG_TRACE, "maXTouch driver input register device\n"); error = input_register_device(mxt->input); if (error < 0) { dev_err(&client->dev, "Failed to register input device\n"); goto err_register_device; } error = mxt_read_object_table(client, mxt, id_data); if (error < 0) goto err_read_ot; /* Create debugfs entries. */ mxt->debug_dir = debugfs_create_dir("maXTouch", NULL); if (mxt->debug_dir == ERR_PTR(-ENODEV)) { /* debugfs is not enabled. */ printk(KERN_WARNING "debugfs not enabled in kernel\n"); } else if (mxt->debug_dir == NULL) { printk(KERN_WARNING "error creating debugfs dir\n"); } else { mxt_debug(DEBUG_TRACE, "created \"maXTouch\" debugfs dir\n"); debugfs_create_file("deltas", S_IRUSR, mxt->debug_dir, mxt, &delta_fops); debugfs_create_file("refs", S_IRUSR, mxt->debug_dir, mxt, &refs_fops); } /* Create character device nodes for reading & writing registers */ mxt->mxt_class = class_create(THIS_MODULE, "maXTouch_memory"); if (IS_ERR(mxt->mxt_class)){ printk(KERN_WARNING "class create failed! 
exiting..."); goto err_class_create; } /* 2 numbers; one for memory and one for messages */ error = alloc_chrdev_region(&mxt->dev_num, 0, 2, "maXTouch_memory"); mxt_debug(DEBUG_VERBOSE, "device number %d allocated!\n", MAJOR(mxt->dev_num)); if (error){ printk(KERN_WARNING "Error registering device\n"); } cdev_init(&mxt->cdev, &mxt_memory_fops); cdev_init(&mxt->cdev_messages, &mxt_message_fops); mxt_debug(DEBUG_VERBOSE, "cdev initialized\n"); mxt->cdev.owner = THIS_MODULE; mxt->cdev_messages.owner = THIS_MODULE; error = cdev_add(&mxt->cdev, mxt->dev_num, 1); if (error){ printk(KERN_WARNING "Bad cdev\n"); } error = cdev_add(&mxt->cdev_messages, mxt->dev_num + 1, 1); if (error){ printk(KERN_WARNING "Bad cdev\n"); } mxt_debug(DEBUG_VERBOSE, "cdev added\n"); device_create(mxt->mxt_class, NULL, MKDEV(MAJOR(mxt->dev_num), 0), NULL, "maXTouch"); device_create(mxt->mxt_class, NULL, MKDEV(MAJOR(mxt->dev_num), 1), NULL, "maXTouch_messages"); mxt->msg_buffer_startp = 0; mxt->msg_buffer_endp = 0; /* Allocate the interrupt */ mxt_debug(DEBUG_TRACE, "maXTouch driver allocating interrupt...\n"); mxt->irq = client->irq; mxt->valid_irq_counter = 0; mxt->invalid_irq_counter = 0; mxt->irq_counter = 0; if (mxt->irq) { /* Try to request IRQ with falling edge first. This is * not always supported. If it fails, try with any edge. */ error = request_irq(mxt->irq, mxt_irq_handler, IRQF_TRIGGER_FALLING, client->dev.driver->name, mxt); if (error < 0) { /* TODO: why only 0 works on STK1000? 
*/ error = request_irq(mxt->irq, mxt_irq_handler, 0, client->dev.driver->name, mxt); } if (error < 0) { dev_err(&client->dev, "failed to allocate irq %d\n", mxt->irq); goto err_irq; } } if (debug > DEBUG_INFO) dev_info(&client->dev, "touchscreen, irq %d\n", mxt->irq); t38_data = kmalloc(t38_size*sizeof(u8), GFP_KERNEL); if (t38_data == NULL) { dev_err(&client->dev, "insufficient memory\n"); error = -ENOMEM; goto err_t38; } t38_addr = MXT_BASE_ADDR(MXT_USER_INFO_T38, mxt); mxt_read_block(client, t38_addr, t38_size, t38_data); dev_info(&client->dev, "VERSION:%02x.%02x.%02x, DATE: %d/%d/%d\n", t38_data[0], t38_data[1], t38_data[2], t38_data[3], t38_data[4], t38_data[5]); /* Schedule a worker routine to read any messages that might have * been sent before interrupts were enabled. */ cancel_delayed_work(&mxt->dwork); disable_irq(mxt->irq); schedule_delayed_work(&mxt->dwork, 0); kfree(t38_data); kfree(id_data); device_init_wakeup(&client->dev, pdata->wakeup); #if defined(CONFIG_HAS_EARLYSUSPEND) mxt->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + MXT_SUSPEND_LEVEL; mxt->early_suspend.suspend = mxt_early_suspend; mxt->early_suspend.resume = mxt_late_resume; register_early_suspend(&mxt->early_suspend); #endif return 0; err_t38: free_irq(mxt->irq, mxt); err_irq: kfree(mxt->rid_map); kfree(mxt->object_table); kfree(mxt->last_message); err_class_create: if (mxt->debug_dir) debugfs_remove(mxt->debug_dir); kfree(mxt->last_message); kfree(mxt->rid_map); kfree(mxt->object_table); err_read_ot: input_unregister_device(mxt->input); mxt->input = NULL; err_register_device: mutex_destroy(&mxt->debug_mutex); mutex_destroy(&mxt->msg_mutex); err_identify: if (mxt->power_on) mxt->power_on(false); err_pwr_on: if (mxt->exit_hw != NULL) mxt->exit_hw(client); err_init_hw: err_pdata: input_free_device(input); err_input_dev_alloc: kfree(id_data); err_id_alloc: kfree(mxt); err_mxt_alloc: pm_runtime_set_suspended(&client->dev); pm_runtime_disable(&client->dev); return error; } static int 
__devexit mxt_remove(struct i2c_client *client) { struct mxt_data *mxt; pm_runtime_set_suspended(&client->dev); pm_runtime_disable(&client->dev); mxt = i2c_get_clientdata(client); /* Remove debug dir entries */ debugfs_remove_recursive(mxt->debug_dir); device_init_wakeup(&client->dev, 0); #if defined(CONFIG_HAS_EARLYSUSPEND) unregister_early_suspend(&mxt->early_suspend); #endif if (mxt != NULL) { if (mxt->power_on) mxt->power_on(false); if (mxt->exit_hw != NULL) mxt->exit_hw(client); if (mxt->irq) { free_irq(mxt->irq, mxt); } unregister_chrdev_region(mxt->dev_num, 2); device_destroy(mxt->mxt_class, MKDEV(MAJOR(mxt->dev_num), 0)); device_destroy(mxt->mxt_class, MKDEV(MAJOR(mxt->dev_num), 1)); cdev_del(&mxt->cdev); cdev_del(&mxt->cdev_messages); cancel_delayed_work_sync(&mxt->dwork); input_unregister_device(mxt->input); class_destroy(mxt->mxt_class); debugfs_remove(mxt->debug_dir); kfree(mxt->rid_map); kfree(mxt->object_table); kfree(mxt->last_message); } kfree(mxt); i2c_set_clientdata(client, NULL); if (debug >= DEBUG_TRACE) dev_info(&client->dev, "Touchscreen unregistered\n"); return 0; } static const struct i2c_device_id mxt_idtable[] = { {"maXTouch", 0,}, { } }; MODULE_DEVICE_TABLE(i2c, mxt_idtable); static struct i2c_driver mxt_driver = { .driver = { .name = "maXTouch", .owner = THIS_MODULE, #if defined(CONFIG_PM) .pm = &mxt_pm_ops, #endif }, .id_table = mxt_idtable, .probe = mxt_probe, .remove = __devexit_p(mxt_remove), }; static int __init mxt_init(void) { int err; err = i2c_add_driver(&mxt_driver); if (err) { printk(KERN_WARNING "Adding maXTouch driver failed " "(errno = %d)\n", err); } else { mxt_debug(DEBUG_TRACE, "Successfully added driver %s\n", mxt_driver.driver.name); } return err; } static void __exit mxt_cleanup(void) { i2c_del_driver(&mxt_driver); } module_init(mxt_init); module_exit(mxt_cleanup); MODULE_AUTHOR("Iiro Valkonen"); MODULE_DESCRIPTION("Driver for Atmel maXTouch Touchscreen Controller"); MODULE_LICENSE("GPL");
gpl-2.0
trungnc/aosp_kernel_samsung_js01lte
drivers/video/backlight/lcd.c
4544
6605
/* * LCD Lowlevel Control Abstraction * * Copyright (C) 2003,2004 Hewlett-Packard Company * */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/lcd.h> #include <linux/notifier.h> #include <linux/ctype.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/slab.h> #if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ defined(CONFIG_LCD_CLASS_DEVICE_MODULE)) /* This callback gets called when something important happens inside a * framebuffer driver. We're looking if that important event is blanking, * and if it is, we're switching lcd power as well ... */ static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { struct lcd_device *ld; struct fb_event *evdata = data; /* If we aren't interested in this event, skip it immediately ... */ switch (event) { case FB_EVENT_BLANK: case FB_EVENT_MODE_CHANGE: case FB_EVENT_MODE_CHANGE_ALL: break; default: return 0; } ld = container_of(self, struct lcd_device, fb_notif); if (!ld->ops) return 0; mutex_lock(&ld->ops_lock); if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { if (event == FB_EVENT_BLANK) { if (ld->ops->set_power) ld->ops->set_power(ld, *(int *)evdata->data); } else { if (ld->ops->set_mode) ld->ops->set_mode(ld, evdata->data); } } mutex_unlock(&ld->ops_lock); return 0; } static int lcd_register_fb(struct lcd_device *ld) { memset(&ld->fb_notif, 0, sizeof(ld->fb_notif)); ld->fb_notif.notifier_call = fb_notifier_callback; return fb_register_client(&ld->fb_notif); } static void lcd_unregister_fb(struct lcd_device *ld) { fb_unregister_client(&ld->fb_notif); } #else static int lcd_register_fb(struct lcd_device *ld) { return 0; } static inline void lcd_unregister_fb(struct lcd_device *ld) { } #endif /* CONFIG_FB */ static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr, char *buf) { int rc; struct lcd_device *ld = to_lcd_device(dev); mutex_lock(&ld->ops_lock); if (ld->ops && ld->ops->get_power) 
rc = sprintf(buf, "%d\n", ld->ops->get_power(ld)); else rc = -ENXIO; mutex_unlock(&ld->ops_lock); return rc; } static ssize_t lcd_store_power(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc = -ENXIO; struct lcd_device *ld = to_lcd_device(dev); unsigned long power; rc = kstrtoul(buf, 0, &power); if (rc) return rc; mutex_lock(&ld->ops_lock); if (ld->ops && ld->ops->set_power) { pr_debug("lcd: set power to %lu\n", power); ld->ops->set_power(ld, power); rc = count; } mutex_unlock(&ld->ops_lock); return rc; } static ssize_t lcd_show_contrast(struct device *dev, struct device_attribute *attr, char *buf) { int rc = -ENXIO; struct lcd_device *ld = to_lcd_device(dev); mutex_lock(&ld->ops_lock); if (ld->ops && ld->ops->get_contrast) rc = sprintf(buf, "%d\n", ld->ops->get_contrast(ld)); mutex_unlock(&ld->ops_lock); return rc; } static ssize_t lcd_store_contrast(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc = -ENXIO; struct lcd_device *ld = to_lcd_device(dev); unsigned long contrast; rc = kstrtoul(buf, 0, &contrast); if (rc) return rc; mutex_lock(&ld->ops_lock); if (ld->ops && ld->ops->set_contrast) { pr_debug("lcd: set contrast to %lu\n", contrast); ld->ops->set_contrast(ld, contrast); rc = count; } mutex_unlock(&ld->ops_lock); return rc; } static ssize_t lcd_show_max_contrast(struct device *dev, struct device_attribute *attr, char *buf) { struct lcd_device *ld = to_lcd_device(dev); return sprintf(buf, "%d\n", ld->props.max_contrast); } static struct class *lcd_class; static void lcd_device_release(struct device *dev) { struct lcd_device *ld = to_lcd_device(dev); kfree(ld); } static struct device_attribute lcd_device_attributes[] = { __ATTR(lcd_power, 0644, lcd_show_power, lcd_store_power), __ATTR(contrast, 0644, lcd_show_contrast, lcd_store_contrast), __ATTR(max_contrast, 0444, lcd_show_max_contrast, NULL), __ATTR_NULL, }; /** * lcd_device_register - register a new object of lcd_device 
class. * @name: the name of the new object(must be the same as the name of the * respective framebuffer device). * @devdata: an optional pointer to be stored in the device. The * methods may retrieve it by using lcd_get_data(ld). * @ops: the lcd operations structure. * * Creates and registers a new lcd device. Returns either an ERR_PTR() * or a pointer to the newly allocated device. */ struct lcd_device *lcd_device_register(const char *name, struct device *parent, void *devdata, struct lcd_ops *ops) { struct lcd_device *new_ld; int rc; pr_debug("lcd_device_register: name=%s\n", name); new_ld = kzalloc(sizeof(struct lcd_device), GFP_KERNEL); if (!new_ld) return ERR_PTR(-ENOMEM); mutex_init(&new_ld->ops_lock); mutex_init(&new_ld->update_lock); new_ld->dev.class = lcd_class; new_ld->dev.parent = parent; new_ld->dev.release = lcd_device_release; dev_set_name(&new_ld->dev, name); dev_set_drvdata(&new_ld->dev, devdata); rc = device_register(&new_ld->dev); if (rc) { kfree(new_ld); return ERR_PTR(rc); } rc = lcd_register_fb(new_ld); if (rc) { device_unregister(&new_ld->dev); return ERR_PTR(rc); } new_ld->ops = ops; return new_ld; } EXPORT_SYMBOL(lcd_device_register); /** * lcd_device_unregister - unregisters a object of lcd_device class. * @ld: the lcd device object to be unregistered and freed. * * Unregisters a previously registered via lcd_device_register object. 
*/ void lcd_device_unregister(struct lcd_device *ld) { if (!ld) return; mutex_lock(&ld->ops_lock); ld->ops = NULL; mutex_unlock(&ld->ops_lock); lcd_unregister_fb(ld); device_unregister(&ld->dev); } EXPORT_SYMBOL(lcd_device_unregister); static void __exit lcd_class_exit(void) { class_destroy(lcd_class); } static int __init lcd_class_init(void) { lcd_class = class_create(THIS_MODULE, "lcd"); if (IS_ERR(lcd_class)) { printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n", PTR_ERR(lcd_class)); return PTR_ERR(lcd_class); } lcd_class->dev_attrs = lcd_device_attributes; return 0; } /* * if this is compiled into the kernel, we need to ensure that the * class is registered before users of the class try to register lcd's */ postcore_initcall(lcd_class_init); module_exit(lcd_class_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamey Hicks <jamey.hicks@hp.com>, Andrew Zabolotny <zap@homelink.ru>"); MODULE_DESCRIPTION("LCD Lowlevel Control Abstraction");
gpl-2.0