repo_name
string
path
string
copies
string
size
string
content
string
license
string
dadcup18/cm11_kernel_samsung_codinalte
arch/arm/plat-spear/clock.c
2274
23950
/* * arch/arm/plat-spear/clock.c * * Clock framework for SPEAr platform * * Copyright (C) 2009 ST Microelectronics * Viresh Kumar<viresh.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/bug.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/err.h> #include <linux/io.h> #include <linux/list.h> #include <linux/module.h> #include <linux/spinlock.h> #include <plat/clock.h> static DEFINE_SPINLOCK(clocks_lock); static LIST_HEAD(root_clks); #ifdef CONFIG_DEBUG_FS static LIST_HEAD(clocks); #endif static void propagate_rate(struct clk *, int on_init); #ifdef CONFIG_DEBUG_FS static int clk_debugfs_reparent(struct clk *); #endif static int generic_clk_enable(struct clk *clk) { unsigned int val; if (!clk->en_reg) return -EFAULT; val = readl(clk->en_reg); if (unlikely(clk->flags & RESET_TO_ENABLE)) val &= ~(1 << clk->en_reg_bit); else val |= 1 << clk->en_reg_bit; writel(val, clk->en_reg); return 0; } static void generic_clk_disable(struct clk *clk) { unsigned int val; if (!clk->en_reg) return; val = readl(clk->en_reg); if (unlikely(clk->flags & RESET_TO_ENABLE)) val |= 1 << clk->en_reg_bit; else val &= ~(1 << clk->en_reg_bit); writel(val, clk->en_reg); } /* generic clk ops */ static struct clkops generic_clkops = { .enable = generic_clk_enable, .disable = generic_clk_disable, }; /* returns current programmed clocks clock info structure */ static struct pclk_info *pclk_info_get(struct clk *clk) { unsigned int val, i; struct pclk_info *info = NULL; val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift) & clk->pclk_sel->pclk_sel_mask; for (i = 0; i < clk->pclk_sel->pclk_count; i++) { if (clk->pclk_sel->pclk_info[i].pclk_val == val) info = &clk->pclk_sel->pclk_info[i]; } return info; } /* * Set Update pclk, and pclk_info of clk and add clock sibling node to current * parents children 
list */ static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); list_del(&clk->sibling); list_add(&clk->sibling, &pclk_info->pclk->children); clk->pclk = pclk_info->pclk; spin_unlock_irqrestore(&clocks_lock, flags); #ifdef CONFIG_DEBUG_FS clk_debugfs_reparent(clk); #endif } static void do_clk_disable(struct clk *clk) { if (!clk) return; if (!clk->usage_count) { WARN_ON(1); return; } clk->usage_count--; if (clk->usage_count == 0) { /* * Surely, there are no active childrens or direct users * of this clock */ if (clk->pclk) do_clk_disable(clk->pclk); if (clk->ops && clk->ops->disable) clk->ops->disable(clk); } } static int do_clk_enable(struct clk *clk) { int ret = 0; if (!clk) return -EFAULT; if (clk->usage_count == 0) { if (clk->pclk) { ret = do_clk_enable(clk->pclk); if (ret) goto err; } if (clk->ops && clk->ops->enable) { ret = clk->ops->enable(clk); if (ret) { if (clk->pclk) do_clk_disable(clk->pclk); goto err; } } /* * Since the clock is going to be used for the first * time please reclac */ if (clk->recalc) { ret = clk->recalc(clk); if (ret) goto err; } } clk->usage_count++; err: return ret; } /* * clk_enable - inform the system when the clock source should be running. * @clk: clock source * * If the clock can not be enabled/disabled, this should return success. * * Returns success (0) or negative errno. */ int clk_enable(struct clk *clk) { unsigned long flags; int ret = 0; spin_lock_irqsave(&clocks_lock, flags); ret = do_clk_enable(clk); spin_unlock_irqrestore(&clocks_lock, flags); return ret; } EXPORT_SYMBOL(clk_enable); /* * clk_disable - inform the system when the clock source is no longer required. * @clk: clock source * * Inform the system that a clock source is no longer required by * a driver and may be shut down. 
* * Implementation detail: if the clock source is shared between * multiple drivers, clk_enable() calls must be balanced by the * same number of clk_disable() calls for the clock source to be * disabled. */ void clk_disable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); do_clk_disable(clk); spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_disable); /** * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. * This is only valid once the clock source has been enabled. * @clk: clock source */ unsigned long clk_get_rate(struct clk *clk) { unsigned long flags, rate; spin_lock_irqsave(&clocks_lock, flags); rate = clk->rate; spin_unlock_irqrestore(&clocks_lock, flags); return rate; } EXPORT_SYMBOL(clk_get_rate); /** * clk_set_parent - set the parent clock source for this clock * @clk: clock source * @parent: parent clock source * * Returns success (0) or negative errno. */ int clk_set_parent(struct clk *clk, struct clk *parent) { int i, found = 0, val = 0; unsigned long flags; if (!clk || !parent) return -EFAULT; if (clk->pclk == parent) return 0; if (!clk->pclk_sel) return -EPERM; /* check if requested parent is in clk parent list */ for (i = 0; i < clk->pclk_sel->pclk_count; i++) { if (clk->pclk_sel->pclk_info[i].pclk == parent) { found = 1; break; } } if (!found) return -EINVAL; spin_lock_irqsave(&clocks_lock, flags); /* reflect parent change in hardware */ val = readl(clk->pclk_sel->pclk_sel_reg); val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift); val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift; writel(val, clk->pclk_sel->pclk_sel_reg); spin_unlock_irqrestore(&clocks_lock, flags); /* reflect parent change in software */ clk_reparent(clk, &clk->pclk_sel->pclk_info[i]); propagate_rate(clk, 0); return 0; } EXPORT_SYMBOL(clk_set_parent); /** * clk_set_rate - set the clock rate for a clock source * @clk: clock source * @rate: desired clock rate in Hz * * Returns success (0) 
or negative errno. */ int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; int ret = -EINVAL; if (!clk || !rate) return -EFAULT; if (clk->set_rate) { spin_lock_irqsave(&clocks_lock, flags); ret = clk->set_rate(clk, rate); if (!ret) /* if successful -> propagate */ propagate_rate(clk, 0); spin_unlock_irqrestore(&clocks_lock, flags); } else if (clk->pclk) { u32 mult = clk->div_factor ? clk->div_factor : 1; ret = clk_set_rate(clk->pclk, mult * rate); } return ret; } EXPORT_SYMBOL(clk_set_rate); /* registers clock in platform clock framework */ void clk_register(struct clk_lookup *cl) { struct clk *clk; unsigned long flags; if (!cl || !cl->clk) return; clk = cl->clk; spin_lock_irqsave(&clocks_lock, flags); INIT_LIST_HEAD(&clk->children); if (clk->flags & ALWAYS_ENABLED) clk->ops = NULL; else if (!clk->ops) clk->ops = &generic_clkops; /* root clock don't have any parents */ if (!clk->pclk && !clk->pclk_sel) { list_add(&clk->sibling, &root_clks); } else if (clk->pclk && !clk->pclk_sel) { /* add clocks with only one parent to parent's children list */ list_add(&clk->sibling, &clk->pclk->children); } else { /* clocks with more than one parent */ struct pclk_info *pclk_info; pclk_info = pclk_info_get(clk); if (!pclk_info) { pr_err("CLKDEV: invalid pclk info of clk with" " %s dev_id and %s con_id\n", cl->dev_id, cl->con_id); } else { clk->pclk = pclk_info->pclk; list_add(&clk->sibling, &pclk_info->pclk->children); } } spin_unlock_irqrestore(&clocks_lock, flags); /* debugfs specific */ #ifdef CONFIG_DEBUG_FS list_add(&clk->node, &clocks); clk->cl = cl; #endif /* add clock to arm clockdev framework */ clkdev_add(cl); } /** * propagate_rate - recalculate and propagate all clocks to children * @pclk: parent clock required to be propogated * @on_init: flag for enabling clocks which are ENABLED_ON_INIT. 
* * Recalculates all children clocks */ void propagate_rate(struct clk *pclk, int on_init) { struct clk *clk, *_temp; int ret = 0; list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) { if (clk->recalc) { ret = clk->recalc(clk); /* * recalc will return error if clk out is not programmed * In this case configure default rate. */ if (ret && clk->set_rate) clk->set_rate(clk, 0); } propagate_rate(clk, on_init); if (!on_init) continue; /* Enable clks enabled on init, in software view */ if (clk->flags & ENABLED_ON_INIT) do_clk_enable(clk); } } /** * round_rate_index - return closest programmable rate index in rate_config tbl * @clk: ptr to clock structure * @drate: desired rate * @rate: final rate will be returned in this variable only. * * Finds index in rate_config for highest clk rate which is less than * requested rate. If there is no clk rate lesser than requested rate then * -EINVAL is returned. This routine assumes that rate_config is written * in incrementing order of clk rates. * If drate passed is zero then default rate is programmed. */ static int round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate) { unsigned long tmp = 0, prev_rate = 0; int index; if (!clk->calc_rate) return -EFAULT; if (!drate) return -EINVAL; /* * This loops ends on two conditions: * - as soon as clk is found with rate greater than requested rate. * - if all clks in rate_config are smaller than requested rate. 
*/ for (index = 0; index < clk->rate_config.count; index++) { prev_rate = tmp; tmp = clk->calc_rate(clk, index); if (drate < tmp) { index--; break; } } /* return if can't find suitable clock */ if (index < 0) { index = -EINVAL; *rate = 0; } else if (index == clk->rate_config.count) { /* program with highest clk rate possible */ index = clk->rate_config.count - 1; *rate = tmp; } else *rate = prev_rate; return index; } /** * clk_round_rate - adjust a rate to the exact rate a clock can provide * @clk: clock source * @rate: desired clock rate in Hz * * Returns rounded clock rate in Hz, or negative errno. */ long clk_round_rate(struct clk *clk, unsigned long drate) { long rate = 0; int index; /* * propagate call to parent who supports calc_rate. Similar approach is * used in clk_set_rate. */ if (!clk->calc_rate) { u32 mult; if (!clk->pclk) return clk->rate; mult = clk->div_factor ? clk->div_factor : 1; return clk_round_rate(clk->pclk, mult * drate) / mult; } index = round_rate_index(clk, drate, &rate); if (index >= 0) return rate; else return index; } EXPORT_SYMBOL(clk_round_rate); /*All below functions are called with lock held */ /* * Calculates pll clk rate for specific value of mode, m, n and p * * In normal mode * rate = (2 * M[15:8] * Fin)/(N * 2^P) * * In Dithered mode * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P) */ unsigned long pll_calc_rate(struct clk *clk, int index) { unsigned long rate = clk->pclk->rate; struct pll_rate_tbl *tbls = clk->rate_config.tbls; unsigned int mode; mode = tbls[index].mode ? 
256 : 1; return (((2 * rate / 10000) * tbls[index].m) / (mode * tbls[index].n * (1 << tbls[index].p))) * 10000; } /* * calculates current programmed rate of pll1 * * In normal mode * rate = (2 * M[15:8] * Fin)/(N * 2^P) * * In Dithered mode * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P) */ int pll_clk_recalc(struct clk *clk) { struct pll_clk_config *config = clk->private_data; unsigned int num = 2, den = 0, val, mode = 0; mode = (readl(config->mode_reg) >> config->masks->mode_shift) & config->masks->mode_mask; val = readl(config->cfg_reg); /* calculate denominator */ den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask; den = 1 << den; den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask; /* calculate numerator & denominator */ if (!mode) { /* Normal mode */ num *= (val >> config->masks->norm_fdbk_m_shift) & config->masks->norm_fdbk_m_mask; } else { /* Dithered mode */ num *= (val >> config->masks->dith_fdbk_m_shift) & config->masks->dith_fdbk_m_mask; den *= 256; } if (!den) return -EINVAL; clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000; return 0; } /* * Configures new clock rate of pll */ int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate) { struct pll_rate_tbl *tbls = clk->rate_config.tbls; struct pll_clk_config *config = clk->private_data; unsigned long val, rate; int i; i = round_rate_index(clk, desired_rate, &rate); if (i < 0) return i; val = readl(config->mode_reg) & ~(config->masks->mode_mask << config->masks->mode_shift); val |= (tbls[i].mode & config->masks->mode_mask) << config->masks->mode_shift; writel(val, config->mode_reg); val = readl(config->cfg_reg) & ~(config->masks->div_p_mask << config->masks->div_p_shift); val |= (tbls[i].p & config->masks->div_p_mask) << config->masks->div_p_shift; val &= ~(config->masks->div_n_mask << config->masks->div_n_shift); val |= (tbls[i].n & config->masks->div_n_mask) << config->masks->div_n_shift; val &= ~(config->masks->dith_fdbk_m_mask << 
config->masks->dith_fdbk_m_shift); if (tbls[i].mode) val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) << config->masks->dith_fdbk_m_shift; else val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) << config->masks->norm_fdbk_m_shift; writel(val, config->cfg_reg); clk->rate = rate; return 0; } /* * Calculates ahb, apb clk rate for specific value of div */ unsigned long bus_calc_rate(struct clk *clk, int index) { unsigned long rate = clk->pclk->rate; struct bus_rate_tbl *tbls = clk->rate_config.tbls; return rate / (tbls[index].div + 1); } /* calculates current programmed rate of ahb or apb bus */ int bus_clk_recalc(struct clk *clk) { struct bus_clk_config *config = clk->private_data; unsigned int div; div = ((readl(config->reg) >> config->masks->shift) & config->masks->mask) + 1; if (!div) return -EINVAL; clk->rate = (unsigned long)clk->pclk->rate / div; return 0; } /* Configures new clock rate of AHB OR APB bus */ int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate) { struct bus_rate_tbl *tbls = clk->rate_config.tbls; struct bus_clk_config *config = clk->private_data; unsigned long val, rate; int i; i = round_rate_index(clk, desired_rate, &rate); if (i < 0) return i; val = readl(config->reg) & ~(config->masks->mask << config->masks->shift); val |= (tbls[i].div & config->masks->mask) << config->masks->shift; writel(val, config->reg); clk->rate = rate; return 0; } /* * gives rate for different values of eq, x and y * * Fout from synthesizer can be given from two equations: * Fout1 = (Fin * X/Y)/2 EQ1 * Fout2 = Fin * X/Y EQ2 */ unsigned long aux_calc_rate(struct clk *clk, int index) { unsigned long rate = clk->pclk->rate; struct aux_rate_tbl *tbls = clk->rate_config.tbls; u8 eq = tbls[index].eq ? 
1 : 2; return (((rate/10000) * tbls[index].xscale) / (tbls[index].yscale * eq)) * 10000; } /* * calculates current programmed rate of auxiliary synthesizers * used by: UART, FIRDA * * Fout from synthesizer can be given from two equations: * Fout1 = (Fin * X/Y)/2 * Fout2 = Fin * X/Y * * Selection of eqn 1 or 2 is programmed in register */ int aux_clk_recalc(struct clk *clk) { struct aux_clk_config *config = clk->private_data; unsigned int num = 1, den = 1, val, eqn; val = readl(config->synth_reg); eqn = (val >> config->masks->eq_sel_shift) & config->masks->eq_sel_mask; if (eqn == config->masks->eq1_mask) den *= 2; /* calculate numerator */ num = (val >> config->masks->xscale_sel_shift) & config->masks->xscale_sel_mask; /* calculate denominator */ den *= (val >> config->masks->yscale_sel_shift) & config->masks->yscale_sel_mask; if (!den) return -EINVAL; clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000; return 0; } /* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/ int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate) { struct aux_rate_tbl *tbls = clk->rate_config.tbls; struct aux_clk_config *config = clk->private_data; unsigned long val, rate; int i; i = round_rate_index(clk, desired_rate, &rate); if (i < 0) return i; val = readl(config->synth_reg) & ~(config->masks->eq_sel_mask << config->masks->eq_sel_shift); val |= (tbls[i].eq & config->masks->eq_sel_mask) << config->masks->eq_sel_shift; val &= ~(config->masks->xscale_sel_mask << config->masks->xscale_sel_shift); val |= (tbls[i].xscale & config->masks->xscale_sel_mask) << config->masks->xscale_sel_shift; val &= ~(config->masks->yscale_sel_mask << config->masks->yscale_sel_shift); val |= (tbls[i].yscale & config->masks->yscale_sel_mask) << config->masks->yscale_sel_shift; writel(val, config->synth_reg); clk->rate = rate; return 0; } /* * Calculates gpt clk rate for different values of mscale and nscale * * Fout= Fin/((2 ^ (N+1)) * (M+1)) */ unsigned long 
gpt_calc_rate(struct clk *clk, int index) { unsigned long rate = clk->pclk->rate; struct gpt_rate_tbl *tbls = clk->rate_config.tbls; return rate / ((1 << (tbls[index].nscale + 1)) * (tbls[index].mscale + 1)); } /* * calculates current programmed rate of gpt synthesizers * Fout from synthesizer can be given from below equations: * Fout= Fin/((2 ^ (N+1)) * (M+1)) */ int gpt_clk_recalc(struct clk *clk) { struct gpt_clk_config *config = clk->private_data; unsigned int div = 1, val; val = readl(config->synth_reg); div += (val >> config->masks->mscale_sel_shift) & config->masks->mscale_sel_mask; div *= 1 << (((val >> config->masks->nscale_sel_shift) & config->masks->nscale_sel_mask) + 1); if (!div) return -EINVAL; clk->rate = (unsigned long)clk->pclk->rate / div; return 0; } /* Configures new clock rate of gptiliary synthesizers used by: UART, FIRDA*/ int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate) { struct gpt_rate_tbl *tbls = clk->rate_config.tbls; struct gpt_clk_config *config = clk->private_data; unsigned long val, rate; int i; i = round_rate_index(clk, desired_rate, &rate); if (i < 0) return i; val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask << config->masks->mscale_sel_shift); val |= (tbls[i].mscale & config->masks->mscale_sel_mask) << config->masks->mscale_sel_shift; val &= ~(config->masks->nscale_sel_mask << config->masks->nscale_sel_shift); val |= (tbls[i].nscale & config->masks->nscale_sel_mask) << config->masks->nscale_sel_shift; writel(val, config->synth_reg); clk->rate = rate; return 0; } /* * Calculates clcd clk rate for different values of div * * Fout from synthesizer can be given from below equation: * Fout= Fin/2*div (division factor) * div is 17 bits:- * 0-13 (fractional part) * 14-16 (integer part) * To calculate Fout we left shift val by 14 bits and divide Fin by * complete div (including fractional part) and then right shift the * result by 14 places. 
*/ unsigned long clcd_calc_rate(struct clk *clk, int index) { unsigned long rate = clk->pclk->rate; struct clcd_rate_tbl *tbls = clk->rate_config.tbls; rate /= 1000; rate <<= 12; rate /= (2 * tbls[index].div); rate >>= 12; rate *= 1000; return rate; } /* * calculates current programmed rate of clcd synthesizer * Fout from synthesizer can be given from below equation: * Fout= Fin/2*div (division factor) * div is 17 bits:- * 0-13 (fractional part) * 14-16 (integer part) * To calculate Fout we left shift val by 14 bits and divide Fin by * complete div (including fractional part) and then right shift the * result by 14 places. */ int clcd_clk_recalc(struct clk *clk) { struct clcd_clk_config *config = clk->private_data; unsigned int div = 1; unsigned long prate; unsigned int val; val = readl(config->synth_reg); div = (val >> config->masks->div_factor_shift) & config->masks->div_factor_mask; if (!div) return -EINVAL; prate = clk->pclk->rate / 1000; /* first level division, make it KHz */ clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12; clk->rate *= 1000; return 0; } /* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/ int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate) { struct clcd_rate_tbl *tbls = clk->rate_config.tbls; struct clcd_clk_config *config = clk->private_data; unsigned long val, rate; int i; i = round_rate_index(clk, desired_rate, &rate); if (i < 0) return i; val = readl(config->synth_reg) & ~(config->masks->div_factor_mask << config->masks->div_factor_shift); val |= (tbls[i].div & config->masks->div_factor_mask) << config->masks->div_factor_shift; writel(val, config->synth_reg); clk->rate = rate; return 0; } /* * Used for clocks that always have value as the parent clock divided by a * fixed divisor */ int follow_parent(struct clk *clk) { unsigned int div_factor = (clk->div_factor < 1) ? 
1 : clk->div_factor; clk->rate = clk->pclk->rate/div_factor; return 0; } /** * recalc_root_clocks - recalculate and propagate all root clocks * * Recalculates all root clocks (clocks with no parent), which if the * clock's .recalc is set correctly, should also propagate their rates. */ void recalc_root_clocks(void) { struct clk *pclk; unsigned long flags; int ret = 0; spin_lock_irqsave(&clocks_lock, flags); list_for_each_entry(pclk, &root_clks, sibling) { if (pclk->recalc) { ret = pclk->recalc(pclk); /* * recalc will return error if clk out is not programmed * In this case configure default clock. */ if (ret && pclk->set_rate) pclk->set_rate(pclk, 0); } propagate_rate(pclk, 1); /* Enable clks enabled on init, in software view */ if (pclk->flags & ENABLED_ON_INIT) do_clk_enable(pclk); } spin_unlock_irqrestore(&clocks_lock, flags); } void __init clk_init(void) { recalc_root_clocks(); } #ifdef CONFIG_DEBUG_FS /* * debugfs support to trace clock tree hierarchy and attributes */ static struct dentry *clk_debugfs_root; static int clk_debugfs_register_one(struct clk *c) { int err; struct dentry *d, *child; struct clk *pa = c->pclk; char s[255]; char *p = s; if (c) { if (c->cl->con_id) p += sprintf(p, "%s", c->cl->con_id); if (c->cl->dev_id) p += sprintf(p, "%s", c->cl->dev_id); } d = debugfs_create_dir(s, pa ? 
pa->dent : clk_debugfs_root); if (!d) return -ENOMEM; c->dent = d; d = debugfs_create_u32("usage_count", S_IRUGO, c->dent, (u32 *)&c->usage_count); if (!d) { err = -ENOMEM; goto err_out; } d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); if (!d) { err = -ENOMEM; goto err_out; } d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); if (!d) { err = -ENOMEM; goto err_out; } return 0; err_out: d = c->dent; list_for_each_entry(child, &d->d_subdirs, d_u.d_child) debugfs_remove(child); debugfs_remove(c->dent); return err; } static int clk_debugfs_register(struct clk *c) { int err; struct clk *pa = c->pclk; if (pa && !pa->dent) { err = clk_debugfs_register(pa); if (err) return err; } if (!c->dent) { err = clk_debugfs_register_one(c); if (err) return err; } return 0; } static int __init clk_debugfs_init(void) { struct clk *c; struct dentry *d; int err; d = debugfs_create_dir("clock", NULL); if (!d) return -ENOMEM; clk_debugfs_root = d; list_for_each_entry(c, &clocks, node) { err = clk_debugfs_register(c); if (err) goto err_out; } return 0; err_out: debugfs_remove_recursive(clk_debugfs_root); return err; } late_initcall(clk_debugfs_init); static int clk_debugfs_reparent(struct clk *c) { debugfs_remove(c->dent); return clk_debugfs_register_one(c); } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
lmajewski/linux-samsung-devel
lib/clz_ctz.c
2274
1332
/* * lib/clz_ctz.c * * Copyright (C) 2013 Chanho Min <chanho.min@lge.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * The functions in this file aren't called directly, but are required by * GCC builtins such as __builtin_ctz, and therefore they can't be removed * despite appearing unreferenced in kernel source. * * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. */ #include <linux/export.h> #include <linux/kernel.h> int __weak __ctzsi2(int val); int __weak __ctzsi2(int val) { return __ffs(val); } EXPORT_SYMBOL(__ctzsi2); int __weak __clzsi2(int val); int __weak __clzsi2(int val) { return 32 - fls(val); } EXPORT_SYMBOL(__clzsi2); int __weak __clzdi2(long val); int __weak __ctzdi2(long val); #if BITS_PER_LONG == 32 int __weak __clzdi2(long val) { return 32 - fls((int)val); } EXPORT_SYMBOL(__clzdi2); int __weak __ctzdi2(long val) { return __ffs((u32)val); } EXPORT_SYMBOL(__ctzdi2); #elif BITS_PER_LONG == 64 int __weak __clzdi2(long val) { return 64 - fls64((u64)val); } EXPORT_SYMBOL(__clzdi2); int __weak __ctzdi2(long val) { return __ffs64((u64)val); } EXPORT_SYMBOL(__ctzdi2); #else #error BITS_PER_LONG not 32 or 64 #endif
gpl-2.0
avinashphilip/am335x_linux
arch/x86/kernel/apic/summit_32.c
2530
16900
/* * IBM Summit-Specific Code * * Written By: Matthew Dobson, IBM Corporation * * Copyright (c) 2003 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <colpatch@us.ibm.com> * */ #define pr_fmt(fmt) "summit: %s: " fmt, __func__ #include <linux/mm.h> #include <linux/init.h> #include <asm/io.h> #include <asm/bios_ebda.h> /* * APIC driver for the IBM "Summit" chipset. 
*/ #include <linux/threads.h> #include <linux/cpumask.h> #include <asm/mpspec.h> #include <asm/apic.h> #include <asm/smp.h> #include <asm/fixmap.h> #include <asm/apicdef.h> #include <asm/ipi.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/gfp.h> #include <linux/smp.h> static unsigned summit_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector) { default_send_IPI_mask_sequence_logical(mask, vector); } static void summit_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector); } static void summit_send_IPI_all(int vector) { summit_send_IPI_mask(cpu_online_mask, vector); } #include <asm/tsc.h> extern int use_cyclone; #ifdef CONFIG_X86_SUMMIT_NUMA static void setup_summit(void); #else static inline void setup_summit(void) {} #endif static int summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (!strncmp(oem, "IBM ENSW", 8) && (!strncmp(productid, "VIGIL SMP", 9) || !strncmp(productid, "EXA", 3) || !strncmp(productid, "RUTHLESS SMP", 12))){ mark_tsc_unstable("Summit based system"); use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; } return 0; } /* Hook from generic ACPI tables.c */ static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERVIGIL", 8) || !strncmp(oem_table_id, "EXA", 3))){ mark_tsc_unstable("Summit based system"); use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; } return 0; } struct rio_table_hdr { unsigned char version; /* Version number of this data structure */ /* Version 3 adds chassis_num & WP_index */ unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ } __attribute__((packed)); struct scal_detail { unsigned char node_id; /* Scalability Node ID */ unsigned 
long CBAR; /* Address of 1MB register space */ unsigned char port0node; /* Node ID port connected to: 0xFF=None */ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port1node; /* Node ID port connected to: 0xFF = None */ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port2node; /* Node ID port connected to: 0xFF = None */ unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ } __attribute__((packed)); struct rio_detail { unsigned char node_id; /* RIO Node ID */ unsigned long BBAR; /* Address of 1MB register space */ unsigned char type; /* Type of device */ unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ /* For CYC: Node ID of Twister that owns this CYC */ unsigned char port0node; /* Node ID port connected to: 0xFF=None */ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port1node; /* Node ID port connected to: 0xFF=None */ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ /* For CYC: 0 */ unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ /* = 0 : the XAPIC is not used, ie:*/ /* ints fwded to another XAPIC */ /* Bits1:7 Reserved */ /* For CYC: Bits0:7 Reserved */ unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ /* lower slot numbers/PCI bus numbers */ /* For CYC: No meaning */ unsigned char chassis_num; /* 1 based Chassis number */ /* For LookOut WPEGs this field indicates the */ /* Expansion Chassis #, enumerated from Boot */ /* Node WPEG external port, then Boot Node CYC */ /* external port, then Next Vigil chassis WPEG */ /* external port, etc. 
*/ /* Shared Lookouts have only 1 chassis number (the */ /* first one assigned) */ } __attribute__((packed)); typedef enum { CompatTwister = 0, /* Compatibility Twister */ AltTwister = 1, /* Alternate Twister of internal 8-way */ CompatCyclone = 2, /* Compatibility Cyclone */ AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ CompatWPEG = 4, /* Compatibility WPEG */ AltWPEG = 5, /* Second Planar WPEG */ LookOutAWPEG = 6, /* LookOut WPEG */ LookOutBWPEG = 7, /* LookOut WPEG */ } node_type; static inline int is_WPEG(struct rio_detail *rio){ return (rio->type == CompatWPEG || rio->type == AltWPEG || rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); } #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) static const struct cpumask *summit_target_cpus(void) { /* CPU_MASK_ALL (0xff) has undefined behaviour with * dest_LowestPrio mode logical clustered apic interrupt routing * Just start on cpu 0. IRQ balancing will spread load */ return cpumask_of(0); } static unsigned long summit_check_apicid_used(physid_mask_t *map, int apicid) { return 0; } /* we don't use the phys_cpu_present_map to indicate apicid presence */ static unsigned long summit_check_apicid_present(int bit) { return 1; } static int summit_early_logical_apicid(int cpu) { int count = 0; u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; int i; /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = nr_cpu_ids; --i >= 0; ) { lid = early_per_cpu(x86_cpu_to_logical_apicid, i); if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } #endif /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. 
*/ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); return my_cluster | (1UL << count); } static void summit_init_apic_ldr(void) { int cpu = smp_processor_id(); unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu); unsigned long val; apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); apic_write(APIC_LDR, val); } static int summit_apic_id_registered(void) { return 1; } static void summit_setup_apic_routing(void) { pr_info("Enabling APIC mode: Summit. Using %d I/O APICs\n", nr_ioapics); } static int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } static void summit_ioapic_phys_id_map(physid_mask_t *phys_id_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ physids_promote(0x0FL, retmap); } static void summit_apicid_to_cpu_present(int apicid, physid_mask_t *retmap) { physid_set_mask_of_physid(0, retmap); } static int summit_check_phys_apicid_present(int physical_apicid) { return 1; } static inline int summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id) { unsigned int round = 0; unsigned int cpu, apicid = 0; /* * The cpus in the mask must all be on the apic cluster. 
*/ for_each_cpu_and(cpu, cpumask, cpu_online_mask) { int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { pr_err("Not a valid mask!\n"); return -EINVAL; } apicid |= new_apicid; round++; } if (!round) return -EINVAL; *dest_id = apicid; return 0; } static int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask, unsigned int *apicid) { cpumask_var_t cpumask; *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) return 0; cpumask_and(cpumask, inmask, andmask); summit_cpu_mask_to_apicid(cpumask, apicid); free_cpumask_var(cpumask); return 0; } /* * cpuid returns the value latched in the HW at reset, not the APIC ID * register's value. For any box whose BIOS changes APIC IDs, like * clustered APIC systems, we must use hard_smp_processor_id. * * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. */ static int summit_phys_pkg_id(int cpuid_apic, int index_msb) { return hard_smp_processor_id() >> index_msb; } static int probe_summit(void) { /* probed later in mptable/ACPI hooks */ return 0; } #ifdef CONFIG_X86_SUMMIT_NUMA static struct rio_table_hdr *rio_table_hdr; static struct scal_detail *scal_devs[MAX_NUMNODES]; static struct rio_detail *rio_devs[MAX_NUMNODES*4]; #ifndef CONFIG_X86_NUMAQ static int mp_bus_id_to_node[MAX_MP_BUSSES]; #endif static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) { int twister = 0, node = 0; int i, bus, num_buses; for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) { twister = rio_devs[i]->owner_id; break; } } if (i == rio_table_hdr->num_rio_dev) { pr_err("Couldn't find owner Cyclone for Winnipeg!\n"); return last_bus; } for (i = 0; i < rio_table_hdr->num_scal_dev; i++) { if (scal_devs[i]->node_id == twister) { node = scal_devs[i]->node_id; break; } } if (i == rio_table_hdr->num_scal_dev) { pr_err("Couldn't find 
owner Twister for Cyclone!\n"); return last_bus; } switch (rio_devs[wpeg_num]->type) { case CompatWPEG: /* * The Compatibility Winnipeg controls the 2 legacy buses, * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case * a PCI-PCI bridge card is used in either slot: total 5 buses. */ num_buses = 5; break; case AltWPEG: /* * The Alternate Winnipeg controls the 2 133MHz buses [1 slot * each], their 2 "extra" buses, the 100MHz bus [2 slots] and * the "extra" buses for each of those slots: total 7 buses. */ num_buses = 7; break; case LookOutAWPEG: case LookOutBWPEG: /* * A Lookout Winnipeg controls 3 100MHz buses [2 slots each] * & the "extra" buses for each of those slots: total 9 buses. */ num_buses = 9; break; default: pr_info("Unsupported Winnipeg type!\n"); return last_bus; } for (bus = last_bus; bus < last_bus + num_buses; bus++) mp_bus_id_to_node[bus] = node; return bus; } static int build_detail_arrays(void) { unsigned long ptr; int i, scal_detail_size, rio_detail_size; if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { pr_warn("MAX_NUMNODES too low! 
Defined as %d, but system has %d nodes\n", MAX_NUMNODES, rio_table_hdr->num_scal_dev); return 0; } switch (rio_table_hdr->version) { default: pr_warn("Invalid Rio Grande Table Version: %d\n", rio_table_hdr->version); return 0; case 2: scal_detail_size = 11; rio_detail_size = 13; break; case 3: scal_detail_size = 12; rio_detail_size = 15; break; } ptr = (unsigned long)rio_table_hdr + 3; for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) scal_devs[i] = (struct scal_detail *)ptr; for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) rio_devs[i] = (struct rio_detail *)ptr; return 1; } void setup_summit(void) { unsigned long ptr; unsigned short offset; int i, next_wpeg, next_bus = 0; /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */ ptr = get_bios_ebda(); ptr = (unsigned long)phys_to_virt(ptr); rio_table_hdr = NULL; offset = 0x180; while (offset) { /* The block id is stored in the 2nd word */ if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) { /* set the pointer past the offset & block id */ rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); break; } /* The next offset is stored in the 1st word. 0 means no more */ offset = *((unsigned short *)(ptr + offset)); } if (!rio_table_hdr) { pr_err("Unable to locate Rio Grande Table in EBDA - bailing!\n"); return; } if (!build_detail_arrays()) return; /* The first Winnipeg we're looking for has an index of 0 */ next_wpeg = 0; do { for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) { /* It's the Winnipeg we're looking for! */ next_bus = setup_pci_node_map_for_wpeg(i, next_bus); next_wpeg++; break; } } /* * If we go through all Rio devices and don't find one with * the next index, it means we've found all the Winnipegs, * and thus all the PCI buses. 
*/ if (i == rio_table_hdr->num_rio_dev) next_wpeg = 0; } while (next_wpeg != 0); } #endif static struct apic apic_summit = { .name = "summit", .probe = probe_summit, .acpi_madt_oem_check = summit_acpi_madt_oem_check, .apic_id_valid = default_apic_id_valid, .apic_id_registered = summit_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, .target_cpus = summit_target_cpus, .disable_esr = 1, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = summit_check_apicid_used, .check_apicid_present = summit_check_apicid_present, .vector_allocation_domain = flat_vector_allocation_domain, .init_apic_ldr = summit_init_apic_ldr, .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = summit_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = summit_phys_pkg_id, .mps_oem_check = summit_mps_oem_check, .get_apic_id = summit_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, .send_IPI_mask = summit_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = summit_send_IPI_allbutself, .send_IPI_all = summit_send_IPI_all, .send_IPI_self = default_send_IPI_self, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi_write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, 
.x86_32_early_logical_apicid = summit_early_logical_apicid, }; apic_driver(apic_summit);
gpl-2.0
Crossbones/crossbones_kernel
drivers/video/backlight/adp5520_bl.c
2786
10562
/* * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs * * Copyright 2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/mfd/adp5520.h> #include <linux/slab.h> struct adp5520_bl { struct device *master; struct adp5520_backlight_platform_data *pdata; struct mutex lock; unsigned long cached_daylight_max; int id; int current_brightness; }; static int adp5520_bl_set(struct backlight_device *bl, int brightness) { struct adp5520_bl *data = bl_get_data(bl); struct device *master = data->master; int ret = 0; if (data->pdata->en_ambl_sens) { if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) { /* Disable Ambient Light auto adjust */ ret |= adp5520_clr_bits(master, ADP5520_BL_CONTROL, ADP5520_BL_AUTO_ADJ); ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness); } else { /* * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust * restore daylight l3 sysfs brightness */ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, data->cached_daylight_max); ret |= adp5520_set_bits(master, ADP5520_BL_CONTROL, ADP5520_BL_AUTO_ADJ); } } else { ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness); } if (data->current_brightness && brightness == 0) ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS, ADP5520_DIM_EN); else if (data->current_brightness == 0 && brightness) ret |= adp5520_clr_bits(master, ADP5520_MODE_STATUS, ADP5520_DIM_EN); if (!ret) data->current_brightness = brightness; return ret; } static int adp5520_bl_update_status(struct backlight_device *bl) { int brightness = bl->props.brightness; if (bl->props.power != FB_BLANK_UNBLANK) brightness = 0; if (bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; return adp5520_bl_set(bl, brightness); } static int adp5520_bl_get_brightness(struct backlight_device *bl) { struct adp5520_bl *data = bl_get_data(bl); int error; uint8_t 
reg_val; error = adp5520_read(data->master, ADP5520_BL_VALUE, &reg_val); return error ? data->current_brightness : reg_val; } static const struct backlight_ops adp5520_bl_ops = { .update_status = adp5520_bl_update_status, .get_brightness = adp5520_bl_get_brightness, }; static int adp5520_bl_setup(struct backlight_device *bl) { struct adp5520_bl *data = bl_get_data(bl); struct device *master = data->master; struct adp5520_backlight_platform_data *pdata = data->pdata; int ret = 0; ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, pdata->l1_daylight_max); ret |= adp5520_write(master, ADP5520_DAYLIGHT_DIM, pdata->l1_daylight_dim); if (pdata->en_ambl_sens) { data->cached_daylight_max = pdata->l1_daylight_max; ret |= adp5520_write(master, ADP5520_OFFICE_MAX, pdata->l2_office_max); ret |= adp5520_write(master, ADP5520_OFFICE_DIM, pdata->l2_office_dim); ret |= adp5520_write(master, ADP5520_DARK_MAX, pdata->l3_dark_max); ret |= adp5520_write(master, ADP5520_DARK_DIM, pdata->l3_dark_dim); ret |= adp5520_write(master, ADP5520_L2_TRIP, pdata->l2_trip); ret |= adp5520_write(master, ADP5520_L2_HYS, pdata->l2_hyst); ret |= adp5520_write(master, ADP5520_L3_TRIP, pdata->l3_trip); ret |= adp5520_write(master, ADP5520_L3_HYS, pdata->l3_hyst); ret |= adp5520_write(master, ADP5520_ALS_CMPR_CFG, ALS_CMPR_CFG_VAL(pdata->abml_filt, ADP5520_L3_EN)); } ret |= adp5520_write(master, ADP5520_BL_CONTROL, BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens)); ret |= adp5520_write(master, ADP5520_BL_FADE, FADE_VAL(pdata->fade_in, pdata->fade_out)); ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS, ADP5520_BL_EN | ADP5520_DIM_EN); return ret; } static ssize_t adp5520_show(struct device *dev, char *buf, int reg) { struct adp5520_bl *data = dev_get_drvdata(dev); int error; uint8_t reg_val; mutex_lock(&data->lock); error = adp5520_read(data->master, reg, &reg_val); mutex_unlock(&data->lock); return sprintf(buf, "%u\n", reg_val); } static ssize_t adp5520_store(struct device *dev, const char *buf, 
size_t count, int reg) { struct adp5520_bl *data = dev_get_drvdata(dev); unsigned long val; int ret; ret = strict_strtoul(buf, 10, &val); if (ret) return ret; mutex_lock(&data->lock); adp5520_write(data->master, reg, val); mutex_unlock(&data->lock); return count; } static ssize_t adp5520_bl_dark_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DARK_MAX); } static ssize_t adp5520_bl_dark_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_DARK_MAX); } static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show, adp5520_bl_dark_max_store); static ssize_t adp5520_bl_office_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_OFFICE_MAX); } static ssize_t adp5520_bl_office_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_OFFICE_MAX); } static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show, adp5520_bl_office_max_store); static ssize_t adp5520_bl_daylight_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DAYLIGHT_MAX); } static ssize_t adp5520_bl_daylight_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adp5520_bl *data = dev_get_drvdata(dev); int ret; ret = strict_strtoul(buf, 10, &data->cached_daylight_max); if (ret < 0) return ret; return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX); } static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show, adp5520_bl_daylight_max_store); static ssize_t adp5520_bl_dark_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DARK_DIM); } static ssize_t adp5520_bl_dark_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { return adp5520_store(dev, buf, count, ADP5520_DARK_DIM); } static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show, adp5520_bl_dark_dim_store); static ssize_t adp5520_bl_office_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_OFFICE_DIM); } static ssize_t adp5520_bl_office_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_OFFICE_DIM); } static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show, adp5520_bl_office_dim_store); static ssize_t adp5520_bl_daylight_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp5520_show(dev, buf, ADP5520_DAYLIGHT_DIM); } static ssize_t adp5520_bl_daylight_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_DIM); } static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show, adp5520_bl_daylight_dim_store); static struct attribute *adp5520_bl_attributes[] = { &dev_attr_dark_max.attr, &dev_attr_dark_dim.attr, &dev_attr_office_max.attr, &dev_attr_office_dim.attr, &dev_attr_daylight_max.attr, &dev_attr_daylight_dim.attr, NULL }; static const struct attribute_group adp5520_bl_attr_group = { .attrs = adp5520_bl_attributes, }; static int __devinit adp5520_bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct backlight_device *bl; struct adp5520_bl *data; int ret = 0; data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->master = pdev->dev.parent; data->pdata = pdev->dev.platform_data; if (data->pdata == NULL) { dev_err(&pdev->dev, "missing platform data\n"); kfree(data); return -ENODEV; } data->id = pdev->id; data->current_brightness = 0; mutex_init(&data->lock); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 
ADP5020_MAX_BRIGHTNESS; bl = backlight_device_register(pdev->name, data->master, data, &adp5520_bl_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } bl->props.brightness = ADP5020_MAX_BRIGHTNESS; if (data->pdata->en_ambl_sens) ret = sysfs_create_group(&bl->dev.kobj, &adp5520_bl_attr_group); if (ret) { dev_err(&pdev->dev, "failed to register sysfs\n"); backlight_device_unregister(bl); kfree(data); } platform_set_drvdata(pdev, bl); ret |= adp5520_bl_setup(bl); backlight_update_status(bl); return ret; } static int __devexit adp5520_bl_remove(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); struct adp5520_bl *data = bl_get_data(bl); adp5520_clr_bits(data->master, ADP5520_MODE_STATUS, ADP5520_BL_EN); if (data->pdata->en_ambl_sens) sysfs_remove_group(&bl->dev.kobj, &adp5520_bl_attr_group); backlight_device_unregister(bl); kfree(data); return 0; } #ifdef CONFIG_PM static int adp5520_bl_suspend(struct platform_device *pdev, pm_message_t state) { struct backlight_device *bl = platform_get_drvdata(pdev); return adp5520_bl_set(bl, 0); } static int adp5520_bl_resume(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); backlight_update_status(bl); return 0; } #else #define adp5520_bl_suspend NULL #define adp5520_bl_resume NULL #endif static struct platform_driver adp5520_bl_driver = { .driver = { .name = "adp5520-backlight", .owner = THIS_MODULE, }, .probe = adp5520_bl_probe, .remove = __devexit_p(adp5520_bl_remove), .suspend = adp5520_bl_suspend, .resume = adp5520_bl_resume, }; static int __init adp5520_bl_init(void) { return platform_driver_register(&adp5520_bl_driver); } module_init(adp5520_bl_init); static void __exit adp5520_bl_exit(void) { platform_driver_unregister(&adp5520_bl_driver); } module_exit(adp5520_bl_exit); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADP5520(01) 
Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:adp5520-backlight");
gpl-2.0
smac0628/kernel-htc-m8-gpe-stock
kernel/time/clockevents.c
3554
11552
/* * linux/kernel/time/clockevents.c * * This file contains functions which manage clock event devices. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner * * This code is licenced under the GPL version 2. For details see * kernel-base/COPYING. */ #include <linux/clockchips.h> #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/smp.h> #include "tick-internal.h" /* The registered clock event devices */ static LIST_HEAD(clockevent_devices); static LIST_HEAD(clockevents_released); /* Notification for clock events */ static RAW_NOTIFIER_HEAD(clockevents_chain); /* Protection for the above */ static DEFINE_RAW_SPINLOCK(clockevents_lock); /** * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds * @latch: value to convert * @evt: pointer to clock event device descriptor * * Math helper, returns latch value converted to nanoseconds (bound checked) */ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) { u64 clc = (u64) latch << evt->shift; if (unlikely(!evt->mult)) { evt->mult = 1; WARN_ON(1); } do_div(clc, evt->mult); if (clc < 1000) clc = 1000; if (clc > KTIME_MAX) clc = KTIME_MAX; return clc; } EXPORT_SYMBOL_GPL(clockevent_delta2ns); /** * clockevents_set_mode - set the operating mode of a clock event device * @dev: device to modify * @mode: new mode * * Must be called with interrupts disabled ! 
*/ void clockevents_set_mode(struct clock_event_device *dev, enum clock_event_mode mode) { if (dev->mode != mode) { dev->set_mode(mode, dev); dev->mode = mode; /* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: */ if (mode == CLOCK_EVT_MODE_ONESHOT) { if (unlikely(!dev->mult)) { dev->mult = 1; WARN_ON(1); } } } } /** * clockevents_shutdown - shutdown the device and clear next_event * @dev: device to shutdown */ void clockevents_shutdown(struct clock_event_device *dev) { clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); dev->next_event.tv64 = KTIME_MAX; } #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST /* Limit min_delta to a jiffie */ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) /** * clockevents_increase_min_delta - raise minimum delta of a clock event device * @dev: device to increase the minimum delta * * Returns 0 on success, -ETIME when the minimum delta reached the limit. */ static int clockevents_increase_min_delta(struct clock_event_device *dev) { /* Nothing to do if we already reached the limit */ if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); dev->next_event.tv64 = KTIME_MAX; return -ETIME; } if (dev->min_delta_ns < 5000) dev->min_delta_ns = 5000; else dev->min_delta_ns += dev->min_delta_ns >> 1; if (dev->min_delta_ns > MIN_DELTA_LIMIT) dev->min_delta_ns = MIN_DELTA_LIMIT; printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", dev->name ? dev->name : "?", (unsigned long long) dev->min_delta_ns); return 0; } /** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed. 
*/ static int clockevents_program_min_delta(struct clock_event_device *dev) { unsigned long long clc; int64_t delta; int i; for (i = 0;;) { delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) return 0; dev->retries++; clc = ((unsigned long long) delta * dev->mult) >> dev->shift; if (dev->set_next_event((unsigned long) clc, dev) == 0) return 0; if (++i > 2) { /* * We tried 3 times to program the device with the * given min_delta_ns. Try to increase the minimum * delta, if that fails as well get out of here. */ if (clockevents_increase_min_delta(dev)) return -ETIME; i = 0; } } } #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ /** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed. */ static int clockevents_program_min_delta(struct clock_event_device *dev) { unsigned long long clc; int64_t delta; delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) return 0; dev->retries++; clc = ((unsigned long long) delta * dev->mult) >> dev->shift; return dev->set_next_event((unsigned long) clc, dev); } #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ /** * clockevents_program_event - Reprogram the clock event device. * @dev: device to program * @expires: absolute expiry time (monotonic clock) * @force: program minimum delay if expires can not be set * * Returns 0 on success, -ETIME when the event is in the past. */ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force) { unsigned long long clc; int64_t delta; int rc; if (unlikely(expires.tv64 < 0)) { WARN_ON_ONCE(1); return -ETIME; } dev->next_event = expires; if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) return 0; /* Shortcut for clockevent devices that can deal with ktime. 
*/ if (dev->features & CLOCK_EVT_FEAT_KTIME) return dev->set_next_ktime(expires, dev); delta = ktime_to_ns(ktime_sub(expires, ktime_get())); if (delta <= 0) return force ? clockevents_program_min_delta(dev) : -ETIME; delta = min(delta, (int64_t) dev->max_delta_ns); delta = max(delta, (int64_t) dev->min_delta_ns); clc = ((unsigned long long) delta * dev->mult) >> dev->shift; rc = dev->set_next_event((unsigned long) clc, dev); return (rc && force) ? clockevents_program_min_delta(dev) : rc; } /** * clockevents_register_notifier - register a clock events change listener */ int clockevents_register_notifier(struct notifier_block *nb) { unsigned long flags; int ret; raw_spin_lock_irqsave(&clockevents_lock, flags); ret = raw_notifier_chain_register(&clockevents_chain, nb); raw_spin_unlock_irqrestore(&clockevents_lock, flags); return ret; } /* * Notify about a clock event change. Called with clockevents_lock * held. */ static void clockevents_do_notify(unsigned long reason, void *dev) { raw_notifier_call_chain(&clockevents_chain, reason, dev); } /* * Called after a notify add to make devices available which were * released from the notifier call. 
*/ static void clockevents_notify_released(void) { struct clock_event_device *dev; while (!list_empty(&clockevents_released)) { dev = list_entry(clockevents_released.next, struct clock_event_device, list); list_del(&dev->list); list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); } } /** * clockevents_register_device - register a clock event device * @dev: device to register */ void clockevents_register_device(struct clock_event_device *dev) { unsigned long flags; BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); if (!dev->cpumask) { WARN_ON(num_possible_cpus() > 1); dev->cpumask = cpumask_of(smp_processor_id()); } raw_spin_lock_irqsave(&clockevents_lock, flags); list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); clockevents_notify_released(); raw_spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_register_device); static void clockevents_config(struct clock_event_device *dev, u32 freq) { u64 sec; if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return; /* * Calculate the maximum number of seconds we can sleep. Limit * to 10 minutes for hardware which can program more than * 32bit ticks so we still get reasonable conversion values. */ sec = dev->max_delta_ticks; do_div(sec, freq); if (!sec) sec = 1; else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) sec = 600; clockevents_calc_mult_shift(dev, freq, sec); dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); } /** * clockevents_config_and_register - Configure and register a clock event device * @dev: device to register * @freq: The clock frequency * @min_delta: The minimum clock ticks to program in oneshot mode * @max_delta: The maximum clock ticks to program in oneshot mode * * min/max_delta can be 0 for devices which do not support oneshot mode. 
*/ void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta) { dev->min_delta_ticks = min_delta; dev->max_delta_ticks = max_delta; clockevents_config(dev, freq); clockevents_register_device(dev); } /** * clockevents_update_freq - Update frequency and reprogram a clock event device. * @dev: device to modify * @freq: new device frequency * * Reconfigure and reprogram a clock event device in oneshot * mode. Must be called on the cpu for which the device delivers per * cpu timer events with interrupts disabled! Returns 0 on success, * -ETIME when the event is in the past. */ int clockevents_update_freq(struct clock_event_device *dev, u32 freq) { clockevents_config(dev, freq); if (dev->mode != CLOCK_EVT_MODE_ONESHOT) return 0; return clockevents_program_event(dev, dev->next_event, false); } /* * Noop handler when we shut down an event device */ void clockevents_handle_noop(struct clock_event_device *dev) { } /** * clockevents_exchange_device - release and request clock devices * @old: device to release (can be NULL) * @new: device to request (can be NULL) * * Called from the notifier chain. clockevents_lock is held already */ void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new) { unsigned long flags; local_irq_save(flags); /* * Caller releases a clock event device. We queue it into the * released list and do a notify add later. 
*/ if (old) { clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); list_del(&old->list); list_add(&old->list, &clockevents_released); } if (new) { BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); clockevents_shutdown(new); } local_irq_restore(flags); } #ifdef CONFIG_GENERIC_CLOCKEVENTS /** * clockevents_notify - notification about relevant events */ void clockevents_notify(unsigned long reason, void *arg) { struct clock_event_device *dev, *tmp; unsigned long flags; int cpu; raw_spin_lock_irqsave(&clockevents_lock, flags); clockevents_do_notify(reason, arg); switch (reason) { case CLOCK_EVT_NOTIFY_CPU_DEAD: /* * Unregister the clock event devices which were * released from the users in the notify chain. */ list_for_each_entry_safe(dev, tmp, &clockevents_released, list) list_del(&dev->list); /* * Now check whether the CPU has left unused per cpu devices */ cpu = *((int *)arg); list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { if (cpumask_test_cpu(cpu, dev->cpumask) && cpumask_weight(dev->cpumask) == 1 && !tick_is_broadcast_device(dev)) { BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); list_del(&dev->list); } } break; default: break; } raw_spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_notify); #endif
gpl-2.0
arter97/odroid
drivers/s390/char/keyboard.c
3810
12561
/* * ebcdic keycode functions for s390 console drivers * * S390 version * Copyright IBM Corp. 2003 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sysrq.h> #include <linux/consolemap.h> #include <linux/kbd_kern.h> #include <linux/kbd_diacr.h> #include <asm/uaccess.h> #include "keyboard.h" /* * Handler Tables. */ #define K_HANDLERS\ k_self, k_fn, k_spec, k_ignore,\ k_dead, k_ignore, k_ignore, k_ignore,\ k_ignore, k_ignore, k_ignore, k_ignore,\ k_ignore, k_ignore, k_ignore, k_ignore typedef void (k_handler_fn)(struct kbd_data *, unsigned char); static k_handler_fn K_HANDLERS; static k_handler_fn *k_handler[16] = { K_HANDLERS }; /* maximum values each key_handler can handle */ static const int kbd_max_vals[] = { 255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0, NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals); static unsigned char ret_diacr[NR_DEAD] = { '`', '\'', '^', '~', '"', ',' }; /* * Alloc/free of kbd_data structures. 
*/ struct kbd_data * kbd_alloc(void) { struct kbd_data *kbd; int i; kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); if (!kbd) goto out; kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); if (!kbd->key_maps) goto out_kbd; for (i = 0; i < ARRAY_SIZE(key_maps); i++) { if (key_maps[i]) { kbd->key_maps[i] = kmemdup(key_maps[i], sizeof(u_short) * NR_KEYS, GFP_KERNEL); if (!kbd->key_maps[i]) goto out_maps; } } kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); if (!kbd->func_table) goto out_maps; for (i = 0; i < ARRAY_SIZE(func_table); i++) { if (func_table[i]) { kbd->func_table[i] = kstrdup(func_table[i], GFP_KERNEL); if (!kbd->func_table[i]) goto out_func; } } kbd->fn_handler = kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); if (!kbd->fn_handler) goto out_func; kbd->accent_table = kmemdup(accent_table, sizeof(struct kbdiacruc) * MAX_DIACR, GFP_KERNEL); if (!kbd->accent_table) goto out_fn_handler; kbd->accent_table_size = accent_table_size; return kbd; out_fn_handler: kfree(kbd->fn_handler); out_func: for (i = 0; i < ARRAY_SIZE(func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); out_maps: for (i = 0; i < ARRAY_SIZE(key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); out_kbd: kfree(kbd); out: return NULL; } void kbd_free(struct kbd_data *kbd) { int i; kfree(kbd->accent_table); kfree(kbd->fn_handler); for (i = 0; i < ARRAY_SIZE(func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); for (i = 0; i < ARRAY_SIZE(key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); kfree(kbd); } /* * Generate ascii -> ebcdic translation table from kbd_data. 
*/ void kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) { unsigned short *keymap, keysym; int i, j, k; memset(ascebc, 0x40, 256); for (i = 0; i < ARRAY_SIZE(key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; for (j = 0; j < NR_KEYS; j++) { k = ((i & 1) << 7) + j; keysym = keymap[j]; if (KTYP(keysym) == (KT_LATIN | 0xf0) || KTYP(keysym) == (KT_LETTER | 0xf0)) ascebc[KVAL(keysym)] = k; else if (KTYP(keysym) == (KT_DEAD | 0xf0)) ascebc[ret_diacr[KVAL(keysym)]] = k; } } } #if 0 /* * Generate ebcdic -> ascii translation table from kbd_data. */ void kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) { unsigned short *keymap, keysym; int i, j, k; memset(ebcasc, ' ', 256); for (i = 0; i < ARRAY_SIZE(key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; for (j = 0; j < NR_KEYS; j++) { keysym = keymap[j]; k = ((i & 1) << 7) + j; if (KTYP(keysym) == (KT_LATIN | 0xf0) || KTYP(keysym) == (KT_LETTER | 0xf0)) ebcasc[k] = KVAL(keysym); else if (KTYP(keysym) == (KT_DEAD | 0xf0)) ebcasc[k] = ret_diacr[KVAL(keysym)]; } } } #endif /* * We have a combining character DIACR here, followed by the character CH. * If the combination occurs in the table, return the corresponding value. * Otherwise, if CH is a space or equals DIACR, return DIACR. * Otherwise, conclude that DIACR was not combining after all, * queue it and return CH. */ static unsigned int handle_diacr(struct kbd_data *kbd, unsigned int ch) { int i, d; d = kbd->diacr; kbd->diacr = 0; for (i = 0; i < kbd->accent_table_size; i++) { if (kbd->accent_table[i].diacr == d && kbd->accent_table[i].base == ch) return kbd->accent_table[i].result; } if (ch == ' ' || ch == d) return d; kbd_put_queue(kbd->port, d); return ch; } /* * Handle dead key. */ static void k_dead(struct kbd_data *kbd, unsigned char value) { value = ret_diacr[value]; kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value); } /* * Normal character handler. 
*/ static void k_self(struct kbd_data *kbd, unsigned char value) { if (kbd->diacr) value = handle_diacr(kbd, value); kbd_put_queue(kbd->port, value); } /* * Special key handlers */ static void k_ignore(struct kbd_data *kbd, unsigned char value) { } /* * Function key handler. */ static void k_fn(struct kbd_data *kbd, unsigned char value) { if (kbd->func_table[value]) kbd_puts_queue(kbd->port, kbd->func_table[value]); } static void k_spec(struct kbd_data *kbd, unsigned char value) { if (value >= NR_FN_HANDLER) return; if (kbd->fn_handler[value]) kbd->fn_handler[value](kbd); } /* * Put utf8 character to tty flip buffer. * UTF-8 is defined for words of up to 31 bits, * but we need only 16 bits here */ static void to_utf8(struct tty_port *port, ushort c) { if (c < 0x80) /* 0******* */ kbd_put_queue(port, c); else if (c < 0x800) { /* 110***** 10****** */ kbd_put_queue(port, 0xc0 | (c >> 6)); kbd_put_queue(port, 0x80 | (c & 0x3f)); } else { /* 1110**** 10****** 10****** */ kbd_put_queue(port, 0xe0 | (c >> 12)); kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f)); kbd_put_queue(port, 0x80 | (c & 0x3f)); } } /* * Process keycode. */ void kbd_keycode(struct kbd_data *kbd, unsigned int keycode) { unsigned short keysym; unsigned char type, value; if (!kbd) return; if (keycode >= 384) keysym = kbd->key_maps[5][keycode - 384]; else if (keycode >= 256) keysym = kbd->key_maps[4][keycode - 256]; else if (keycode >= 128) keysym = kbd->key_maps[1][keycode - 128]; else keysym = kbd->key_maps[0][keycode]; type = KTYP(keysym); if (type >= 0xf0) { type -= 0xf0; if (type == KT_LETTER) type = KT_LATIN; value = KVAL(keysym); #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ if (kbd->sysrq) { if (kbd->sysrq == K(KT_LATIN, '-')) { kbd->sysrq = 0; handle_sysrq(value); return; } if (value == '-') { kbd->sysrq = K(KT_LATIN, '-'); return; } /* Incomplete sysrq sequence. 
*/ (*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq)); kbd->sysrq = 0; } else if ((type == KT_LATIN && value == '^') || (type == KT_DEAD && ret_diacr[value] == '^')) { kbd->sysrq = K(type, value); return; } #endif (*k_handler[type])(kbd, value); } else to_utf8(kbd->port, keysym); } /* * Ioctl stuff. */ static int do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, int cmd, int perm) { struct kbentry tmp; ushort *key_map, val, ov; if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; #if NR_KEYS < 256 if (tmp.kb_index >= NR_KEYS) return -EINVAL; #endif #if MAX_NR_KEYMAPS < 256 if (tmp.kb_table >= MAX_NR_KEYMAPS) return -EINVAL; #endif switch (cmd) { case KDGKBENT: key_map = kbd->key_maps[tmp.kb_table]; if (key_map) { val = U(key_map[tmp.kb_index]); if (KTYP(val) >= KBD_NR_TYPES) val = K_HOLE; } else val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP); return put_user(val, &user_kbe->kb_value); case KDSKBENT: if (!perm) return -EPERM; if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) { /* disallocate map */ key_map = kbd->key_maps[tmp.kb_table]; if (key_map) { kbd->key_maps[tmp.kb_table] = NULL; kfree(key_map); } break; } if (KTYP(tmp.kb_value) >= KBD_NR_TYPES) return -EINVAL; if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)]) return -EINVAL; if (!(key_map = kbd->key_maps[tmp.kb_table])) { int j; key_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!key_map) return -ENOMEM; kbd->key_maps[tmp.kb_table] = key_map; for (j = 0; j < NR_KEYS; j++) key_map[j] = U(K_HOLE); } ov = U(key_map[tmp.kb_index]); if (tmp.kb_value == ov) break; /* nothing to do */ /* * Attention Key. */ if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) && !capable(CAP_SYS_ADMIN)) return -EPERM; key_map[tmp.kb_index] = U(tmp.kb_value); break; } return 0; } static int do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs, int cmd, int perm) { unsigned char kb_func; char *p; int len; /* Get u_kbs->kb_func. 
*/ if (get_user(kb_func, &u_kbs->kb_func)) return -EFAULT; #if MAX_NR_FUNC < 256 if (kb_func >= MAX_NR_FUNC) return -EINVAL; #endif switch (cmd) { case KDGKBSENT: p = kbd->func_table[kb_func]; if (p) { len = strlen(p); if (len >= sizeof(u_kbs->kb_string)) len = sizeof(u_kbs->kb_string) - 1; if (copy_to_user(u_kbs->kb_string, p, len)) return -EFAULT; } else len = 0; if (put_user('\0', u_kbs->kb_string + len)) return -EFAULT; break; case KDSKBSENT: if (!perm) return -EPERM; len = strnlen_user(u_kbs->kb_string, sizeof(u_kbs->kb_string) - 1); if (!len) return -EFAULT; if (len > sizeof(u_kbs->kb_string) - 1) return -EINVAL; p = kmalloc(len + 1, GFP_KERNEL); if (!p) return -ENOMEM; if (copy_from_user(p, u_kbs->kb_string, len)) { kfree(p); return -EFAULT; } p[len] = 0; kfree(kbd->func_table[kb_func]); kbd->func_table[kb_func] = p; break; } return 0; } int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg) { struct tty_struct *tty; void __user *argp; unsigned int ct; int perm; argp = (void __user *)arg; /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. 
*/ tty = tty_port_tty_get(kbd->port); /* FIXME this test is pretty racy */ perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG); tty_kref_put(tty); switch (cmd) { case KDGKBTYPE: return put_user(KB_101, (char __user *)argp); case KDGKBENT: case KDSKBENT: return do_kdsk_ioctl(kbd, argp, cmd, perm); case KDGKBSENT: case KDSKBSENT: return do_kdgkb_ioctl(kbd, argp, cmd, perm); case KDGKBDIACR: { struct kbdiacrs __user *a = argp; struct kbdiacr diacr; int i; if (put_user(kbd->accent_table_size, &a->kb_cnt)) return -EFAULT; for (i = 0; i < kbd->accent_table_size; i++) { diacr.diacr = kbd->accent_table[i].diacr; diacr.base = kbd->accent_table[i].base; diacr.result = kbd->accent_table[i].result; if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) return -EFAULT; } return 0; } case KDGKBDIACRUC: { struct kbdiacrsuc __user *a = argp; ct = kbd->accent_table_size; if (put_user(ct, &a->kb_cnt)) return -EFAULT; if (copy_to_user(a->kbdiacruc, kbd->accent_table, ct * sizeof(struct kbdiacruc))) return -EFAULT; return 0; } case KDSKBDIACR: { struct kbdiacrs __user *a = argp; struct kbdiacr diacr; int i; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; kbd->accent_table_size = ct; for (i = 0; i < ct; i++) { if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) return -EFAULT; kbd->accent_table[i].diacr = diacr.diacr; kbd->accent_table[i].base = diacr.base; kbd->accent_table[i].result = diacr.result; } return 0; } case KDSKBDIACRUC: { struct kbdiacrsuc __user *a = argp; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; kbd->accent_table_size = ct; if (copy_from_user(kbd->accent_table, a->kbdiacruc, ct * sizeof(struct kbdiacruc))) return -EFAULT; return 0; } default: return -ENOIOCTLCMD; } } EXPORT_SYMBOL(kbd_ioctl); EXPORT_SYMBOL(kbd_ascebc); EXPORT_SYMBOL(kbd_free); EXPORT_SYMBOL(kbd_alloc); EXPORT_SYMBOL(kbd_keycode);
gpl-2.0
percy-g2/android_kernel_motorola_msm8610
drivers/net/wireless/airo_cs.c
5090
6788
/*====================================================================== Aironet driver for 4500 and 4800 series cards This code is released under both the GPL version 2 and BSD licenses. Either license may be used. The respective licenses are found at the end of this file. This code was developed by Benjamin Reed <breed@users.sourceforge.net> including portions of which come from the Aironet PC4500 Developer's Reference Manual and used with permission. Copyright (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use code in the Developer's manual was granted for this driver by Aironet. In addition this module was derived from dummy_cs. The initial developer of dummy_cs is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. ======================================================================*/ #ifdef __IN_PCMCIA_PACKAGE__ #include <pcmcia/k_compat.h> #endif #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <linux/io.h> #include "airo.h" /*====================================================================*/ MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet " "cards. 
This is the module that links the PCMCIA card " "with the airo module."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); /*====================================================================*/ static int airo_config(struct pcmcia_device *link); static void airo_release(struct pcmcia_device *link); static void airo_detach(struct pcmcia_device *p_dev); typedef struct local_info_t { struct net_device *eth_dev; } local_info_t; static int airo_probe(struct pcmcia_device *p_dev) { local_info_t *local; dev_dbg(&p_dev->dev, "airo_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); if (!local) { printk(KERN_ERR "airo_cs: no memory for new device\n"); return -ENOMEM; } p_dev->priv = local; return airo_config(p_dev); } /* airo_attach */ static void airo_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "airo_detach\n"); airo_release(link); if (((local_info_t *)link->priv)->eth_dev) { stop_airo_card(((local_info_t *)link->priv)->eth_dev, 0); } ((local_info_t *)link->priv)->eth_dev = NULL; kfree(link->priv); } /* airo_detach */ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); } static int airo_config(struct pcmcia_device *link) { local_info_t *dev; int ret; dev = link->priv; dev_dbg(&link->dev, "airo_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, airo_cs_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; ((local_info_t *)link->priv)->eth_dev = init_airo_card(link->irq, link->resource[0]->start, 1, &link->dev); if (!((local_info_t *)link->priv)->eth_dev) goto failed; return 0; failed: airo_release(link); return -ENODEV; } /* airo_config */ static void airo_release(struct 
pcmcia_device *link) { dev_dbg(&link->dev, "airo_release\n"); pcmcia_disable_device(link); } static int airo_suspend(struct pcmcia_device *link) { local_info_t *local = link->priv; netif_device_detach(local->eth_dev); return 0; } static int airo_resume(struct pcmcia_device *link) { local_info_t *local = link->priv; if (link->open) { reset_airo_card(local->eth_dev); netif_device_attach(local->eth_dev); } return 0; } static const struct pcmcia_device_id airo_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a), PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005), PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007), PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0007), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, airo_ids); static struct pcmcia_driver airo_driver = { .owner = THIS_MODULE, .name = "airo_cs", .probe = airo_probe, .remove = airo_detach, .id_table = airo_ids, .suspend = airo_suspend, .resume = airo_resume, }; static int __init airo_cs_init(void) { return pcmcia_register_driver(&airo_driver); } static void __exit airo_cs_cleanup(void) { pcmcia_unregister_driver(&airo_driver); } /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. In addition: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ module_init(airo_cs_init); module_exit(airo_cs_cleanup);
gpl-2.0
ak-67/kernel_mediatek_wiko
drivers/staging/tidspbridge/core/msg_sm.c
5090
15328
/* * msg_sm.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implements upper edge functions for Bridge message module. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/sync.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> /* ----------------------------------- Others */ #include <dspbridge/io_sm.h> /* ----------------------------------- This */ #include <_msg_sm.h> #include <dspbridge/dspmsg.h> /* ----------------------------------- Function Prototypes */ static int add_new_msg(struct list_head *msg_list); static void delete_msg_mgr(struct msg_mgr *hmsg_mgr); static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp); static void free_msg_list(struct list_head *msg_list); /* * ======== bridge_msg_create ======== * Create an object to manage message queues. Only one of these objects * can exist per device object. 
*/ int bridge_msg_create(struct msg_mgr **msg_man, struct dev_object *hdev_obj, msg_onexit msg_callback) { struct msg_mgr *msg_mgr_obj; struct io_mgr *hio_mgr; int status = 0; if (!msg_man || !msg_callback || !hdev_obj) return -EFAULT; dev_get_io_mgr(hdev_obj, &hio_mgr); if (!hio_mgr) return -EFAULT; *msg_man = NULL; /* Allocate msg_ctrl manager object */ msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL); if (!msg_mgr_obj) return -ENOMEM; msg_mgr_obj->on_exit = msg_callback; msg_mgr_obj->iomgr = hio_mgr; /* List of MSG_QUEUEs */ INIT_LIST_HEAD(&msg_mgr_obj->queue_list); /* * Queues of message frames for messages to the DSP. Message * frames will only be added to the free queue when a * msg_queue object is created. */ INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list); INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list); spin_lock_init(&msg_mgr_obj->msg_mgr_lock); /* * Create an event to be used by bridge_msg_put() in waiting * for an available free frame from the message manager. */ msg_mgr_obj->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); if (!msg_mgr_obj->sync_event) { kfree(msg_mgr_obj); return -ENOMEM; } sync_init_event(msg_mgr_obj->sync_event); *msg_man = msg_mgr_obj; return status; } /* * ======== bridge_msg_create_queue ======== * Create a msg_queue for sending/receiving messages to/from a node * on the DSP. 
*/ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq, u32 msgq_id, u32 max_msgs, void *arg) { u32 i; u32 num_allocated = 0; struct msg_queue *msg_q; int status = 0; if (!hmsg_mgr || msgq == NULL) return -EFAULT; *msgq = NULL; /* Allocate msg_queue object */ msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL); if (!msg_q) return -ENOMEM; msg_q->max_msgs = max_msgs; msg_q->msg_mgr = hmsg_mgr; msg_q->arg = arg; /* Node handle */ msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */ /* Queues of Message frames for messages from the DSP */ INIT_LIST_HEAD(&msg_q->msg_free_list); INIT_LIST_HEAD(&msg_q->msg_used_list); /* Create event that will be signalled when a message from * the DSP is available. */ msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); if (!msg_q->sync_event) { status = -ENOMEM; goto out_err; } sync_init_event(msg_q->sync_event); /* Create a notification list for message ready notification. */ msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (!msg_q->ntfy_obj) { status = -ENOMEM; goto out_err; } ntfy_init(msg_q->ntfy_obj); /* Create events that will be used to synchronize cleanup * when the object is deleted. sync_done will be set to * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack * will be set by the unblocked thread to signal that it * is unblocked and will no longer reference the object. 
*/ msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL); if (!msg_q->sync_done) { status = -ENOMEM; goto out_err; } sync_init_event(msg_q->sync_done); msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL); if (!msg_q->sync_done_ack) { status = -ENOMEM; goto out_err; } sync_init_event(msg_q->sync_done_ack); /* Enter critical section */ spin_lock_bh(&hmsg_mgr->msg_mgr_lock); /* Initialize message frames and put in appropriate queues */ for (i = 0; i < max_msgs && !status; i++) { status = add_new_msg(&hmsg_mgr->msg_free_list); if (!status) { num_allocated++; status = add_new_msg(&msg_q->msg_free_list); } } if (status) { spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); goto out_err; } list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list); *msgq = msg_q; /* Signal that free frames are now available */ if (!list_empty(&hmsg_mgr->msg_free_list)) sync_set_event(hmsg_mgr->sync_event); /* Exit critical section */ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return 0; out_err: delete_msg_queue(msg_q, num_allocated); return status; } /* * ======== bridge_msg_delete ======== * Delete a msg_ctrl manager allocated in bridge_msg_create(). */ void bridge_msg_delete(struct msg_mgr *hmsg_mgr) { if (hmsg_mgr) delete_msg_mgr(hmsg_mgr); } /* * ======== bridge_msg_delete_queue ======== * Delete a msg_ctrl queue allocated in bridge_msg_create_queue. */ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj) { struct msg_mgr *hmsg_mgr; u32 io_msg_pend; if (!msg_queue_obj || !msg_queue_obj->msg_mgr) return; hmsg_mgr = msg_queue_obj->msg_mgr; msg_queue_obj->done = true; /* Unblock all threads blocked in MSG_Get() or MSG_Put(). 
*/ io_msg_pend = msg_queue_obj->io_msg_pend; while (io_msg_pend) { /* Unblock thread */ sync_set_event(msg_queue_obj->sync_done); /* Wait for acknowledgement */ sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE); io_msg_pend = msg_queue_obj->io_msg_pend; } /* Remove message queue from hmsg_mgr->queue_list */ spin_lock_bh(&hmsg_mgr->msg_mgr_lock); list_del(&msg_queue_obj->list_elem); /* Free the message queue object */ delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs); if (list_empty(&hmsg_mgr->msg_free_list)) sync_reset_event(hmsg_mgr->sync_event); spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); } /* * ======== bridge_msg_get ======== * Get a message from a msg_ctrl queue. */ int bridge_msg_get(struct msg_queue *msg_queue_obj, struct dsp_msg *pmsg, u32 utimeout) { struct msg_frame *msg_frame_obj; struct msg_mgr *hmsg_mgr; struct sync_object *syncs[2]; u32 index; int status = 0; if (!msg_queue_obj || pmsg == NULL) return -ENOMEM; hmsg_mgr = msg_queue_obj->msg_mgr; spin_lock_bh(&hmsg_mgr->msg_mgr_lock); /* If a message is already there, get it */ if (!list_empty(&msg_queue_obj->msg_used_list)) { msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, struct msg_frame, list_elem); list_del(&msg_frame_obj->list_elem); *pmsg = msg_frame_obj->msg_data.msg; list_add_tail(&msg_frame_obj->list_elem, &msg_queue_obj->msg_free_list); if (list_empty(&msg_queue_obj->msg_used_list)) sync_reset_event(msg_queue_obj->sync_event); spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return 0; } if (msg_queue_obj->done) { spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return -EPERM; } msg_queue_obj->io_msg_pend++; spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); /* * Wait til message is available, timeout, or done. We don't * have to schedule the DPC, since the DSP will send messages * when they are available. 
*/ syncs[0] = msg_queue_obj->sync_event; syncs[1] = msg_queue_obj->sync_done; status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); spin_lock_bh(&hmsg_mgr->msg_mgr_lock); if (msg_queue_obj->done) { msg_queue_obj->io_msg_pend--; spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); /* * Signal that we're not going to access msg_queue_obj * anymore, so it can be deleted. */ sync_set_event(msg_queue_obj->sync_done_ack); return -EPERM; } if (!status && !list_empty(&msg_queue_obj->msg_used_list)) { /* Get msg from used list */ msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, struct msg_frame, list_elem); list_del(&msg_frame_obj->list_elem); /* Copy message into pmsg and put frame on the free list */ *pmsg = msg_frame_obj->msg_data.msg; list_add_tail(&msg_frame_obj->list_elem, &msg_queue_obj->msg_free_list); } msg_queue_obj->io_msg_pend--; /* Reset the event if there are still queued messages */ if (!list_empty(&msg_queue_obj->msg_used_list)) sync_set_event(msg_queue_obj->sync_event); spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return status; } /* * ======== bridge_msg_put ======== * Put a message onto a msg_ctrl queue. 
*/ int bridge_msg_put(struct msg_queue *msg_queue_obj, const struct dsp_msg *pmsg, u32 utimeout) { struct msg_frame *msg_frame_obj; struct msg_mgr *hmsg_mgr; struct sync_object *syncs[2]; u32 index; int status; if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr) return -EFAULT; hmsg_mgr = msg_queue_obj->msg_mgr; spin_lock_bh(&hmsg_mgr->msg_mgr_lock); /* If a message frame is available, use it */ if (!list_empty(&hmsg_mgr->msg_free_list)) { msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, struct msg_frame, list_elem); list_del(&msg_frame_obj->list_elem); msg_frame_obj->msg_data.msg = *pmsg; msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id; list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list); hmsg_mgr->msgs_pending++; if (list_empty(&hmsg_mgr->msg_free_list)) sync_reset_event(hmsg_mgr->sync_event); /* Release critical section before scheduling DPC */ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); /* Schedule a DPC, to do the actual data transfer: */ iosm_schedule(hmsg_mgr->iomgr); return 0; } if (msg_queue_obj->done) { spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return -EPERM; } msg_queue_obj->io_msg_pend++; spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); /* Wait til a free message frame is available, timeout, or done */ syncs[0] = hmsg_mgr->sync_event; syncs[1] = msg_queue_obj->sync_done; status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); if (status) return status; /* Enter critical section */ spin_lock_bh(&hmsg_mgr->msg_mgr_lock); if (msg_queue_obj->done) { msg_queue_obj->io_msg_pend--; /* Exit critical section */ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); /* * Signal that we're not going to access msg_queue_obj * anymore, so it can be deleted. 
*/ sync_set_event(msg_queue_obj->sync_done_ack); return -EPERM; } if (list_empty(&hmsg_mgr->msg_free_list)) { spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return -EFAULT; } /* Get msg from free list */ msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, struct msg_frame, list_elem); /* * Copy message into pmsg and put frame on the * used list. */ list_del(&msg_frame_obj->list_elem); msg_frame_obj->msg_data.msg = *pmsg; msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id; list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list); hmsg_mgr->msgs_pending++; /* * Schedule a DPC, to do the actual * data transfer. */ iosm_schedule(hmsg_mgr->iomgr); msg_queue_obj->io_msg_pend--; /* Reset event if there are still frames available */ if (!list_empty(&hmsg_mgr->msg_free_list)) sync_set_event(hmsg_mgr->sync_event); /* Exit critical section */ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); return 0; } /* * ======== bridge_msg_register_notify ======== */ int bridge_msg_register_notify(struct msg_queue *msg_queue_obj, u32 event_mask, u32 notify_type, struct dsp_notification *hnotification) { int status = 0; if (!msg_queue_obj || !hnotification) { status = -ENOMEM; goto func_end; } if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) { status = -EPERM; goto func_end; } if (notify_type != DSP_SIGNALEVENT) { status = -EBADR; goto func_end; } if (event_mask) status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification, event_mask, notify_type); else status = ntfy_unregister(msg_queue_obj->ntfy_obj, hnotification); if (status == -EINVAL) { /* Not registered. Ok, since we couldn't have known. Node * notifications are split between node state change handled * by NODE, and message ready handled by msg_ctrl. 
*/ status = 0; } func_end: return status; } /* * ======== bridge_msg_set_queue_id ======== */ void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id) { /* * A message queue must be created when a node is allocated, * so that node_register_notify() can be called before the node * is created. Since we don't know the node environment until the * node is created, we need this function to set msg_queue_obj->msgq_id * to the node environment, after the node is created. */ if (msg_queue_obj) msg_queue_obj->msgq_id = msgq_id; } /* * ======== add_new_msg ======== * Must be called in message manager critical section. */ static int add_new_msg(struct list_head *msg_list) { struct msg_frame *pmsg; pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC); if (!pmsg) return -ENOMEM; list_add_tail(&pmsg->list_elem, msg_list); return 0; } /* * ======== delete_msg_mgr ======== */ static void delete_msg_mgr(struct msg_mgr *hmsg_mgr) { if (!hmsg_mgr) return; /* FIXME: free elements from queue_list? 
*/ free_msg_list(&hmsg_mgr->msg_free_list); free_msg_list(&hmsg_mgr->msg_used_list); kfree(hmsg_mgr->sync_event); kfree(hmsg_mgr); } /* * ======== delete_msg_queue ======== */ static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp) { struct msg_mgr *hmsg_mgr; struct msg_frame *pmsg, *tmp; u32 i; if (!msg_queue_obj || !msg_queue_obj->msg_mgr) return; hmsg_mgr = msg_queue_obj->msg_mgr; /* Pull off num_to_dsp message frames from Msg manager and free */ i = 0; list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list, list_elem) { list_del(&pmsg->list_elem); kfree(pmsg); if (i++ >= num_to_dsp) break; } free_msg_list(&msg_queue_obj->msg_free_list); free_msg_list(&msg_queue_obj->msg_used_list); if (msg_queue_obj->ntfy_obj) { ntfy_delete(msg_queue_obj->ntfy_obj); kfree(msg_queue_obj->ntfy_obj); } kfree(msg_queue_obj->sync_event); kfree(msg_queue_obj->sync_done); kfree(msg_queue_obj->sync_done_ack); kfree(msg_queue_obj); } /* * ======== free_msg_list ======== */ static void free_msg_list(struct list_head *msg_list) { struct msg_frame *pmsg, *tmp; if (!msg_list) return; list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) { list_del(&pmsg->list_elem); kfree(pmsg); } }
gpl-2.0
GalaxyTab4/android_kernel_samsung_s3ve3g
fs/jfs/acl.c
5602
4110
/* * Copyright (C) International Business Machines Corp., 2002-2004 * Copyright (C) Andreas Gruenbacher, 2001 * Copyright (C) Linus Torvalds, 1991, 1992 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/posix_acl_xattr.h> #include "jfs_incore.h" #include "jfs_txnmgr.h" #include "jfs_xattr.h" #include "jfs_acl.h" struct posix_acl *jfs_get_acl(struct inode *inode, int type) { struct posix_acl *acl; char *ea_name; int size; char *value = NULL; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; switch(type) { case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: ea_name = POSIX_ACL_XATTR_DEFAULT; break; default: return ERR_PTR(-EINVAL); } size = __jfs_getxattr(inode, ea_name, NULL, 0); if (size > 0) { value = kmalloc(size, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); size = __jfs_getxattr(inode, ea_name, value, size); } if (size < 0) { if (size == -ENODATA) acl = NULL; else acl = ERR_PTR(size); } else { acl = posix_acl_from_xattr(value, size); } kfree(value); if (!IS_ERR(acl)) set_cached_acl(inode, type, acl); return acl; } static int jfs_set_acl(tid_t tid, struct inode *inode, int type, struct posix_acl *acl) { char *ea_name; int rc; int size = 0; char *value = NULL; if 
(S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch(type) { case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: ea_name = POSIX_ACL_XATTR_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { size = posix_acl_xattr_size(acl->a_count); value = kmalloc(size, GFP_KERNEL); if (!value) return -ENOMEM; rc = posix_acl_to_xattr(acl, value, size); if (rc < 0) goto out; } rc = __jfs_setxattr(tid, inode, ea_name, value, size, 0); out: kfree(value); if (!rc) set_cached_acl(inode, type, acl); return rc; } int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir) { struct posix_acl *acl = NULL; int rc = 0; if (S_ISLNK(inode->i_mode)) return 0; acl = jfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { if (S_ISDIR(inode->i_mode)) { rc = jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, acl); if (rc) goto cleanup; } rc = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode); if (rc < 0) goto cleanup; /* posix_acl_release(NULL) is no-op */ if (rc > 0) rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl); cleanup: posix_acl_release(acl); } else inode->i_mode &= ~current_umask(); JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) | inode->i_mode; return rc; } int jfs_acl_chmod(struct inode *inode) { struct posix_acl *acl; int rc; tid_t tid; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); rc = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (rc) return rc; tid = txBegin(inode->i_sb, 0); mutex_lock(&JFS_IP(inode)->commit_mutex); rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl); if (!rc) rc = txCommit(tid, 1, &inode, 0); txEnd(tid); mutex_unlock(&JFS_IP(inode)->commit_mutex); posix_acl_release(acl); return rc; }
gpl-2.0
mmukadam/linuxv3.12
fs/minix/bitmap.c
7906
6771
/*
 * linux/fs/minix/bitmap.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * Modified for 680x0 by Hamish Macdonald
 * Fixed for 680x0 by Andreas Schwab
 */

/* bitmap.c contains the code that handles the inode and block bitmaps */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/sched.h>

/* Serializes all test/set/clear operations on the inode and zone bitmaps. */
static DEFINE_SPINLOCK(bitmap_lock);

/*
 * bitmap consists of blocks filled with 16bit words
 * bit set == busy, bit clear == free
 * endianness is a mess, but for counting zero bits it really doesn't matter...
 */
/*
 * count_free - count the number of clear (free) bits across the bitmap
 * blocks in @map, covering @numbits bits at @blocksize bytes per block.
 * Counts whole 16-bit words, so trailing padding bits past @numbits are
 * expected to be marked busy on disk.
 */
static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
{
	__u32 sum = 0;
	unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);

	while (blocks--) {
		unsigned words = blocksize / 2;
		__u16 *p = (__u16 *)(*map++)->b_data;
		while (words--)
			sum += 16 - hweight16(*p++);
	}

	return sum;
}

/*
 * minix_free_block - release data zone @block back to the zone bitmap.
 * Validates that the block lies inside the data zone and that its bitmap
 * buffer exists; complains (but does not crash) on double frees.
 */
void minix_free_block(struct inode *inode, unsigned long block)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;	/* bits per bitmap block (log2) */
	unsigned long bit, zone;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("Trying to free block not in datazone\n");
		return;
	}
	/* Zone bitmap bit 0 is reserved, hence the +1 bias. */
	zone = block - sbi->s_firstdatazone + 1;
	bit = zone & ((1<<k) - 1);
	zone >>= k;
	if (zone >= sbi->s_zmap_blocks) {
		printk("minix_free_block: nonexistent bitmap buffer\n");
		return;
	}
	bh = sbi->s_zmap[zone];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_block (%s:%lu): bit already cleared\n",
		       sb->s_id, block);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	return;
}

/*
 * minix_new_block - allocate a free data zone for @inode.
 * Scans the zone bitmap blocks for the first clear bit, marks it busy,
 * and returns the absolute block number, or 0 if the filesystem is full.
 */
int minix_new_block(struct inode * inode)
{
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	int bits_per_zone = 8 * inode->i_sb->s_blocksize;
	int i;

	for (i = 0; i < sbi->s_zmap_blocks; i++) {
		struct buffer_head *bh = sbi->s_zmap[i];
		int j;

		spin_lock(&bitmap_lock);
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone) {
			minix_set_bit(j, bh->b_data);
			spin_unlock(&bitmap_lock);
			mark_buffer_dirty(bh);
			/* Undo the bitmap-bit-0 bias applied by free_block(). */
			j += i * bits_per_zone + sbi->s_firstdatazone-1;
			/*
			 * NOTE(review): if this range check fails, the bit we
			 * just set is never cleared — presumably acceptable as
			 * it indicates on-disk corruption; confirm upstream.
			 */
			if (j < sbi->s_firstdatazone || j >= sbi->s_nzones)
				break;
			return j;
		}
		spin_unlock(&bitmap_lock);
	}
	return 0;
}

/*
 * minix_count_free_blocks - number of free data zones, scaled by the
 * superblock's log_zone_size so the result is in blocks.
 */
unsigned long minix_count_free_blocks(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	/* +1 accounts for the reserved bit 0 of the zone bitmap. */
	u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);

	return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
		<< sbi->s_log_zone_size);
}

/*
 * minix_V1_raw_inode - map V1 on-disk inode @ino into memory.
 * On success *@bh holds the buffer that must be brelse()d by the caller
 * and the return value points into that buffer; returns NULL on a bad
 * inode number or read failure.
 */
struct minix_inode *
minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix_inode *p;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	/* Inode table starts after boot block, superblock and both bitmaps. */
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / MINIX_INODES_PER_BLOCK;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % MINIX_INODES_PER_BLOCK;
}

/*
 * minix_V2_raw_inode - map V2 on-disk inode @ino into memory.
 * Same contract as minix_V1_raw_inode(), but the inodes-per-block count
 * is derived from the larger V2 inode size.
 */
struct minix2_inode *
minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix2_inode *p;
	int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode);

	*bh = NULL;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / minix2_inodes_per_block;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % minix2_inodes_per_block;
}

/* Clear the link count and mode of a deleted inode on disk. */
static void minix_clear_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;

	/* Handle both on-disk inode layouts; logic is otherwise identical. */
	if (INODE_VERSION(inode) == MINIX_V1) {
		struct minix_inode *raw_inode;
		raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	} else {
		struct minix2_inode *raw_inode;
		raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	}
	if (bh) {
		mark_buffer_dirty(bh);
		brelse (bh);
	}
}

/*
 * minix_free_inode - release @inode: zero its on-disk copy and clear its
 * bit in the inode bitmap.  Complains on out-of-range numbers and
 * already-cleared bits rather than failing hard.
 */
void minix_free_inode(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;	/* bits per bitmap block (log2) */
	unsigned long ino, bit;

	ino = inode->i_ino;
	if (ino < 1 || ino > sbi->s_ninodes) {
		printk("minix_free_inode: inode 0 or nonexistent inode\n");
		return;
	}
	bit = ino & ((1<<k) - 1);
	ino >>= k;
	if (ino >= sbi->s_imap_blocks) {
		printk("minix_free_inode: nonexistent imap in superblock\n");
		return;
	}

	minix_clear_inode(inode);	/* clear on-disk copy */

	bh = sbi->s_imap[ino];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_inode: bit %lu already cleared\n", bit);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
}

/*
 * minix_new_inode - allocate a fresh in-core + on-disk inode under @dir
 * with mode @mode.  On failure returns NULL and stores -ENOMEM/-ENOSPC
 * in *@error; on success *@error is 0 and the new hashed, dirty inode
 * is returned.
 */
struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
{
	struct super_block *sb = dir->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct inode *inode = new_inode(sb);
	struct buffer_head * bh;
	int bits_per_zone = 8 * sb->s_blocksize;
	unsigned long j;
	int i;

	if (!inode) {
		*error = -ENOMEM;
		return NULL;
	}
	j = bits_per_zone;
	bh = NULL;
	*error = -ENOSPC;
	spin_lock(&bitmap_lock);
	/* Find the first bitmap block containing a free inode bit. */
	for (i = 0; i < sbi->s_imap_blocks; i++) {
		bh = sbi->s_imap[i];
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone)
			break;
	}
	if (!bh || j >= bits_per_zone) {
		spin_unlock(&bitmap_lock);
		iput(inode);
		return NULL;
	}
	if (minix_test_and_set_bit(j, bh->b_data)) {	/* shouldn't happen */
		spin_unlock(&bitmap_lock);
		printk("minix_new_inode: bit already set\n");
		iput(inode);
		return NULL;
	}
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	j += i * bits_per_zone;
	/* Inode 0 is reserved; reject out-of-range results too. */
	if (!j || j > sbi->s_ninodes) {
		iput(inode);
		return NULL;
	}
	inode_init_owner(inode, dir, mode);
	inode->i_ino = j;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blocks = 0;
	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	*error = 0;
	return inode;
}

/*
 * minix_count_free_inodes - number of free inodes according to the
 * inode bitmap (the +1 covers the reserved bit for inode 0).
 */
unsigned long minix_count_free_inodes(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 bits = sbi->s_ninodes + 1;

	return count_free(sbi->s_imap, sb->s_blocksize, bits);
}
gpl-2.0
friedrich420/N4-AEL-Kernel-N910F_EUR-Source-
drivers/misc/altera-stapl/altera.c
8162
57451
/* * altera.c * * altera FPGA driver * * Copyright (C) Altera Corporation 1998-2001 * Copyright (C) 2010,2011 NetUP Inc. * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <asm/unaligned.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/module.h> #include <misc/altera.h> #include "altera-exprt.h" #include "altera-jtag.h" static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debugging information"); MODULE_DESCRIPTION("altera FPGA kernel module"); MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>"); MODULE_LICENSE("GPL"); #define dprintk(args...) 
\ if (debug) { \ printk(KERN_DEBUG args); \ } enum altera_fpga_opcode { OP_NOP = 0, OP_DUP, OP_SWP, OP_ADD, OP_SUB, OP_MULT, OP_DIV, OP_MOD, OP_SHL, OP_SHR, OP_NOT, OP_AND, OP_OR, OP_XOR, OP_INV, OP_GT, OP_LT, OP_RET, OP_CMPS, OP_PINT, OP_PRNT, OP_DSS, OP_DSSC, OP_ISS, OP_ISSC, OP_DPR = 0x1c, OP_DPRL, OP_DPO, OP_DPOL, OP_IPR, OP_IPRL, OP_IPO, OP_IPOL, OP_PCHR, OP_EXIT, OP_EQU, OP_POPT, OP_ABS = 0x2c, OP_BCH0, OP_PSH0 = 0x2f, OP_PSHL = 0x40, OP_PSHV, OP_JMP, OP_CALL, OP_NEXT, OP_PSTR, OP_SINT = 0x47, OP_ST, OP_ISTP, OP_DSTP, OP_SWPN, OP_DUPN, OP_POPV, OP_POPE, OP_POPA, OP_JMPZ, OP_DS, OP_IS, OP_DPRA, OP_DPOA, OP_IPRA, OP_IPOA, OP_EXPT, OP_PSHE, OP_PSHA, OP_DYNA, OP_EXPV = 0x5c, OP_COPY = 0x80, OP_REVA, OP_DSC, OP_ISC, OP_WAIT, OP_VS, OP_CMPA = 0xc0, OP_VSC, }; struct altera_procinfo { char *name; u8 attrs; struct altera_procinfo *next; }; /* This function checks if enough parameters are available on the stack. */ static int altera_check_stack(int stack_ptr, int count, int *status) { if (stack_ptr < count) { *status = -EOVERFLOW; return 0; } return 1; } static void altera_export_int(char *key, s32 value) { dprintk("Export: key = \"%s\", value = %d\n", key, value); } #define HEX_LINE_CHARS 72 #define HEX_LINE_BITS (HEX_LINE_CHARS * 4) static void altera_export_bool_array(char *key, u8 *data, s32 count) { char string[HEX_LINE_CHARS + 1]; s32 i, offset; u32 size, line, lines, linebits, value, j, k; if (count > HEX_LINE_BITS) { dprintk("Export: key = \"%s\", %d bits, value = HEX\n", key, count); lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS; for (line = 0; line < lines; ++line) { if (line < (lines - 1)) { linebits = HEX_LINE_BITS; size = HEX_LINE_CHARS; offset = count - ((line + 1) * HEX_LINE_BITS); } else { linebits = count - ((lines - 1) * HEX_LINE_BITS); size = (linebits + 3) / 4; offset = 0L; } string[size] = '\0'; j = size - 1; value = 0; for (k = 0; k < linebits; ++k) { i = k + offset; if (data[i >> 3] & (1 << (i & 7))) value |= (1 << (i & 3)); if ((i & 3) 
== 3) { sprintf(&string[j], "%1x", value); value = 0; --j; } } if ((k & 3) > 0) sprintf(&string[j], "%1x", value); dprintk("%s\n", string); } } else { size = (count + 3) / 4; string[size] = '\0'; j = size - 1; value = 0; for (i = 0; i < count; ++i) { if (data[i >> 3] & (1 << (i & 7))) value |= (1 << (i & 3)); if ((i & 3) == 3) { sprintf(&string[j], "%1x", value); value = 0; --j; } } if ((i & 3) > 0) sprintf(&string[j], "%1x", value); dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n", key, count, string); } } static int altera_execute(struct altera_state *astate, u8 *p, s32 program_size, s32 *error_address, int *exit_code, int *format_version) { struct altera_config *aconf = astate->config; char *msg_buff = astate->msg_buff; long *stack = astate->stack; int status = 0; u32 first_word = 0L; u32 action_table = 0L; u32 proc_table = 0L; u32 str_table = 0L; u32 sym_table = 0L; u32 data_sect = 0L; u32 code_sect = 0L; u32 debug_sect = 0L; u32 action_count = 0L; u32 proc_count = 0L; u32 sym_count = 0L; long *vars = NULL; s32 *var_size = NULL; char *attrs = NULL; u8 *proc_attributes = NULL; u32 pc; u32 opcode_address; u32 args[3]; u32 opcode; u32 name_id; u8 charbuf[4]; long long_tmp; u32 variable_id; u8 *charptr_tmp; u8 *charptr_tmp2; long *longptr_tmp; int version = 0; int delta = 0; int stack_ptr = 0; u32 arg_count; int done = 0; int bad_opcode = 0; u32 count; u32 index; u32 index2; s32 long_count; s32 long_idx; s32 long_idx2; u32 i; u32 j; u32 uncomp_size; u32 offset; u32 value; int current_proc = 0; int reverse; char *name; dprintk("%s\n", __func__); /* Read header information */ if (program_size > 52L) { first_word = get_unaligned_be32(&p[0]); version = (first_word & 1L); *format_version = version + 1; delta = version * 8; action_table = get_unaligned_be32(&p[4]); proc_table = get_unaligned_be32(&p[8]); str_table = get_unaligned_be32(&p[4 + delta]); sym_table = get_unaligned_be32(&p[16 + delta]); data_sect = get_unaligned_be32(&p[20 + delta]); code_sect = 
get_unaligned_be32(&p[24 + delta]); debug_sect = get_unaligned_be32(&p[28 + delta]); action_count = get_unaligned_be32(&p[40 + delta]); proc_count = get_unaligned_be32(&p[44 + delta]); sym_count = get_unaligned_be32(&p[48 + (2 * delta)]); } if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) { done = 1; status = -EIO; goto exit_done; } if (sym_count <= 0) goto exit_done; vars = kzalloc(sym_count * sizeof(long), GFP_KERNEL); if (vars == NULL) status = -ENOMEM; if (status == 0) { var_size = kzalloc(sym_count * sizeof(s32), GFP_KERNEL); if (var_size == NULL) status = -ENOMEM; } if (status == 0) { attrs = kzalloc(sym_count, GFP_KERNEL); if (attrs == NULL) status = -ENOMEM; } if ((status == 0) && (version > 0)) { proc_attributes = kzalloc(proc_count, GFP_KERNEL); if (proc_attributes == NULL) status = -ENOMEM; } if (status != 0) goto exit_done; delta = version * 2; for (i = 0; i < sym_count; ++i) { offset = (sym_table + ((11 + delta) * i)); value = get_unaligned_be32(&p[offset + 3 + delta]); attrs[i] = p[offset]; /* * use bit 7 of attribute byte to indicate that * this buffer was dynamically allocated * and should be freed later */ attrs[i] &= 0x7f; var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]); /* * Attribute bits: * bit 0: 0 = read-only, 1 = read-write * bit 1: 0 = not compressed, 1 = compressed * bit 2: 0 = not initialized, 1 = initialized * bit 3: 0 = scalar, 1 = array * bit 4: 0 = Boolean, 1 = integer * bit 5: 0 = declared variable, * 1 = compiler created temporary variable */ if ((attrs[i] & 0x0c) == 0x04) /* initialized scalar variable */ vars[i] = value; else if ((attrs[i] & 0x1e) == 0x0e) { /* initialized compressed Boolean array */ uncomp_size = get_unaligned_le32(&p[data_sect + value]); /* allocate a buffer for the uncompressed data */ vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL); if (vars[i] == 0L) status = -ENOMEM; else { /* set flag so buffer will be freed later */ attrs[i] |= 0x80; /* uncompress the data */ if 
(altera_shrink(&p[data_sect + value], var_size[i], (u8 *)vars[i], uncomp_size, version) != uncomp_size) /* decompression failed */ status = -EIO; else var_size[i] = uncomp_size * 8L; } } else if ((attrs[i] & 0x1e) == 0x0c) { /* initialized Boolean array */ vars[i] = value + data_sect + (long)p; } else if ((attrs[i] & 0x1c) == 0x1c) { /* initialized integer array */ vars[i] = value + data_sect; } else if ((attrs[i] & 0x0c) == 0x08) { /* uninitialized array */ /* flag attrs so that memory is freed */ attrs[i] |= 0x80; if (var_size[i] > 0) { u32 size; if (attrs[i] & 0x10) /* integer array */ size = (var_size[i] * sizeof(s32)); else /* Boolean array */ size = ((var_size[i] + 7L) / 8L); vars[i] = (long)kzalloc(size, GFP_KERNEL); if (vars[i] == 0) { status = -ENOMEM; } else { /* zero out memory */ for (j = 0; j < size; ++j) ((u8 *)(vars[i]))[j] = 0; } } else vars[i] = 0; } else vars[i] = 0; } exit_done: if (status != 0) done = 1; altera_jinit(astate); pc = code_sect; msg_buff[0] = '\0'; /* * For JBC version 2, we will execute the procedures corresponding to * the selected ACTION */ if (version > 0) { if (aconf->action == NULL) { status = -EINVAL; done = 1; } else { int action_found = 0; for (i = 0; (i < action_count) && !action_found; ++i) { name_id = get_unaligned_be32(&p[action_table + (12 * i)]); name = &p[str_table + name_id]; if (strnicmp(aconf->action, name, strlen(name)) == 0) { action_found = 1; current_proc = get_unaligned_be32(&p[action_table + (12 * i) + 8]); } } if (!action_found) { status = -EINVAL; done = 1; } } if (status == 0) { int first_time = 1; i = current_proc; while ((i != 0) || first_time) { first_time = 0; /* check procedure attribute byte */ proc_attributes[i] = (p[proc_table + (13 * i) + 8] & 0x03); /* * BIT0 - OPTIONAL * BIT1 - RECOMMENDED * BIT6 - FORCED OFF * BIT7 - FORCED ON */ i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); } /* * Set current_proc to the first procedure * to be executed */ i = current_proc; while ((i != 0) && 
((proc_attributes[i] == 1) || ((proc_attributes[i] & 0xc0) == 0x40))) { i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); } if ((i != 0) || ((i == 0) && (current_proc == 0) && ((proc_attributes[0] != 1) && ((proc_attributes[0] & 0xc0) != 0x40)))) { current_proc = i; pc = code_sect + get_unaligned_be32(&p[proc_table + (13 * i) + 9]); if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } else /* there are no procedures to execute! */ done = 1; } } msg_buff[0] = '\0'; while (!done) { opcode = (p[pc] & 0xff); opcode_address = pc; ++pc; if (debug > 1) printk("opcode: %02x\n", opcode); arg_count = (opcode >> 6) & 3; for (i = 0; i < arg_count; ++i) { args[i] = get_unaligned_be32(&p[pc]); pc += 4; } switch (opcode) { case OP_NOP: break; case OP_DUP: if (altera_check_stack(stack_ptr, 1, &status)) { stack[stack_ptr] = stack[stack_ptr - 1]; ++stack_ptr; } break; case OP_SWP: if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } break; case OP_ADD: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] += stack[stack_ptr]; } break; case OP_SUB: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] -= stack[stack_ptr]; } break; case OP_MULT: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] *= stack[stack_ptr]; } break; case OP_DIV: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] /= stack[stack_ptr]; } break; case OP_MOD: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] %= stack[stack_ptr]; } break; case OP_SHL: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] <<= stack[stack_ptr]; } break; case OP_SHR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] >>= stack[stack_ptr]; } break; case OP_NOT: if (altera_check_stack(stack_ptr, 1, 
&status)) stack[stack_ptr - 1] ^= (-1L); break; case OP_AND: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] &= stack[stack_ptr]; } break; case OP_OR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] |= stack[stack_ptr]; } break; case OP_XOR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] ^= stack[stack_ptr]; } break; case OP_INV: if (!altera_check_stack(stack_ptr, 1, &status)) break; stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 0L : 1L; break; case OP_GT: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] > stack[stack_ptr]) ? 1L : 0L; break; case OP_LT: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] < stack[stack_ptr]) ? 1L : 0L; break; case OP_RET: if ((version > 0) && (stack_ptr == 0)) { /* * We completed one of the main procedures * of an ACTION. * Find the next procedure * to be executed and jump to it. * If there are no more procedures, then EXIT. */ i = get_unaligned_be32(&p[proc_table + (13 * current_proc) + 4]); while ((i != 0) && ((proc_attributes[i] == 1) || ((proc_attributes[i] & 0xc0) == 0x40))) i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); if (i == 0) { /* no procedures to execute! 
*/ done = 1; *exit_code = 0; /* success */ } else { current_proc = i; pc = code_sect + get_unaligned_be32( &p[proc_table + (13 * i) + 9]); if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } } else if (altera_check_stack(stack_ptr, 1, &status)) { pc = stack[--stack_ptr] + code_sect; if ((pc <= code_sect) || (pc >= debug_sect)) status = -ERANGE; } break; case OP_CMPS: /* * Array short compare * ...stack 0 is source 1 value * ...stack 1 is source 2 value * ...stack 2 is mask value * ...stack 3 is count */ if (altera_check_stack(stack_ptr, 4, &status)) { s32 a = stack[--stack_ptr]; s32 b = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; if ((count < 1) || (count > 32)) status = -ERANGE; else { long_tmp &= ((-1L) >> (32 - count)); stack[stack_ptr - 1] = ((a & long_tmp) == (b & long_tmp)) ? 1L : 0L; } } break; case OP_PINT: /* * PRINT add integer * ...stack 0 is integer value */ if (!altera_check_stack(stack_ptr, 1, &status)) break; sprintf(&msg_buff[strlen(msg_buff)], "%ld", stack[--stack_ptr]); break; case OP_PRNT: /* PRINT finish */ if (debug) printk(msg_buff, "\n"); msg_buff[0] = '\0'; break; case OP_DSS: /* * DRSCAN short * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_drscan(astate, count, charbuf, 0); break; case OP_DSSC: /* * DRSCAN short with capture * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_swap_dr(astate, count, charbuf, 0, charbuf, 0); stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); break; case OP_ISS: /* * IRSCAN short * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = 
stack[--stack_ptr]; count = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_irscan(astate, count, charbuf, 0); break; case OP_ISSC: /* * IRSCAN short with capture * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_swap_ir(astate, count, charbuf, 0, charbuf, 0); stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); break; case OP_DPR: if (!altera_check_stack(stack_ptr, 1, &status)) break; count = stack[--stack_ptr]; status = altera_set_dr_pre(&astate->js, count, 0, NULL); break; case OP_DPRL: /* * DRPRE with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_dr_pre(&astate->js, count, 0, charbuf); break; case OP_DPO: /* * DRPOST * ...stack 0 is count */ if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_dr_post(&astate->js, count, 0, NULL); } break; case OP_DPOL: /* * DRPOST with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_dr_post(&astate->js, count, 0, charbuf); break; case OP_IPR: if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_ir_pre(&astate->js, count, 0, NULL); } break; case OP_IPRL: /* * IRPRE with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (altera_check_stack(stack_ptr, 2, &status)) { count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_ir_pre(&astate->js, count, 0, charbuf); } 
break; case OP_IPO: /* * IRPOST * ...stack 0 is count */ if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_ir_post(&astate->js, count, 0, NULL); } break; case OP_IPOL: /* * IRPOST with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_ir_post(&astate->js, count, 0, charbuf); break; case OP_PCHR: if (altera_check_stack(stack_ptr, 1, &status)) { u8 ch; count = strlen(msg_buff); ch = (char) stack[--stack_ptr]; if ((ch < 1) || (ch > 127)) { /* * character code out of range * instead of flagging an error, * force the value to 127 */ ch = 127; } msg_buff[count] = ch; msg_buff[count + 1] = '\0'; } break; case OP_EXIT: if (altera_check_stack(stack_ptr, 1, &status)) *exit_code = stack[--stack_ptr]; done = 1; break; case OP_EQU: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] == stack[stack_ptr]) ? 
1L : 0L; break; case OP_POPT: if (altera_check_stack(stack_ptr, 1, &status)) --stack_ptr; break; case OP_ABS: if (!altera_check_stack(stack_ptr, 1, &status)) break; if (stack[stack_ptr - 1] < 0) stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1]; break; case OP_BCH0: /* * Batch operation 0 * SWP * SWPN 7 * SWP * SWPN 6 * DUPN 8 * SWPN 2 * SWP * DUPN 6 * DUPN 6 */ /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWPN 7 */ index = 7 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* DUPN 8 */ index = 8 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } /* SWPN 2 */ index = 2 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* DUPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } /* DUPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } break; case OP_PSH0: stack[stack_ptr++] = 0; break; case OP_PSHL: stack[stack_ptr++] = 
(s32) args[0]; break; case OP_PSHV: stack[stack_ptr++] = vars[args[0]]; break; case OP_JMP: pc = args[0] + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; break; case OP_CALL: stack[stack_ptr++] = pc; pc = args[0] + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; break; case OP_NEXT: /* * Process FOR / NEXT loop * ...argument 0 is variable ID * ...stack 0 is step value * ...stack 1 is end value * ...stack 2 is top address */ if (altera_check_stack(stack_ptr, 3, &status)) { s32 step = stack[stack_ptr - 1]; s32 end = stack[stack_ptr - 2]; s32 top = stack[stack_ptr - 3]; s32 iterator = vars[args[0]]; int break_out = 0; if (step < 0) { if (iterator <= end) break_out = 1; } else if (iterator >= end) break_out = 1; if (break_out) { stack_ptr -= 3; } else { vars[args[0]] = iterator + step; pc = top + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } } break; case OP_PSTR: /* * PRINT add string * ...argument 0 is string ID */ count = strlen(msg_buff); strlcpy(&msg_buff[count], &p[str_table + args[0]], ALTERA_MESSAGE_LENGTH - count); break; case OP_SINT: /* * STATE intermediate state * ...argument 0 is state code */ status = altera_goto_jstate(astate, args[0]); break; case OP_ST: /* * STATE final state * ...argument 0 is state code */ status = altera_goto_jstate(astate, args[0]); break; case OP_ISTP: /* * IRSTOP state * ...argument 0 is state code */ status = altera_set_irstop(&astate->js, args[0]); break; case OP_DSTP: /* * DRSTOP state * ...argument 0 is state code */ status = altera_set_drstop(&astate->js, args[0]); break; case OP_SWPN: /* * Exchange top with Nth stack value * ...argument 0 is 0-based stack entry * to swap with top element */ index = (args[0]) + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } break; case OP_DUPN: /* * Duplicate Nth stack value * 
...argument 0 is 0-based stack entry to duplicate */ index = (args[0]) + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } break; case OP_POPV: /* * Pop stack into scalar variable * ...argument 0 is variable ID * ...stack 0 is value */ if (altera_check_stack(stack_ptr, 1, &status)) vars[args[0]] = stack[--stack_ptr]; break; case OP_POPE: /* * Pop stack into integer array element * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is value */ if (!altera_check_stack(stack_ptr, 2, &status)) break; variable_id = args[0]; /* * If variable is read-only, * convert to writable array */ if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x1c)) { /* Allocate a writable buffer for this array */ count = var_size[variable_id]; long_tmp = vars[variable_id]; longptr_tmp = kzalloc(count * sizeof(long), GFP_KERNEL); vars[variable_id] = (long)longptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* copy previous contents into buffer */ for (i = 0; i < count; ++i) { longptr_tmp[i] = get_unaligned_be32(&p[long_tmp]); long_tmp += sizeof(long); } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } /* check that variable is a writable integer array */ if ((attrs[variable_id] & 0x1c) != 0x18) status = -ERANGE; else { longptr_tmp = (long *)vars[variable_id]; /* pop the array index */ index = stack[--stack_ptr]; /* pop the value and store it into the array */ longptr_tmp[index] = stack[--stack_ptr]; } break; case OP_POPA: /* * Pop stack into Boolean array * ...argument 0 is variable ID * ...stack 0 is count * ...stack 1 is array index * ...stack 2 is value */ if (!altera_check_stack(stack_ptr, 3, &status)) break; variable_id = args[0]; /* * If variable is read-only, * convert to writable array */ if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { 
/* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) { charptr_tmp[long_idx] = 0; } /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) { charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } /* * check that variable is * a writable Boolean array */ if ((attrs[variable_id] & 0x1c) != 0x08) { status = -ERANGE; break; } charptr_tmp = (u8 *)vars[variable_id]; /* pop the count (number of bits to copy) */ long_count = stack[--stack_ptr]; /* pop the array index */ long_idx = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = array right index * stack 1 = array left index */ if (long_idx > long_count) { reverse = 1; long_tmp = long_count; long_count = 1 + long_idx - long_count; long_idx = long_tmp; /* reverse POPA is not supported */ status = -ERANGE; break; } else long_count = 1 + long_count - long_idx; } /* pop the data */ long_tmp = stack[--stack_ptr]; if (long_count < 1) { status = -ERANGE; break; } for (i = 0; i < long_count; ++i) { if (long_tmp & (1L << (s32) i)) charptr_tmp[long_idx >> 3L] |= (1L << (long_idx & 7L)); else charptr_tmp[long_idx >> 3L] &= ~(1L << (long_idx & 7L)); ++long_idx; } break; case OP_JMPZ: /* * Pop stack and branch if zero * ...argument 0 is address * ...stack 0 is condition value */ if (altera_check_stack(stack_ptr, 1, &status)) { if (stack[--stack_ptr] == 0) { pc = args[0] + code_sect; if ((pc < code_sect) || (pc 
>= debug_sect)) status = -ERANGE; } } break; case OP_DS: case OP_IS: /* * DRSCAN * IRSCAN * ...argument 0 is scan data variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_idx = stack[--stack_ptr]; long_count = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = array right index * stack 1 = array left index * stack 2 = count */ long_tmp = long_count; long_count = stack[--stack_ptr]; if (long_idx > long_tmp) { reverse = 1; long_idx = long_tmp; } } charptr_tmp = (u8 *)vars[args[0]]; if (reverse) { /* * allocate a buffer * and reverse the data order */ charptr_tmp2 = charptr_tmp; charptr_tmp = kzalloc((long_count >> 3) + 1, GFP_KERNEL); if (charptr_tmp == NULL) { status = -ENOMEM; break; } long_tmp = long_idx + long_count - 1; long_idx2 = 0; while (long_idx2 < long_count) { if (charptr_tmp2[long_tmp >> 3] & (1 << (long_tmp & 7))) charptr_tmp[long_idx2 >> 3] |= (1 << (long_idx2 & 7)); else charptr_tmp[long_idx2 >> 3] &= ~(1 << (long_idx2 & 7)); --long_tmp; ++long_idx2; } } if (opcode == 0x51) /* DS */ status = altera_drscan(astate, long_count, charptr_tmp, long_idx); else /* IS */ status = altera_irscan(astate, long_count, charptr_tmp, long_idx); if (reverse) kfree(charptr_tmp); break; case OP_DPRA: /* * DRPRE with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_dr_pre(&astate->js, count, index, charptr_tmp); break; case OP_DPOA: /* * DRPOST with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) 
/* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_dr_post(&astate->js, count, index, charptr_tmp); break; case OP_IPRA: /* * IRPRE with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_ir_pre(&astate->js, count, index, charptr_tmp); break; case OP_IPOA: /* * IRPOST with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_ir_post(&astate->js, count, index, charptr_tmp); break; case OP_EXPT: /* * EXPORT * ...argument 0 is string ID * ...stack 0 is integer expression */ if (altera_check_stack(stack_ptr, 1, &status)) { name = &p[str_table + args[0]]; long_tmp = stack[--stack_ptr]; altera_export_int(name, long_tmp); } break; case OP_PSHE: /* * Push integer array element * ...argument 0 is variable ID * ...stack 0 is array index */ if (!altera_check_stack(stack_ptr, 1, &status)) break; variable_id = args[0]; index = stack[stack_ptr - 1]; /* check variable type */ if ((attrs[variable_id] & 0x1f) == 0x19) { /* writable integer array */ longptr_tmp = (long *)vars[variable_id]; stack[stack_ptr - 1] = longptr_tmp[index]; } else if ((attrs[variable_id] & 0x1f) == 0x1c) { /* read-only integer array */ long_tmp = vars[variable_id] + (index * sizeof(long)); stack[stack_ptr - 1] = get_unaligned_be32(&p[long_tmp]); } else status = -ERANGE; break; case OP_PSHA: 
/* * Push Boolean array * ...argument 0 is variable ID * ...stack 0 is count * ...stack 1 is array index */ if (!altera_check_stack(stack_ptr, 2, &status)) break; variable_id = args[0]; /* check that variable is a Boolean array */ if ((attrs[variable_id] & 0x18) != 0x08) { status = -ERANGE; break; } charptr_tmp = (u8 *)vars[variable_id]; /* pop the count (number of bits to copy) */ count = stack[--stack_ptr]; /* pop the array index */ index = stack[stack_ptr - 1]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; if ((count < 1) || (count > 32)) { status = -ERANGE; break; } long_tmp = 0L; for (i = 0; i < count; ++i) if (charptr_tmp[(i + index) >> 3] & (1 << ((i + index) & 7))) long_tmp |= (1L << i); stack[stack_ptr - 1] = long_tmp; break; case OP_DYNA: /* * Dynamically change size of array * ...argument 0 is variable ID * ...stack 0 is new size */ if (!altera_check_stack(stack_ptr, 1, &status)) break; variable_id = args[0]; long_tmp = stack[--stack_ptr]; if (long_tmp > var_size[variable_id]) { var_size[variable_id] = long_tmp; if (attrs[variable_id] & 0x10) /* allocate integer array */ long_tmp *= sizeof(long); else /* allocate Boolean array */ long_tmp = (long_tmp + 7) >> 3; /* * If the buffer was previously allocated, * free it */ if (attrs[variable_id] & 0x80) { kfree((void *)vars[variable_id]); vars[variable_id] = 0; } /* * Allocate a new buffer * of the requested size */ vars[variable_id] = (long) kzalloc(long_tmp, GFP_KERNEL); if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* * Set the attribute bit to indicate that * this buffer was dynamically allocated and * should be freed later */ attrs[variable_id] |= 0x80; /* zero out memory */ count = ((var_size[variable_id] + 7L) / 8L); charptr_tmp = (u8 *)(vars[variable_id]); for (index = 0; index < count; ++index) charptr_tmp[index] = 0; } break; case OP_EXPV: /* * Export Boolean array * ...argument 0 is string ID * ...stack 0 is variable ID * 
...stack 1 is array right index * ...stack 2 is array left index */ if (!altera_check_stack(stack_ptr, 3, &status)) break; if (version == 0) { /* EXPV is not supported in JBC 1.0 */ bad_opcode = 1; break; } name = &p[str_table + args[0]]; variable_id = stack[--stack_ptr]; long_idx = stack[--stack_ptr];/* right indx */ long_idx2 = stack[--stack_ptr];/* left indx */ if (long_idx > long_idx2) { /* reverse indices not supported */ status = -ERANGE; break; } long_count = 1 + long_idx2 - long_idx; charptr_tmp = (u8 *)vars[variable_id]; charptr_tmp2 = NULL; if ((long_idx & 7L) != 0) { s32 k = long_idx; charptr_tmp2 = kzalloc(((long_count + 7L) / 8L), GFP_KERNEL); if (charptr_tmp2 == NULL) { status = -ENOMEM; break; } for (i = 0; i < long_count; ++i) { if (charptr_tmp[k >> 3] & (1 << (k & 7))) charptr_tmp2[i >> 3] |= (1 << (i & 7)); else charptr_tmp2[i >> 3] &= ~(1 << (i & 7)); ++k; } charptr_tmp = charptr_tmp2; } else if (long_idx != 0) charptr_tmp = &charptr_tmp[long_idx >> 3]; altera_export_bool_array(name, charptr_tmp, long_count); /* free allocated buffer */ if ((long_idx & 7L) != 0) kfree(charptr_tmp2); break; case OP_COPY: { /* * Array copy * ...argument 0 is dest ID * ...argument 1 is source ID * ...stack 0 is count * ...stack 1 is dest index * ...stack 2 is source index */ s32 copy_count; s32 copy_index; s32 copy_index2; s32 destleft; s32 src_count; s32 dest_count; int src_reverse = 0; int dest_reverse = 0; if (!altera_check_stack(stack_ptr, 3, &status)) break; copy_count = stack[--stack_ptr]; copy_index = stack[--stack_ptr]; copy_index2 = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = source right index * stack 1 = source left index * stack 2 = destination right index * stack 3 = destination left index */ destleft = stack[--stack_ptr]; if (copy_count > copy_index) { src_reverse = 1; reverse = 1; src_count = 1 + copy_count - copy_index; /* copy_index = source start index */ } else { src_count = 1 + copy_index - copy_count; /* source start index 
*/ copy_index = copy_count; } if (copy_index2 > destleft) { dest_reverse = 1; reverse = !reverse; dest_count = 1 + copy_index2 - destleft; /* destination start index */ copy_index2 = destleft; } else dest_count = 1 + destleft - copy_index2; copy_count = (src_count < dest_count) ? src_count : dest_count; if ((src_reverse || dest_reverse) && (src_count != dest_count)) /* * If either the source or destination * is reversed, we can't tolerate * a length mismatch, because we * "left justify" arrays when copying. * This won't work correctly * with reversed arrays. */ status = -ERANGE; } count = copy_count; index = copy_index; index2 = copy_index2; /* * If destination is a read-only array, * allocate a buffer and convert it to a writable array */ variable_id = args[1]; if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { /* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) charptr_tmp[long_idx] = 0; /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } /* set bit 7 - buffer was dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } charptr_tmp = (u8 *)vars[args[1]]; charptr_tmp2 = (u8 *)vars[args[0]]; /* check if destination is a writable Boolean array */ if ((attrs[args[1]] & 0x1c) != 0x08) { status = -ERANGE; break; } if (count < 1) { status = -ERANGE; break; } if (reverse) index2 += (count - 1); for (i = 0; i < count; ++i) { if (charptr_tmp2[index >> 3] & (1 << (index & 7))) 
charptr_tmp[index2 >> 3] |= (1 << (index2 & 7)); else charptr_tmp[index2 >> 3] &= ~(1 << (index2 & 7)); ++index; if (reverse) --index2; else ++index2; } break; } case OP_DSC: case OP_ISC: { /* * DRSCAN with capture * IRSCAN with capture * ...argument 0 is scan data variable ID * ...argument 1 is capture variable ID * ...stack 0 is capture index * ...stack 1 is scan data index * ...stack 2 is count */ s32 scan_right, scan_left; s32 capture_count = 0; s32 scan_count = 0; s32 capture_index; s32 scan_index; if (!altera_check_stack(stack_ptr, 3, &status)) break; capture_index = stack[--stack_ptr]; scan_index = stack[--stack_ptr]; if (version > 0) { /* * stack 0 = capture right index * stack 1 = capture left index * stack 2 = scan right index * stack 3 = scan left index * stack 4 = count */ scan_right = stack[--stack_ptr]; scan_left = stack[--stack_ptr]; capture_count = 1 + scan_index - capture_index; scan_count = 1 + scan_left - scan_right; scan_index = scan_right; } long_count = stack[--stack_ptr]; /* * If capture array is read-only, allocate a buffer * and convert it to a writable array */ variable_id = args[1]; if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { /* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) charptr_tmp[long_idx] = 0; /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } 
charptr_tmp = (u8 *)vars[args[0]]; charptr_tmp2 = (u8 *)vars[args[1]]; if ((version > 0) && ((long_count > capture_count) || (long_count > scan_count))) { status = -ERANGE; break; } /* * check that capture array * is a writable Boolean array */ if ((attrs[args[1]] & 0x1c) != 0x08) { status = -ERANGE; break; } if (status == 0) { if (opcode == 0x82) /* DSC */ status = altera_swap_dr(astate, long_count, charptr_tmp, scan_index, charptr_tmp2, capture_index); else /* ISC */ status = altera_swap_ir(astate, long_count, charptr_tmp, scan_index, charptr_tmp2, capture_index); } break; } case OP_WAIT: /* * WAIT * ...argument 0 is wait state * ...argument 1 is end state * ...stack 0 is cycles * ...stack 1 is microseconds */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; if (long_tmp != 0L) status = altera_wait_cycles(astate, long_tmp, args[0]); long_tmp = stack[--stack_ptr]; if ((status == 0) && (long_tmp != 0L)) status = altera_wait_msecs(astate, long_tmp, args[0]); if ((status == 0) && (args[1] != args[0])) status = altera_goto_jstate(astate, args[1]); if (version > 0) { --stack_ptr; /* throw away MAX cycles */ --stack_ptr; /* throw away MAX microseconds */ } break; case OP_CMPA: { /* * Array compare * ...argument 0 is source 1 ID * ...argument 1 is source 2 ID * ...argument 2 is mask ID * ...stack 0 is source 1 index * ...stack 1 is source 2 index * ...stack 2 is mask index * ...stack 3 is count */ s32 a, b; u8 *source1 = (u8 *)vars[args[0]]; u8 *source2 = (u8 *)vars[args[1]]; u8 *mask = (u8 *)vars[args[2]]; u32 index1; u32 index2; u32 mask_index; if (!altera_check_stack(stack_ptr, 4, &status)) break; index1 = stack[--stack_ptr]; index2 = stack[--stack_ptr]; mask_index = stack[--stack_ptr]; long_count = stack[--stack_ptr]; if (version > 0) { /* * stack 0 = source 1 right index * stack 1 = source 1 left index * stack 2 = source 2 right index * stack 3 = source 2 left index * stack 4 = mask right index * stack 5 = mask left index */ s32 
mask_right = stack[--stack_ptr];
				s32 mask_left = stack[--stack_ptr];
				/* source 1 count */
				a = 1 + index2 - index1;
				/* source 2 count */
				b = 1 + long_count - mask_index;
				a = (a < b) ? a : b;
				/* mask count */
				b = 1 + mask_left - mask_right;
				a = (a < b) ? a : b;
				/* source 2 start index */
				index2 = mask_index;
				/* mask start index */
				mask_index = mask_right;
				long_count = a;
			}

			/* assume success until a masked bit mismatches */
			long_tmp = 1L;

			if (long_count < 1)
				status = -ERANGE;
			else {
				count = long_count;

				/*
				 * Compare source1 and source2 bit-by-bit,
				 * but only where the mask bit is set.
				 */
				for (i = 0; i < count; ++i) {
					if (mask[mask_index >> 3] &
						(1 << (mask_index & 7))) {
						a = source1[index1 >> 3] &
							(1 << (index1 & 7))
								? 1 : 0;
						b = source2[index2 >> 3] &
							(1 << (index2 & 7))
								? 1 : 0;

						if (a != b) /* failure */
							long_tmp = 0L;
					}
					++index1;
					++index2;
					++mask_index;
				}
			}

			/* push 1 (equal under mask) or 0 (mismatch) */
			stack[stack_ptr++] = long_tmp;

			break;
		}
		default:
			/* Unrecognized opcode -- ERROR! */
			bad_opcode = 1;
			break;
		}

		if (bad_opcode)
			status = -ENOSYS;

		if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE))
			status = -EOVERFLOW;

		/* any error stops the interpreter and reports where */
		if (status != 0) {
			done = 1;
			*error_address = (s32)(opcode_address - code_sect);
		}
	}

	altera_free_buffers(astate);

	/* Free all dynamically allocated arrays (attr bit 7 = heap-owned) */
	if ((attrs != NULL) && (vars != NULL))
		for (i = 0; i < sym_count; ++i)
			if (attrs[i] & 0x80)
				kfree((void *)vars[i]);

	kfree(vars);
	kfree(var_size);
	kfree(attrs);
	kfree(proc_attributes);

	return status;
}

static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
			char *key, char *value, int length)
/*
 * Gets key and value of NOTE fields in the JBC file.
 * Can be called in two modes: if offset pointer is NULL,
 * then the function searches for note fields which match
 * the key string provided.  If offset is not NULL, then
 * the function finds the next note field of any key,
 * starting at the offset specified by the offset pointer.
 * Returns 0 for success, else appropriate error code
 */
{
	int status = -ENODATA;
	u32 note_strings = 0L;
	u32 note_table = 0L;
	u32 note_count = 0L;
	u32 first_word = 0L;
	int version = 0;
	int delta = 0;
	char *key_ptr;
	char *value_ptr;
	int i;

	/* Read header information */
	if (program_size > 52L) {
		first_word = get_unaligned_be32(&p[0]);
		/* low bit of the magic selects header layout (v0 vs v1) */
		version = (first_word & 1L);
		delta = version * 8;

		note_strings = get_unaligned_be32(&p[8 + delta]);
		note_table = get_unaligned_be32(&p[12 + delta]);
		note_count = get_unaligned_be32(&p[44 + (2 * delta)]);
	}

	if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
		return -EIO;

	if (note_count <= 0L)
		return status;

	if (offset == NULL) {
		/*
		 * We will search for the first note with a specific key,
		 * and return only the value
		 */
		for (i = 0; (i < note_count) && (status != 0); ++i) {
			key_ptr = &p[note_strings +
					get_unaligned_be32(
					&p[note_table + (8 * i)])];
			/*
			 * NOTE(review): key is dereferenced by strnicmp()
			 * and strlen() BEFORE the key != NULL test on the
			 * same condition; a NULL key would oops before the
			 * guard fires.  The caller in this file
			 * (altera_init) always passes a kzalloc'd buffer,
			 * so this is latent rather than live -- but the
			 * NULL check should come first.
			 */
			if ((strnicmp(key, key_ptr, strlen(key_ptr)) == 0) &&
						(key != NULL)) {
				status = 0;

				value_ptr = &p[note_strings +
						get_unaligned_be32(
						&p[note_table + (8 * i) + 4])];

				if (value != NULL)
					strlcpy(value, value_ptr, length);

			}
		}
	} else {
		/*
		 * We will search for the next note, regardless of the key,
		 * and return both the value and the key
		 */
		i = *offset;

		if ((i >= 0) && (i < note_count)) {
			status = 0;

			if (key != NULL)
				strlcpy(key, &p[note_strings +
						get_unaligned_be32(
						&p[note_table + (8 * i)])],
					length);

			if (value != NULL)
				strlcpy(value, &p[note_strings +
						get_unaligned_be32(
						&p[note_table + (8 * i) + 4])],
					length);

			/* advance the cursor for the next call */
			*offset = i + 1;
		}
	}

	return status;
}

/*
 * Verify the 16-bit CRC stored in the JBC image.
 * The checksum covers bytes [0, crc_section) and is computed bit-serially
 * with feedback polynomial 0x8408; the expected value is stored big-endian
 * at offset crc_section.  Returns 0 on match, -EIO on a bad header or
 * out-of-range CRC offset, -EILSEQ on mismatch.  Diagnostics are printed
 * when the 'debug' module flag is set or on any failure.
 */
static int altera_check_crc(u8 *p, s32 program_size)
{
	int status = 0;
	u16 local_expected = 0, local_actual = 0, shift_reg = 0xffff;
	int bit, feedback;
	u8 databyte;
	u32 i;
	u32 crc_section = 0L;
	u32 first_word = 0L;
	int version = 0;
	int delta = 0;

	if (program_size > 52L) {
		first_word = get_unaligned_be32(&p[0]);
		version = (first_word & 1L);
		delta = version * 8;

		crc_section = get_unaligned_be32(&p[32 + delta]);
	}

	if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
		status = -EIO;

	if (crc_section >= program_size)
		status = -EIO;

	if (status == 0) {
		local_expected = (u16)get_unaligned_be16(&p[crc_section]);

		/* bit-serial CRC over everything before the stored CRC */
		for (i = 0; i < crc_section; ++i) {
			databyte = p[i];
			for (bit = 0; bit < 8; bit++) {
				feedback = (databyte ^ shift_reg) & 0x01;
				shift_reg >>= 1;
				if (feedback)
					shift_reg ^= 0x8408;

				databyte >>= 1;
			}
		}

		local_actual = (u16)~shift_reg;

		if (local_expected != local_actual)
			status = -EILSEQ;
	}

	if (debug || status) {
		switch (status) {
		case 0:
			printk(KERN_INFO "%s: CRC matched: %04x\n", __func__,
				local_actual);
			break;
		case -EILSEQ:
			printk(KERN_ERR "%s: CRC mismatch: expected %04x, "
				"actual %04x\n", __func__, local_expected,
				local_actual);
			break;
		case -ENODATA:
			printk(KERN_ERR "%s: expected CRC not found, "
				"actual CRC = %04x\n", __func__,
				local_actual);
			break;
		case -EIO:
			printk(KERN_ERR "%s: error: format isn't "
				"recognized.\n", __func__);
			break;
		default:
			printk(KERN_ERR "%s: CRC function returned error "
				"code %d\n", __func__, status);
			break;
		}
	}

	return status;
}

/*
 * Parse the JBC header and report the file format version (1 or 2) and,
 * for version 2 files, the action and procedure counts.
 * Returns 0 on a recognized magic word, -EIO otherwise.  action_count and
 * procedure_count are left untouched for version-1 files.
 */
static int altera_get_file_info(u8 *p,
					s32 program_size,
					int *format_version,
					int *action_count,
					int *procedure_count)
{
	int status = -EIO;
	u32 first_word = 0;
	int version = 0;

	if (program_size <= 52L)
		return status;

	first_word = get_unaligned_be32(&p[0]);

	if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) {
		status = 0;

		version = (first_word & 1L);
		*format_version = version + 1;

		if (version > 0) {
			*action_count = get_unaligned_be32(&p[48]);
			*procedure_count = get_unaligned_be32(&p[52]);
		}
	}

	return status;
}

/*
 * Look up action 'index' in a version-1 ("JAM01") file: return its name,
 * optional description, and a freshly kmalloc'd linked list of the
 * procedures it invokes (caller frees each node; see altera_init).
 *
 * NOTE(review): status starts at -EIO and is only ever changed to -ENOMEM
 * on allocation failure -- there is no success path that sets it to 0.
 * The one caller in this file (altera_init) ignores the return value and
 * consumes the out-parameters directly, so this appears intentional-but-
 * fragile; confirm before relying on the return code.
 */
static int altera_get_act_info(u8 *p,
					s32 program_size,
					int index,
					char **name,
					char **description,
					struct altera_procinfo **proc_list)
{
	int status = -EIO;
	struct altera_procinfo *procptr = NULL;
	struct altera_procinfo *tmpptr = NULL;
	u32 first_word = 0L;
	u32 action_table = 0L;
	u32 proc_table = 0L;
	u32 str_table = 0L;
	u32 note_strings = 0L;
	u32 action_count = 0L;
	u32 proc_count = 0L;
	u32 act_name_id = 0L;
	u32 act_desc_id = 0L;
	u32 act_proc_id = 0L;
	u32 act_proc_name = 0L;
	u8 act_proc_attribute = 0;

	if (program_size <= 52L)
		return status;

	/* Read header information */
	first_word = get_unaligned_be32(&p[0]);

	/* only the version-1 magic carries an action table */
	if (first_word != 0x4A414D01L)
		return status;

	action_table = get_unaligned_be32(&p[4]);
	proc_table = get_unaligned_be32(&p[8]);
	str_table = get_unaligned_be32(&p[12]);
	note_strings = get_unaligned_be32(&p[16]);
	action_count = get_unaligned_be32(&p[48]);
	proc_count = get_unaligned_be32(&p[52]);

	if (index >= action_count)
		return status;

	/* action records are 12 bytes: name id, desc id, first proc id */
	act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]);
	act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]);
	act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]);

	*name = &p[str_table + act_name_id];

	if (act_desc_id < (note_strings - str_table))
		*description = &p[str_table + act_desc_id];

	/* walk the procedure chain (13-byte records) for this action */
	do {
		act_proc_name = get_unaligned_be32(
				&p[proc_table + (13 * act_proc_id)]);
		act_proc_attribute =
			(p[proc_table + (13 * act_proc_id) + 8] & 0x03);

		procptr =
				kzalloc(sizeof(struct altera_procinfo),
								GFP_KERNEL);

		/*
		 * NOTE(review): on allocation failure the loop keeps
		 * iterating rather than breaking out; only the status
		 * value records the failure.
		 */
		if (procptr == NULL)
			status = -ENOMEM;
		else {
			procptr->name = &p[str_table + act_proc_name];
			procptr->attrs = act_proc_attribute;
			procptr->next = NULL;

			/* add record to end of linked list */
			if (*proc_list == NULL)
				*proc_list = procptr;
			else {
				tmpptr = *proc_list;
				while (tmpptr->next != NULL)
					tmpptr = tmpptr->next;
				tmpptr->next = procptr;
			}
		}

		act_proc_id = get_unaligned_be32(
				&p[proc_table + (13 * act_proc_id) + 4]);
	} while ((act_proc_id != 0) && (act_proc_id < proc_count));

	return status;
}

/*
 * Entry point: validate and run a JBC/Jam STAPL bytecode image (fw) against
 * the JTAG interface described by config.
 *
 * Flow: allocate scratch buffers -> default jtag_io to the byteblaster
 * helper if none was supplied -> CRC-check the image (result only logged)
 * -> optionally dump file info, NOTEs and actions when 'debug' is set ->
 * execute the bytecode -> log any execution error.
 *
 * NOTE(review): retval only reflects the kzalloc failures above; a failed
 * or aborted bytecode execution (exec_result / exit_code) is printed but
 * still returns 0 to the caller -- confirm callers do not depend on the
 * return code for programming success.
 */
int altera_init(struct altera_config *config, const struct firmware *fw)
{
	struct altera_state *astate = NULL;
	struct altera_procinfo *proc_list = NULL;
	struct altera_procinfo *procptr = NULL;
	char *key = NULL;
	char *value = NULL;
	char *action_name = NULL;
	char *description = NULL;
	int exec_result = 0;
	int exit_code = 0;
	int format_version = 0;
	int action_count = 0;
	int procedure_count = 0;
	int index = 0;
	s32 offset = 0L;
	s32 error_address = 0L;
	int retval = 0;

	key = kzalloc(33, GFP_KERNEL);
	if (!key) {
		retval = -ENOMEM;
		goto out;
	}
	value = kzalloc(257, GFP_KERNEL);
	if (!value) {
		retval = -ENOMEM;
		goto free_key;
	}
	astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL);
	if (!astate) {
		retval = -ENOMEM;
		goto free_value;
	}

	astate->config = config;
	if (!astate->config->jtag_io) {
		/* no board-specific I/O hook: fall back to parallel port */
		dprintk(KERN_INFO "%s: using byteblaster!\n", __func__);
		astate->config->jtag_io = netup_jtag_io_lpt;
	}

	/* result is logged by altera_check_crc itself; not treated fatal */
	altera_check_crc((u8 *)fw->data, fw->size);

	if (debug) {
		altera_get_file_info((u8 *)fw->data, fw->size, &format_version,
					&action_count, &procedure_count);
		printk(KERN_INFO "%s: File format is %s ByteCode format\n",
			__func__, (format_version == 2) ? "Jam STAPL" :
						"pre-standardized Jam 1.1");
		while (altera_get_note((u8 *)fw->data, fw->size,
					&offset, key, value, 256) == 0)
			printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
					__func__, key, value);
	}

	if (debug && (format_version == 2) && (action_count > 0)) {
		printk(KERN_INFO "%s: Actions available:\n", __func__);
		for (index = 0; index < action_count; ++index) {
			altera_get_act_info((u8 *)fw->data, fw->size,
						index, &action_name,
						&description,
						&proc_list);

			if (description == NULL)
				printk(KERN_INFO "%s: %s\n",
						__func__,
						action_name);
			else
				printk(KERN_INFO "%s: %s \"%s\"\n",
						__func__,
						action_name,
						description);

			/* print and free the procedure list as we go */
			procptr = proc_list;
			while (procptr != NULL) {
				if (procptr->attrs != 0)
					printk(KERN_INFO "%s: %s (%s)\n",
						__func__,
						procptr->name,
						(procptr->attrs == 1) ?
						"optional" : "recommended");

				proc_list = procptr->next;
				kfree(procptr);
				procptr = proc_list;
			}
		}

		printk(KERN_INFO "\n");
	}

	exec_result = altera_execute(astate, (u8 *)fw->data, fw->size,
				&error_address, &exit_code, &format_version);

	if (exit_code)
		exec_result = -EREMOTEIO;

	if ((format_version == 2) && (exec_result == -EINVAL)) {
		if (astate->config->action == NULL)
			printk(KERN_ERR "%s: error: no action specified for "
				"Jam STAPL file.\nprogram terminated.\n",
				__func__);
		else
			printk(KERN_ERR "%s: error: action \"%s\""
				" is not supported "
				"for this Jam STAPL file.\n"
				"Program terminated.\n", __func__,
				astate->config->action);

	} else if (exec_result)
		printk(KERN_ERR "%s: error %d\n", __func__, exec_result);

	kfree(astate);
free_value:
	kfree(value);
free_key:
	kfree(key);
out:
	return retval;
}
EXPORT_SYMBOL(altera_init);
gpl-2.0
gmillz/kernel_lge_msm8974
drivers/mfd/htc-i2cpld.c
8162
18428
/* * htc-i2cpld.c * Chip driver for an unknown CPLD chip found on omap850 HTC devices like * the HTC Wizard and HTC Herald. * The cpld is located on the i2c bus and acts as an input/output GPIO * extender. * * Copyright (C) 2009 Cory Maccarrone <darkstar6262@gmail.com> * * Based on work done in the linwizard project * Copyright (C) 2008-2009 Angelo Arrifano <miknix@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/htcpld.h>
#include <linux/gpio.h>
#include <linux/slab.h>

/*
 * Per-CPLD-chip state: one I2C device exposing one byte of outputs and
 * one byte of inputs, each presented as its own gpio_chip.
 */
struct htcpld_chip {
	spinlock_t lock;	/* protects cache_out/cache_in updates */

	/* chip info */
	u8 reset;		/* value written to the chip on reset */
	u8 addr;		/* I2C address */
	struct device *dev;
	struct i2c_client *client;

	/* Output details */
	u8 cache_out;		/* last output byte pushed to the chip */
	struct gpio_chip chip_out;

	/* Input details */
	u8 cache_in;		/* last input byte read from the chip */
	struct gpio_chip chip_in;

	u16 irqs_enabled;	/* soft mask bitmap, bit per input pin */
	uint irq_start;		/* first Linux irq number for this chip */
	int nirqs;		/* number of input pins wired as irqs */

	unsigned int flow_type;	/* edge type configured via set_type */
	/*
	 * Work structure to allow for setting values outside of any
	 * possible interrupt context
	 */
	struct work_struct set_val_work;
};

/* Driver-wide state shared by all chips behind one platform device. */
struct htcpld_data {
	/* irq info */
	u16 irqs_enabled;
	uint irq_start;
	int nirqs;
	uint chained_irq;	/* upstream irq the chips are muxed onto */
	/* GPIOs toggled to re-arm the CPLD interrupt line */
	unsigned int int_reset_gpio_hi;
	unsigned int int_reset_gpio_lo;

	/* htcpld info */
	struct htcpld_chip *chip;	/* array of nchips entries */
	unsigned int nchips;
};

/* There does not appear to be a way to proactively mask interrupts
 * on the htcpld chip itself.  So, we simply ignore interrupts that
 * aren't desired.
*/
/* Soft-mask: clear this irq's bit; delivery is filtered in software. */
static void htcpld_mask(struct irq_data *data)
{
	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
	chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start));
	pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled);
}

/* Soft-unmask: set this irq's bit in the per-chip enable bitmap. */
static void htcpld_unmask(struct irq_data *data)
{
	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
	chip->irqs_enabled |= 1 << (data->irq - chip->irq_start);
	pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled);
}

/*
 * Record the requested trigger type.  Only edge triggers are accepted;
 * level triggers and unknown flag bits are rejected with -EINVAL.
 * The type is stored per chip (not per pin) in chip->flow_type.
 */
static int htcpld_set_type(struct irq_data *data, unsigned int flags)
{
	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);

	if (flags & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* We only allow edge triggering */
	if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	chip->flow_type = flags;
	return 0;
}

static struct irq_chip htcpld_muxed_chip = {
	.name         = "htcpld",
	.irq_mask     = htcpld_mask,
	.irq_unmask   = htcpld_unmask,
	.irq_set_type = htcpld_set_type,
};

/* To properly dispatch IRQ events, we need to read from the
 * chip.  This is an I2C action that could possibly sleep
 * (which is bad in interrupt context) -- so we use a threaded
 * interrupt handler to get around that.
 */
static irqreturn_t htcpld_handler(int irq, void *dev)
{
	struct htcpld_data *htcpld = dev;
	unsigned int i;
	unsigned long flags;
	int irqpin;

	if (!htcpld) {
		pr_debug("htcpld is null in ISR\n");
		return IRQ_HANDLED;
	}

	/*
	 * For each chip, do a read of the chip and trigger any interrupts
	 * desired.  The interrupts will be triggered from LSB to MSB (i.e.
	 * bit 0 first, then bit 1, etc.)
	 *
	 * For chips that have no interrupt range specified, just skip 'em.
	 */
	for (i = 0; i < htcpld->nchips; i++) {
		struct htcpld_chip *chip = &htcpld->chip[i];
		struct i2c_client *client;
		int val;
		unsigned long uval, old_val;

		if (!chip) {
			pr_debug("chip %d is null in ISR\n", i);
			continue;
		}

		if (chip->nirqs == 0)
			continue;

		client = chip->client;
		if (!client) {
			pr_debug("client %d is null in ISR\n", i);
			continue;
		}

		/*
		 * Scan the chip.  NOTE: the SMBus "command" byte is the
		 * cached output value -- presumably the CPLD latches it as
		 * its output state while returning the input byte; confirm
		 * against the hardware docs.
		 */
		val = i2c_smbus_read_byte_data(client, chip->cache_out);
		if (val < 0) {
			/* Throw a warning and skip this chip */
			dev_warn(chip->dev, "Unable to read from chip: %d\n",
				 val);
			continue;
		}

		uval = (unsigned long)val;

		spin_lock_irqsave(&chip->lock, flags);

		/* Save away the old value so we can compare it */
		old_val = chip->cache_in;

		/* Write the new value */
		chip->cache_in = uval;

		spin_unlock_irqrestore(&chip->lock, flags);

		/*
		 * For each bit in the data (starting at bit 0), trigger
		 * associated interrupts.
		 *
		 * NOTE(review): chip->irqs_enabled is not consulted here;
		 * only the edge-type filter gates delivery.  Verify whether
		 * masking is intended to be enforced at this point.
		 */
		for (irqpin = 0; irqpin < chip->nirqs; irqpin++) {
			unsigned oldb, newb, type = chip->flow_type;

			/* reuses the 'irq' parameter as a scratch variable */
			irq = chip->irq_start + irqpin;

			/* Run the IRQ handler, but only if the bit value
			 * changed, and the proper flags are set */
			oldb = (old_val >> irqpin) & 1;
			newb = (uval >> irqpin) & 1;

			if ((!oldb && newb && (type & IRQ_TYPE_EDGE_RISING)) ||
			    (oldb && !newb && (type & IRQ_TYPE_EDGE_FALLING))) {
				pr_debug("fire IRQ %d\n", irqpin);
				generic_handle_irq(irq);
			}
		}
	}

	/*
	 * In order to continue receiving interrupts, the int_reset_gpio must
	 * be asserted.
	 */
	if (htcpld->int_reset_gpio_hi)
		gpio_set_value(htcpld->int_reset_gpio_hi, 1);
	if (htcpld->int_reset_gpio_lo)
		gpio_set_value(htcpld->int_reset_gpio_lo, 0);

	return IRQ_HANDLED;
}

/*
 * The GPIO set routines can be called from interrupt context, especially if,
 * for example they're attached to the led-gpio framework and a trigger is
 * enabled.  As such, we declared work above in the htcpld_chip structure,
 * and that work is scheduled in the set routine.  The kernel can then run
 * the I2C functions, which will sleep, in process context.
 */
/* gpio_chip .set: update the output cache, defer the I2C push to work. */
static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
{
	struct i2c_client *client;
	struct htcpld_chip *chip_data;
	unsigned long flags;

	chip_data = container_of(chip, struct htcpld_chip, chip_out);
	if (!chip_data)
		return;

	client = chip_data->client;
	if (client == NULL)
		return;

	spin_lock_irqsave(&chip_data->lock, flags);
	if (val)
		chip_data->cache_out |= (1 << offset);
	else
		chip_data->cache_out &= ~(1 << offset);
	spin_unlock_irqrestore(&chip_data->lock, flags);

	/* actual bus transfer happens in htcpld_chip_set_ni() */
	schedule_work(&(chip_data->set_val_work));
}

/*
 * Work handler ("_ni" = non-interrupt context): push cache_out to the chip.
 * NOTE: this uses an SMBus *read* with cache_out as the command byte --
 * presumably the CPLD latches the command byte as its new output state and
 * the returned data is irrelevant; confirm against the hardware docs.
 */
static void htcpld_chip_set_ni(struct work_struct *work)
{
	struct htcpld_chip *chip_data;
	struct i2c_client *client;

	chip_data = container_of(work, struct htcpld_chip, set_val_work);
	client = chip_data->client;
	i2c_smbus_read_byte_data(client, chip_data->cache_out);
}

/*
 * gpio_chip .get for both the output and input chips: answered purely from
 * the caches (no bus traffic); returns 0 or 1.
 */
static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
{
	struct htcpld_chip *chip_data;
	int val = 0;
	int is_input = 0;

	/* Try out first */
	chip_data = container_of(chip, struct htcpld_chip, chip_out);
	if (!chip_data) {
		/* Try in */
		is_input = 1;
		chip_data = container_of(chip, struct htcpld_chip, chip_in);
		if (!chip_data)
			return -EINVAL;
	}

	/* Determine if this is an input or output GPIO */
	if (!is_input)
		/* Use the output cache */
		val = (chip_data->cache_out >> offset) & 1;
	else
		/* Use the input cache */
		val = (chip_data->cache_in >> offset) & 1;

	if (val)
		return 1;
	else
		return 0;
}

/* Outputs are always outputs; just latch the requested value. */
static int htcpld_direction_output(struct gpio_chip *chip,
					unsigned offset, int value)
{
	htcpld_chip_set(chip, offset, value);
	return 0;
}

static int htcpld_direction_input(struct gpio_chip *chip,
					unsigned offset)
{
	/*
	 * No-op: this function can only be called on the input chip.
	 * We do however make sure the offset is within range.
	 */
	return (offset < chip->ngpio) ? 0 : -EINVAL;
}

/* Map an input-chip GPIO offset to its Linux irq number, if it has one. */
static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct htcpld_chip *chip_data;

	chip_data = container_of(chip, struct htcpld_chip, chip_in);

	if (offset < chip_data->nirqs)
		return chip_data->irq_start + offset;
	else
		return -EINVAL;
}

/*
 * Reset the chip to its configured default output state.  Same
 * read-as-write SMBus quirk as htcpld_chip_set_ni(); the assignment inside
 * the call also refreshes cache_out.
 */
static void htcpld_chip_reset(struct i2c_client *client)
{
	struct htcpld_chip *chip_data = i2c_get_clientdata(client);
	if (!chip_data)
		return;

	i2c_smbus_read_byte_data(
		client, (chip_data->cache_out = chip_data->reset));
}

/*
 * Install the muxed irq_chip and handler on every irq of one CPLD chip.
 * Always returns 0.
 */
static int __devinit htcpld_setup_chip_irq(
		struct platform_device *pdev,
		int chip_index)
{
	struct htcpld_data *htcpld;
	struct device *dev = &pdev->dev;
	struct htcpld_core_platform_data *pdata;
	struct htcpld_chip *chip;
	struct htcpld_chip_platform_data *plat_chip_data;
	unsigned int irq, irq_end;
	int ret = 0;

	/* Get the platform and driver data */
	pdata = dev->platform_data;
	htcpld = platform_get_drvdata(pdev);
	chip = &htcpld->chip[chip_index];
	plat_chip_data = &pdata->chip[chip_index];

	/* Setup irq handlers */
	irq_end = chip->irq_start + chip->nirqs;
	for (irq = chip->irq_start; irq < irq_end; irq++) {
		irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
					 handle_simple_irq);
		irq_set_chip_data(irq, chip);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
#else
		irq_set_probe(irq);
#endif
	}

	return ret;
}

/*
 * Create the I2C client for one CPLD chip: validate the adapter, register
 * the device, reset the chip, and prime cache_in with an initial read.
 * Returns 0 on success, -ENODEV/-EINVAL on adapter or registration failure.
 */
static int __devinit htcpld_register_chip_i2c(
		struct platform_device *pdev,
		int chip_index)
{
	struct htcpld_data *htcpld;
	struct device *dev = &pdev->dev;
	struct htcpld_core_platform_data *pdata;
	struct htcpld_chip *chip;
	struct htcpld_chip_platform_data *plat_chip_data;
	struct i2c_adapter *adapter;
	struct i2c_client *client;
	struct i2c_board_info info;

	/* Get the platform and driver data */
	pdata = dev->platform_data;
	htcpld = platform_get_drvdata(pdev);
	chip = &htcpld->chip[chip_index];
	plat_chip_data = &pdata->chip[chip_index];

	adapter = i2c_get_adapter(pdata->i2c_adapter_id);
	if (adapter == NULL) {
		/* Eek, no such I2C adapter!  Bail out. */
		dev_warn(dev, "Chip at i2c address 0x%x: Invalid i2c adapter %d\n",
			 plat_chip_data->addr, pdata->i2c_adapter_id);
		return -ENODEV;
	}

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
		dev_warn(dev, "i2c adapter %d non-functional\n",
			 pdata->i2c_adapter_id);
		return -EINVAL;
	}

	memset(&info, 0, sizeof(struct i2c_board_info));
	info.addr = plat_chip_data->addr;
	strlcpy(info.type, "htcpld-chip", I2C_NAME_SIZE);
	info.platform_data = chip;

	/* Add the I2C device.  This calls the probe() function. */
	client = i2c_new_device(adapter, &info);
	if (!client) {
		/* I2C device registration failed, continue with the next */
		dev_warn(dev, "Unable to add I2C device for 0x%x\n",
			 plat_chip_data->addr);
		return -ENODEV;
	}

	i2c_set_clientdata(client, chip);
	snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%d", client->addr);
	chip->client = client;

	/* Reset the chip */
	htcpld_chip_reset(client);
	/* initial input snapshot (same read-as-write quirk as above) */
	chip->cache_in = i2c_smbus_read_byte_data(client, chip->cache_out);

	return 0;
}

/* Tear down the I2C client created by htcpld_register_chip_i2c(). */
static void __devinit htcpld_unregister_chip_i2c(
		struct platform_device *pdev,
		int chip_index)
{
	struct htcpld_data *htcpld;
	struct htcpld_chip *chip;

	/* Get the platform and driver data */
	htcpld = platform_get_drvdata(pdev);
	chip = &htcpld->chip[chip_index];

	if (chip->client)
		i2c_unregister_device(chip->client);
}

/*
 * Populate and register the two gpio_chips (outputs then inputs) for one
 * CPLD chip.  On failure of the input chip, the already-added output chip
 * is rolled back.  Returns 0 or the gpiochip_add() error.
 */
static int __devinit htcpld_register_chip_gpio(
		struct platform_device *pdev,
		int chip_index)
{
	struct htcpld_data *htcpld;
	struct device *dev = &pdev->dev;
	struct htcpld_core_platform_data *pdata;
	struct htcpld_chip *chip;
	struct htcpld_chip_platform_data *plat_chip_data;
	struct gpio_chip *gpio_chip;
	int ret = 0;

	/* Get the platform and driver data */
	pdata = dev->platform_data;
	htcpld = platform_get_drvdata(pdev);
	chip = &htcpld->chip[chip_index];
	plat_chip_data = &pdata->chip[chip_index];

	/* Setup the GPIO chips */
	gpio_chip = &(chip->chip_out);
	gpio_chip->label = "htcpld-out";
	gpio_chip->dev = dev;
	gpio_chip->owner = THIS_MODULE;
	gpio_chip->get = htcpld_chip_get;
	gpio_chip->set = htcpld_chip_set;
	gpio_chip->direction_input = NULL;
	gpio_chip->direction_output = htcpld_direction_output;
	gpio_chip->base = plat_chip_data->gpio_out_base;
	gpio_chip->ngpio = plat_chip_data->num_gpios;

	gpio_chip = &(chip->chip_in);
	gpio_chip->label = "htcpld-in";
	gpio_chip->dev = dev;
	gpio_chip->owner = THIS_MODULE;
	gpio_chip->get = htcpld_chip_get;
	gpio_chip->set = NULL;
	gpio_chip->direction_input = htcpld_direction_input;
	gpio_chip->direction_output = NULL;
	gpio_chip->to_irq = htcpld_chip_to_irq;
	gpio_chip->base = plat_chip_data->gpio_in_base;
	gpio_chip->ngpio = plat_chip_data->num_gpios;

	/* Add the GPIO chips */
	ret = gpiochip_add(&(chip->chip_out));
	if (ret) {
		dev_warn(dev, "Unable to register output GPIOs for 0x%x: %d\n",
			 plat_chip_data->addr, ret);
		return ret;
	}

	ret = gpiochip_add(&(chip->chip_in));
	if (ret) {
		int error;

		dev_warn(dev, "Unable to register input GPIOs for 0x%x: %d\n",
			 plat_chip_data->addr, ret);

		/* roll back the output chip added above */
		error = gpiochip_remove(&(chip->chip_out));
		if (error)
			dev_warn(dev,
				 "Error while trying to unregister gpio chip: %d\n",
				 error);

		return ret;
	}

	return 0;
}

/*
 * Allocate and initialize per-chip state from platform data.
 * (Function continues beyond this source chunk.)
 */
static int __devinit htcpld_setup_chips(struct platform_device *pdev)
{
	struct htcpld_data *htcpld;
	struct device *dev = &pdev->dev;
	struct htcpld_core_platform_data *pdata;
	int i;

	/* Get the platform and driver data */
	pdata = dev->platform_data;
	htcpld = platform_get_drvdata(pdev);

	/* Setup each chip's output GPIOs */
	htcpld->nchips = pdata->num_chip;
	htcpld->chip = kzalloc(sizeof(struct htcpld_chip) * htcpld->nchips,
			       GFP_KERNEL);
	if (!htcpld->chip) {
		dev_warn(dev, "Unable to allocate memory for chips\n");
		return -ENOMEM;
	}

	/* Add the chips as best we can */
	for (i = 0; i < htcpld->nchips; i++) {
		int ret;

		/* Setup the HTCPLD chips */
		htcpld->chip[i].reset = pdata->chip[i].reset;
		htcpld->chip[i].cache_out = pdata->chip[i].reset;
		htcpld->chip[i].cache_in = 0;
		htcpld->chip[i].dev = dev;
		htcpld->chip[i].irq_start = pdata->chip[i].irq_base;
		htcpld->chip[i].nirqs = pdata->chip[i].num_irqs;
INIT_WORK(&(htcpld->chip[i].set_val_work), &htcpld_chip_set_ni); spin_lock_init(&(htcpld->chip[i].lock)); /* Setup the interrupts for the chip */ if (htcpld->chained_irq) { ret = htcpld_setup_chip_irq(pdev, i); if (ret) continue; } /* Register the chip with I2C */ ret = htcpld_register_chip_i2c(pdev, i); if (ret) continue; /* Register the chips with the GPIO subsystem */ ret = htcpld_register_chip_gpio(pdev, i); if (ret) { /* Unregister the chip from i2c and continue */ htcpld_unregister_chip_i2c(pdev, i); continue; } dev_info(dev, "Registered chip at 0x%x\n", pdata->chip[i].addr); } return 0; } static int __devinit htcpld_core_probe(struct platform_device *pdev) { struct htcpld_data *htcpld; struct device *dev = &pdev->dev; struct htcpld_core_platform_data *pdata; struct resource *res; int ret = 0; if (!dev) return -ENODEV; pdata = dev->platform_data; if (!pdata) { dev_warn(dev, "Platform data not found for htcpld core!\n"); return -ENXIO; } htcpld = kzalloc(sizeof(struct htcpld_data), GFP_KERNEL); if (!htcpld) return -ENOMEM; /* Find chained irq */ ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res) { int flags; htcpld->chained_irq = res->start; /* Setup the chained interrupt handler */ flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; ret = request_threaded_irq(htcpld->chained_irq, NULL, htcpld_handler, flags, pdev->name, htcpld); if (ret) { dev_warn(dev, "Unable to setup chained irq handler: %d\n", ret); goto fail; } else device_init_wakeup(dev, 0); } /* Set the driver data */ platform_set_drvdata(pdev, htcpld); /* Setup the htcpld chips */ ret = htcpld_setup_chips(pdev); if (ret) goto fail; /* Request the GPIO(s) for the int reset and set them up */ if (pdata->int_reset_gpio_hi) { ret = gpio_request(pdata->int_reset_gpio_hi, "htcpld-core"); if (ret) { /* * If it failed, that sucks, but we can probably * continue on without it. 
*/ dev_warn(dev, "Unable to request int_reset_gpio_hi -- interrupts may not work\n"); htcpld->int_reset_gpio_hi = 0; } else { htcpld->int_reset_gpio_hi = pdata->int_reset_gpio_hi; gpio_set_value(htcpld->int_reset_gpio_hi, 1); } } if (pdata->int_reset_gpio_lo) { ret = gpio_request(pdata->int_reset_gpio_lo, "htcpld-core"); if (ret) { /* * If it failed, that sucks, but we can probably * continue on without it. */ dev_warn(dev, "Unable to request int_reset_gpio_lo -- interrupts may not work\n"); htcpld->int_reset_gpio_lo = 0; } else { htcpld->int_reset_gpio_lo = pdata->int_reset_gpio_lo; gpio_set_value(htcpld->int_reset_gpio_lo, 0); } } dev_info(dev, "Initialized successfully\n"); return 0; fail: kfree(htcpld); return ret; } /* The I2C Driver -- used internally */ static const struct i2c_device_id htcpld_chip_id[] = { { "htcpld-chip", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, htcpld_chip_id); static struct i2c_driver htcpld_chip_driver = { .driver = { .name = "htcpld-chip", }, .id_table = htcpld_chip_id, }; /* The Core Driver */ static struct platform_driver htcpld_core_driver = { .driver = { .name = "i2c-htcpld", }, }; static int __init htcpld_core_init(void) { int ret; /* Register the I2C Chip driver */ ret = i2c_add_driver(&htcpld_chip_driver); if (ret) return ret; /* Probe for our chips */ return platform_driver_probe(&htcpld_core_driver, htcpld_core_probe); } static void __exit htcpld_core_exit(void) { i2c_del_driver(&htcpld_chip_driver); platform_driver_unregister(&htcpld_core_driver); } module_init(htcpld_core_init); module_exit(htcpld_core_exit); MODULE_AUTHOR("Cory Maccarrone <darkstar6262@gmail.com>"); MODULE_DESCRIPTION("I2C HTC PLD Driver"); MODULE_LICENSE("GPL");
gpl-2.0
MIPS/karma-linux-mti
drivers/platform/x86/intel_scu_ipcutil.c
8674
3046
/* * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism * * (C) Copyright 2008-2010 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * This driver provides ioctl interfaces to call intel scu ipc driver api */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/init.h> #include <asm/intel_scu_ipc.h> static int major; /* ioctl commnds */ #define INTE_SCU_IPC_REGISTER_READ 0 #define INTE_SCU_IPC_REGISTER_WRITE 1 #define INTE_SCU_IPC_REGISTER_UPDATE 2 struct scu_ipc_data { u32 count; /* No. of registers */ u16 addr[5]; /* Register addresses */ u8 data[5]; /* Register data */ u8 mask; /* Valid for read-modify-write */ }; /** * scu_reg_access - implement register access ioctls * @cmd: command we are doing (read/write/update) * @data: kernel copy of ioctl data * * Allow the user to perform register accesses on the SCU via the * kernel interface */ static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) { int count = data->count; if (count == 0 || count == 3 || count > 4) return -EINVAL; switch (cmd) { case INTE_SCU_IPC_REGISTER_READ: return intel_scu_ipc_readv(data->addr, data->data, count); case INTE_SCU_IPC_REGISTER_WRITE: return intel_scu_ipc_writev(data->addr, data->data, count); case INTE_SCU_IPC_REGISTER_UPDATE: return intel_scu_ipc_update_register(data->addr[0], data->data[0], data->mask); default: return -ENOTTY; } } /** * scu_ipc_ioctl - control ioctls for the SCU * @fp: file handle of the SCU device * @cmd: ioctl coce * @arg: pointer to user passed structure * * Support the I/O and firmware flashing interfaces of the SCU */ static long 
scu_ipc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; struct scu_ipc_data data; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data))) return -EFAULT; ret = scu_reg_access(cmd, &data); if (ret < 0) return ret; if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data))) return -EFAULT; return 0; } static const struct file_operations scu_ipc_fops = { .unlocked_ioctl = scu_ipc_ioctl, }; static int __init ipc_module_init(void) { major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops); if (major < 0) return major; return 0; } static void __exit ipc_module_exit(void) { unregister_chrdev(major, "intel_mid_scu"); } module_init(ipc_module_init); module_exit(ipc_module_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Utility driver for intel scu ipc"); MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
gpl-2.0
jmarcet/linux-amlogic
arch/mn10300/mm/misalignment.c
8674
30021
/* MN10300 Misalignment fixup handler * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/pgalloc.h> #include <asm/cpu-regs.h> #include <asm/busctl-regs.h> #include <asm/fpu.h> #include <asm/gdb-stub.h> #include <asm/asm-offsets.h> #if 0 #define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__) #else #define kdebug(FMT, ...) 
do {} while (0) #endif static int misalignment_addr(unsigned long *registers, unsigned long sp, unsigned params, unsigned opcode, unsigned long disp, void **_address, unsigned long **_postinc, unsigned long *_inc); static int misalignment_reg(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, unsigned long **_register); static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode); static const unsigned Dreg_index[] = { REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 }; static const unsigned Areg_index[] = { REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2 }; static const unsigned Rreg_index[] = { REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2, REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2, REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2, REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 }; enum format_id { FMT_S0, FMT_S1, FMT_S2, FMT_S4, FMT_D0, FMT_D1, FMT_D2, FMT_D4, FMT_D6, FMT_D7, FMT_D8, FMT_D9, FMT_D10, }; static const struct { u_int8_t opsz, dispsz; } format_tbl[16] = { [FMT_S0] = { 8, 0 }, [FMT_S1] = { 8, 8 }, [FMT_S2] = { 8, 16 }, [FMT_S4] = { 8, 32 }, [FMT_D0] = { 16, 0 }, [FMT_D1] = { 16, 8 }, [FMT_D2] = { 16, 16 }, [FMT_D4] = { 16, 32 }, [FMT_D6] = { 24, 0 }, [FMT_D7] = { 24, 8 }, [FMT_D8] = { 24, 24 }, [FMT_D9] = { 24, 32 }, [FMT_D10] = { 32, 0 }, }; enum value_id { DM0, /* data reg in opcode in bits 0-1 */ DM1, /* data reg in opcode in bits 2-3 */ DM2, /* data reg in opcode in bits 4-5 */ AM0, /* addr reg in opcode in bits 0-1 */ AM1, /* addr reg in opcode in bits 2-3 */ AM2, /* addr reg in opcode in bits 4-5 */ RM0, /* reg in opcode in bits 0-3 */ RM1, /* reg in opcode in bits 2-5 */ RM2, /* reg in opcode in bits 4-7 */ RM4, /* reg in opcode in bits 8-11 */ RM6, /* reg in opcode in bits 12-15 */ RD0, /* reg in displacement in bits 0-3 */ RD2, /* reg in displacement in bits 4-7 */ SP, /* stack pointer */ SD8, /* 8-bit signed displacement */ SD16, /* 16-bit signed displacement */ SD24, /* 24-bit 
signed displacement */ SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */ SIMM8, /* 8-bit signed immediate */ IMM8, /* 8-bit unsigned immediate */ IMM16, /* 16-bit unsigned immediate */ IMM24, /* 24-bit unsigned immediate */ IMM32, /* 32-bit unsigned immediate */ IMM32_HIGH8, /* 32-bit unsigned immediate, LSB in opcode */ IMM32_MEM, /* 32-bit unsigned displacement */ IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */ DN0 = DM0, DN1 = DM1, DN2 = DM2, AN0 = AM0, AN1 = AM1, AN2 = AM2, RN0 = RM0, RN1 = RM1, RN2 = RM2, RN4 = RM4, RN6 = RM6, DI = DM1, RI = RM2, }; struct mn10300_opcode { const char name[8]; u_int32_t opcode; u_int32_t opmask; unsigned exclusion; enum format_id format; unsigned cpu_mask; #define AM33 330 unsigned params[2]; #define MEM(ADDR) (0x80000000 | (ADDR)) #define MEM2(ADDR1, ADDR2) (0x80000000 | (ADDR1) << 8 | (ADDR2)) #define MEMINC(ADDR) (0x81000000 | (ADDR)) #define MEMINC2(ADDR, INC) (0x81000000 | (ADDR) << 8 | (INC)) }; /* LIBOPCODES EXCERPT Assemble Matsushita MN10300 instructions. Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2 of the Licence, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ static const struct mn10300_opcode mn10300_opcodes[] = { { "mov", 0x4200, 0xf300, 0, FMT_S1, 0, {DM1, MEM2(IMM8, SP)}}, { "mov", 0x4300, 0xf300, 0, FMT_S1, 0, {AM1, MEM2(IMM8, SP)}}, { "mov", 0x5800, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), DN0}}, { "mov", 0x5c00, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), AN0}}, { "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}}, { "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}}, { "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}}, { "mov", 0xf010, 0xfff0, 0, FMT_D0, 0, {AM1, MEM(AN0)}}, { "mov", 0xf300, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}}, { "mov", 0xf340, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}}, { "mov", 0xf380, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), AN2}}, { "mov", 0xf3c0, 0xffc0, 0, FMT_D0, 0, {AM2, MEM2(DI, AN0)}}, { "mov", 0xf80000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}}, { "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, { "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}}, { "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}}, { "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, { "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, { "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, { "mov", 0xf97a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}}, { "mov", 0xfa000000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}}, { "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, { "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}}, { "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}}, { "mov", 0xfa900000, 0xfff30000, 0, FMT_D2, 0, {AM1, MEM2(IMM16, SP)}}, { "mov", 0xfa910000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, { "mov", 0xfab00000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), AN0}}, { "mov", 0xfab40000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, { "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, { "mov", 0xfb1a0000, 0xffff0000, 0, 
FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, { "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, { "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, { "mov", 0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, { "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, { "mov", 0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, { "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, { "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, { "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, { "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}}, { "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}}, { "mov", 0xfc800000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM(IMM32_MEM)}}, { "mov", 0xfc810000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, { "mov", 0xfc900000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM2(IMM32, SP)}}, { "mov", 0xfc910000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, { "mov", 0xfca00000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), AN0}}, { "mov", 0xfca40000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, { "mov", 0xfcb00000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), AN0}}, { "mov", 0xfcb40000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, { "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, { "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, { "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, { "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, { "mov", 0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, { "mov", 0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, { "mov", 
0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, { "mov", 0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, { "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, { "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, { "mov", 0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, { "mov", 0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, { "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}}, { "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}}, { "movhu", 0xf480, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}}, { "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}}, { "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}}, { "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, { "movhu", 0xf89300, 0xfff300, 0, FMT_D1, 0, {DM1, MEM2(IMM8, SP)}}, { "movhu", 0xf8bc00, 0xfffc00, 0, FMT_D1, 0, {MEM2(IMM8, SP), DN0}}, { "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, { "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, { "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, { "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}}, { "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}}, { "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, { "movhu", 0xfa930000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, { "movhu", 0xfabc0000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, { "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, { "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, { "movhu", 0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, { "movhu", 0xfbce0000, 
0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, { "movhu", 0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, { "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, { "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, { "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, { "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, { "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, { "movhu", 0xfc830000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, { "movhu", 0xfc930000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, { "movhu", 0xfcac0000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, { "movhu", 0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, { "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, { "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, { "movhu", 0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, { "movhu", 0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, { "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, { "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, { "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, { "movhu", 0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, { "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, { "movhu", 0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, { "movhu", 0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, { "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, { "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, { "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, { "mov_llt", 0xf7e00000, 0xffff000f, 
0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "", 0, 0, 0, 0, 0, {0}}, }; /* * fix up misalignment problems where possible */ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) { const struct exception_table_entry *fixup; const struct mn10300_opcode *pop; unsigned long *registers = (unsigned long *) regs; unsigned long data, *store, *postinc, disp, inc, sp; mm_segment_t seg; siginfo_t info; uint32_t opcode, noc, xo, xm; uint8_t *pc, byte, datasz; void *address; unsigned tmp, npop, dispsz, loop; /* we don't fix up userspace misalignment faults */ if (user_mode(regs)) goto bus_error; sp = (unsigned long) regs + sizeof(*regs); kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp); if (regs->epsw & EPSW_IE) asm volatile("or %0,epsw" : : "i"(EPSW_IE)); seg = get_fs(); set_fs(KERNEL_DS); fixup = search_exception_tables(regs->pc); /* first thing to do is to match the opcode */ pc = (u_int8_t *) regs->pc; if (__get_user(byte, pc) != 0) goto fetch_error; opcode = byte; noc = 8; for (pop = mn10300_opcodes; pop->name[0]; pop++) { npop = ilog2(pop->opcode | 
pop->opmask); if (npop <= 0 || npop > 31) continue; npop = (npop + 8) & ~7; got_more_bits: if (npop == noc) { if ((opcode & pop->opmask) == pop->opcode) goto found_opcode; } else if (npop > noc) { xo = pop->opcode >> (npop - noc); xm = pop->opmask >> (npop - noc); if ((opcode & xm) != xo) continue; /* we've got a partial match (an exact match on the * first N bytes), so we need to get some more data */ pc++; if (__get_user(byte, pc) != 0) goto fetch_error; opcode = opcode << 8 | byte; noc += 8; goto got_more_bits; } else { /* there's already been a partial match as long as the * complete match we're now considering, so this one * should't match */ continue; } } /* didn't manage to find a fixup */ printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n", regs->pc, opcode); failed: set_fs(seg); if (die_if_no_fixup("misalignment error", regs, code)) return; bus_error: info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void *) regs->pc; force_sig_info(SIGBUS, &info, current); return; /* error reading opcodes */ fetch_error: printk(KERN_CRIT "MISALIGN: %p: fault whilst reading instruction data\n", pc); goto failed; bad_addr_mode: printk(KERN_CRIT "MISALIGN: %lx: unsupported addressing mode %x\n", regs->pc, opcode); goto failed; bad_reg_mode: printk(KERN_CRIT "MISALIGN: %lx: unsupported register mode %x\n", regs->pc, opcode); goto failed; unsupported_instruction: printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x (%s)\n", regs->pc, opcode, pop->name); goto failed; transfer_failed: set_fs(seg); if (fixup) { regs->pc = fixup->fixup; return; } if (die_if_no_fixup("misalignment fixup", regs, code)) return; info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = 0; info.si_addr = (void *) regs->pc; force_sig_info(SIGSEGV, &info, current); return; /* we matched the opcode */ found_opcode: kdebug("%lx: %x==%x { %x, %x }", regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]); tmp = format_tbl[pop->format].opsz; 
BUG_ON(tmp > noc); /* match was less complete than it ought to have been */ if (tmp < noc) { tmp = noc - tmp; opcode >>= tmp; pc -= tmp >> 3; } /* grab the extra displacement (note it's LSB first) */ disp = 0; dispsz = format_tbl[pop->format].dispsz; for (loop = 0; loop < dispsz; loop += 8) { pc++; if (__get_user(byte, pc) != 0) goto fetch_error; disp |= byte << loop; kdebug("{%p} disp[%02x]=%02x", pc, loop, byte); } kdebug("disp=%lx", disp); set_fs(KERNEL_XDS); if (fixup) set_fs(seg); tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000; if (!tmp) { printk(KERN_CRIT "MISALIGN: %lx: insn not move to/from memory %x\n", regs->pc, opcode); goto failed; } /* determine the data transfer size of the move */ if (pop->name[3] == 0 || /* "mov" */ pop->name[4] == 'l') /* mov_lcc */ inc = datasz = 4; else if (pop->name[3] == 'h') /* movhu */ inc = datasz = 2; else goto unsupported_instruction; if (pop->params[0] & 0x80000000) { /* move memory to register */ if (!misalignment_addr(registers, sp, pop->params[0], opcode, disp, &address, &postinc, &inc)) goto bad_addr_mode; if (!misalignment_reg(registers, pop->params[1], opcode, disp, &store)) goto bad_reg_mode; kdebug("mov%u (%p),DARn", datasz, address); if (copy_from_user(&data, (void *) address, datasz) != 0) goto transfer_failed; if (pop->params[0] & 0x1000000) { kdebug("inc=%lx", inc); *postinc += inc; } *store = data; kdebug("loaded %lx", data); } else { /* move register to memory */ if (!misalignment_reg(registers, pop->params[0], opcode, disp, &store)) goto bad_reg_mode; if (!misalignment_addr(registers, sp, pop->params[1], opcode, disp, &address, &postinc, &inc)) goto bad_addr_mode; data = *store; kdebug("mov%u %lx,(%p)", datasz, data, address); if (copy_to_user((void *) address, &data, datasz) != 0) goto transfer_failed; if (pop->params[1] & 0x1000000) *postinc += inc; } tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz; regs->pc += tmp >> 3; /* handle MOV_Lcc, which are currently the only FMT_D10 
insns that * access memory */ if (pop->format == FMT_D10) misalignment_MOV_Lcc(regs, opcode); set_fs(seg); } /* * determine the address that was being accessed */ static int misalignment_addr(unsigned long *registers, unsigned long sp, unsigned params, unsigned opcode, unsigned long disp, void **_address, unsigned long **_postinc, unsigned long *_inc) { unsigned long *postinc = NULL, address = 0, tmp; if (!(params & 0x1000000)) { kdebug("noinc"); *_inc = 0; _inc = NULL; } params &= 0x00ffffff; do { switch (params & 0xff) { case DM0: postinc = &registers[Dreg_index[opcode & 0x03]]; address += *postinc; break; case DM1: postinc = &registers[Dreg_index[opcode >> 2 & 0x03]]; address += *postinc; break; case DM2: postinc = &registers[Dreg_index[opcode >> 4 & 0x03]]; address += *postinc; break; case AM0: postinc = &registers[Areg_index[opcode & 0x03]]; address += *postinc; break; case AM1: postinc = &registers[Areg_index[opcode >> 2 & 0x03]]; address += *postinc; break; case AM2: postinc = &registers[Areg_index[opcode >> 4 & 0x03]]; address += *postinc; break; case RM0: postinc = &registers[Rreg_index[opcode & 0x0f]]; address += *postinc; break; case RM1: postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]]; address += *postinc; break; case RM2: postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]]; address += *postinc; break; case RM4: postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]]; address += *postinc; break; case RM6: postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]]; address += *postinc; break; case RD0: postinc = &registers[Rreg_index[disp & 0x0f]]; address += *postinc; break; case RD2: postinc = &registers[Rreg_index[disp >> 4 & 0x0f]]; address += *postinc; break; case SP: address += sp; break; /* displacements are either to be added to the address * before use, or, in the case of post-inc addressing, * to be added into the base register after use */ case SD8: case SIMM8: disp = (long) (int8_t) (disp & 0xff); goto displace_or_inc; case SD16: disp = (long) 
(int16_t) (disp & 0xffff); goto displace_or_inc; case SD24: tmp = disp << 8; asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc"); disp = (long) tmp; goto displace_or_inc; case SIMM4_2: tmp = opcode >> 4 & 0x0f; tmp <<= 28; asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc"); disp = (long) tmp; goto displace_or_inc; case IMM8: disp &= 0x000000ff; goto displace_or_inc; case IMM16: disp &= 0x0000ffff; goto displace_or_inc; case IMM24: disp &= 0x00ffffff; goto displace_or_inc; case IMM32: case IMM32_MEM: case IMM32_HIGH8: case IMM32_HIGH8_MEM: displace_or_inc: kdebug("%s %lx", _inc ? "incr" : "disp", disp); if (!_inc) address += disp; else *_inc = disp; break; default: BUG(); return 0; } } while ((params >>= 8)); *_address = (void *) address; *_postinc = postinc; return 1; } /* * determine the register that is acting as source/dest */ static int misalignment_reg(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, unsigned long **_register) { params &= 0x7fffffff; if (params & 0xffffff00) return 0; switch (params & 0xff) { case DM0: *_register = &registers[Dreg_index[opcode & 0x03]]; break; case DM1: *_register = &registers[Dreg_index[opcode >> 2 & 0x03]]; break; case DM2: *_register = &registers[Dreg_index[opcode >> 4 & 0x03]]; break; case AM0: *_register = &registers[Areg_index[opcode & 0x03]]; break; case AM1: *_register = &registers[Areg_index[opcode >> 2 & 0x03]]; break; case AM2: *_register = &registers[Areg_index[opcode >> 4 & 0x03]]; break; case RM0: *_register = &registers[Rreg_index[opcode & 0x0f]]; break; case RM1: *_register = &registers[Rreg_index[opcode >> 2 & 0x0f]]; break; case RM2: *_register = &registers[Rreg_index[opcode >> 4 & 0x0f]]; break; case RM4: *_register = &registers[Rreg_index[opcode >> 8 & 0x0f]]; break; case RM6: *_register = &registers[Rreg_index[opcode >> 12 & 0x0f]]; break; case RD0: *_register = &registers[Rreg_index[disp & 0x0f]]; break; case RD2: *_register = &registers[Rreg_index[disp >> 4 & 0x0f]]; break; case SP: 
*_register = &registers[REG_SP >> 2]; break; default: BUG(); return 0; } return 1; } /* * handle the conditional loop part of the move-and-loop instructions */ static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode) { unsigned long epsw = regs->epsw; unsigned long NxorV; kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf); /* calculate N^V and shift onto the same bit position as Z */ NxorV = ((epsw >> 3) ^ epsw >> 1) & 1; switch (opcode & 0xf) { case 0x0: /* MOV_LLT: N^V */ if (NxorV) goto take_the_loop; return; case 0x1: /* MOV_LGT: ~(Z or (N^V))*/ if (!((epsw & EPSW_FLAG_Z) | NxorV)) goto take_the_loop; return; case 0x2: /* MOV_LGE: ~(N^V) */ if (!NxorV) goto take_the_loop; return; case 0x3: /* MOV_LLE: Z or (N^V) */ if ((epsw & EPSW_FLAG_Z) | NxorV) goto take_the_loop; return; case 0x4: /* MOV_LCS: C */ if (epsw & EPSW_FLAG_C) goto take_the_loop; return; case 0x5: /* MOV_LHI: ~(C or Z) */ if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))) goto take_the_loop; return; case 0x6: /* MOV_LCC: ~C */ if (!(epsw & EPSW_FLAG_C)) goto take_the_loop; return; case 0x7: /* MOV_LLS: C or Z */ if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)) goto take_the_loop; return; case 0x8: /* MOV_LEQ: Z */ if (epsw & EPSW_FLAG_Z) goto take_the_loop; return; case 0x9: /* MOV_LNE: ~Z */ if (!(epsw & EPSW_FLAG_Z)) goto take_the_loop; return; case 0xa: /* MOV_LRA: always */ goto take_the_loop; default: BUG(); } take_the_loop: /* wind the PC back to just after the SETLB insn */ kdebug("loop LAR=%lx", regs->lar); regs->pc = regs->lar - 4; } /* * misalignment handler tests */ #ifdef CONFIG_TEST_MISALIGNMENT_HANDLER static u8 __initdata testbuf[512] __attribute__((aligned(16))) = { [257] = 0x11, [258] = 0x22, [259] = 0x33, [260] = 0x44, }; #define ASSERTCMP(X, OP, Y) \ do { \ if (unlikely(!((X) OP (Y)))) { \ printk(KERN_ERR "\n"); \ printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \ __LINE__); \ printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ (unsigned long)(X), (unsigned 
long)(Y)); \ BUG(); \ } \ } while(0) static int __init test_misalignment(void) { register void *r asm("e0"); register u32 y asm("e1"); void *p = testbuf, *q; u32 tmp, tmp2, x; printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p); p++; printk(KERN_NOTICE "___ MOV (Am),Dn ___\n"); q = p + 256; asm volatile("mov (%0),%1" : "+a"(q), "=d"(x)); ASSERTCMP(q, ==, p + 256); ASSERTCMP(x, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n"); q = p; asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x)); ASSERTCMP(q, ==, p); ASSERTCMP(x, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n"); tmp = 256; q = p; asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp)); ASSERTCMP(q, ==, p); ASSERTCMP(x, ==, 0x44332211); ASSERTCMP(tmp, ==, 256); printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n"); r = p; asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y)); ASSERTCMP(r, ==, p); ASSERTCMP(y, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n"); r = p + 256; asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y)); ASSERTCMP(r, ==, p + 256 + 4); ASSERTCMP(y, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n"); r = p + 256; asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y)); ASSERTCMP(r, ==, p + 256 + 8); ASSERTCMP(y, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n"); asm volatile( "add -16,sp \n" "mov +0x11,%0 \n" "movbu %0,(7,sp) \n" "mov +0x22,%0 \n" "movbu %0,(8,sp) \n" "mov +0x33,%0 \n" "movbu %0,(9,sp) \n" "mov +0x44,%0 \n" "movbu %0,(10,sp) \n" "mov (7,sp),%1 \n" "add +16,sp \n" : "+a"(q), "=d"(x)); ASSERTCMP(x, ==, 0x44332211); printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n"); asm volatile( "add -264,sp \n" "mov +0x11,%0 \n" "movbu %0,(259,sp) \n" "mov +0x22,%0 \n" "movbu %0,(260,sp) \n" "mov +0x33,%0 \n" "movbu %0,(261,sp) \n" "mov +0x55,%0 \n" "movbu %0,(262,sp) \n" "mov (259,sp),%1 \n" "add +264,sp \n" : "+d"(tmp), "=d"(x)); ASSERTCMP(x, ==, 0x55332211); printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n"); asm volatile( "add 
-264,sp \n" "mov +0x11,%0 \n" "movbu %0,(260,sp) \n" "mov +0x22,%0 \n" "movbu %0,(261,sp) \n" "mov +0x33,%0 \n" "movbu %0,(262,sp) \n" "mov +0x55,%0 \n" "movbu %0,(263,sp) \n" "mov (260,sp),%1 \n" "add +264,sp \n" : "+d"(tmp), "=d"(x)); ASSERTCMP(x, ==, 0x55332211); printk(KERN_NOTICE "___ MOV_LNE ___\n"); tmp = 1; tmp2 = 2; q = p + 256; asm volatile( "setlb \n" "mov %2,%3 \n" "mov %1,%2 \n" "cmp +0,%1 \n" "mov_lne (%0+,4),%1" : "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) : : "cc"); ASSERTCMP(q, ==, p + 256 + 12); ASSERTCMP(x, ==, 0x44332211); printk(KERN_NOTICE "___ MOV in SETLB ___\n"); tmp = 1; tmp2 = 2; q = p + 256; asm volatile( "setlb \n" "mov %1,%3 \n" "mov (%0+),%1 \n" "cmp +0,%1 \n" "lne " : "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) : : "cc"); ASSERTCMP(q, ==, p + 256 + 8); ASSERTCMP(x, ==, 0x44332211); printk(KERN_NOTICE "<==test_misalignment()\n"); return 0; } arch_initcall(test_misalignment); #endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g-cm11
arch/arm/mach-iop13xx/msi.c
10722
4091
/* * arch/arm/mach-iop13xx/msi.c * * PCI MSI support for the iop13xx processor * * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/pci.h> #include <linux/msi.h> #include <asm/mach/irq.h> #include <asm/irq.h> #define IOP13XX_NUM_MSI_IRQS 128 static DECLARE_BITMAP(msi_irq_in_use, IOP13XX_NUM_MSI_IRQS); /* IMIPR0 CP6 R8 Page 1 */ static u32 read_imipr_0(void) { u32 val; asm volatile("mrc p6, 0, %0, c8, c1, 0":"=r" (val)); return val; } static void write_imipr_0(u32 val) { asm volatile("mcr p6, 0, %0, c8, c1, 0"::"r" (val)); } /* IMIPR1 CP6 R9 Page 1 */ static u32 read_imipr_1(void) { u32 val; asm volatile("mrc p6, 0, %0, c9, c1, 0":"=r" (val)); return val; } static void write_imipr_1(u32 val) { asm volatile("mcr p6, 0, %0, c9, c1, 0"::"r" (val)); } /* IMIPR2 CP6 R10 Page 1 */ static u32 read_imipr_2(void) { u32 val; asm volatile("mrc p6, 0, %0, c10, c1, 0":"=r" (val)); return val; } static void write_imipr_2(u32 val) { asm volatile("mcr p6, 0, %0, c10, c1, 0"::"r" (val)); } /* IMIPR3 CP6 R11 Page 1 */ static u32 read_imipr_3(void) { u32 val; asm volatile("mrc p6, 0, %0, c11, c1, 0":"=r" (val)); return val; } static void write_imipr_3(u32 val) { asm volatile("mcr p6, 0, %0, c11, c1, 0"::"r" (val)); } static u32 (*read_imipr[])(void) = { read_imipr_0, read_imipr_1, read_imipr_2, read_imipr_3, }; static void 
(*write_imipr[])(u32) = { write_imipr_0, write_imipr_1, write_imipr_2, write_imipr_3, }; static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) { int i, j; unsigned long status; /* read IMIPR registers and find any active interrupts, * then call ISR for each active interrupt */ for (i = 0; i < ARRAY_SIZE(read_imipr); i++) { status = (read_imipr[i])(); if (!status) continue; do { j = find_first_bit(&status, 32); (write_imipr[i])(1 << j); /* write back to clear bit */ generic_handle_irq(IRQ_IOP13XX_MSI_0 + j + (32*i)); status = (read_imipr[i])(); } while (status); } } void __init iop13xx_msi_init(void) { irq_set_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); } /* * Dynamic irq allocate and deallocation */ int create_irq(void) { int irq, pos; again: pos = find_first_zero_bit(msi_irq_in_use, IOP13XX_NUM_MSI_IRQS); irq = IRQ_IOP13XX_MSI_0 + pos; if (irq > NR_IRQS) return -ENOSPC; /* test_and_set_bit operates on 32-bits at a time */ if (test_and_set_bit(pos, msi_irq_in_use)) goto again; dynamic_irq_init(irq); return irq; } void destroy_irq(unsigned int irq) { int pos = irq - IRQ_IOP13XX_MSI_0; dynamic_irq_cleanup(irq); clear_bit(pos, msi_irq_in_use); } void arch_teardown_msi_irq(unsigned int irq) { destroy_irq(irq); } static void iop13xx_msi_nop(struct irq_data *d) { return; } static struct irq_chip iop13xx_msi_chip = { .name = "PCI-MSI", .irq_ack = iop13xx_msi_nop, .irq_enable = unmask_msi_irq, .irq_disable = mask_msi_irq, .irq_mask = mask_msi_irq, .irq_unmask = unmask_msi_irq, }; int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { int id, irq = create_irq(); struct msi_msg msg; if (irq < 0) return irq; irq_set_msi_desc(irq, desc); msg.address_hi = 0x0; msg.address_lo = IOP13XX_MU_MIMR_PCI; id = iop13xx_cpu_id(); msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); return 0; }
gpl-2.0
InfinitiveOS-Devices/android_kernel_motorola_msm8610
drivers/media/dvb/mantis/mantis_vp3030.c
11234
2653
/*
	Mantis VP-3030 driver

	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"

#include "zl10353.h"
#include "tda665x.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp3030.h"

/* ZL10353 DVB-T demodulator, I2C address 0x0f. */
struct zl10353_config mantis_vp3030_config = {
	.demod_address = 0x0f,
};

/* Panasonic ENV57H12D5 tuner parameters (frequencies in Hz). */
struct tda665x_config env57h12d5_config = {
	.name		= "ENV57H12D5 (ET-50DT)",
	.addr		= 0x60,
	.frequency_min	= 47000000,
	.frequency_max	= 862000000,
	.frequency_offst = 3616667,
	.ref_multiplier = 6, /* 1/6 MHz */
	.ref_divider	= 100000, /* 1/6 MHz */
};

#define MANTIS_MODEL_NAME	"VP-3030"
#define MANTIS_DEV_TYPE		"DVB-T"

/*
 * Power up and probe the VP-3030 frontend: hold the board in reset,
 * switch frontend power on, release reset, then attach the ZL10353
 * demodulator and the TDA665x tuner on the card's I2C adapter.
 *
 * Returns 0 on success, -1 if the demodulator does not answer, and
 * -EIO when powering the frontend failed.
 *
 * NOTE(review): the msleep() delays presumably follow the hardware
 * power-up/reset timing requirements — confirm against the board spec
 * before changing them.
 */
static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
	struct i2c_adapter *adapter	= &mantis->adapter;
	struct mantis_hwconfig *config	= mantis->hwconfig;
	int err = 0;

	/* Assert reset, power on, then deassert reset. */
	mantis_gpio_set_bits(mantis, config->reset, 0);
	msleep(100);
	err = mantis_frontend_power(mantis, POWER_ON);
	msleep(100);
	mantis_gpio_set_bits(mantis, config->reset, 1);

	if (err == 0) {
		msleep(250);
		dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
		fe = dvb_attach(zl10353_attach, &mantis_vp3030_config, adapter);
		if (!fe)
			return -1;
		/* Tuner attach failure is tolerated; only the demod is fatal. */
		dvb_attach(tda665x_attach, fe, &env57h12d5_config, adapter);
	} else {
		dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
			adapter->name,
			err);
		return -EIO;
	}
	mantis->fe = fe;
	dprintk(MANTIS_ERROR, 1, "Done!");

	return 0;
}

/* Board description consumed by the generic mantis driver core. */
struct mantis_hwconfig vp3030_config = {
	.model_name	= MANTIS_MODEL_NAME,
	.dev_type	= MANTIS_DEV_TYPE,
	.ts_size	= MANTIS_TS_188,
	.baud_rate	= MANTIS_BAUD_9600,
	.parity		= MANTIS_PARITY_NONE,
	.bytes		= 0,
	.frontend_init	= vp3030_frontend_init,
	.power		= GPIF_A12,
	.reset		= GPIF_A13,
	.i2c_mode	= MANTIS_BYTE_MODE
};
gpl-2.0
crseanpaul/muon-catalyzed-fusion
drivers/media/pci/mantis/mantis_vp3030.c
11234
2653
/*
	Mantis VP-3030 driver

	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"

#include "zl10353.h"
#include "tda665x.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp3030.h"

/* ZL10353 DVB-T demodulator, I2C address 0x0f. */
struct zl10353_config mantis_vp3030_config = {
	.demod_address = 0x0f,
};

/* Panasonic ENV57H12D5 tuner parameters (frequencies in Hz). */
struct tda665x_config env57h12d5_config = {
	.name		= "ENV57H12D5 (ET-50DT)",
	.addr		= 0x60,
	.frequency_min	= 47000000,
	.frequency_max	= 862000000,
	.frequency_offst = 3616667,
	.ref_multiplier = 6, /* 1/6 MHz */
	.ref_divider	= 100000, /* 1/6 MHz */
};

#define MANTIS_MODEL_NAME	"VP-3030"
#define MANTIS_DEV_TYPE		"DVB-T"

/*
 * Power up and probe the VP-3030 frontend: hold the board in reset,
 * switch frontend power on, release reset, then attach the ZL10353
 * demodulator and the TDA665x tuner on the card's I2C adapter.
 *
 * Returns 0 on success, -1 if the demodulator does not answer, and
 * -EIO when powering the frontend failed.
 *
 * NOTE(review): the msleep() delays presumably follow the hardware
 * power-up/reset timing requirements — confirm against the board spec
 * before changing them.
 */
static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
	struct i2c_adapter *adapter	= &mantis->adapter;
	struct mantis_hwconfig *config	= mantis->hwconfig;
	int err = 0;

	/* Assert reset, power on, then deassert reset. */
	mantis_gpio_set_bits(mantis, config->reset, 0);
	msleep(100);
	err = mantis_frontend_power(mantis, POWER_ON);
	msleep(100);
	mantis_gpio_set_bits(mantis, config->reset, 1);

	if (err == 0) {
		msleep(250);
		dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
		fe = dvb_attach(zl10353_attach, &mantis_vp3030_config, adapter);
		if (!fe)
			return -1;
		/* Tuner attach failure is tolerated; only the demod is fatal. */
		dvb_attach(tda665x_attach, fe, &env57h12d5_config, adapter);
	} else {
		dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
			adapter->name,
			err);
		return -EIO;
	}
	mantis->fe = fe;
	dprintk(MANTIS_ERROR, 1, "Done!");

	return 0;
}

/* Board description consumed by the generic mantis driver core. */
struct mantis_hwconfig vp3030_config = {
	.model_name	= MANTIS_MODEL_NAME,
	.dev_type	= MANTIS_DEV_TYPE,
	.ts_size	= MANTIS_TS_188,
	.baud_rate	= MANTIS_BAUD_9600,
	.parity		= MANTIS_PARITY_NONE,
	.bytes		= 0,
	.frontend_init	= vp3030_frontend_init,
	.power		= GPIF_A12,
	.reset		= GPIF_A13,
	.i2c_mode	= MANTIS_BYTE_MODE
};
gpl-2.0
broonie/regulator-2.6
arch/mips/lasat/setup.c
14050
3811
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
 *
 * Thomas Horsten <thh@lasat.com>
 * Copyright (C) 2000 LASAT Networks A/S.
 *
 * Brian Murphy <brian@murphy.dk>
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Lasat specific setup.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/tty.h>

#include <asm/time.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/serial.h>

#ifdef CONFIG_PICVUE
#include <linux/notifier.h>
#endif

#include "ds1603.h"
#include <asm/lasat/ds1603.h>
#include <asm/lasat/picvue.h>
#include <asm/lasat/eeprom.h>

#include "prom.h"

int lasat_command_line;
void lasatint_init(void);

extern void lasat_reboot_setup(void);
extern void pcisetup(void);
extern void edhac_init(void *, void *, void *);
extern void addrflt_init(void);

/*
 * Per-machine-type register addresses: index 0 is the LASAT 100 board,
 * index 1 the LASAT 200 (selected at runtime in plat_mem_setup()).
 * The trailing positional value fills the member after flash_wp_reg.
 */
struct lasat_misc lasat_misc_info[N_MACHTYPES] = {
	{
		.reset_reg	= (void *)KSEG1ADDR(0x1c840000),
		.flash_wp_reg	= (void *)KSEG1ADDR(0x1c800000), 2
	},
	{
		.reset_reg	= (void *)KSEG1ADDR(0x11080000),
		.flash_wp_reg	= (void *)KSEG1ADDR(0x11000000), 6
	}
};

struct lasat_misc *lasat_misc;

#ifdef CONFIG_DS1603
/* DS1603 RTC wiring for the two board variants. */
static struct ds_defs ds_defs[N_MACHTYPES] = {
	{ (void *)DS1603_REG_100, (void *)DS1603_REG_100,
		DS1603_RST_100, DS1603_CLK_100, DS1603_DATA_100,
		DS1603_DATA_SHIFT_100, 0, 0 },
	{ (void *)DS1603_REG_200, (void *)DS1603_DATA_REG_200,
		DS1603_RST_200, DS1603_CLK_200, DS1603_DATA_200,
		DS1603_DATA_READ_SHIFT_200, 1, 2000 }
};
#endif

#ifdef CONFIG_PICVUE
#include "picvue.h"
/* PICVUE LCD wiring for the two board variants. */
static struct pvc_defs pvc_defs[N_MACHTYPES] = {
	{ (void *)PVC_REG_100, PVC_DATA_SHIFT_100, PVC_DATA_M_100,
		PVC_E_100, PVC_RW_100, PVC_RS_100 },
	{ (void *)PVC_REG_200, PVC_DATA_SHIFT_200, PVC_DATA_M_200,
		PVC_E_200, PVC_RW_200, PVC_RS_200 }
};
#endif

/*
 * Panic notifier: show the panic message (or a default) on the front
 * panel LCD when PICVUE support is built in; otherwise a no-op.
 * NOTE(review): assigning a string literal to an unsigned char * drops
 * signedness/constness — harmless here but worth cleaning up.
 */
static int lasat_panic_display(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
#ifdef CONFIG_PICVUE
	unsigned char *string = ptr;
	if (string == NULL)
		string = "Kernel Panic";
	pvc_dump_string(string);
#endif
	return NOTIFY_DONE;
}

/* Panic notifier of last resort: drop into the PROM monitor. */
static int lasat_panic_prom_monitor(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	prom_monitor();
	return NOTIFY_DONE;
}

/*
 * Display first (INT_MAX priority), PROM monitor last (INT_MIN) so the
 * message is shown before control is handed away.
 */
static struct notifier_block lasat_panic_block[] = {
	{
		.notifier_call	= lasat_panic_display,
		.priority	= INT_MAX
	},
	{
		.notifier_call	= lasat_panic_prom_monitor,
		.priority	= INT_MIN
	}
};

/* Clock the MIPS count/compare timer at half the CPU rate and unmask IRQ0. */
void __init plat_time_init(void)
{
	mips_hpt_frequency = lasat_board_info.li_cpu_hz / 2;

	change_c0_status(ST0_IM, IE_IRQ0);
}

/*
 * Board setup: pick the LASAT 100 vs 200 register maps, register the
 * panic notifiers, and initialize reboot, RTC and serial support.
 */
void __init plat_mem_setup(void)
{
	int i;
	int lasat_type = IS_LASAT_200() ? 1 : 0;

	lasat_misc = &lasat_misc_info[lasat_type];
#ifdef CONFIG_PICVUE
	picvue = &pvc_defs[lasat_type];
#endif

	/* Set up panic notifier */
	for (i = 0; i < ARRAY_SIZE(lasat_panic_block); i++)
		atomic_notifier_chain_register(&panic_notifier_list,
				&lasat_panic_block[i]);

	lasat_reboot_setup();

#ifdef CONFIG_DS1603
	ds1603 = &ds_defs[lasat_type];
#endif

#ifdef DYNAMIC_SERIAL_INIT
	serial_init();
#endif

	pr_info("Lasat specific initialization complete\n");
}
gpl-2.0
zanezam/boeffla-kernel-samsung-s3
arch/mips/lasat/setup.c
14050
3811
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
 *
 * Thomas Horsten <thh@lasat.com>
 * Copyright (C) 2000 LASAT Networks A/S.
 *
 * Brian Murphy <brian@murphy.dk>
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Lasat specific setup.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/tty.h>

#include <asm/time.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/serial.h>

#ifdef CONFIG_PICVUE
#include <linux/notifier.h>
#endif

#include "ds1603.h"
#include <asm/lasat/ds1603.h>
#include <asm/lasat/picvue.h>
#include <asm/lasat/eeprom.h>

#include "prom.h"

int lasat_command_line;
void lasatint_init(void);

extern void lasat_reboot_setup(void);
extern void pcisetup(void);
extern void edhac_init(void *, void *, void *);
extern void addrflt_init(void);

/*
 * Per-machine-type register addresses: index 0 is the LASAT 100 board,
 * index 1 the LASAT 200 (selected at runtime in plat_mem_setup()).
 * The trailing positional value fills the member after flash_wp_reg.
 */
struct lasat_misc lasat_misc_info[N_MACHTYPES] = {
	{
		.reset_reg	= (void *)KSEG1ADDR(0x1c840000),
		.flash_wp_reg	= (void *)KSEG1ADDR(0x1c800000), 2
	},
	{
		.reset_reg	= (void *)KSEG1ADDR(0x11080000),
		.flash_wp_reg	= (void *)KSEG1ADDR(0x11000000), 6
	}
};

struct lasat_misc *lasat_misc;

#ifdef CONFIG_DS1603
/* DS1603 RTC wiring for the two board variants. */
static struct ds_defs ds_defs[N_MACHTYPES] = {
	{ (void *)DS1603_REG_100, (void *)DS1603_REG_100,
		DS1603_RST_100, DS1603_CLK_100, DS1603_DATA_100,
		DS1603_DATA_SHIFT_100, 0, 0 },
	{ (void *)DS1603_REG_200, (void *)DS1603_DATA_REG_200,
		DS1603_RST_200, DS1603_CLK_200, DS1603_DATA_200,
		DS1603_DATA_READ_SHIFT_200, 1, 2000 }
};
#endif

#ifdef CONFIG_PICVUE
#include "picvue.h"
/* PICVUE LCD wiring for the two board variants. */
static struct pvc_defs pvc_defs[N_MACHTYPES] = {
	{ (void *)PVC_REG_100, PVC_DATA_SHIFT_100, PVC_DATA_M_100,
		PVC_E_100, PVC_RW_100, PVC_RS_100 },
	{ (void *)PVC_REG_200, PVC_DATA_SHIFT_200, PVC_DATA_M_200,
		PVC_E_200, PVC_RW_200, PVC_RS_200 }
};
#endif

/*
 * Panic notifier: show the panic message (or a default) on the front
 * panel LCD when PICVUE support is built in; otherwise a no-op.
 * NOTE(review): assigning a string literal to an unsigned char * drops
 * signedness/constness — harmless here but worth cleaning up.
 */
static int lasat_panic_display(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
#ifdef CONFIG_PICVUE
	unsigned char *string = ptr;
	if (string == NULL)
		string = "Kernel Panic";
	pvc_dump_string(string);
#endif
	return NOTIFY_DONE;
}

/* Panic notifier of last resort: drop into the PROM monitor. */
static int lasat_panic_prom_monitor(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	prom_monitor();
	return NOTIFY_DONE;
}

/*
 * Display first (INT_MAX priority), PROM monitor last (INT_MIN) so the
 * message is shown before control is handed away.
 */
static struct notifier_block lasat_panic_block[] = {
	{
		.notifier_call	= lasat_panic_display,
		.priority	= INT_MAX
	},
	{
		.notifier_call	= lasat_panic_prom_monitor,
		.priority	= INT_MIN
	}
};

/* Clock the MIPS count/compare timer at half the CPU rate and unmask IRQ0. */
void __init plat_time_init(void)
{
	mips_hpt_frequency = lasat_board_info.li_cpu_hz / 2;

	change_c0_status(ST0_IM, IE_IRQ0);
}

/*
 * Board setup: pick the LASAT 100 vs 200 register maps, register the
 * panic notifiers, and initialize reboot, RTC and serial support.
 */
void __init plat_mem_setup(void)
{
	int i;
	int lasat_type = IS_LASAT_200() ? 1 : 0;

	lasat_misc = &lasat_misc_info[lasat_type];
#ifdef CONFIG_PICVUE
	picvue = &pvc_defs[lasat_type];
#endif

	/* Set up panic notifier */
	for (i = 0; i < ARRAY_SIZE(lasat_panic_block); i++)
		atomic_notifier_chain_register(&panic_notifier_list,
				&lasat_panic_block[i]);

	lasat_reboot_setup();

#ifdef CONFIG_DS1603
	ds1603 = &ds_defs[lasat_type];
#endif

#ifdef DYNAMIC_SERIAL_INIT
	serial_init();
#endif

	pr_info("Lasat specific initialization complete\n");
}
gpl-2.0
gianmarcorev/rpi_linux
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
227
6945
/*
 * Host AP crypt: host-based WEP encryption implementation for Host AP driver
 *
 * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation. See README and COPYING for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#include "ieee80211.h"

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>

MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");

/* Per-key WEP state: running IV, secret key, and the RC4 transforms. */
struct prism2_wep_data {
	u32 iv;			/* 24-bit IV counter, incremented per frame */
#define WEP_KEY_LEN 13
	u8 key[WEP_KEY_LEN + 1];
	u8 key_len;
	u8 key_idx;		/* 0..3, transmitted in the frame header */
	struct crypto_blkcipher *tx_tfm;
	struct crypto_blkcipher *rx_tfm;
};

/*
 * Allocate per-key state and the two arc4 cipher handles; seed the IV
 * randomly.  Returns the opaque context or NULL on any failure (all
 * partially-acquired resources are released on the fail path).
 */
static void *prism2_wep_init(int keyidx)
{
	struct prism2_wep_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (priv == NULL)
		goto fail;
	priv->key_idx = keyidx;

	priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tx_tfm)) {
		pr_debug("ieee80211_crypt_wep: could not allocate "
		       "crypto API arc4\n");
		priv->tx_tfm = NULL;
		goto fail;
	}
	priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->rx_tfm)) {
		pr_debug("ieee80211_crypt_wep: could not allocate "
		       "crypto API arc4\n");
		priv->rx_tfm = NULL;
		goto fail;
	}

	/* start WEP IV from a random value */
	get_random_bytes(&priv->iv, 4);

	return priv;

fail:
	if (priv) {
		if (priv->tx_tfm)
			crypto_free_blkcipher(priv->tx_tfm);
		if (priv->rx_tfm)
			crypto_free_blkcipher(priv->rx_tfm);
		kfree(priv);
	}
	return NULL;
}

/* Free the cipher handles and the context allocated by prism2_wep_init(). */
static void prism2_wep_deinit(void *priv)
{
	struct prism2_wep_data *_priv = priv;

	if (_priv) {
		if (_priv->tx_tfm)
			crypto_free_blkcipher(_priv->tx_tfm);
		if (_priv->rx_tfm)
			crypto_free_blkcipher(_priv->rx_tfm);
	}
	kfree(priv);
}

/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be
 * transmitted, so the payload length increases by 8 bytes.
 *
 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
 *
 * Returns 0 on success (or when hardware does the crypto), -1 when the
 * skb lacks the required head/tailroom, otherwise the cipher's result.
 */
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, len;
	u8 key[WEP_KEY_LEN + 3];
	u8 *pos;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
	u32 crc;
	u8 *icv;
	struct scatterlist sg;

	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len)
		return -1;

	len = skb->len - hdr_len;
	/* Open up 4 bytes after the 802.11 header for the IV. */
	pos = skb_push(skb, 4);
	memmove(pos, pos + 4, hdr_len);
	pos += hdr_len;

	klen = 3 + wep->key_len;

	wep->iv++;

	/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
	 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
	 * can be used to speedup attacks, so avoid using them. */
	if ((wep->iv & 0xff00) == 0xff00) {
		u8 B = (wep->iv >> 16) & 0xff;
		if (B >= 3 && B < klen)
			wep->iv += 0x0100;
	}

	/* Prepend 24-bit IV to RC4 key and TX frame */
	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
	*pos++ = key[2] = wep->iv & 0xff;
	*pos++ = wep->key_idx << 6;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	if (!tcb_desc->bHwSec) {
		/* Append little-endian CRC32 and encrypt it to produce ICV */
		crc = ~crc32_le(~0, pos, len);
		icv = skb_put(skb, 4);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

		crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
		sg_init_one(&sg, pos, len+4);
		return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
	}

	return 0;
}

/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
 * ICV (4 bytes). len includes both IV and ICV.
 *
 * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
 * failure. If frame is OK, IV and ICV will be removed.
 * NOTE(review): a cipher error returns -7 rather than -1 — presumably a
 * distinct diagnostic code; callers appear to only test for non-zero.
 */
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;

	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	/* Recover the 24-bit IV and the key index from the frame. */
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;

	if (!tcb_desc->bHwSec) {
		crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
		sg_init_one(&sg, pos, plen+4);

		if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
			return -7;

		crc = ~crc32_le(~0, pos, plen);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;

		if (memcmp(icv, pos + plen, 4) != 0) {
			/* ICV mismatch - drop frame */
			return -2;
		}
	}
	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}

/* Install a new WEP key (up to WEP_KEY_LEN bytes); seq is unused. */
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}

/* Copy the current key out; returns its length, or -1 if buf too small. */
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}

/* Append a one-line key summary to the proc buffer at p. */
static char *prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;
	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
		     wep->key_idx, wep->key_len);
	return p;
}

/* Crypto-ops vtable registered with the ieee80211 crypt framework. */
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4, /* IV */
	.extra_postfix_len	= 4, /* ICV */
	.owner			= THIS_MODULE,
};

int __init ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}

void __exit ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}

/* Empty symbol kept so other objects can force-link this module. */
void ieee80211_wep_null(void)
{
//	printk("============>%s()\n", __func__);
	return;
}
gpl-2.0
rt-linux/linux
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
227
94225
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
* * lnet/klnds/o2iblnd/o2iblnd_cb.c * * Author: Eric Barton <eric@bartonsoftware.com> */ #include "o2iblnd.h" static void kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) { lnet_msg_t *lntmsg[2]; kib_net_t *net = ni->ni_data; int rc; int i; LASSERT(net != NULL); LASSERT(!in_interrupt()); LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ LASSERT(tx->tx_pool != NULL); kiblnd_unmap_tx(ni, tx); /* tx may have up to 2 lnet msgs to finalise */ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; rc = tx->tx_status; if (tx->tx_conn != NULL) { LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); kiblnd_conn_decref(tx->tx_conn); tx->tx_conn = NULL; } tx->tx_nwrq = 0; tx->tx_status = 0; kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); /* delay finalize until my descs have been freed */ for (i = 0; i < 2; i++) { if (lntmsg[i] == NULL) continue; lnet_finalize(ni, lntmsg[i], rc); } } void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) { kib_tx_t *tx; while (!list_empty(txlist)) { tx = list_entry(txlist->next, kib_tx_t, tx_list); list_del(&tx->tx_list); /* complete now */ tx->tx_waiting = 0; tx->tx_status = status; kiblnd_tx_done(ni, tx); } } static kib_tx_t * kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) { kib_net_t *net = (kib_net_t *)ni->ni_data; struct list_head *node; kib_tx_t *tx; kib_tx_poolset_t *tps; tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); if (node == NULL) return NULL; tx = container_of(node, kib_tx_t, tx_list); LASSERT(tx->tx_nwrq == 0); LASSERT(!tx->tx_queued); LASSERT(tx->tx_sending == 0); LASSERT(!tx->tx_waiting); LASSERT(tx->tx_status == 0); LASSERT(tx->tx_conn == NULL); LASSERT(tx->tx_lntmsg[0] == NULL); LASSERT(tx->tx_lntmsg[1] == NULL); LASSERT(tx->tx_u.pmr == NULL); 
LASSERT(tx->tx_nfrags == 0); return tx; } static void kiblnd_drop_rx(kib_rx_t *rx) { kib_conn_t *conn = rx->rx_conn; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; spin_lock_irqsave(&sched->ibs_lock, flags); LASSERT(conn->ibc_nrx > 0); conn->ibc_nrx--; spin_unlock_irqrestore(&sched->ibs_lock, flags); kiblnd_conn_decref(conn); } int kiblnd_post_rx(kib_rx_t *rx, int credit) { kib_conn_t *conn = rx->rx_conn; kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; struct ib_mr *mr; int rc; LASSERT(net != NULL); LASSERT(!in_interrupt()); LASSERT(credit == IBLND_POSTRX_NO_CREDIT || credit == IBLND_POSTRX_PEER_CREDIT || credit == IBLND_POSTRX_RSRVD_CREDIT); mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); LASSERT(mr != NULL); rx->rx_sge.lkey = mr->lkey; rx->rx_sge.addr = rx->rx_msgaddr; rx->rx_sge.length = IBLND_MSG_SIZE; rx->rx_wrq.next = NULL; rx->rx_wrq.sg_list = &rx->rx_sge; rx->rx_wrq.num_sge = 1; rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); LASSERT(conn->ibc_state >= IBLND_CONN_INIT); LASSERT(rx->rx_nob >= 0); /* not posted */ if (conn->ibc_state > IBLND_CONN_ESTABLISHED) { kiblnd_drop_rx(rx); /* No more posts for this rx */ return 0; } rx->rx_nob = -1; /* flag posted */ rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); if (rc != 0) { CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); rx->rx_nob = 0; } if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ return rc; if (rc != 0) { kiblnd_close_conn(conn, rc); kiblnd_drop_rx(rx); /* No more posts for this rx */ return rc; } if (credit == IBLND_POSTRX_NO_CREDIT) return 0; spin_lock(&conn->ibc_lock); if (credit == IBLND_POSTRX_PEER_CREDIT) conn->ibc_outstanding_credits++; else conn->ibc_reserved_credits++; spin_unlock(&conn->ibc_lock); kiblnd_check_sends(conn); return 0; } static kib_tx_t * kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 
cookie) { struct list_head *tmp; list_for_each(tmp, &conn->ibc_active_txs) { kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); LASSERT(!tx->tx_queued); LASSERT(tx->tx_sending != 0 || tx->tx_waiting); if (tx->tx_cookie != cookie) continue; if (tx->tx_waiting && tx->tx_msg->ibm_type == txtype) return tx; CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", tx->tx_waiting ? "" : "NOT ", tx->tx_msg->ibm_type, txtype); } return NULL; } static void kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) { kib_tx_t *tx; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; int idle; spin_lock(&conn->ibc_lock); tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); if (tx == NULL) { spin_unlock(&conn->ibc_lock); CWARN("Unmatched completion type %x cookie %#llx from %s\n", txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); kiblnd_close_conn(conn, -EPROTO); return; } if (tx->tx_status == 0) { /* success so far */ if (status < 0) { /* failed? */ tx->tx_status = status; } else if (txtype == IBLND_MSG_GET_REQ) { lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status); } } tx->tx_waiting = 0; idle = !tx->tx_queued && (tx->tx_sending == 0); if (idle) list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); if (idle) kiblnd_tx_done(ni, tx); } static void kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) { lnet_ni_t *ni = conn->ibc_peer->ibp_ni; kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for completion %x for %s\n", type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); return; } tx->tx_msg->ibm_u.completion.ibcm_status = status; tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t)); kiblnd_queue_tx(tx, conn); } static void kiblnd_handle_rx(kib_rx_t *rx) { kib_msg_t *msg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; int credits = msg->ibm_credits; kib_tx_t *tx; int rc = 0; int rc2; 
	int post_credit;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	CDEBUG(D_NET, "Received %x[%d] from %s\n",
	       msg->ibm_type, credits,
	       libcfs_nid2str(conn->ibc_peer->ibp_nid));

	if (credits != 0) {
		/* Have I received credits that will let me send? */
		spin_lock(&conn->ibc_lock);

		if (conn->ibc_credits + credits >
		    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
			rc2 = conn->ibc_credits;
			spin_unlock(&conn->ibc_lock);

			CERROR("Bad credits from %s: %d + %d > %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
			       rc2, credits,
			       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));

			kiblnd_close_conn(conn, -EPROTO);
			kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
			return;
		}

		conn->ibc_credits += credits;

		/* This ensures the credit taken by NOOP can be returned */
		if (msg->ibm_type == IBLND_MSG_NOOP &&
		    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
			conn->ibc_outstanding_credits++;

		spin_unlock(&conn->ibc_lock);
		kiblnd_check_sends(conn);
	}

	switch (msg->ibm_type) {
	default:
		CERROR("Bad IBLND message type %x from %s\n",
		       msg->ibm_type,
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		post_credit = IBLND_POSTRX_NO_CREDIT;
		rc = -EPROTO;
		break;

	case IBLND_MSG_NOOP:
		if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
			post_credit = IBLND_POSTRX_NO_CREDIT;
			break;
		}

		if (credits != 0) /* credit already posted */
			post_credit = IBLND_POSTRX_NO_CREDIT;
		else /* a keepalive NOOP */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_IMMEDIATE:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
				msg->ibm_srcnid, rx, 0);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_PUT_REQ:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
				msg->ibm_srcnid, rx, 1);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_PUT_NAK:
		CWARN("PUT_NACK from %s\n",
		      libcfs_nid2str(conn->ibc_peer->ibp_nid));
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;

	case IBLND_MSG_PUT_ACK:
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;

		spin_lock(&conn->ibc_lock);
		tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
					msg->ibm_u.putack.ibpam_src_cookie);
		if (tx != NULL)
			list_del(&tx->tx_list);
		spin_unlock(&conn->ibc_lock);

		if (tx == NULL) {
			CERROR("Unmatched PUT_ACK from %s\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			rc = -EPROTO;
			break;
		}

		LASSERT(tx->tx_waiting);
		/* CAVEAT EMPTOR: I could be racing with tx_complete, but...
		 * (a) I can overwrite tx_msg since my peer has received it!
		 * (b) tx_waiting set tells tx_complete() it's not done. */
		tx->tx_nwrq = 0;	/* overwrite PUT_REQ */

		rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
				kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
				&msg->ibm_u.putack.ibpam_rd,
				msg->ibm_u.putack.ibpam_dst_cookie);
		if (rc2 < 0)
			CERROR("Can't setup rdma for PUT to %s: %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

		spin_lock(&conn->ibc_lock);
		tx->tx_waiting = 0;	/* clear waiting and queue atomically */
		kiblnd_queue_tx_locked(tx, conn);
		spin_unlock(&conn->ibc_lock);
		break;

	case IBLND_MSG_PUT_DONE:
		post_credit = IBLND_POSTRX_PEER_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;

	case IBLND_MSG_GET_REQ:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
				msg->ibm_srcnid, rx, 1);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_GET_DONE:
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;
	}

	if (rc < 0) /* protocol error */
		kiblnd_close_conn(conn, rc);

	if (post_credit != IBLND_POSTRX_DONT_POST)
		kiblnd_post_rx(rx, post_credit);
}

/* Completion handler for a posted receive: validate the message (unpack,
 * stale-connection check), record peer liveness, and either stash the rx on
 * the early-rx list (connection not yet established) or dispatch it. */
static void
kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
{
	kib_msg_t *msg = rx->rx_msg;
	kib_conn_t *conn = rx->rx_conn;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	kib_net_t *net = ni->ni_data;
	int rc;
	int err = -EIO;

	LASSERT(net != NULL);
	LASSERT(rx->rx_nob < 0);	/* was posted */
	rx->rx_nob = 0;			/* isn't now */

	if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
		goto ignore;

	if (status != IB_WC_SUCCESS) {
		CNETERR("Rx from %s failed: %d\n",
			libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
		goto failed;
	}

	LASSERT(nob >= 0);
	rx->rx_nob = nob;

	rc = kiblnd_unpack_msg(msg, rx->rx_nob);
	if (rc != 0) {
		CERROR("Error %d unpacking rx from %s\n",
		       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
		goto failed;
	}

	if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
	    msg->ibm_dstnid != ni->ni_nid ||
	    msg->ibm_srcstamp != conn->ibc_incarnation ||
	    msg->ibm_dststamp != net->ibn_incarnation) {
		CERROR("Stale rx from %s\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		err = -ESTALE;
		goto failed;
	}

	/* set time last known alive */
	kiblnd_peer_alive(conn->ibc_peer);

	/* racing with connection establishment/teardown! */
	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
		rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
		unsigned long flags;

		write_lock_irqsave(g_lock, flags);
		/* must check holding global lock to eliminate race */
		if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
			list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
			write_unlock_irqrestore(g_lock, flags);
			return;
		}
		write_unlock_irqrestore(g_lock, flags);
	}
	kiblnd_handle_rx(rx);
	return;

failed:
	CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
	kiblnd_close_conn(conn, err);
ignore:
	kiblnd_drop_rx(rx);	/* Don't re-post rx.
				 */
}

/* Translate a kernel virtual address to its struct page.  vmalloc and
 * lowmem linear mappings are supported; highmem pkmap addresses are a bug
 * here (bulk I/O uses kiovs, not kernel vaddrs). */
static struct page *
kiblnd_kvaddr_to_page(unsigned long vaddr)
{
	struct page *page;

	if (is_vmalloc_addr((void *)vaddr)) {
		page = vmalloc_to_page((void *)vaddr);
		LASSERT(page != NULL);
		return page;
	}
#ifdef CONFIG_HIGHMEM
	if (vaddr >= PKMAP_BASE &&
	    vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
		/* No highmem pages only used for bulk (kiov) I/O */
		CERROR("find page for address in highmem\n");
		LBUG();
	}
#endif
	page = virt_to_page(vaddr);
	LASSERT(page != NULL);
	return page;
}

/* Map the tx's RDMA descriptor through an FMR pool: expand each fragment
 * into page addresses, map them, and collapse the descriptor to a single
 * virtual fragment keyed by the FMR. */
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
	kib_hca_dev_t *hdev;
	__u64 *pages = tx->tx_pages;
	kib_fmr_poolset_t *fps;
	int npages;
	int size;
	int cpt;
	int rc;
	int i;

	LASSERT(tx->tx_pool != NULL);
	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

	hdev = tx->tx_pool->tpo_hdev;

	for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
		for (size = 0; size < rd->rd_frags[i].rf_nob;
		     size += hdev->ibh_page_size) {
			pages[npages++] = (rd->rd_frags[i].rf_addr &
					   hdev->ibh_page_mask) + size;
		}
	}

	cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

	fps = net->ibn_fmr_ps[cpt];
	rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
	if (rc != 0) {
		CERROR("Can't map %d pages: %d\n", npages, rc);
		return rc;
	}

	/* If rd is not tx_rd, it's going to get sent to a peer, who will need
	 * the rkey */
	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
					 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
	rd->rd_frags[0].rf_nob = nob;
	rd->rd_nfrags = 1;

	return 0;
}

/* Map the tx's RDMA descriptor through a physical MR (PMR) pool and
 * collapse it to a single fragment at the mapped iova. */
static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
	kib_hca_dev_t *hdev;
	kib_pmr_poolset_t *pps;
	__u64 iova;
	int cpt;
	int rc;

	LASSERT(tx->tx_pool != NULL);
	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

	hdev = tx->tx_pool->tpo_hdev;

	iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;

	cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

	pps = net->ibn_pmr_ps[cpt];
	rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr);
	if (rc != 0) {
		CERROR("Failed to create MR by phybuf: %d\n", rc);
		return rc;
	}

	/* If rd is not tx_rd, it's going to get sent to a peer, who will need
	 * the rkey */
	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
					 tx->tx_u.pmr->pmr_mr->lkey;
	rd->rd_nfrags = 1;
	rd->rd_frags[0].rf_addr = iova;
	rd->rd_frags[0].rf_nob = nob;

	return 0;
}

/* Undo kiblnd_map_tx(): release any FMR/PMR mapping, then DMA-unmap the
 * tx's scatterlist. */
void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
	kib_net_t *net = ni->ni_data;

	LASSERT(net != NULL);

	if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
		kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
		tx->tx_u.fmr.fmr_pfmr = NULL;
	} else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) {
		kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
		tx->tx_u.pmr = NULL;
	}

	if (tx->tx_nfrags != 0) {
		kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
				    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
		tx->tx_nfrags = 0;
	}
}

/* DMA-map the tx's scatterlist into 'rd' and establish a memory key:
 * prefer a pre-registered whole-memory MR, else fall back to FMR then PMR
 * pools.  Returns 0 on success or a negative errno. */
int
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
{
	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
	kib_net_t *net = ni->ni_data;
	struct ib_mr *mr = NULL;
	__u32 nob;
	int i;

	/* If rd is not tx_rd, it's going to get sent to a peer and I'm the
	 * RDMA sink */
	tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	tx->tx_nfrags = nfrags;

	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
					  tx->tx_nfrags, tx->tx_dmadir);

	for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
		rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
			hdev->ibh_ibdev, &tx->tx_frags[i]);
		rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
			hdev->ibh_ibdev, &tx->tx_frags[i]);
		nob += rd->rd_frags[i].rf_nob;
	}

	/* looking for pre-mapping MR */
	mr = kiblnd_find_rd_dma_mr(hdev, rd);
	if (mr != NULL) {
		/* found pre-mapping MR */
		rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
		return 0;
	}

	if (net->ibn_fmr_ps != NULL)
		return kiblnd_fmr_map_tx(net, tx, rd, nob);
	else if (net->ibn_pmr_ps != NULL)
		return kiblnd_pmr_map_tx(net, tx, rd, nob);

	return -EINVAL;
}

/* Build the tx scatterlist from a kernel iovec (offset/nob window) and map
 * it via kiblnd_map_tx(). */
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
		    unsigned int niov, struct kvec *iov, int offset, int nob)
{
	kib_net_t *net = ni->ni_data;
	struct page *page;
	struct scatterlist *sg;
	unsigned long vaddr;
	int fragnob;
	int page_offset;

	LASSERT(nob > 0);
	LASSERT(niov > 0);
	LASSERT(net != NULL);

	/* skip whole iov entries before 'offset' */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		niov--;
		iov++;
		LASSERT(niov > 0);
	}

	sg = tx->tx_frags;
	do {
		LASSERT(niov > 0);

		vaddr = ((unsigned long)iov->iov_base) + offset;
		page_offset = vaddr & (PAGE_SIZE - 1);
		page = kiblnd_kvaddr_to_page(vaddr);
		if (page == NULL) {
			CERROR("Can't find page\n");
			return -EFAULT;
		}

		/* fragment limited by iov entry AND page boundary */
		fragnob = min((int)(iov->iov_len - offset), nob);
		fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

		sg_set_page(sg, page, fragnob, page_offset);
		sg++;

		if (offset + fragnob < iov->iov_len) {
			offset += fragnob;
		} else {
			offset = 0;
			iov++;
			niov--;
		}
		nob -= fragnob;
	} while (nob > 0);

	return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

/* Build the tx scatterlist from a page-based kiov (offset/nob window) and
 * map it via kiblnd_map_tx(). */
static int
kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
		     int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
	kib_net_t *net = ni->ni_data;
	struct scatterlist *sg;
	int fragnob;

	CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov,
	       offset, nob);

	LASSERT(nob > 0);
	LASSERT(nkiov > 0);
	LASSERT(net != NULL);

	/* skip whole kiov entries before 'offset' */
	while (offset >= kiov->kiov_len) {
		offset -= kiov->kiov_len;
		nkiov--;
		kiov++;
		LASSERT(nkiov > 0);
	}

	sg = tx->tx_frags;
	do {
		LASSERT(nkiov > 0);

		fragnob = min((int)(kiov->kiov_len - offset), nob);

		sg_set_page(sg, kiov->kiov_page, fragnob,
			    kiov->kiov_offset + offset);
		sg++;

		offset = 0;
		kiov++;
		nkiov--;
		nob -= fragnob;
	} while (nob > 0);

	return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

/* Try to post one queued tx on the wire.  Called and returns with ibc_lock
 * held (temporarily dropped for kiblnd_tx_done/error reporting, as the
 * sparse annotations document).  Returns 0 on success/consumed, -EAGAIN
 * when posting must wait (send window or credits exhausted), -EIO on a
 * post failure (connection is closed). */
static int
kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
	__releases(conn->ibc_lock)
	__acquires(conn->ibc_lock)
{
	kib_msg_t *msg = tx->tx_msg;
	kib_peer_t *peer = conn->ibc_peer;
	int ver = conn->ibc_version;
	int rc;
	int done;
	struct ib_send_wr *bad_wrq;

	LASSERT(tx->tx_queued);
	/* We rely on this for QP sizing */
	LASSERT(tx->tx_nwrq > 0);
	LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));

	LASSERT(credit == 0 || credit == 1);
	LASSERT(conn->ibc_outstanding_credits >= 0);
	LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
	LASSERT(conn->ibc_credits >= 0);
	LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));

	if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
		/* tx completions outstanding... */
		CDEBUG(D_NET, "%s: posted enough\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	if (credit != 0 && conn->ibc_credits == 0) {	/* no credits */
		CDEBUG(D_NET, "%s: no credits\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
	    conn->ibc_credits == 1 &&	/* last credit reserved */
	    msg->ibm_type != IBLND_MSG_NOOP) {	/* for NOOP */
		CDEBUG(D_NET, "%s: not using last credit\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	/* NB don't drop ibc_lock before bumping tx_sending */
	list_del(&tx->tx_list);
	tx->tx_queued = 0;

	if (msg->ibm_type == IBLND_MSG_NOOP &&
	    (!kiblnd_need_noop(conn) ||	/* redundant NOOP */
	     (IBLND_OOB_CAPABLE(ver) &&	/* posted enough NOOP */
	      conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
		/* OK to drop when posted enough NOOPs, since
		 * kiblnd_check_sends will queue NOOP again when
		 * posted NOOPs complete */
		spin_unlock(&conn->ibc_lock);
		kiblnd_tx_done(peer->ibp_ni, tx);
		spin_lock(&conn->ibc_lock);
		CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
		       libcfs_nid2str(peer->ibp_nid),
		       conn->ibc_noops_posted);
		return 0;
	}

	kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
			peer->ibp_nid, conn->ibc_incarnation);

	conn->ibc_credits -= credit;
	conn->ibc_outstanding_credits = 0;
	conn->ibc_nsends_posted++;
	if (msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted++;

	/* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
	 * PUT. If so, it was first queued here as a PUT_REQ, sent and
	 * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
	 * and then re-queued here. It's (just) possible that
	 * tx_sending is non-zero if we've not done the tx_complete()
	 * from the first send; hence the ++ rather than = below. */
	tx->tx_sending++;
	list_add(&tx->tx_list, &conn->ibc_active_txs);

	/* I'm still holding ibc_lock! */
	if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
		rc = -ECONNABORTED;
	} else if (tx->tx_pool->tpo_pool.po_failed ||
		   conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
		/* close_conn will launch failover */
		rc = -ENETDOWN;
	} else {
		rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
	}

	conn->ibc_last_send = jiffies;

	if (rc == 0)
		return 0;

	/* NB credits are transferred in the actual
	 * message, which can only be the last work item */
	conn->ibc_credits += credit;
	conn->ibc_outstanding_credits += msg->ibm_credits;
	conn->ibc_nsends_posted--;
	if (msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted--;

	tx->tx_status = rc;
	tx->tx_waiting = 0;
	tx->tx_sending--;

	done = (tx->tx_sending == 0);
	if (done)
		list_del(&tx->tx_list);

	spin_unlock(&conn->ibc_lock);

	if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
		CERROR("Error %d posting transmit to %s\n",
		       rc, libcfs_nid2str(peer->ibp_nid));
	else
		CDEBUG(D_NET, "Error %d posting transmit to %s\n",
		       rc, libcfs_nid2str(peer->ibp_nid));

	kiblnd_close_conn(conn, rc);

	if (done)
		kiblnd_tx_done(peer->ibp_ni, tx);

	spin_lock(&conn->ibc_lock);

	return -EIO;
}

/* Drain the connection's tx queues as far as the send window and credit
 * protocol allow: promote reserved-credit txs, inject a NOOP if one is
 * needed, then post from the nocred/noops/normal queues in that order. */
void
kiblnd_check_sends(kib_conn_t *conn)
{
	int ver = conn->ibc_version;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	kib_tx_t *tx;

	/* Don't send anything until after the connection is established */
	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
		CDEBUG(D_NET, "%s too soon\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		return;
	}

	spin_lock(&conn->ibc_lock);

	LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
	LASSERT(!IBLND_OOB_CAPABLE(ver) ||
		conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
	LASSERT(conn->ibc_reserved_credits >= 0);

	while (conn->ibc_reserved_credits > 0 &&
	       !list_empty(&conn->ibc_tx_queue_rsrvd)) {
		tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
				kib_tx_t, tx_list);
		list_del(&tx->tx_list);
		list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
		conn->ibc_reserved_credits--;
	}

	if (kiblnd_need_noop(conn)) {
		spin_unlock(&conn->ibc_lock);

		tx = kiblnd_get_idle_tx(ni,
					conn->ibc_peer->ibp_nid);
		if (tx != NULL)
			kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

		spin_lock(&conn->ibc_lock);
		if (tx != NULL)
			kiblnd_queue_tx_locked(tx, conn);
	}

	kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */

	for (;;) {
		int credit;

		if (!list_empty(&conn->ibc_tx_queue_nocred)) {
			credit = 0;
			tx = list_entry(conn->ibc_tx_queue_nocred.next,
					kib_tx_t, tx_list);
		} else if (!list_empty(&conn->ibc_tx_noops)) {
			LASSERT(!IBLND_OOB_CAPABLE(ver));
			credit = 1;
			tx = list_entry(conn->ibc_tx_noops.next,
					kib_tx_t, tx_list);
		} else if (!list_empty(&conn->ibc_tx_queue)) {
			credit = 1;
			tx = list_entry(conn->ibc_tx_queue.next,
					kib_tx_t, tx_list);
		} else
			break;

		if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
			break;
	}

	spin_unlock(&conn->ibc_lock);

	kiblnd_conn_decref(conn); /* ...until here */
}

/* Completion handler for a posted send: record success/failure, drop the
 * send count, and finalise the tx once it is fully idle (not sending, not
 * waiting for the peer, not re-queued). */
static void
kiblnd_tx_complete(kib_tx_t *tx, int status)
{
	int failed = (status != IB_WC_SUCCESS);
	kib_conn_t *conn = tx->tx_conn;
	int idle;

	LASSERT(tx->tx_sending > 0);

	if (failed) {
		if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
			CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
				libcfs_nid2str(conn->ibc_peer->ibp_nid),
				tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
				status);
		kiblnd_close_conn(conn, -EIO);
	} else {
		kiblnd_peer_alive(conn->ibc_peer);
	}

	spin_lock(&conn->ibc_lock);

	/* I could be racing with rdma completion. Whoever makes 'tx' idle
	 * gets to free it, which also drops its ref on 'conn'. */
	tx->tx_sending--;
	conn->ibc_nsends_posted--;
	if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted--;

	if (failed) {
		tx->tx_waiting = 0;	/* don't wait for peer */
		tx->tx_status = -EIO;
	}

	idle = (tx->tx_sending == 0) &&	/* This is the final callback */
	       !tx->tx_waiting &&	/* Not waiting for peer */
	       !tx->tx_queued;		/* Not re-queued (PUT_DONE) */
	if (idle)
		list_del(&tx->tx_list);

	kiblnd_conn_addref(conn);	/* 1 ref for me....
	 */
	spin_unlock(&conn->ibc_lock);

	if (idle)
		kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);

	kiblnd_check_sends(conn);

	kiblnd_conn_decref(conn);	/* ...until here */
}

/* Append a SEND work request carrying an immediate message of 'type' with
 * 'body_nob' payload bytes to the tx's work-request array. */
void
kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
	struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
	struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
	int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
	struct ib_mr *mr;

	LASSERT(tx->tx_nwrq >= 0);
	LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
	LASSERT(nob <= IBLND_MSG_SIZE);

	kiblnd_init_msg(tx->tx_msg, type, body_nob);

	mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
	LASSERT(mr != NULL);

	sge->lkey = mr->lkey;
	sge->addr = tx->tx_msgaddr;
	sge->length = nob;

	memset(wrq, 0, sizeof(*wrq));

	wrq->next = NULL;
	wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
	wrq->sg_list = sge;
	wrq->num_sge = 1;
	wrq->opcode = IB_WR_SEND;
	wrq->send_flags = IB_SEND_SIGNALED;

	tx->tx_nwrq++;
}

/* Build RDMA_WRITE work requests transferring 'resid' bytes from the tx's
 * source descriptor to 'dstrd', then append the matching PUT_DONE/GET_DONE
 * completion message.  Returns the byte count on success or a negative
 * errno (fragment/space exhaustion) -- failure is reported to the peer via
 * the completion's status. */
int
kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
		 int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
	kib_msg_t *ibmsg = tx->tx_msg;
	kib_rdma_desc_t *srcrd = tx->tx_rd;
	struct ib_sge *sge = &tx->tx_sge[0];
	struct ib_send_wr *wrq = &tx->tx_wrq[0];
	int rc = resid;
	int srcidx;
	int dstidx;
	int wrknob;

	LASSERT(!in_interrupt());
	LASSERT(tx->tx_nwrq == 0);
	LASSERT(type == IBLND_MSG_GET_DONE ||
		type == IBLND_MSG_PUT_DONE);

	srcidx = dstidx = 0;

	while (resid > 0) {
		if (srcidx >= srcrd->rd_nfrags) {
			CERROR("Src buffer exhausted: %d frags\n", srcidx);
			rc = -EPROTO;
			break;
		}

		if (dstidx == dstrd->rd_nfrags) {
			CERROR("Dst buffer exhausted: %d frags\n", dstidx);
			rc = -EPROTO;
			break;
		}

		if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
			CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
			       IBLND_RDMA_FRAGS(conn->ibc_version),
			       srcidx, srcrd->rd_nfrags,
			       dstidx, dstrd->rd_nfrags);
			rc = -EMSGSIZE;
			break;
		}

		/* next chunk is bounded by src frag, dst frag and residual */
		wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
				 kiblnd_rd_frag_size(dstrd, dstidx)),
			     (__u32) resid);

		sge = &tx->tx_sge[tx->tx_nwrq];
		sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
		sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
		sge->length = wrknob;

		wrq = &tx->tx_wrq[tx->tx_nwrq];

		wrq->next = wrq + 1;
		wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
		wrq->sg_list = sge;
		wrq->num_sge = 1;
		wrq->opcode = IB_WR_RDMA_WRITE;
		wrq->send_flags = 0;

		wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
		wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);

		srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
		dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

		resid -= wrknob;

		tx->tx_nwrq++;
		wrq++;
		sge++;
	}

	if (rc < 0) /* no RDMA if completing with failure */
		tx->tx_nwrq = 0;

	ibmsg->ibm_u.completion.ibcm_status = rc;
	ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
	kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
			   type, sizeof(kib_completion_msg_t));

	return rc;
}

/* Append 'tx' to the queue appropriate for its message type.  Caller holds
 * ibc_lock.  Takes a conn ref for a first-time attach; a PUT_DONE re-queue
 * must already be attached to this conn. */
void
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
	struct list_head *q;

	LASSERT(tx->tx_nwrq > 0);	/* work items set up */
	LASSERT(!tx->tx_queued);	/* not queued for sending already */
	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	tx->tx_queued = 1;
	tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);

	if (tx->tx_conn == NULL) {
		kiblnd_conn_addref(conn);
		tx->tx_conn = conn;
		LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
	} else {
		/* PUT_DONE first attached to conn as a PUT_REQ */
		LASSERT(tx->tx_conn == conn);
		LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
	}

	switch (tx->tx_msg->ibm_type) {
	default:
		LBUG();

	case IBLND_MSG_PUT_REQ:
	case IBLND_MSG_GET_REQ:
		q = &conn->ibc_tx_queue_rsrvd;
		break;

	case IBLND_MSG_PUT_NAK:
	case IBLND_MSG_PUT_ACK:
	case IBLND_MSG_PUT_DONE:
	case IBLND_MSG_GET_DONE:
		q = &conn->ibc_tx_queue_nocred;
		break;

	case IBLND_MSG_NOOP:
		if (IBLND_OOB_CAPABLE(conn->ibc_version))
			q = &conn->ibc_tx_queue_nocred;
		else
			q = &conn->ibc_tx_noops;
		break;

	case IBLND_MSG_IMMEDIATE:
		q = &conn->ibc_tx_queue;
		break;
	}
	list_add_tail(&tx->tx_list, q);
}

/* Queue 'tx' on 'conn' (taking ibc_lock) and kick the send path. */
void
kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
{
	spin_lock(&conn->ibc_lock);
	kiblnd_queue_tx_locked(tx, conn);
	spin_unlock(&conn->ibc_lock);

	kiblnd_check_sends(conn);
}

/* Bind 'cmid' to a free privileged local port (scanning down from
 * PROT_SOCK-1) and start address resolution to 'dstaddr'. */
static int
kiblnd_resolve_addr(struct rdma_cm_id *cmid, struct sockaddr_in *srcaddr,
		    struct sockaddr_in *dstaddr, int timeout_ms)
{
	unsigned short port;
	int rc;

	/* allow the port to be reused */
	rc = rdma_set_reuseaddr(cmid, 1);
	if (rc != 0) {
		CERROR("Unable to set reuse on cmid: %d\n", rc);
		return rc;
	}

	/* look for a free privileged port */
	for (port = PROT_SOCK-1; port > 0; port--) {
		srcaddr->sin_port = htons(port);
		rc = rdma_resolve_addr(cmid,
				       (struct sockaddr *)srcaddr,
				       (struct sockaddr *)dstaddr,
				       timeout_ms);
		if (rc == 0) {
			CDEBUG(D_NET, "bound to port %hu\n", port);
			return 0;
		} else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
			CDEBUG(D_NET, "bind to port %hu failed: %d\n",
			       port, rc);
		} else {
			return rc;
		}
	}

	CERROR("Failed to bind to a free privileged port\n");
	return rc;
}

/* Start active connection establishment to 'peer': create the CM id and
 * kick off address resolution (optionally from a privileged port). */
static void
kiblnd_connect_peer(kib_peer_t *peer)
{
	struct rdma_cm_id *cmid;
	kib_dev_t *dev;
	kib_net_t *net = peer->ibp_ni->ni_data;
	struct sockaddr_in srcaddr;
	struct sockaddr_in dstaddr;
	int rc;

	LASSERT(net != NULL);
	LASSERT(peer->ibp_connecting > 0);

	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid)) {
		CERROR("Can't create CMID for %s: %ld\n",
		       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
		rc = PTR_ERR(cmid);
		goto failed;
	}

	dev = net->ibn_dev;
	memset(&srcaddr, 0, sizeof(srcaddr));
	srcaddr.sin_family = AF_INET;
	srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

	memset(&dstaddr, 0, sizeof(dstaddr));
	dstaddr.sin_family = AF_INET;
	dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
	dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));

	kiblnd_peer_addref(peer);	/* cmid's ref */

	if (*kiblnd_tunables.kib_use_priv_port) {
		rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
					 *kiblnd_tunables.kib_timeout * 1000);
	} else {
		rc = rdma_resolve_addr(cmid,
				       (struct sockaddr *)&srcaddr,
				       (struct sockaddr *)&dstaddr,
				       *kiblnd_tunables.kib_timeout * 1000);
	}
	if (rc != 0) {
		/* Can't initiate address resolution: */
		CERROR("Can't resolve addr for %s: %d\n",
		       libcfs_nid2str(peer->ibp_nid), rc);
		goto failed2;
	}

	LASSERT(cmid->device != NULL);
	CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
	       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
	       &dev->ibd_ifip, cmid->device->name);

	return;

failed2:
	kiblnd_peer_decref(peer);	/* cmid's ref */
	rdma_destroy_id(cmid);
failed:
	kiblnd_peer_connect_failed(peer, 1, rc);
}

/* Route 'tx' (may be NULL: connect only) to 'nid': queue on an existing
 * connection if one exists, park it on a connecting peer's queue, or create
 * the peer and initiate a connection.  Optimistically tries a read lock
 * first, then retries under the write lock. */
void
kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
	kib_peer_t *peer;
	kib_peer_t *peer2;
	kib_conn_t *conn;
	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
	unsigned long flags;
	int rc;

	/* If I get here, I've committed to send, so I complete the tx with
	 * failure on any problems */
	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */

	/* First time, just use a read lock since I expect to find my peer
	 * connected */
	read_lock_irqsave(g_lock, flags);

	peer = kiblnd_find_peer_locked(nid);
	if (peer != NULL && !list_empty(&peer->ibp_conns)) {
		/* Found a peer with an established connection */
		conn = kiblnd_get_conn_locked(peer);
		kiblnd_conn_addref(conn); /* 1 ref for me... */

		read_unlock_irqrestore(g_lock, flags);

		if (tx != NULL)
			kiblnd_queue_tx(tx, conn);
		kiblnd_conn_decref(conn); /* ...to here */
		return;
	}

	read_unlock(g_lock);
	/* Re-try with a write lock */
	write_lock(g_lock);

	peer = kiblnd_find_peer_locked(nid);
	if (peer != NULL) {
		if (list_empty(&peer->ibp_conns)) {
			/* found a peer, but it's still connecting... */
			LASSERT(peer->ibp_connecting != 0 ||
				peer->ibp_accepting != 0);
			if (tx != NULL)
				list_add_tail(&tx->tx_list,
					      &peer->ibp_tx_queue);
			write_unlock_irqrestore(g_lock, flags);
		} else {
			conn = kiblnd_get_conn_locked(peer);
			kiblnd_conn_addref(conn); /* 1 ref for me...
			 */
			write_unlock_irqrestore(g_lock, flags);

			if (tx != NULL)
				kiblnd_queue_tx(tx, conn);
			kiblnd_conn_decref(conn); /* ...to here */
		}
		return;
	}

	write_unlock_irqrestore(g_lock, flags);

	/* Allocate a peer ready to add to the peer table and retry */
	rc = kiblnd_create_peer(ni, &peer, nid);
	if (rc != 0) {
		CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
		if (tx != NULL) {
			tx->tx_status = -EHOSTUNREACH;
			tx->tx_waiting = 0;
			kiblnd_tx_done(ni, tx);
		}
		return;
	}

	write_lock_irqsave(g_lock, flags);

	peer2 = kiblnd_find_peer_locked(nid);
	if (peer2 != NULL) {
		if (list_empty(&peer2->ibp_conns)) {
			/* found a peer, but it's still connecting... */
			LASSERT(peer2->ibp_connecting != 0 ||
				peer2->ibp_accepting != 0);
			if (tx != NULL)
				list_add_tail(&tx->tx_list,
					      &peer2->ibp_tx_queue);
			write_unlock_irqrestore(g_lock, flags);
		} else {
			conn = kiblnd_get_conn_locked(peer2);
			kiblnd_conn_addref(conn); /* 1 ref for me... */
			write_unlock_irqrestore(g_lock, flags);

			if (tx != NULL)
				kiblnd_queue_tx(tx, conn);
			kiblnd_conn_decref(conn); /* ...to here */
		}

		kiblnd_peer_decref(peer);
		return;
	}

	/* Brand new peer */
	LASSERT(peer->ibp_connecting == 0);
	peer->ibp_connecting = 1;

	/* always called with a ref on ni, which prevents ni being shutdown */
	LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);

	if (tx != NULL)
		list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);

	kiblnd_peer_addref(peer);
	list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

	write_unlock_irqrestore(g_lock, flags);

	kiblnd_connect_peer(peer);
	kiblnd_peer_decref(peer);
}

/* LND send entry point: decide between an IMMEDIATE message and the RDMA
 * protocols (GET sink / PUT source) based on message type and size, set up
 * the tx, and launch it towards the target NID. */
int
kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	lnet_hdr_t *hdr = &lntmsg->msg_hdr;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	int target_is_router = lntmsg->msg_target_is_router;
	int routing = lntmsg->msg_routing;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int
		     payload_nob = lntmsg->msg_len;
	kib_msg_t *ibmsg;
	kib_tx_t *tx;
	int nob;
	int rc;

	/* NB 'private' is different depending on what we're sending.... */

	CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);

	/* Thread context */
	LASSERT(!in_interrupt());
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));

	switch (type) {
	default:
		LBUG();
		return -EIO;

	case LNET_MSG_ACK:
		LASSERT(payload_nob == 0);
		break;

	case LNET_MSG_GET:
		if (routing || target_is_router)
			break;			/* send IMMEDIATE */

		/* is the REPLY message too small for RDMA? */
		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
		if (nob <= IBLND_MSG_SIZE)
			break;			/* send IMMEDIATE */

		tx = kiblnd_get_idle_tx(ni, target.nid);
		if (tx == NULL) {
			CERROR("Can't allocate txd for GET to %s\n",
			       libcfs_nid2str(target.nid));
			return -ENOMEM;
		}

		ibmsg = tx->tx_msg;

		/* set up the GET sink from the local MD */
		if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
			rc = kiblnd_setup_rd_iov(ni, tx,
						 &ibmsg->ibm_u.get.ibgm_rd,
						 lntmsg->msg_md->md_niov,
						 lntmsg->msg_md->md_iov.iov,
						 0, lntmsg->msg_md->md_length);
		else
			rc = kiblnd_setup_rd_kiov(ni, tx,
						  &ibmsg->ibm_u.get.ibgm_rd,
						  lntmsg->msg_md->md_niov,
						  lntmsg->msg_md->md_iov.kiov,
						  0, lntmsg->msg_md->md_length);
		if (rc != 0) {
			CERROR("Can't setup GET sink for %s: %d\n",
			       libcfs_nid2str(target.nid), rc);
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
		ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
		ibmsg->ibm_u.get.ibgm_hdr = *hdr;

		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);

		tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
		if (tx->tx_lntmsg[1] == NULL) {
			CERROR("Can't create reply for GET -> %s\n",
			       libcfs_nid2str(target.nid));
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg[0,1] on completion */
		tx->tx_waiting = 1;		/* waiting for GET_DONE */
		kiblnd_launch_tx(ni, tx, target.nid);
		return 0;

	case LNET_MSG_REPLY:
	case LNET_MSG_PUT:
		/* Is the payload small enough not to need RDMA? */
		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
		if (nob <= IBLND_MSG_SIZE)
			break;			/* send IMMEDIATE */

		tx = kiblnd_get_idle_tx(ni, target.nid);
		if (tx == NULL) {
			CERROR("Can't allocate %s txd for %s\n",
			       type == LNET_MSG_PUT ? "PUT" : "REPLY",
			       libcfs_nid2str(target.nid));
			return -ENOMEM;
		}

		if (payload_kiov == NULL)
			rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
						 payload_niov, payload_iov,
						 payload_offset, payload_nob);
		else
			rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
						  payload_niov, payload_kiov,
						  payload_offset, payload_nob);
		if (rc != 0) {
			CERROR("Can't setup PUT src for %s: %d\n",
			       libcfs_nid2str(target.nid), rc);
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		ibmsg = tx->tx_msg;
		ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
		ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));

		tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg on completion */
		tx->tx_waiting = 1;		/* waiting for PUT_{ACK,NAK} */
		kiblnd_launch_tx(ni, tx, target.nid);
		return 0;
	}

	/* send IMMEDIATE */

	LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
		 <= IBLND_MSG_SIZE);

	tx = kiblnd_get_idle_tx(ni, target.nid);
	if (tx == NULL) {
		CERROR("Can't send %d to %s: tx descs exhausted\n",
		       type, libcfs_nid2str(target.nid));
		return -ENOMEM;
	}

	ibmsg = tx->tx_msg;
	ibmsg->ibm_u.immediate.ibim_hdr = *hdr;

	/* copy the payload inline into the message buffer */
	if (payload_kiov != NULL)
		lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
				    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
				    payload_niov, payload_kiov,
				    payload_offset, payload_nob);
	else
		lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
				   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
				   payload_niov, payload_iov,
				   payload_offset, payload_nob);

	nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);

	tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg on completion */
	kiblnd_launch_tx(ni, tx, target.nid);
	return 0;
}

/* Serve an optimized GET: RDMA-write lntmsg's payload back to the
 * requester's sink descriptor, completing with GET_DONE. */
static void
kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int niov = lntmsg->msg_niov;
	struct kvec *iov = lntmsg->msg_iov;
	lnet_kiov_t *kiov = lntmsg->msg_kiov;
	unsigned int offset = lntmsg->msg_offset;
	unsigned int nob = lntmsg->msg_len;
	kib_tx_t *tx;
	int rc;

	tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
	if (tx == NULL) {
		CERROR("Can't get tx for REPLY to %s\n",
		       libcfs_nid2str(target.nid));
		goto failed_0;
	}

	if (nob == 0)
		rc = 0;
	else if (kiov == NULL)
		rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
					 niov, iov, offset, nob);
	else
		rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
					  niov, kiov, offset, nob);

	if (rc != 0) {
		CERROR("Can't setup GET src for %s: %d\n",
		       libcfs_nid2str(target.nid), rc);
		goto failed_1;
	}

	rc = kiblnd_init_rdma(rx->rx_conn, tx,
			      IBLND_MSG_GET_DONE, nob,
			      &rx->rx_msg->ibm_u.get.ibgm_rd,
			      rx->rx_msg->ibm_u.get.ibgm_cookie);
	if (rc < 0) {
		CERROR("Can't setup rdma for GET from %s: %d\n",
		       libcfs_nid2str(target.nid), rc);
		goto failed_1;
	}

	if (nob == 0) {
		/* No RDMA: local completion may happen now!
*/ lnet_finalize(ni, lntmsg, 0); } else { /* RDMA: lnet_finalize(lntmsg) when it * completes */ tx->tx_lntmsg[0] = lntmsg; } kiblnd_queue_tx(tx, rx->rx_conn); return; failed_1: kiblnd_tx_done(ni, tx); failed_0: lnet_finalize(ni, lntmsg, -EIO); } int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { kib_rx_t *rx = private; kib_msg_t *rxmsg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; kib_tx_t *tx; kib_msg_t *txmsg; int nob; int post_credit = IBLND_POSTRX_PEER_CREDIT; int rc = 0; LASSERT(mlen <= rlen); LASSERT(!in_interrupt()); /* Either all pages or all vaddrs */ LASSERT(!(kiov != NULL && iov != NULL)); switch (rxmsg->ibm_type) { default: LBUG(); case IBLND_MSG_IMMEDIATE: nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); if (nob > rx->rx_nob) { CERROR("Immediate message from %s too big: %d(%d)\n", libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), nob, rx->rx_nob); rc = -EPROTO; break; } if (kiov != NULL) lnet_copy_flat2kiov(niov, kiov, offset, IBLND_MSG_SIZE, rxmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), mlen); else lnet_copy_flat2iov(niov, iov, offset, IBLND_MSG_SIZE, rxmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), mlen); lnet_finalize(ni, lntmsg, 0); break; case IBLND_MSG_PUT_REQ: if (mlen == 0) { lnet_finalize(ni, lntmsg, 0); kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, rxmsg->ibm_u.putreq.ibprm_cookie); break; } tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't allocate tx for %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); /* Not replying will break the connection */ rc = -ENOMEM; break; } txmsg = tx->tx_msg; if (kiov == NULL) rc = kiblnd_setup_rd_iov(ni, tx, &txmsg->ibm_u.putack.ibpam_rd, niov, iov, offset, mlen); else rc = kiblnd_setup_rd_kiov(ni, tx, &txmsg->ibm_u.putack.ibpam_rd, niov, kiov, offset, mlen); if (rc != 0) { 
CERROR("Can't setup PUT sink for %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_tx_done(ni, tx); /* tell peer it's over */ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc, rxmsg->ibm_u.putreq.ibprm_cookie); break; } nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]); txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob); tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ tx->tx_waiting = 1; /* waiting for PUT_DONE */ kiblnd_queue_tx(tx, conn); /* reposted buffer reserved for PUT_DONE */ post_credit = IBLND_POSTRX_NO_CREDIT; break; case IBLND_MSG_GET_REQ: if (lntmsg != NULL) { /* Optimized GET; RDMA lntmsg's payload */ kiblnd_reply(ni, rx, lntmsg); } else { /* GET didn't match anything */ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE, -ENODATA, rxmsg->ibm_u.get.ibgm_cookie); } break; } kiblnd_post_rx(rx, post_credit); return rc; } int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) { struct task_struct *task = kthread_run(fn, arg, "%s", name); if (IS_ERR(task)) return PTR_ERR(task); atomic_inc(&kiblnd_data.kib_nthreads); return 0; } static void kiblnd_thread_fini(void) { atomic_dec(&kiblnd_data.kib_nthreads); } void kiblnd_peer_alive(kib_peer_t *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ peer->ibp_last_alive = cfs_time_current(); mb(); } static void kiblnd_peer_notify(kib_peer_t *peer) { int error = 0; unsigned long last_alive = 0; unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (list_empty(&peer->ibp_conns) && peer->ibp_accepting == 0 && peer->ibp_connecting == 0 && peer->ibp_error != 0) { error = peer->ibp_error; peer->ibp_error = 0; last_alive = peer->ibp_last_alive; } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (error != 0) lnet_notify(peer->ibp_ni, peer->ibp_nid, 0, 
last_alive);
}

/*
 * This just does the immediate housekeeping.  'error' is zero for a
 * normal shutdown which can happen only after the connection has been
 * established.  If the connection is established, schedule the
 * connection to be finished off by the connd.  Otherwise the connd is
 * already dealing with it (either to set it up or tear it down).
 * Caller holds kib_global_lock exclusively in irq context.
 */
void
kiblnd_close_conn_locked(kib_conn_t *conn, int error)
{
    kib_peer_t   *peer = conn->ibc_peer;
    kib_dev_t    *dev;
    unsigned long flags;

    LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);

    if (error != 0 && conn->ibc_comms_error == 0)
        conn->ibc_comms_error = error;

    if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
        return; /* already being handled */

    if (error == 0 &&
        list_empty(&conn->ibc_tx_noops) &&
        list_empty(&conn->ibc_tx_queue) &&
        list_empty(&conn->ibc_tx_queue_rsrvd) &&
        list_empty(&conn->ibc_tx_queue_nocred) &&
        list_empty(&conn->ibc_active_txs)) {
        CDEBUG(D_NET, "closing conn to %s\n",
               libcfs_nid2str(peer->ibp_nid));
    } else {
        /* note which queues still hold traffic at close time */
        CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
                libcfs_nid2str(peer->ibp_nid), error,
                list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
                list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
                list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
                list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
                list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
    }

    dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
    list_del(&conn->ibc_list);
    /* connd (see below) takes over ibc_list's ref */

    if (list_empty(&peer->ibp_conns) &&     /* no more conns */
        kiblnd_peer_active(peer)) {         /* still in peer table */
        kiblnd_unlink_peer_locked(peer);

        /* set/clear error on last conn */
        peer->ibp_error = conn->ibc_comms_error;
    }

    kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);

    if (error != 0 &&
        kiblnd_dev_can_failover(dev)) {
        list_add_tail(&dev->ibd_fail_list,
                      &kiblnd_data.kib_failed_devs);
        wake_up(&kiblnd_data.kib_failover_waitq);
    }

    spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

    list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
    wake_up(&kiblnd_data.kib_connd_waitq);

    spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}

/* Lock-taking wrapper around kiblnd_close_conn_locked() */
void
kiblnd_close_conn(kib_conn_t *conn, int error)
{
    unsigned long flags;

    write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

    kiblnd_close_conn_locked(conn, error);

    write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

/*
 * Process any rxs that arrived before the connection reached the
 * ESTABLISHED state.  The global lock is dropped around each
 * kiblnd_handle_rx() call since handling may block.
 */
static void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
    unsigned long flags;
    kib_rx_t     *rx;
    kib_rx_t     *tmp;

    LASSERT(!in_interrupt());
    LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

    write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
    list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) {
        list_del(&rx->rx_list);
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_handle_rx(rx);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
    }
    write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

/*
 * Fail every tx on 'txs' with -ECONNABORTED.  Descriptors with no send
 * still in flight are completed immediately (moved to a local zombie
 * list and finished outside ibc_lock); the rest complete when their
 * sends do.
 */
static void
kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
    LIST_HEAD(zombies);
    struct list_head *tmp;
    struct list_head *nxt;
    kib_tx_t         *tx;

    spin_lock(&conn->ibc_lock);

    list_for_each_safe(tmp, nxt, txs) {
        tx = list_entry(tmp, kib_tx_t, tx_list);

        if (txs == &conn->ibc_active_txs) {
            LASSERT(!tx->tx_queued);
            LASSERT(tx->tx_waiting || tx->tx_sending != 0);
        } else {
            LASSERT(tx->tx_queued);
        }

        tx->tx_status =
-ECONNABORTED;
        tx->tx_waiting = 0;

        if (tx->tx_sending == 0) {
            /* no send in flight: complete it right away */
            tx->tx_queued = 0;
            list_del(&tx->tx_list);
            list_add(&tx->tx_list, &zombies);
        }
    }

    spin_unlock(&conn->ibc_lock);

    kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
}

/*
 * Final teardown of a connection: mark it DISCONNECTED, abort posted
 * receives and all outstanding transmit queues, then drain early rxs.
 */
static void
kiblnd_finalise_conn(kib_conn_t *conn)
{
    LASSERT(!in_interrupt());
    LASSERT(conn->ibc_state > IBLND_CONN_INIT);

    kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

    /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
     * for connections that didn't get as far as being connected, because
     * rdma_disconnect() does this for free. */
    kiblnd_abort_receives(conn);

    /* Complete all tx descs not waiting for sends to complete.
     * NB we should be safe from RDMA now that the QP has changed state */
    kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
    kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
    kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
    kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
    kiblnd_abort_txs(conn, &conn->ibc_active_txs);

    kiblnd_handle_early_rxs(conn);
}

/*
 * Account a failed connection attempt ('active' selects which counter).
 * If no other attempt is in progress and no connection exists, fail the
 * peer's blocked transmits with -EHOSTUNREACH and notify LNet.
 */
void
kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
{
    LIST_HEAD(zombies);
    unsigned long flags;

    LASSERT(error != 0);
    LASSERT(!in_interrupt());

    write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

    if (active) {
        LASSERT(peer->ibp_connecting > 0);
        peer->ibp_connecting--;
    } else {
        LASSERT(peer->ibp_accepting > 0);
        peer->ibp_accepting--;
    }

    if (peer->ibp_connecting != 0 ||
        peer->ibp_accepting != 0) {
        /* another connection attempt under way... */
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return;
    }

    if (list_empty(&peer->ibp_conns)) {
        /* Take peer's blocked transmits to complete with error */
        list_add(&zombies, &peer->ibp_tx_queue);
        list_del_init(&peer->ibp_tx_queue);

        if (kiblnd_peer_active(peer))
            kiblnd_unlink_peer_locked(peer);

        peer->ibp_error = error;
    } else {
        /* Can't have blocked transmits if there are connections */
        LASSERT(list_empty(&peer->ibp_tx_queue));
    }

    write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

    kiblnd_peer_notify(peer);

    if (list_empty(&zombies))
        return;

    CNETERR("Deleting messages for %s: connection failed\n",
            libcfs_nid2str(peer->ibp_nid));

    kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}

/*
 * Complete a connection attempt in either direction.  status != 0 means
 * the attempt failed: account it and finalise the conn.  On success the
 * conn becomes ESTABLISHED, joins the peer's conn list (evicting stale
 * conns from another peer incarnation) and the peer's blocked txs are
 * scheduled on it.
 */
void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
    kib_peer_t      *peer = conn->ibc_peer;
    kib_tx_t        *tx;
    kib_tx_t        *tmp;
    struct list_head txs;
    unsigned long    flags;
    int              active;

    active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

    CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
           libcfs_nid2str(peer->ibp_nid), active,
           conn->ibc_version, status);

    LASSERT(!in_interrupt());
    LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
             peer->ibp_connecting > 0) ||
            (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
             peer->ibp_accepting > 0));

    /* connvars were only needed during connection establishment */
    LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
    conn->ibc_connvars = NULL;

    if (status != 0) {
        /* failed to establish connection */
        kiblnd_peer_connect_failed(peer, active, status);
        kiblnd_finalise_conn(conn);
        return;
    }

    /* connection established */
    write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

    conn->ibc_last_send = jiffies;
    kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
    kiblnd_peer_alive(peer);

    /* Add conn to peer's list and nuke any dangling conns from a different
     * peer instance... */
    kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
    list_add(&conn->ibc_list, &peer->ibp_conns);
    if (active)
        peer->ibp_connecting--;
    else
        peer->ibp_accepting--;

    if (peer->ibp_version == 0) {
        peer->ibp_version = conn->ibc_version;
        peer->ibp_incarnation = conn->ibc_incarnation;
    }

    if (peer->ibp_version != conn->ibc_version ||
        peer->ibp_incarnation != conn->ibc_incarnation) {
        kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
                                        conn->ibc_incarnation);
        peer->ibp_version = conn->ibc_version;
        peer->ibp_incarnation = conn->ibc_incarnation;
    }

    /* grab pending txs while I have the lock */
    list_add(&txs, &peer->ibp_tx_queue);
    list_del_init(&peer->ibp_tx_queue);

    if (!kiblnd_peer_active(peer) ||    /* peer has been deleted */
        conn->ibc_comms_error != 0) {   /* error has happened already */
        lnet_ni_t *ni = peer->ibp_ni;

        /* start to shut down connection */
        kiblnd_close_conn_locked(conn, -ECONNABORTED);
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(ni, &txs, -ECONNABORTED);

        return;
    }

    write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

    /* Schedule blocked txs */
    spin_lock(&conn->ibc_lock);
    list_for_each_entry_safe(tx, tmp, &txs, tx_list) {
        list_del(&tx->tx_list);

        kiblnd_queue_tx_locked(tx, conn);
    }
    spin_unlock(&conn->ibc_lock);

    kiblnd_check_sends(conn);

    /* schedule blocked rxs */
    kiblnd_handle_early_rxs(conn);
}

/* Send an o2iblnd-level reject as the CM reject's private data */
static void
kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
{
    int rc;

    rc = rdma_reject(cmid, rej, sizeof(*rej));

    if (rc != 0)
        CWARN("Error %d sending reject\n", rc);
}

/*
 * Handle an incoming CONNREQ on a listener cmid: validate the request,
 * find or create the peer, create a passive-wait conn and rdma_accept()
 * it.  Returns 0 once the conn owns the cmid (so the CM won't destroy
 * it), or -ECONNREFUSED after sending a reject.
 */
static int
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
    rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
    kib_msg_t             *reqmsg = priv;
    kib_msg_t             *ackmsg;
    kib_dev_t             *ibdev;
    kib_peer_t            *peer;
    kib_peer_t            *peer2;
    kib_conn_t            *conn;
    lnet_ni_t             *ni = NULL;
    kib_net_t             *net = NULL;
    lnet_nid_t             nid;
    struct rdma_conn_param cp;
    kib_rej_t              rej;
    int                    version = IBLND_MSG_VERSION;
    unsigned long          flags;
    int                    rc;
    struct sockaddr_in    *peer_addr;
    LASSERT(!in_interrupt());

    /* cmid inherits 'context' from the corresponding listener id */
    ibdev = (kib_dev_t *)cmid->context;
    LASSERT(ibdev != NULL);

    memset(&rej, 0, sizeof(rej));
    rej.ibr_magic = IBLND_MSG_MAGIC;
    rej.ibr_why = IBLND_REJECT_FATAL;
    rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;

    peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
    if (*kiblnd_tunables.kib_require_priv_port &&
        ntohs(peer_addr->sin_port) >= PROT_SOCK) {
        __u32 ip = ntohl(peer_addr->sin_addr.s_addr);

        CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
               &ip, ntohs(peer_addr->sin_port));
        goto failed;
    }

    if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
        CERROR("Short connection request\n");
        goto failed;
    }

    /* Future protocol version compatibility support!  If the
     * o2iblnd-specific protocol changes, or when LNET unifies
     * protocols over all LNDs, the initial connection will
     * negotiate a protocol version.  I trap this here to avoid
     * console errors; the reject tells the peer which protocol I
     * speak. */
    if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
        reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
        goto failed;
    if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
        reqmsg->ibm_version != IBLND_MSG_VERSION &&
        reqmsg->ibm_version != IBLND_MSG_VERSION_1)
        goto failed;
    if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
        reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
        reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
        goto failed;

    rc = kiblnd_unpack_msg(reqmsg, priv_nob);
    if (rc != 0) {
        CERROR("Can't parse connection request: %d\n", rc);
        goto failed;
    }

    nid = reqmsg->ibm_srcnid;
    ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));

    if (ni != NULL) {
        net = (kib_net_t *)ni->ni_data;
        rej.ibr_incarnation = net->ibn_incarnation;
    }

    if (ni == NULL ||                          /* no matching net */
        ni->ni_nid != reqmsg->ibm_dstnid ||    /* right NET, wrong NID! */
        net->ibn_dev != ibdev) {               /* wrong device */
        CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
               libcfs_nid2str(nid),
               ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
               ibdev->ibd_ifname, ibdev->ibd_nnets,
               &ibdev->ibd_ifip,
               libcfs_nid2str(reqmsg->ibm_dstnid));

        goto failed;
    }

    /* check time stamp as soon as possible */
    if (reqmsg->ibm_dststamp != 0 &&
        reqmsg->ibm_dststamp != net->ibn_incarnation) {
        CWARN("Stale connection request\n");
        rej.ibr_why = IBLND_REJECT_CONN_STALE;
        goto failed;
    }

    /* I can accept peer's version */
    version = reqmsg->ibm_version;

    if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
        CERROR("Unexpected connreq msg type: %x from %s\n",
               reqmsg->ibm_type, libcfs_nid2str(nid));
        goto failed;
    }

    if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
        IBLND_MSG_QUEUE_SIZE(version)) {
        CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
               libcfs_nid2str(nid),
               reqmsg->ibm_u.connparams.ibcp_queue_depth,
               IBLND_MSG_QUEUE_SIZE(version));

        /* only version-aware peers understand this reject reason */
        if (version == IBLND_MSG_VERSION)
            rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;

        goto failed;
    }

    if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
        IBLND_RDMA_FRAGS(version)) {
        CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
               libcfs_nid2str(nid), version,
               reqmsg->ibm_u.connparams.ibcp_max_frags,
               IBLND_RDMA_FRAGS(version));

        if (version == IBLND_MSG_VERSION)
            rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;

        goto failed;
    }

    if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
        CERROR("Can't accept %s: message size %d too big (%d max)\n",
               libcfs_nid2str(nid),
               reqmsg->ibm_u.connparams.ibcp_max_msg_size,
               IBLND_MSG_SIZE);
        goto failed;
    }

    /* assume 'nid' is a new peer; create */
    rc = kiblnd_create_peer(ni, &peer, nid);
    if (rc != 0) {
        CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
        rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
        goto failed;
    }

    write_lock_irqsave(g_lock, flags);

    peer2 = kiblnd_find_peer_locked(nid);
    if (peer2 != NULL) {
        /* peer already exists; drop the speculative one below */
        if (peer2->ibp_version == 0) {
            peer2->ibp_version = version;
            peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
        }

        /* not the guy I've talked with */
        if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
            peer2->ibp_version != version) {
            kiblnd_close_peer_conns_locked(peer2, -ESTALE);
            write_unlock_irqrestore(g_lock, flags);

            CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
                  libcfs_nid2str(nid), peer2->ibp_version, version);

            kiblnd_peer_decref(peer);
            rej.ibr_why = IBLND_REJECT_CONN_STALE;
            goto failed;
        }

        /* tie-break connection race in favour of the higher NID */
        if (peer2->ibp_connecting != 0 &&
            nid < ni->ni_nid) {
            write_unlock_irqrestore(g_lock, flags);

            CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));

            kiblnd_peer_decref(peer);
            rej.ibr_why = IBLND_REJECT_CONN_RACE;
            goto failed;
        }

        peer2->ibp_accepting++;
        kiblnd_peer_addref(peer2);

        write_unlock_irqrestore(g_lock, flags);
        kiblnd_peer_decref(peer);
        peer = peer2;
    } else {
        /* Brand new peer */
        LASSERT(peer->ibp_accepting == 0);
        LASSERT(peer->ibp_version == 0 &&
                peer->ibp_incarnation == 0);

        peer->ibp_accepting = 1;
        peer->ibp_version = version;
        peer->ibp_incarnation = reqmsg->ibm_srcstamp;

        /* I have a ref on ni that prevents it being shutdown */
        LASSERT(net->ibn_shutdown == 0);

        kiblnd_peer_addref(peer);
        list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

        write_unlock_irqrestore(g_lock, flags);
    }

    conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
    if (conn == NULL) {
        kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
        kiblnd_peer_decref(peer);
        rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
        goto failed;
    }

    /* conn now "owns" cmid, so I return success from here on to ensure the
     * CM callback doesn't destroy cmid. */
    conn->ibc_incarnation = reqmsg->ibm_srcstamp;
    conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
    conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
    LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
            IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(version));

    ackmsg = &conn->ibc_connvars->cv_msg;
    memset(ackmsg, 0, sizeof(*ackmsg));

    kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                    sizeof(ackmsg->ibm_u.connparams));
    ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
    ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
    ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);

    kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);

    memset(&cp, 0, sizeof(cp));
    cp.private_data = ackmsg;
    cp.private_data_len = ackmsg->ibm_nob;
    cp.responder_resources = 0; /* No atomic ops or RDMA reads */
    cp.initiator_depth = 0;
    cp.flow_control = 1;
    cp.retry_count = *kiblnd_tunables.kib_retry_count;
    cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;

    CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));

    rc = rdma_accept(cmid, &cp);
    if (rc != 0) {
        CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
        rej.ibr_version = version;
        rej.ibr_why = IBLND_REJECT_FATAL;

        kiblnd_reject(cmid, &rej);
        kiblnd_connreq_done(conn, rc);
        kiblnd_conn_decref(conn);
    }

    lnet_ni_decref(ni);
    return 0;

failed:
    if (ni != NULL)
        lnet_ni_decref(ni);

    rej.ibr_version = version;
    rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
    rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
    kiblnd_reject(cmid, &rej);

    return -ECONNREFUSED;
}

/*
 * A benign rejection ('why') came back on an active connect: adopt the
 * peer's protocol version/incarnation and retry the connect if this was
 * the only attempt in flight and a reconnect is still wanted.
 */
static void
kiblnd_reconnect(kib_conn_t *conn, int version,
                 __u64 incarnation, int why, kib_connparams_t *cp)
{
    kib_peer_t   *peer = conn->ibc_peer;
    char         *reason;
    int           retry = 0;
    unsigned long flags;

    LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
    LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */

    write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

    /* retry connection if it's still needed and no other connection
     * attempts (active or passive) are in progress
     * NB: reconnect is still needed even when ibp_tx_queue is
     * empty if ibp_version != version because reconnect may be
     * initiated by kiblnd_query() */
    if ((!list_empty(&peer->ibp_tx_queue) ||
         peer->ibp_version != version) &&
        peer->ibp_connecting == 1 &&
        peer->ibp_accepting == 0) {
        retry = 1;
        peer->ibp_connecting++;

        peer->ibp_version = version;
        peer->ibp_incarnation = incarnation;
    }

    write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

    if (!retry)
        return;

    switch (why) {
    default:
        reason = "Unknown";
        break;

    case IBLND_REJECT_CONN_STALE:
        reason = "stale";
        break;

    case IBLND_REJECT_CONN_RACE:
        reason = "conn race";
        break;

    case IBLND_REJECT_CONN_UNCOMPAT:
        reason = "version negotiation";
        break;
    }

    CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
            libcfs_nid2str(peer->ibp_nid),
            reason, IBLND_MSG_VERSION, version,
            cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
            cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
            cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);

    kiblnd_connect_peer(peer);
}

/*
 * Decode a CM rejection of an active connect.  Consumer-defined rejects
 * carry a kib_rej_t (possibly byte-swapped) whose 'why' may trigger a
 * reconnect via kiblnd_reconnect(); everything else is fatal and the
 * conn is completed with -ECONNREFUSED.
 */
static void
kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
    kib_peer_t *peer = conn->ibc_peer;

    LASSERT(!in_interrupt());
    LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

    switch (reason) {
    case IB_CM_REJ_STALE_CONN:
        kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
                         IBLND_REJECT_CONN_STALE, NULL);
        break;

    case IB_CM_REJ_INVALID_SERVICE_ID:
        CNETERR("%s rejected: no listener at %d\n",
                libcfs_nid2str(peer->ibp_nid),
                *kiblnd_tunables.kib_service);
        break;

    case IB_CM_REJ_CONSUMER_DEFINED:
        if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
            kib_rej_t        *rej = priv;
            kib_connparams_t *cp = NULL;
            int               flip = 0;
            __u64             incarnation = -1;

            /* NB. default incarnation is -1 because:
             * a) V1 will ignore dst incarnation in connreq.
             * b) V2 will provide incarnation while rejecting me,
             *    -1 will be overwrote.
             *
             * if I try to connect to a V1 peer with V2 protocol,
             * it rejected me then upgrade to V2, I have no idea
             * about the upgrading and try to reconnect with V1,
             * in this case upgraded V2 can find out I'm trying to
             * talk to the old guy and reject me(incarnation is -1). */

            if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
                rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
                __swab32s(&rej->ibr_magic);
                __swab16s(&rej->ibr_version);
                flip = 1;
            }

            if (priv_nob >= sizeof(kib_rej_t) &&
                rej->ibr_version > IBLND_MSG_VERSION_1) {
                /* priv_nob is always 148 in current version
                 * of OFED, so we still need to check version.
                 * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
                cp = &rej->ibr_cp;

                if (flip) {
                    __swab64s(&rej->ibr_incarnation);
                    __swab16s(&cp->ibcp_queue_depth);
                    __swab16s(&cp->ibcp_max_frags);
                    __swab32s(&cp->ibcp_max_msg_size);
                }

                incarnation = rej->ibr_incarnation;
            }

            if (rej->ibr_magic != IBLND_MSG_MAGIC &&
                rej->ibr_magic != LNET_PROTO_MAGIC) {
                CERROR("%s rejected: consumer defined fatal error\n",
                       libcfs_nid2str(peer->ibp_nid));
                break;
            }

            if (rej->ibr_version != IBLND_MSG_VERSION &&
                rej->ibr_version != IBLND_MSG_VERSION_1) {
                CERROR("%s rejected: o2iblnd version %x error\n",
                       libcfs_nid2str(peer->ibp_nid),
                       rej->ibr_version);
                break;
            }

            if (rej->ibr_why == IBLND_REJECT_FATAL &&
                rej->ibr_version == IBLND_MSG_VERSION_1) {
                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
                       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);

                if (conn->ibc_version != IBLND_MSG_VERSION_1)
                    rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
            }

            switch (rej->ibr_why) {
            case IBLND_REJECT_CONN_RACE:
            case IBLND_REJECT_CONN_STALE:
            case IBLND_REJECT_CONN_UNCOMPAT:
                kiblnd_reconnect(conn, rej->ibr_version,
                                 incarnation, rej->ibr_why, cp);
                break;

            case IBLND_REJECT_MSG_QUEUE_SIZE:
                CERROR("%s rejected: incompatible message queue depth %d, %d\n",
                       libcfs_nid2str(peer->ibp_nid),
                       cp != NULL ? cp->ibcp_queue_depth :
                       IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
                break;

            case IBLND_REJECT_RDMA_FRAGS:
                CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
                       libcfs_nid2str(peer->ibp_nid),
                       cp != NULL ? cp->ibcp_max_frags :
                       IBLND_RDMA_FRAGS(rej->ibr_version),
                       IBLND_RDMA_FRAGS(conn->ibc_version));
                break;

            case IBLND_REJECT_NO_RESOURCES:
                CERROR("%s rejected: o2iblnd no resources\n",
                       libcfs_nid2str(peer->ibp_nid));
                break;

            case IBLND_REJECT_FATAL:
                CERROR("%s rejected: o2iblnd fatal error\n",
                       libcfs_nid2str(peer->ibp_nid));
                break;

            default:
                CERROR("%s rejected: o2iblnd reason %d\n",
                       libcfs_nid2str(peer->ibp_nid),
                       rej->ibr_why);
                break;
            }
            break;
        }
        /* fall through */
    default:
        CNETERR("%s rejected: reason %d, size %d\n",
                libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
        break;
    }

    kiblnd_connreq_done(conn, -ECONNREFUSED);
}

/*
 * Validate the CONNACK private data received on an active connect and,
 * if acceptable, complete connection establishment.  On failure the QP
 * is already up, so the error is recorded in ibc_comms_error and
 * kiblnd_connreq_done(conn, 0) establishes-then-tears-down the conn.
 */
static void
kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
{
    kib_peer_t   *peer = conn->ibc_peer;
    lnet_ni_t    *ni = peer->ibp_ni;
    kib_net_t    *net = ni->ni_data;
    kib_msg_t    *msg = priv;
    int           ver = conn->ibc_version;
    int           rc = kiblnd_unpack_msg(msg, priv_nob);
    unsigned long flags;

    LASSERT(net != NULL);

    if (rc != 0) {
        CERROR("Can't unpack connack from %s: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc);
        goto failed;
    }

    if (msg->ibm_type != IBLND_MSG_CONNACK) {
        CERROR("Unexpected message %d from %s\n",
               msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
        rc = -EPROTO;
        goto failed;
    }

    if (ver != msg->ibm_version) {
        CERROR("%s replied version %x is different with requested version %x\n",
               libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
        rc = -EPROTO;
        goto failed;
    }

    if (msg->ibm_u.connparams.ibcp_queue_depth !=
        IBLND_MSG_QUEUE_SIZE(ver)) {
        CERROR("%s has incompatible queue depth %d(%d wanted)\n",
               libcfs_nid2str(peer->ibp_nid),
               msg->ibm_u.connparams.ibcp_queue_depth,
               IBLND_MSG_QUEUE_SIZE(ver));
        rc = -EPROTO;
        goto failed;
    }

    if (msg->ibm_u.connparams.ibcp_max_frags !=
        IBLND_RDMA_FRAGS(ver)) {
        CERROR("%s has incompatible max_frags %d (%d wanted)\n",
               libcfs_nid2str(peer->ibp_nid),
               msg->ibm_u.connparams.ibcp_max_frags,
               IBLND_RDMA_FRAGS(ver));
        rc = -EPROTO;
        goto failed;
    }

    if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
        CERROR("%s max message size %d too big (%d max)\n",
               libcfs_nid2str(peer->ibp_nid),
               msg->ibm_u.connparams.ibcp_max_msg_size,
               IBLND_MSG_SIZE);
        rc = -EPROTO;
        goto failed;
    }

    read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
    if (msg->ibm_dstnid == ni->ni_nid &&
        msg->ibm_dststamp == net->ibn_incarnation)
        rc = 0;
    else
        rc = -ESTALE;
    read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

    if (rc != 0) {
        CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc,
               msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
        goto failed;
    }

    conn->ibc_incarnation = msg->ibm_srcstamp;
    conn->ibc_credits =
        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
    LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
            IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver));

    kiblnd_connreq_done(conn, 0);
    return;

failed:
    /* NB My QP has already established itself, so I handle anything going
     * wrong here by setting ibc_comms_error.
     * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
     * immediately tears it down. */
    LASSERT(rc != 0);
    conn->ibc_comms_error = rc;
    kiblnd_connreq_done(conn, 0);
}

/*
 * Route resolution succeeded: build a CONNREQ and rdma_connect() on
 * 'cmid'.  Returns 0 once the conn owns the cmid (even when the connect
 * itself failed, so the CM callback won't destroy it).
 */
static int
kiblnd_active_connect(struct rdma_cm_id *cmid)
{
    kib_peer_t            *peer = (kib_peer_t *)cmid->context;
    kib_conn_t            *conn;
    kib_msg_t             *msg;
    struct rdma_conn_param cp;
    int                    version;
    __u64                  incarnation;
    unsigned long          flags;
    int                    rc;

    read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

    incarnation = peer->ibp_incarnation;
    version = (peer->ibp_version == 0) ?
IBLND_MSG_VERSION : peer->ibp_version;

    read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

    conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
    if (conn == NULL) {
        kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
        kiblnd_peer_decref(peer); /* lose cmid's ref */
        return -ENOMEM;
    }

    /* conn "owns" cmid now, so I return success from here on to ensure the
     * CM callback doesn't destroy cmid.  conn also takes over cmid's ref
     * on peer */
    msg = &conn->ibc_connvars->cv_msg;

    memset(msg, 0, sizeof(*msg));
    kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
    msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
    msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
    msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

    kiblnd_pack_msg(peer->ibp_ni, msg, version,
                    0, peer->ibp_nid, incarnation);

    memset(&cp, 0, sizeof(cp));
    cp.private_data = msg;
    cp.private_data_len = msg->ibm_nob;
    cp.responder_resources = 0; /* No atomic ops or RDMA reads */
    cp.initiator_depth = 0;
    cp.flow_control = 1;
    cp.retry_count = *kiblnd_tunables.kib_retry_count;
    cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;

    LASSERT(cmid->context == (void *)conn);
    LASSERT(conn->ibc_cmid == cmid);

    rc = rdma_connect(cmid, &cp);
    if (rc != 0) {
        CERROR("Can't connect to %s: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc);
        kiblnd_connreq_done(conn, rc);
        kiblnd_conn_decref(conn);
    }

    return 0;
}

/*
 * RDMA CM event dispatcher.  cmid->context is a listener device for
 * CONNECT_REQUEST, a peer during address/route resolution, and a conn
 * thereafter.  A non-zero return destroys the cmid (per the CM
 * contract), so 0 is returned once a conn owns it.
 */
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
    kib_peer_t *peer;
    kib_conn_t *conn;
    int         rc;

    switch (event->event) {
    default:
        CERROR("Unexpected event: %d, status: %d\n",
               event->event, event->status);
        LBUG();

    case RDMA_CM_EVENT_CONNECT_REQUEST:
        /* destroy cmid on failure */
        rc = kiblnd_passive_connect(cmid,
                                    (void *)KIBLND_CONN_PARAM(event),
                                    KIBLND_CONN_PARAM_LEN(event));
        CDEBUG(D_NET, "connreq: %d\n", rc);
        return rc;

    case RDMA_CM_EVENT_ADDR_ERROR:
        peer = (kib_peer_t *)cmid->context;
        CNETERR("%s: ADDR ERROR %d\n",
                libcfs_nid2str(peer->ibp_nid), event->status);
        kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
        kiblnd_peer_decref(peer);
        return -EHOSTUNREACH; /* rc != 0 destroys cmid */

    case RDMA_CM_EVENT_ADDR_RESOLVED:
        peer = (kib_peer_t *)cmid->context;

        CDEBUG(D_NET, "%s Addr resolved: %d\n",
               libcfs_nid2str(peer->ibp_nid), event->status);

        if (event->status != 0) {
            CNETERR("Can't resolve address for %s: %d\n",
                    libcfs_nid2str(peer->ibp_nid), event->status);
            rc = event->status;
        } else {
            rc = rdma_resolve_route(
                cmid, *kiblnd_tunables.kib_timeout * 1000);
            if (rc == 0)
                return 0;
            /* Can't initiate route resolution */
            CERROR("Can't resolve route for %s: %d\n",
                   libcfs_nid2str(peer->ibp_nid), rc);
        }
        kiblnd_peer_connect_failed(peer, 1, rc);
        kiblnd_peer_decref(peer);
        return rc; /* rc != 0 destroys cmid */

    case RDMA_CM_EVENT_ROUTE_ERROR:
        peer = (kib_peer_t *)cmid->context;
        CNETERR("%s: ROUTE ERROR %d\n",
                libcfs_nid2str(peer->ibp_nid), event->status);
        kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
        kiblnd_peer_decref(peer);
        return -EHOSTUNREACH; /* rc != 0 destroys cmid */

    case RDMA_CM_EVENT_ROUTE_RESOLVED:
        peer = (kib_peer_t *)cmid->context;
        CDEBUG(D_NET, "%s Route resolved: %d\n",
               libcfs_nid2str(peer->ibp_nid), event->status);

        if (event->status == 0)
            return kiblnd_active_connect(cmid);

        CNETERR("Can't resolve route for %s: %d\n",
                libcfs_nid2str(peer->ibp_nid), event->status);
        kiblnd_peer_connect_failed(peer, 1, event->status);
        kiblnd_peer_decref(peer);
        return event->status; /* rc != 0 destroys cmid */

    case RDMA_CM_EVENT_UNREACHABLE:
        conn = (kib_conn_t *)cmid->context;
        LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
        CNETERR("%s: UNREACHABLE %d\n",
                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
        kiblnd_connreq_done(conn, -ENETDOWN);
        kiblnd_conn_decref(conn);
        return 0;

    case RDMA_CM_EVENT_CONNECT_ERROR:
        conn = (kib_conn_t *)cmid->context;
        LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
        CNETERR("%s: CONNECT ERROR %d\n",
                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
        kiblnd_connreq_done(conn, -ENOTCONN);
        kiblnd_conn_decref(conn);
        return 0;

    case RDMA_CM_EVENT_REJECTED:
        conn = (kib_conn_t *)cmid->context;
        switch (conn->ibc_state) {
        default:
            LBUG();

        case IBLND_CONN_PASSIVE_WAIT:
            CERROR("%s: REJECTED %d\n",
                   libcfs_nid2str(conn->ibc_peer->ibp_nid),
                   event->status);
            kiblnd_connreq_done(conn, -ECONNRESET);
            break;

        case IBLND_CONN_ACTIVE_CONNECT:
            kiblnd_rejected(conn, event->status,
                            (void *)KIBLND_CONN_PARAM(event),
                            KIBLND_CONN_PARAM_LEN(event));
            break;
        }
        kiblnd_conn_decref(conn);
        return 0;

    case RDMA_CM_EVENT_ESTABLISHED:
        conn = (kib_conn_t *)cmid->context;
        switch (conn->ibc_state) {
        default:
            LBUG();

        case IBLND_CONN_PASSIVE_WAIT:
            CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
                   libcfs_nid2str(conn->ibc_peer->ibp_nid));
            kiblnd_connreq_done(conn, 0);
            break;

        case IBLND_CONN_ACTIVE_CONNECT:
            CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
                   libcfs_nid2str(conn->ibc_peer->ibp_nid));
            kiblnd_check_connreply(conn,
                                   (void *)KIBLND_CONN_PARAM(event),
                                   KIBLND_CONN_PARAM_LEN(event));
            break;
        }
        /* net keeps its ref on conn! */
        return 0;

    case RDMA_CM_EVENT_TIMEWAIT_EXIT:
        CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
        return 0;

    case RDMA_CM_EVENT_DISCONNECTED:
        conn = (kib_conn_t *)cmid->context;
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
            CERROR("%s DISCONNECTED\n",
                   libcfs_nid2str(conn->ibc_peer->ibp_nid));
            kiblnd_connreq_done(conn, -ECONNRESET);
        } else {
            kiblnd_close_conn(conn, 0);
        }
        kiblnd_conn_decref(conn);
        cmid->context = NULL;
        return 0;

    case RDMA_CM_EVENT_DEVICE_REMOVAL:
        LCONSOLE_ERROR_MSG(0x131,
                           "Received notification of device removal\n"
                           "Please shutdown LNET to allow this to proceed\n");
        /* Can't remove network from underneath LNET for now, so I have
         * to ignore this */
        return 0;

    case RDMA_CM_EVENT_ADDR_CHANGE:
        LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
        return 0;
    }
}

/*
 * Return non-zero if any tx on 'txs' has passed its deadline.  Caller
 * holds a lock that stabilises the list (per the _locked suffix).
 */
static int
kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
{
    kib_tx_t         *tx;
    struct list_head *ttmp;

    list_for_each(ttmp, txs) {
        tx = list_entry(ttmp, kib_tx_t, tx_list);

        if (txs != &conn->ibc_active_txs) {
            LASSERT(tx->tx_queued);
        } else {
            LASSERT(!tx->tx_queued);
            LASSERT(tx->tx_waiting || tx->tx_sending != 0);
        }

        if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
            CERROR("Timed out tx: %s, %lu seconds\n",
                   kiblnd_queue2str(conn, txs),
                   cfs_duration_sec(jiffies - tx->tx_deadline));
            return 1;
        }
    }

    return 0;
}

/* Non-zero if any of the conn's tx queues holds a timed-out tx */
static int
kiblnd_conn_timed_out_locked(kib_conn_t *conn)
{
    return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
           kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
           kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
           kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
           kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
}

/*
 * Scan hash bucket 'idx' of the peer table for established conns that
 * have timed out (close them) or need a NOOP sent (poke their sends).
 */
static void
kiblnd_check_conns(int idx)
{
    LIST_HEAD(closes);
    LIST_HEAD(checksends);
    struct list_head *peers = &kiblnd_data.kib_peers[idx];
    struct list_head *ptmp;
    kib_peer_t       *peer;
    kib_conn_t       *conn;
    kib_conn_t       *tmp;
    struct list_head *ctmp;
    unsigned long     flags;

    /* NB.
We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we * take a look... */ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); list_for_each(ptmp, peers) { peer = list_entry(ptmp, kib_peer_t, ibp_list); list_for_each(ctmp, &peer->ibp_conns) { int timedout; int sendnoop; conn = list_entry(ctmp, kib_conn_t, ibc_list); LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); spin_lock(&conn->ibc_lock); sendnoop = kiblnd_need_noop(conn); timedout = kiblnd_conn_timed_out_locked(conn); if (!sendnoop && !timedout) { spin_unlock(&conn->ibc_lock); continue; } if (timedout) { CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n", libcfs_nid2str(peer->ibp_nid), cfs_duration_sec(cfs_time_current() - peer->ibp_last_alive), conn->ibc_credits, conn->ibc_outstanding_credits, conn->ibc_reserved_credits); list_add(&conn->ibc_connd_list, &closes); } else { list_add(&conn->ibc_connd_list, &checksends); } /* +ref for 'closes' or 'checksends' */ kiblnd_conn_addref(conn); spin_unlock(&conn->ibc_lock); } } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); /* Handle timeout by closing the whole * connection. We can only be sure RDMA activity * has ceased once the QP has been modified. */ list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); } /* In case we have enough credits to return via a * NOOP, but there were no non-blocking tx descs * free to do it last time... 
*/ while (!list_empty(&checksends)) { conn = list_entry(checksends.next, kib_conn_t, ibc_connd_list); list_del(&conn->ibc_connd_list); kiblnd_check_sends(conn); kiblnd_conn_decref(conn); } } static void kiblnd_disconnect_conn(kib_conn_t *conn) { LASSERT(!in_interrupt()); LASSERT(current == kiblnd_data.kib_connd); LASSERT(conn->ibc_state == IBLND_CONN_CLOSING); rdma_disconnect(conn->ibc_cmid); kiblnd_finalise_conn(conn); kiblnd_peer_notify(conn->ibc_peer); } int kiblnd_connd(void *arg) { wait_queue_t wait; unsigned long flags; kib_conn_t *conn; int timeout; int i; int dropped_lock; int peer_index = 0; unsigned long deadline = jiffies; cfs_block_allsigs(); init_waitqueue_entry(&wait, current); kiblnd_data.kib_connd = current; spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); while (!kiblnd_data.kib_shutdown) { dropped_lock = 0; if (!list_empty(&kiblnd_data.kib_connd_zombies)) { conn = list_entry(kiblnd_data. \ kib_connd_zombies.next, kib_conn_t, ibc_list); list_del(&conn->ibc_list); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; kiblnd_destroy_conn(conn); spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } if (!list_empty(&kiblnd_data.kib_connd_conns)) { conn = list_entry(kiblnd_data.kib_connd_conns.next, kib_conn_t, ibc_list); list_del(&conn->ibc_list); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; kiblnd_disconnect_conn(conn); kiblnd_conn_decref(conn); spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } /* careful with the jiffy wrap... 
*/ timeout = (int)(deadline - jiffies); if (timeout <= 0) { const int n = 4; const int p = 1; int chunk = kiblnd_data.kib_peer_hash_size; spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; /* Time to check for RDMA timeouts on a few more * peers: I do checks every 'p' seconds on a * proportion of the peer table and I need to check * every connection 'n' times within a timeout * interval, to ensure I detect a timeout on any * connection within (n+1)/n times the timeout * interval. */ if (*kiblnd_tunables.kib_timeout > n * p) chunk = (chunk * n * p) / *kiblnd_tunables.kib_timeout; if (chunk == 0) chunk = 1; for (i = 0; i < chunk; i++) { kiblnd_check_conns(peer_index); peer_index = (peer_index + 1) % kiblnd_data.kib_peer_hash_size; } deadline += p * HZ; spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } if (dropped_lock) continue; /* Nothing to do for 'timeout' */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); schedule_timeout(timeout); remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); kiblnd_thread_fini(); return 0; } void kiblnd_qp_event(struct ib_event *event, void *arg) { kib_conn_t *conn = arg; switch (event->event) { case IB_EVENT_COMM_EST: CDEBUG(D_NET, "%s established\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); return; default: CERROR("%s: Async QP event type %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); return; } } static void kiblnd_complete(struct ib_wc *wc) { switch (kiblnd_wreqid2type(wc->wr_id)) { default: LBUG(); case IBLND_WID_RDMA: /* We only get RDMA completion notification if it fails. All * subsequent work items, including the final SEND will fail * too. 
However we can't print out any more info about the * failing RDMA because 'tx' might be back on the idle list or * even reused already if we didn't manage to post all our work * items */ CNETERR("RDMA (tx: %p) failed: %d\n", kiblnd_wreqid2ptr(wc->wr_id), wc->status); return; case IBLND_WID_TX: kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status); return; case IBLND_WID_RX: kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status, wc->byte_len); return; } } void kiblnd_cq_completion(struct ib_cq *cq, void *arg) { /* NB I'm not allowed to schedule this conn once its refcount has * reached 0. Since fundamentally I'm racing with scheduler threads * consuming my CQ I could be called after all completions have * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 * and this CQ is about to be destroyed so I NOOP. */ kib_conn_t *conn = (kib_conn_t *)arg; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; LASSERT(cq == conn->ibc_cq); spin_lock_irqsave(&sched->ibs_lock, flags); conn->ibc_ready = 1; if (!conn->ibc_scheduled && (conn->ibc_nrx > 0 || conn->ibc_nsends_posted > 0)) { kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ conn->ibc_scheduled = 1; list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); } spin_unlock_irqrestore(&sched->ibs_lock, flags); } void kiblnd_cq_event(struct ib_event *event, void *arg) { kib_conn_t *conn = arg; CERROR("%s: async CQ event type %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); } int kiblnd_scheduler(void *arg) { long id = (long)arg; struct kib_sched_info *sched; kib_conn_t *conn; wait_queue_t wait; unsigned long flags; struct ib_wc wc; int did_something; int busy_loops = 0; int rc; cfs_block_allsigs(); init_waitqueue_entry(&wait, current); sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); if (rc != 0) { CWARN("Failed to bind on CPT %d, please 
verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt); } spin_lock_irqsave(&sched->ibs_lock, flags); while (!kiblnd_data.kib_shutdown) { if (busy_loops++ >= IBLND_RESCHED) { spin_unlock_irqrestore(&sched->ibs_lock, flags); cond_resched(); busy_loops = 0; spin_lock_irqsave(&sched->ibs_lock, flags); } did_something = 0; if (!list_empty(&sched->ibs_conns)) { conn = list_entry(sched->ibs_conns.next, kib_conn_t, ibc_sched_list); /* take over kib_sched_conns' ref on conn... */ LASSERT(conn->ibc_scheduled); list_del(&conn->ibc_sched_list); conn->ibc_ready = 0; spin_unlock_irqrestore(&sched->ibs_lock, flags); rc = ib_poll_cq(conn->ibc_cq, 1, &wc); if (rc == 0) { rc = ib_req_notify_cq(conn->ibc_cq, IB_CQ_NEXT_COMP); if (rc < 0) { CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, flags); continue; } rc = ib_poll_cq(conn->ibc_cq, 1, &wc); } if (rc < 0) { CWARN("%s: ib_poll_cq failed: %d, closing connection\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, flags); continue; } spin_lock_irqsave(&sched->ibs_lock, flags); if (rc != 0 || conn->ibc_ready) { /* There may be another completion waiting; get * another scheduler to check while I handle * this one... 
*/ /* +1 ref for sched_conns */ kiblnd_conn_addref(conn); list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); } else { conn->ibc_scheduled = 0; } if (rc != 0) { spin_unlock_irqrestore(&sched->ibs_lock, flags); kiblnd_complete(&wc); spin_lock_irqsave(&sched->ibs_lock, flags); } kiblnd_conn_decref(conn); /* ...drop my ref from above */ did_something = 1; } if (did_something) continue; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue_exclusive(&sched->ibs_waitq, &wait); spin_unlock_irqrestore(&sched->ibs_lock, flags); schedule(); busy_loops = 0; remove_wait_queue(&sched->ibs_waitq, &wait); spin_lock_irqsave(&sched->ibs_lock, flags); } spin_unlock_irqrestore(&sched->ibs_lock, flags); kiblnd_thread_fini(); return 0; } int kiblnd_failover_thread(void *arg) { rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_dev_t *dev; wait_queue_t wait; unsigned long flags; int rc; LASSERT(*kiblnd_tunables.kib_dev_failover != 0); cfs_block_allsigs(); init_waitqueue_entry(&wait, current); write_lock_irqsave(glock, flags); while (!kiblnd_data.kib_shutdown) { int do_failover = 0; int long_sleep; list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, ibd_fail_list) { if (time_before(cfs_time_current(), dev->ibd_next_failover)) continue; do_failover = 1; break; } if (do_failover) { list_del_init(&dev->ibd_fail_list); dev->ibd_failover = 1; write_unlock_irqrestore(glock, flags); rc = kiblnd_dev_failover(dev); write_lock_irqsave(glock, flags); LASSERT(dev->ibd_failover); dev->ibd_failover = 0; if (rc >= 0) { /* Device is OK or failover succeed */ dev->ibd_next_failover = cfs_time_shift(3); continue; } /* failed to failover, retry later */ dev->ibd_next_failover = cfs_time_shift(min(dev->ibd_failed_failover, 10)); if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); } continue; } /* long sleep if no more pending failover */ long_sleep = 
list_empty(&kiblnd_data.kib_failed_devs); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); write_unlock_irqrestore(glock, flags); rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) : cfs_time_seconds(1)); remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); write_lock_irqsave(glock, flags); if (!long_sleep || rc != 0) continue; /* have a long sleep, routine check all active devices, * we need checking like this because if there is not active * connection on the dev and no SEND from local, we may listen * on wrong HCA for ever while there is a bonding failover */ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); } } } write_unlock_irqrestore(glock, flags); kiblnd_thread_fini(); return 0; }
gpl-2.0
sndnvaps/android_kernel_lge_hammerhead
net/bluetooth/hidp/core.c
227
26118
/* HIDP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org> Copyright (c) 2012 The Linux Foundation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/freezer.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/init.h> #include <linux/wait.h> #include <net/sock.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/hidraw.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "hidp.h" #define VERSION "1.2" static DECLARE_RWSEM(hidp_session_sem); static LIST_HEAD(hidp_session_list); static unsigned char hidp_keycode[256] = { 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88, 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73, 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135, 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94, 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115, 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140 }; static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) { struct hidp_session *session; struct list_head *p; BT_DBG(""); list_for_each(p, &hidp_session_list) { 
session = list_entry(p, struct hidp_session, list); if (!bacmp(bdaddr, &session->bdaddr)) return session; } return NULL; } static void __hidp_link_session(struct hidp_session *session) { __module_get(THIS_MODULE); list_add(&session->list, &hidp_session_list); } static void __hidp_unlink_session(struct hidp_session *session) { bdaddr_t *dst = &session->bdaddr; struct hci_dev *hdev; struct device *dev = NULL; hdev = hci_get_route(dst, BDADDR_ANY); if (hdev) { session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (session->conn && session->conn->hidp_session_valid) dev = &session->conn->dev; hci_dev_put(hdev); } if (dev) hci_conn_put_device(session->conn); list_del(&session->list); module_put(THIS_MODULE); } static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) { memset(ci, 0, sizeof(*ci)); bacpy(&ci->bdaddr, &session->bdaddr); ci->flags = session->flags; ci->state = session->state; ci->vendor = 0x0000; ci->product = 0x0000; ci->version = 0x0000; if (session->input) { ci->vendor = session->input->id.vendor; ci->product = session->input->id.product; ci->version = session->input->id.version; if (session->input->name) strncpy(ci->name, session->input->name, 128); else strncpy(ci->name, "HID Boot Device", 128); } if (session->hid) { ci->vendor = session->hid->vendor; ci->product = session->hid->product; ci->version = session->hid->version; strncpy(ci->name, session->hid->name, 128); } } static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned char newleds; struct sk_buff *skb; BT_DBG("session %p type %d code %d value %d", session, type, code, value); if (type != EV_LED) return -1; newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | (!!test_bit(LED_NUML, dev->led)); if (session->leds == newleds) return 0; session->leds = 
newleds; skb = alloc_skb(3, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; *skb_put(skb, 1) = 0x01; *skb_put(skb, 1) = newleds; skb_queue_tail(&session->intr_transmit, skb); hidp_schedule(session); return 0; } static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct hid_device *hid = input_get_drvdata(dev); struct hidp_session *session = hid->driver_data; return hidp_queue_event(session, dev, type, code, value); } static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct hidp_session *session = input_get_drvdata(dev); return hidp_queue_event(session, dev, type, code, value); } static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) { struct input_dev *dev = session->input; unsigned char *keys = session->keys; unsigned char *udata = skb->data + 1; signed char *sdata = skb->data + 1; int i, size = skb->len - 1; switch (skb->data[0]) { case 0x01: /* Keyboard report */ for (i = 0; i < 8; i++) input_report_key(dev, hidp_keycode[i + 224], (udata[0] >> i) & 1); /* If all the key codes have been set to 0x01, it means * too many keys were pressed at the same time. 
*/ if (!memcmp(udata + 2, hidp_mkeyspat, 6)) break; for (i = 2; i < 8; i++) { if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { if (hidp_keycode[keys[i]]) input_report_key(dev, hidp_keycode[keys[i]], 0); else BT_ERR("Unknown key (scancode %#x) released.", keys[i]); } if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { if (hidp_keycode[udata[i]]) input_report_key(dev, hidp_keycode[udata[i]], 1); else BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); } } memcpy(keys, udata, 8); break; case 0x02: /* Mouse report */ input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); input_report_rel(dev, REL_X, sdata[1]); input_report_rel(dev, REL_Y, sdata[2]); if (size > 3) input_report_rel(dev, REL_WHEEL, sdata[3]); break; } input_sync(dev); } static int __hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { struct sk_buff *skb; BT_DBG("session %p data %p size %d", session, data, size); skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = hdr; if (data && size > 0) memcpy(skb_put(skb, size), data, size); skb_queue_tail(&session->ctrl_transmit, skb); return 0; } static inline int hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { int err; err = __hidp_send_ctrl_message(session, hdr, data, size); hidp_schedule(session); return err; } static int hidp_queue_report(struct hidp_session *session, unsigned char *data, int size) { struct sk_buff *skb; BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = 0xa2; if 
(size > 0) memcpy(skb_put(skb, size), data, size); skb_queue_tail(&session->intr_transmit, skb); hidp_schedule(session); return 0; } static int hidp_send_report(struct hidp_session *session, struct hid_report *report) { unsigned char buf[32]; int rsize; rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); if (rsize > sizeof(buf)) return -EIO; hid_output_report(report, buf); return hidp_queue_report(session, buf, rsize); } static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, unsigned char report_type) { switch (report_type) { case HID_FEATURE_REPORT: report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; break; case HID_OUTPUT_REPORT: report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; break; default: return -EINVAL; } if (hidp_send_ctrl_message(hid->driver_data, report_type, data, count)) return -ENOMEM; return count; } static void hidp_idle_timeout(unsigned long arg) { struct hidp_session *session = (struct hidp_session *) arg; atomic_inc(&session->terminate); hidp_schedule(session); } static void hidp_set_timer(struct hidp_session *session) { if (session->idle_to > 0) mod_timer(&session->timer, jiffies + HZ * session->idle_to); } static inline void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) del_timer(&session->timer); } static void hidp_process_handshake(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); switch (param) { case HIDP_HSHK_SUCCESSFUL: /* FIXME: Call into SET_ GET_ handlers here */ break; case HIDP_HSHK_NOT_READY: case HIDP_HSHK_ERR_INVALID_REPORT_ID: case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: case HIDP_HSHK_ERR_INVALID_PARAMETER: /* FIXME: Call into SET_ GET_ handlers here */ break; case HIDP_HSHK_ERR_UNKNOWN: break; case HIDP_HSHK_ERR_FATAL: /* Device requests a reboot, as this is the only way this error * can be recovered. 
*/ __hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0); break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); break; } } static void hidp_process_hid_control(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) { /* Flush the transmit queues */ skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); /* Kill session thread */ atomic_inc(&session->terminate); hidp_schedule(session); } } static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb, unsigned char param) { BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param); switch (param) { case HIDP_DATA_RTYPE_INPUT: hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); break; case HIDP_DATA_RTYPE_OTHER: case HIDP_DATA_RTYPE_OUPUT: case HIDP_DATA_RTYPE_FEATURE: break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); } } static void hidp_recv_ctrl_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr, type, param; BT_DBG("session %p skb %p len %d", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); type = hdr & HIDP_HEADER_TRANS_MASK; param = hdr & HIDP_HEADER_PARAM_MASK; switch (type) { case HIDP_TRANS_HANDSHAKE: hidp_process_handshake(session, param); break; case HIDP_TRANS_HID_CONTROL: hidp_process_hid_control(session, param); break; case HIDP_TRANS_DATA: hidp_process_data(session, skb, param); break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0); break; } kfree_skb(skb); } static void hidp_recv_intr_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr; 
BT_DBG("session %p skb %p len %d", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) { hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) { hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); BT_DBG("report len %d", skb->len); } } else { BT_DBG("Unsupported protocol header 0x%02x", hdr); } kfree_skb(skb); } static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) { struct kvec iv = { data, len }; struct msghdr msg; BT_DBG("sock %p data %p len %d", sock, data, len); if (!len) return 0; memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, &iv, 1, len); } static void hidp_process_transmit(struct hidp_session *session) { struct sk_buff *skb; BT_DBG("session %p", session); while ((skb = skb_dequeue(&session->ctrl_transmit))) { if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { skb_queue_head(&session->ctrl_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } while ((skb = skb_dequeue(&session->intr_transmit))) { if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { skb_queue_head(&session->intr_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } } static int hidp_session(void *arg) { struct hidp_session *session = arg; struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *intr_sk = session->intr_sock->sk; struct sk_buff *skb; int vendor = 0x0000, product = 0x0000; wait_queue_t ctrl_wait, intr_wait; BT_DBG("session %p", session); if (session->input) { vendor = session->input->id.vendor; product = session->input->id.product; } if (session->hid) { vendor = session->hid->vendor; product = session->hid->product; } daemonize("khidpd_%04x%04x", vendor, product); set_user_nice(current, -15); init_waitqueue_entry(&ctrl_wait, current); init_waitqueue_entry(&intr_wait, current); add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); 
add_wait_queue(sk_sleep(intr_sk), &intr_wait); while (!atomic_read(&session->terminate)) { set_current_state(TASK_INTERRUPTIBLE); if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) break; while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) hidp_recv_ctrl_frame(session, skb); else kfree_skb(skb); } while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) hidp_recv_intr_frame(session, skb); else kfree_skb(skb); } hidp_process_transmit(session); schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(intr_sk), &intr_wait); remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); down_write(&hidp_session_sem); hidp_del_timer(session); if (session->input) { input_unregister_device(session->input); session->input = NULL; } if (session->hid) { hid_destroy_device(session->hid); session->hid = NULL; } /* Wakeup user-space polling for socket errors */ session->intr_sock->sk->sk_err = EUNATCH; session->ctrl_sock->sk->sk_err = EUNATCH; hidp_schedule(session); fput(session->intr_sock->file); wait_event_timeout(*(sk_sleep(ctrl_sk)), (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); fput(session->ctrl_sock->file); __hidp_unlink_session(session); up_write(&hidp_session_sem); kfree(session); return 0; } static struct hci_conn *hidp_get_connection(struct hidp_session *session) { bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; struct hci_conn *conn; struct hci_dev *hdev; hdev = hci_get_route(dst, src); if (!hdev) return NULL; hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (conn) { conn->hidp_session_valid = true; hci_conn_hold_device(conn); } hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return conn; } static int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { struct input_dev *input; int err, i; input = input_allocate_device(); if 
(!input) return -ENOMEM; session->input = input; input_set_drvdata(input, session); input->name = "Bluetooth HID Boot Protocol Device"; input->id.bustype = BUS_BLUETOOTH; input->id.vendor = req->vendor; input->id.product = req->product; input->id.version = req->version; if (req->subclass & 0x40) { set_bit(EV_KEY, input->evbit); set_bit(EV_LED, input->evbit); set_bit(EV_REP, input->evbit); set_bit(LED_NUML, input->ledbit); set_bit(LED_CAPSL, input->ledbit); set_bit(LED_SCROLLL, input->ledbit); set_bit(LED_COMPOSE, input->ledbit); set_bit(LED_KANA, input->ledbit); for (i = 0; i < sizeof(hidp_keycode); i++) set_bit(hidp_keycode[i], input->keybit); clear_bit(0, input->keybit); } if (req->subclass & 0x80) { input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); input->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); input->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); input->relbit[0] |= BIT_MASK(REL_WHEEL); } input->dev.parent = &session->conn->dev; input->event = hidp_input_event; err = input_register_device(input); if (err < 0) { hci_conn_put_device(session->conn); return err; } return 0; } static int hidp_open(struct hid_device *hid) { return 0; } static void hidp_close(struct hid_device *hid) { } static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; return hid_parse_report(session->hid, session->rd_data, session->rd_size); } static int hidp_start(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; struct hid_report *report; list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. report_list, list) hidp_send_report(session, report); list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT]. 
report_list, list) hidp_send_report(session, report); return 0; } static void hidp_stop(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); hid->claimed = 0; } static struct hid_ll_driver hidp_hid_driver = { .parse = hidp_parse, .start = hidp_start, .stop = hidp_stop, .open = hidp_open, .close = hidp_close, .hidinput_input_event = hidp_hidinput_event, }; static int hidp_setup_hid(struct hidp_session *session, struct hidp_connadd_req *req) { struct hid_device *hid; int err; session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); if (!session->rd_data) return -ENOMEM; if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { err = -EFAULT; goto fault; } session->rd_size = req->rd_size; hid = hid_allocate_device(); if (IS_ERR(hid)) { err = PTR_ERR(hid); goto fault; } session->hid = hid; hid->driver_data = session; hid->bus = BUS_BLUETOOTH; hid->vendor = req->vendor; hid->product = req->product; hid->version = req->version; hid->country = req->country; strncpy(hid->name, req->name, 128); strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); hid->dev.parent = &session->conn->dev; hid->ll_driver = &hidp_hid_driver; hid->hid_output_raw_report = hidp_output_raw_report; err = hid_add_device(hid); if (err < 0) goto failed; return 0; failed: hid_destroy_device(hid); session->hid = NULL; fault: kfree(session->rd_data); session->rd_data = NULL; return err; } int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) { struct hidp_session *session, *s; int err; BT_DBG(""); if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) || bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) return -ENOTUNIQ; session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); if (!session) return -ENOMEM; BT_DBG("rd_data %p rd_size 
%d", req->rd_data, req->rd_size); down_write(&hidp_session_sem); s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); if (s && s->state == BT_CONNECTED) { err = -EEXIST; goto failed; } bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); session->ctrl_sock = ctrl_sock; session->intr_sock = intr_sock; session->state = BT_CONNECTED; session->conn = hidp_get_connection(session); if (!session->conn) { err = -ENOTCONN; goto failed; } setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); skb_queue_head_init(&session->ctrl_transmit); skb_queue_head_init(&session->intr_transmit); session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); session->idle_to = req->idle_to; __hidp_link_session(session); if (req->rd_size > 0) { err = hidp_setup_hid(session, req); if (err && err != -ENODEV) goto purge; } if (!session->hid) { err = hidp_setup_input(session, req); if (err < 0) goto purge; } hidp_set_timer(session); err = kernel_thread(hidp_session, session, CLONE_KERNEL); if (err < 0) goto unlink; if (session->input) { hidp_send_ctrl_message(session, HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0); session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE); session->leds = 0xff; hidp_input_event(session->input, EV_LED, 0, 0); } up_write(&hidp_session_sem); return 0; unlink: hidp_del_timer(session); if (session->input) { input_unregister_device(session->input); session->input = NULL; } if (session->hid) { hid_destroy_device(session->hid); session->hid = NULL; } kfree(session->rd_data); session->rd_data = NULL; purge: __hidp_unlink_session(session); skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); failed: up_write(&hidp_session_sem); input_free_device(session->input); 
kfree(session); return err; } int hidp_del_connection(struct hidp_conndel_req *req) { struct hidp_session *session; int err = 0; BT_DBG(""); down_read(&hidp_session_sem); session = __hidp_get_session(&req->bdaddr); if (session) { if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) { hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0); } else { /* Flush the transmit queues */ skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); /* Wakeup user-space polling for socket errors */ session->intr_sock->sk->sk_err = EUNATCH; session->ctrl_sock->sk->sk_err = EUNATCH; /* Kill session thread */ atomic_inc(&session->terminate); hidp_schedule(session); } } else err = -ENOENT; up_read(&hidp_session_sem); return err; } int hidp_get_connlist(struct hidp_connlist_req *req) { struct list_head *p; int err = 0, n = 0; BT_DBG(""); down_read(&hidp_session_sem); list_for_each(p, &hidp_session_list) { struct hidp_session *session; struct hidp_conninfo ci; session = list_entry(p, struct hidp_session, list); __hidp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { err = -EFAULT; break; } if (++n >= req->cnum) break; req->ci++; } req->cnum = n; up_read(&hidp_session_sem); return err; } int hidp_get_conninfo(struct hidp_conninfo *ci) { struct hidp_session *session; int err = 0; down_read(&hidp_session_sem); session = __hidp_get_session(&ci->bdaddr); if (session) __hidp_copy_session(session, ci); else err = -ENOENT; up_read(&hidp_session_sem); return err; } static const struct hid_device_id hidp_table[] = { { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) }, { } }; static struct hid_driver hidp_driver = { .name = "generic-bluetooth", .id_table = hidp_table, }; static int __init hidp_init(void) { int ret; BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); ret = hid_register_driver(&hidp_driver); if (ret) goto err; ret = hidp_init_sockets(); if (ret) goto err_drv; return 0; err_drv: 
hid_unregister_driver(&hidp_driver); err: return ret; } static void __exit hidp_exit(void) { hidp_cleanup_sockets(); hid_unregister_driver(&hidp_driver); } module_init(hidp_init); module_exit(hidp_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-6");
gpl-2.0
androidaosp/kernel-msm
arch/sparc/kernel/perf_event.c
227
46087
/* Performance event support for sparc64. * * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> * * This code is based almost entirely upon the x86 perf event * code, which is: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> */ #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <asm/stacktrace.h> #include <asm/cpudata.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> #include <asm/cacheflush.h> #include "kernel.h" #include "kstack.h" /* Two classes of sparc64 chips currently exist. All of which have * 32-bit counters which can generate overflow interrupts on the * transition from 0xffffffff to 0. * * All chips upto and including SPARC-T3 have two performance * counters. The two 32-bit counters are accessed in one go using a * single 64-bit register. * * On these older chips both counters are controlled using a single * control register. The only way to stop all sampling is to clear * all of the context (user, supervisor, hypervisor) sampling enable * bits. But these bits apply to both counters, thus the two counters * can't be enabled/disabled individually. * * Furthermore, the control register on these older chips have two * event fields, one for each of the two counters. It's thus nearly * impossible to have one counter going while keeping the other one * stopped. Therefore it is possible to get overflow interrupts for * counters not currently "in use" and that condition must be checked * in the overflow interrupt handler. * * So we use a hack, in that we program inactive counters with the * "sw_count0" and "sw_count1" events. 
These count how many times * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an * unusual way to encode a NOP and therefore will not trigger in * normal code. * * Starting with SPARC-T4 we have one control register per counter. * And the counters are stored in individual registers. The registers * for the counters are 64-bit but only a 32-bit counter is * implemented. The event selections on SPARC-T4 lack any * restrictions, therefore we can elide all of the complicated * conflict resolution code we have for SPARC-T3 and earlier chips. */ #define MAX_HWEVENTS 4 #define MAX_PCRS 4 #define MAX_PERIOD ((1UL << 32) - 1) #define PIC_UPPER_INDEX 0 #define PIC_LOWER_INDEX 1 #define PIC_NO_INDEX -1 struct cpu_hw_events { /* Number of events currently scheduled onto this cpu. * This tells how many entries in the arrays below * are valid. */ int n_events; /* Number of new events added since the last hw_perf_disable(). * This works because the perf event layer always adds new * events inside of a perf_{disable,enable}() sequence. */ int n_added; /* Array of events current scheduled on this cpu. */ struct perf_event *event[MAX_HWEVENTS]; /* Array of encoded longs, specifying the %pcr register * encoding and the mask of PIC counters this even can * be scheduled on. See perf_event_encode() et al. */ unsigned long events[MAX_HWEVENTS]; /* The current counter index assigned to an event. When the * event hasn't been programmed into the cpu yet, this will * hold PIC_NO_INDEX. The event->hw.idx value tells us where * we ought to schedule the event. */ int current_idx[MAX_HWEVENTS]; /* Software copy of %pcr register(s) on this cpu. */ u64 pcr[MAX_HWEVENTS]; /* Enabled/disable state. */ int enabled; unsigned int group_flag; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; /* An event map describes the characteristics of a performance * counter event. 
In particular it gives the encoding as well as * a mask telling which counters the event can be measured on. * * The mask is unused on SPARC-T4 and later. */ struct perf_event_map { u16 encoding; u8 pic_mask; #define PIC_NONE 0x00 #define PIC_UPPER 0x01 #define PIC_LOWER 0x02 }; /* Encode a perf_event_map entry into a long. */ static unsigned long perf_event_encode(const struct perf_event_map *pmap) { return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; } static u8 perf_event_get_msk(unsigned long val) { return val & 0xff; } static u64 perf_event_get_enc(unsigned long val) { return val >> 16; } #define C(x) PERF_COUNT_HW_CACHE_##x #define CACHE_OP_UNSUPPORTED 0xfffe #define CACHE_OP_NONSENSE 0xffff typedef struct perf_event_map cache_map_t [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; struct sparc_pmu { const struct perf_event_map *(*event_map)(int); const cache_map_t *cache_map; int max_events; u32 (*read_pmc)(int); void (*write_pmc)(int, u64); int upper_shift; int lower_shift; int event_mask; int user_bit; int priv_bit; int hv_bit; int irq_bit; int upper_nop; int lower_nop; unsigned int flags; #define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001 #define SPARC_PMU_HAS_CONFLICTS 0x00000002 int max_hw_events; int num_pcrs; int num_pic_regs; }; static u32 sparc_default_read_pmc(int idx) { u64 val; val = pcr_ops->read_pic(0); if (idx == PIC_UPPER_INDEX) val >>= 32; return val & 0xffffffff; } static void sparc_default_write_pmc(int idx, u64 val) { u64 shift, mask, pic; shift = 0; if (idx == PIC_UPPER_INDEX) shift = 32; mask = ((u64) 0xffffffff) << shift; val <<= shift; pic = pcr_ops->read_pic(0); pic &= ~mask; pic |= val; pcr_ops->write_pic(0, pic); } static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, 
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; static const struct perf_event_map *ultra3_event_map(int event_id) { return &ultra3_perfmon_event_map[event_id]; } static const cache_map_t ultra3_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { 
[C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu ultra3_pmu = { .event_map = ultra3_event_map, .cache_map = &ultra3_cache_map, .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), .read_pmc = sparc_default_read_pmc, .write_pmc = sparc_default_write_pmc, .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, .user_bit = PCR_UTRACE, .priv_bit = PCR_STRACE, .upper_nop = 0x1c, .lower_nop = 0x14, .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | SPARC_PMU_HAS_CONFLICTS), .max_hw_events = 2, .num_pcrs = 1, .num_pic_regs = 1, }; /* Niagara1 is very limited. The upper PIC is hard-locked to count * only instructions, so it is free running which creates all kinds of * problems. Some hardware designs make one wonder if the creator * even looked at how this stuff gets used by software. 
*/ static const struct perf_event_map niagara1_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, }; static const struct perf_event_map *niagara1_event_map(int event_id) { return &niagara1_perfmon_event_map[event_id]; } static const cache_map_t niagara1_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, }, [ C(OP_WRITE) 
] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu niagara1_pmu = { .event_map = niagara1_event_map, .cache_map = &niagara1_cache_map, .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), .read_pmc = sparc_default_read_pmc, .write_pmc = sparc_default_write_pmc, .upper_shift = 0, .lower_shift = 4, .event_mask = 0x7, .user_bit = PCR_UTRACE, .priv_bit = PCR_STRACE, .upper_nop = 0x0, .lower_nop = 0x0, .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | SPARC_PMU_HAS_CONFLICTS), .max_hw_events = 2, .num_pcrs = 1, .num_pic_regs = 1, }; static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, }; static const struct perf_event_map 
*niagara2_event_map(int event_id) { return &niagara2_perfmon_event_map[event_id]; } static const cache_map_t niagara2_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { 
CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .read_pmc = sparc_default_read_pmc, .write_pmc = sparc_default_write_pmc, .upper_shift = 19, .lower_shift = 6, .event_mask = 0xfff, .user_bit = PCR_UTRACE, .priv_bit = PCR_STRACE, .hv_bit = PCR_N2_HTRACE, .irq_bit = 0x30, .upper_nop = 0x220, .lower_nop = 0x220, .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | SPARC_PMU_HAS_CONFLICTS), .max_hw_events = 2, .num_pcrs = 1, .num_pic_regs = 1, }; static const struct perf_event_map niagara4_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) }, [PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f }, [PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 }, [PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 }, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 }, [PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f }, }; static const struct perf_event_map *niagara4_event_map(int event_id) { return &niagara4_perfmon_event_map[event_id]; } static const cache_map_t niagara4_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 
}, [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { (3 << 6) | 0x3f }, [C(RESULT_MISS)] = { (11 << 6) | 0x03 }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { (17 << 6) | 0x3f }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { (6 << 6) | 0x3f }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { 
CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static u32 sparc_vt_read_pmc(int idx) { u64 val = pcr_ops->read_pic(idx); return val & 0xffffffff; } static void sparc_vt_write_pmc(int idx, u64 val) { u64 pcr; /* There seems to be an internal latch on the overflow event * on SPARC-T4 that prevents it from triggering unless you * update the PIC exactly as we do here. The requirement * seems to be that you have to turn off event counting in the * PCR around the PIC update. * * For example, after the following sequence: * * 1) set PIC to -1 * 2) enable event counting and overflow reporting in PCR * 3) overflow triggers, softint 15 handler invoked * 4) clear OV bit in PCR * 5) write PIC to -1 * * a subsequent overflow event will not trigger. This * sequence works on SPARC-T3 and previous chips. */ pcr = pcr_ops->read_pcr(idx); pcr_ops->write_pcr(idx, PCR_N4_PICNPT); pcr_ops->write_pic(idx, val & 0xffffffff); pcr_ops->write_pcr(idx, pcr); } static const struct sparc_pmu niagara4_pmu = { .event_map = niagara4_event_map, .cache_map = &niagara4_cache_map, .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), .read_pmc = sparc_vt_read_pmc, .write_pmc = sparc_vt_write_pmc, .upper_shift = 5, .lower_shift = 5, .event_mask = 0x7ff, .user_bit = PCR_N4_UTRACE, .priv_bit = PCR_N4_STRACE, /* We explicitly don't support hypervisor tracing. The T4 * generates the overflow event for precise events via a trap * which will not be generated (ie. it's completely lost) if * we happen to be in the hypervisor when the event triggers. 
* Essentially, the overflow event reporting is completely * unusable when you have hypervisor mode tracing enabled. */ .hv_bit = 0, .irq_bit = PCR_N4_TOE, .upper_nop = 0, .lower_nop = 0, .flags = 0, .max_hw_events = 4, .num_pcrs = 4, .num_pic_regs = 4, }; static const struct sparc_pmu *sparc_pmu __read_mostly; static u64 event_encoding(u64 event_id, int idx) { if (idx == PIC_UPPER_INDEX) event_id <<= sparc_pmu->upper_shift; else event_id <<= sparc_pmu->lower_shift; return event_id; } static u64 mask_for_index(int idx) { return event_encoding(sparc_pmu->event_mask, idx); } static u64 nop_for_index(int idx) { return event_encoding(idx == PIC_UPPER_INDEX ? sparc_pmu->upper_nop : sparc_pmu->lower_nop, idx); } static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 enc, val, mask = mask_for_index(idx); int pcr_index = 0; if (sparc_pmu->num_pcrs > 1) pcr_index = idx; enc = perf_event_get_enc(cpuc->events[idx]); val = cpuc->pcr[pcr_index]; val &= ~mask; val |= event_encoding(enc, idx); cpuc->pcr[pcr_index] = val; pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); } static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); int pcr_index = 0; u64 val; if (sparc_pmu->num_pcrs > 1) pcr_index = idx; val = cpuc->pcr[pcr_index]; val &= ~mask; val |= nop; cpuc->pcr[pcr_index] = val; pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); } static u64 sparc_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; s64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = sparc_pmu->read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; local64_add(delta, &event->count); 
local64_sub(delta, &hwc->period_left); return new_raw_count; } static int sparc_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > MAX_PERIOD) left = MAX_PERIOD; local64_set(&hwc->prev_count, (u64)-left); sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } static void read_in_all_counters(struct cpu_hw_events *cpuc) { int i; for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; if (cpuc->current_idx[i] != PIC_NO_INDEX && cpuc->current_idx[i] != cp->hw.idx) { sparc_perf_event_update(cp, &cp->hw, cpuc->current_idx[i]); cpuc->current_idx[i] = PIC_NO_INDEX; } } } /* On this PMU all PICs are programmed using a single PCR. Calculate * the combined control register value. * * For such chips we require that all of the events have the same * configuration, so just fetch the settings from the first entry. */ static void calculate_single_pcr(struct cpu_hw_events *cpuc) { int i; if (!cpuc->n_added) goto out; /* Assign to counters all unassigned events. */ for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; enc = perf_event_get_enc(cpuc->events[i]); cpuc->pcr[0] &= ~mask_for_index(idx); if (hwc->state & PERF_HES_STOPPED) cpuc->pcr[0] |= nop_for_index(idx); else cpuc->pcr[0] |= event_encoding(enc, idx); } out: cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; } /* On this PMU each PIC has it's own PCR control register. 
*/ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) { int i; if (!cpuc->n_added) goto out; for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; enc = perf_event_get_enc(cpuc->events[i]); cpuc->pcr[idx] &= ~mask_for_index(idx); if (hwc->state & PERF_HES_STOPPED) cpuc->pcr[idx] |= nop_for_index(idx); else cpuc->pcr[idx] |= event_encoding(enc, idx); } out: for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; int idx = cp->hw.idx; cpuc->pcr[idx] |= cp->hw.config_base; } } /* If performance event entries have been added, move existing events * around (if necessary) and then assign new entries to counters. */ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc) { if (cpuc->n_added) read_in_all_counters(cpuc); if (sparc_pmu->num_pcrs == 1) { calculate_single_pcr(cpuc); } else { calculate_multiple_pcrs(cpuc); } } static void sparc_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; if (cpuc->enabled) return; cpuc->enabled = 1; barrier(); if (cpuc->n_events) update_pcrs_for_enable(cpuc); for (i = 0; i < sparc_pmu->num_pcrs; i++) pcr_ops->write_pcr(i, cpuc->pcr[i]); } static void sparc_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; if (!cpuc->enabled) return; cpuc->enabled = 0; cpuc->n_added = 0; for (i = 0; i < sparc_pmu->num_pcrs; i++) { u64 val = cpuc->pcr[i]; val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit | sparc_pmu->hv_bit | sparc_pmu->irq_bit); cpuc->pcr[i] = val; pcr_ops->write_pcr(i, cpuc->pcr[i]); } } static int active_event_index(struct cpu_hw_events *cpuc, struct perf_event *event) { int i; for (i = 0; i < cpuc->n_events; i++) { if (cpuc->event[i] == event) break; } BUG_ON(i == cpuc->n_events); return 
cpuc->current_idx[i]; } static void sparc_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx = active_event_index(cpuc, event); if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sparc_perf_event_set_period(event, &event->hw, idx); } event->hw.state = 0; sparc_pmu_enable_event(cpuc, &event->hw, idx); } static void sparc_pmu_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx = active_event_index(cpuc, event); if (!(event->hw.state & PERF_HES_STOPPED)) { sparc_pmu_disable_event(cpuc, &event->hw, idx); event->hw.state |= PERF_HES_STOPPED; } if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { sparc_perf_event_update(event, &event->hw, idx); event->hw.state |= PERF_HES_UPTODATE; } } static void sparc_pmu_del(struct perf_event *event, int _flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned long flags; int i; local_irq_save(flags); perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { /* Absorb the final count and turn off the * event. */ sparc_pmu_stop(event, PERF_EF_UPDATE); /* Shift remaining entries down into * the existing slot. 
*/ while (++i < cpuc->n_events) { cpuc->event[i - 1] = cpuc->event[i]; cpuc->events[i - 1] = cpuc->events[i]; cpuc->current_idx[i - 1] = cpuc->current_idx[i]; } perf_event_update_userpage(event); cpuc->n_events--; break; } } perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void sparc_pmu_read(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx = active_event_index(cpuc, event); struct hw_perf_event *hwc = &event->hw; sparc_perf_event_update(event, hwc, idx); } static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); static void perf_stop_nmi_watchdog(void *unused) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; stop_nmi_watchdog(NULL); for (i = 0; i < sparc_pmu->num_pcrs; i++) cpuc->pcr[i] = pcr_ops->read_pcr(i); } static void perf_event_grab_pmc(void) { if (atomic_inc_not_zero(&active_events)) return; mutex_lock(&pmc_grab_mutex); if (atomic_read(&active_events) == 0) { if (atomic_read(&nmi_active) > 0) { on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); BUG_ON(atomic_read(&nmi_active) != 0); } atomic_inc(&active_events); } mutex_unlock(&pmc_grab_mutex); } static void perf_event_release_pmc(void) { if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { if (atomic_read(&nmi_active) == 0) on_each_cpu(start_nmi_watchdog, NULL, 1); mutex_unlock(&pmc_grab_mutex); } } static const struct perf_event_map *sparc_map_cache_event(u64 config) { unsigned int cache_type, cache_op, cache_result; const struct perf_event_map *pmap; if (!sparc_pmu->cache_map) return ERR_PTR(-ENOENT); cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return ERR_PTR(-EINVAL); cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return ERR_PTR(-EINVAL); cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return ERR_PTR(-EINVAL); pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); if 
(pmap->encoding == CACHE_OP_UNSUPPORTED) return ERR_PTR(-ENOENT); if (pmap->encoding == CACHE_OP_NONSENSE) return ERR_PTR(-EINVAL); return pmap; } static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); } /* Make sure all events can be scheduled into the hardware at * the same time. This is simplified by the fact that we only * need to support 2 simultaneous HW events. * * As a side effect, the evts[]->hw.idx values will be assigned * on success. These are pending indexes. When the events are * actually programmed into the chip, these values will propagate * to the per-cpu cpuc->current_idx[] slots, see the code in * maybe_change_configuration() for details. */ static int sparc_check_constraints(struct perf_event **evts, unsigned long *events, int n_ev) { u8 msk0 = 0, msk1 = 0; int idx0 = 0; /* This case is possible when we are invoked from * hw_perf_group_sched_in(). */ if (!n_ev) return 0; if (n_ev > sparc_pmu->max_hw_events) return -1; if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) { int i; for (i = 0; i < n_ev; i++) evts[i]->hw.idx = i; return 0; } msk0 = perf_event_get_msk(events[0]); if (n_ev == 1) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } BUG_ON(n_ev != 2); msk1 = perf_event_get_msk(events[1]); /* If both events can go on any counter, OK. */ if (msk0 == (PIC_UPPER | PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER)) goto success; /* If one event is limited to a specific counter, * and the other can go on both, OK. */ if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER)) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && msk0 == (PIC_UPPER | PIC_LOWER)) { if (msk1 & PIC_UPPER) idx0 = 1; goto success; } /* If the events are fixed to different counters, OK. */ if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } /* Otherwise, there is a conflict. 
*/ return -1; success: evts[0]->hw.idx = idx0; if (n_ev == 2) evts[1]->hw.idx = idx0 ^ 1; return 0; } static int check_excludes(struct perf_event **evts, int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; struct perf_event *event; int i, n, first; if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME)) return 0; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; i++) { event = evts[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } return 0; } static int collect_events(struct perf_event *group, int max_count, struct perf_event *evts[], unsigned long *events, int *current_idx) { struct perf_event *event; int n = 0; if (!is_software_event(group)) { if (n >= max_count) return -1; evts[n] = group; events[n] = group->hw.event_base; current_idx[n++] = PIC_NO_INDEX; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; evts[n] = event; events[n] = event->hw.event_base; current_idx[n++] = PIC_NO_INDEX; } } return n; } static int sparc_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int n0, ret = -EAGAIN; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= sparc_pmu->max_hw_events) goto out; cpuc->event[n0] = event; cpuc->events[n0] = event->hw.event_base; cpuc->current_idx[n0] = PIC_NO_INDEX; event->hw.state = PERF_HES_UPTODATE; if (!(ef_flags & PERF_EF_START)) event->hw.state |= PERF_HES_STOPPED; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) goto nocheck; if 
(check_excludes(cpuc->event, n0, 1)) goto out; if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) goto out; nocheck: cpuc->n_events++; cpuc->n_added++; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } static int sparc_pmu_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; unsigned long events[MAX_HWEVENTS]; int current_idx_dmy[MAX_HWEVENTS]; const struct perf_event_map *pmap; int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (attr->type) { case PERF_TYPE_HARDWARE: if (attr->config >= sparc_pmu->max_events) return -EINVAL; pmap = sparc_pmu->event_map(attr->config); break; case PERF_TYPE_HW_CACHE: pmap = sparc_map_cache_event(attr->config); if (IS_ERR(pmap)) return PTR_ERR(pmap); break; case PERF_TYPE_RAW: pmap = NULL; break; default: return -ENOENT; } if (pmap) { hwc->event_base = perf_event_encode(pmap); } else { /* * User gives us "(encoding << 16) | pic_mask" for * PERF_TYPE_RAW events. */ hwc->event_base = attr->config; } /* We save the enable bits in the config_base. */ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) hwc->config_base |= sparc_pmu->user_bit; if (!attr->exclude_kernel) hwc->config_base |= sparc_pmu->priv_bit; if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, sparc_pmu->max_hw_events - 1, evts, events, current_idx_dmy); if (n < 0) return -EINVAL; } events[n] = hwc->event_base; evts[n] = event; if (check_excludes(evts, n, 1)) return -EINVAL; if (sparc_check_constraints(evts, events, n + 1)) return -EINVAL; hwc->idx = PIC_NO_INDEX; /* Try to do all error checking before this point, as unwinding * state after grabbing the PMC is difficult. 
*/ perf_event_grab_pmc(); event->destroy = hw_perf_event_destroy; if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } return 0; } /* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ static void sparc_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; } /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ static void sparc_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); } /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 if success */ static int sparc_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int n; if (!sparc_pmu) return -EINVAL; cpuc = this_cpu_ptr(&cpu_hw_events); n = cpuc->n_events; if (check_excludes(cpuc->event, 0, n)) return -EINVAL; if (sparc_check_constraints(cpuc->event, cpuc->events, n)) return -EAGAIN; cpuc->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); return 0; } static struct pmu pmu = { .pmu_enable = sparc_pmu_enable, .pmu_disable = sparc_pmu_disable, .event_init = sparc_pmu_event_init, .add = sparc_pmu_add, .del = sparc_pmu_del, .start = sparc_pmu_start, .stop = sparc_pmu_stop, .read = sparc_pmu_read, .start_txn = sparc_pmu_start_txn, .cancel_txn = sparc_pmu_cancel_txn, .commit_txn = sparc_pmu_commit_txn, }; void perf_event_print_debug(void) { unsigned long flags; int cpu, i; if (!sparc_pmu) return; local_irq_save(flags); cpu = smp_processor_id(); pr_info("\n"); for (i = 0; i < sparc_pmu->num_pcrs; i++) pr_info("CPU#%d: PCR%d[%016llx]\n", cpu, i, 
pcr_ops->read_pcr(i)); for (i = 0; i < sparc_pmu->num_pic_regs; i++) pr_info("CPU#%d: PIC%d[%016llx]\n", cpu, i, pcr_ops->read_pic(i)); local_irq_restore(flags); } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int i; if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { case DIE_NMI: break; default: return NOTIFY_DONE; } regs = args->regs; cpuc = this_cpu_ptr(&cpu_hw_events); /* If the PMU has the TOE IRQ enable bits, we need to do a * dummy write to the %pcr to clear the overflow bits and thus * the interrupt. * * Do this before we peek at the counters to determine * overflow so we don't lose any events. */ if (sparc_pmu->irq_bit && sparc_pmu->num_pcrs == 1) pcr_ops->write_pcr(0, cpuc->pcr[0]); for (i = 0; i < cpuc->n_events; i++) { struct perf_event *event = cpuc->event[i]; int idx = cpuc->current_idx[i]; struct hw_perf_event *hwc; u64 val; if (sparc_pmu->irq_bit && sparc_pmu->num_pcrs > 1) pcr_ops->write_pcr(idx, cpuc->pcr[idx]); hwc = &event->hw; val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) continue; perf_sample_data_init(&data, 0, hwc->last_period); if (!sparc_perf_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } return NOTIFY_STOP; } static __read_mostly struct notifier_block perf_event_nmi_notifier = { .notifier_call = perf_event_nmi_handler, }; static bool __init supported_pmu(void) { if (!strcmp(sparc_pmu_type, "ultra3") || !strcmp(sparc_pmu_type, "ultra3+") || !strcmp(sparc_pmu_type, "ultra3i") || !strcmp(sparc_pmu_type, "ultra4+")) { sparc_pmu = &ultra3_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara")) { sparc_pmu = &niagara1_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2") || !strcmp(sparc_pmu_type, "niagara3")) { sparc_pmu = &niagara2_pmu; return true; 
} if (!strcmp(sparc_pmu_type, "niagara4") || !strcmp(sparc_pmu_type, "niagara5")) { sparc_pmu = &niagara4_pmu; return true; } return false; } static int __init init_hw_perf_events(void) { int err; pr_info("Performance events: "); err = pcr_arch_init(); if (err || !supported_pmu()) { pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); return 0; } pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); register_die_notifier(&perf_event_nmi_notifier); return 0; } pure_initcall(init_hw_perf_events); void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ksp, fp; #ifdef CONFIG_FUNCTION_GRAPH_TRACER int graph = 0; #endif stack_trace_flush(); perf_callchain_store(entry, regs->tpc); ksp = regs->u_regs[UREG_I6]; fp = ksp + STACK_BIAS; do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; if (!kstack_valid(current_thread_info(), fp)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); if (kstack_is_trap_frame(current_thread_info(), regs)) { if (user_mode(regs)) break; pc = regs->tpc; fp = regs->u_regs[UREG_I6] + STACK_BIAS; } else { pc = sf->callers_pc; fp = (unsigned long)sf->fp + STACK_BIAS; } perf_callchain_store(entry, pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = current->curr_ret_stack; if (current->ret_stack && index >= graph) { pc = current->ret_stack[index - graph].ret; perf_callchain_store(entry, pc); graph++; } } #endif } while (entry->nr < PERF_MAX_STACK_DEPTH); } static void perf_callchain_user_64(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ufp; ufp = regs->u_regs[UREG_I6] + STACK_BIAS; do { struct sparc_stackf __user *usf; struct sparc_stackf sf; unsigned long pc; usf = (struct sparc_stackf __user *)ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; pc = sf.callers_pc; ufp = (unsigned long)sf.fp + STACK_BIAS; 
perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } static void perf_callchain_user_32(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ufp; ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { unsigned long pc; if (thread32_stack_is_64bit(ufp)) { struct sparc_stackf __user *usf; struct sparc_stackf sf; ufp += STACK_BIAS; usf = (struct sparc_stackf __user *)ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; pc = sf.callers_pc & 0xffffffff; ufp = ((unsigned long) sf.fp) & 0xffffffff; } else { struct sparc_stackf32 __user *usf; struct sparc_stackf32 sf; usf = (struct sparc_stackf32 __user *)ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; pc = sf.callers_pc; ufp = (unsigned long)sf.fp; } perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { perf_callchain_store(entry, regs->tpc); if (!current->mm) return; flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); else perf_callchain_user_64(entry, regs); }
gpl-2.0
oldzhu/linux
drivers/clk/versatile/clk-sp810.c
483
3853
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Copyright (C) 2013 ARM Limited */ #include <linux/amba/sp810.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_address.h> #define to_clk_sp810_timerclken(_hw) \ container_of(_hw, struct clk_sp810_timerclken, hw) struct clk_sp810; struct clk_sp810_timerclken { struct clk_hw hw; struct clk *clk; struct clk_sp810 *sp810; int channel; }; struct clk_sp810 { struct device_node *node; void __iomem *base; spinlock_t lock; struct clk_sp810_timerclken timerclken[4]; }; static u8 clk_sp810_timerclken_get_parent(struct clk_hw *hw) { struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw); u32 val = readl(timerclken->sp810->base + SCCTRL); return !!(val & (1 << SCCTRL_TIMERENnSEL_SHIFT(timerclken->channel))); } static int clk_sp810_timerclken_set_parent(struct clk_hw *hw, u8 index) { struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw); struct clk_sp810 *sp810 = timerclken->sp810; u32 val, shift = SCCTRL_TIMERENnSEL_SHIFT(timerclken->channel); unsigned long flags = 0; if (WARN_ON(index > 1)) return -EINVAL; spin_lock_irqsave(&sp810->lock, flags); val = readl(sp810->base + SCCTRL); val &= ~(1 << shift); val |= index << shift; writel(val, sp810->base + SCCTRL); spin_unlock_irqrestore(&sp810->lock, flags); return 0; } static const struct clk_ops clk_sp810_timerclken_ops = { .get_parent = clk_sp810_timerclken_get_parent, .set_parent = clk_sp810_timerclken_set_parent, }; static struct clk *clk_sp810_timerclken_of_get(struct 
of_phandle_args *clkspec, void *data) { struct clk_sp810 *sp810 = data; if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken))) return NULL; return sp810->timerclken[clkspec->args[0]].clk; } static void __init clk_sp810_of_setup(struct device_node *node) { struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL); const char *parent_names[2]; int num = ARRAY_SIZE(parent_names); char name[12]; struct clk_init_data init; static int instance; int i; bool deprecated; if (!sp810) return; if (of_clk_parent_fill(node, parent_names, num) != num) { pr_warn("Failed to obtain parent clocks for SP810!\n"); kfree(sp810); return; } sp810->node = node; sp810->base = of_iomap(node, 0); spin_lock_init(&sp810->lock); init.name = name; init.ops = &clk_sp810_timerclken_ops; init.flags = CLK_IS_BASIC; init.parent_names = parent_names; init.num_parents = num; deprecated = !of_find_property(node, "assigned-clock-parents", NULL); for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) { snprintf(name, sizeof(name), "sp810_%d_%d", instance, i); sp810->timerclken[i].sp810 = sp810; sp810->timerclken[i].channel = i; sp810->timerclken[i].hw.init = &init; /* * If DT isn't setting the parent, force it to be * the 1 MHz clock without going through the framework. * We do this before clk_register() so that it can determine * the parent and setup the tree properly. */ if (deprecated) init.ops->set_parent(&sp810->timerclken[i].hw, 1); sp810->timerclken[i].clk = clk_register(NULL, &sp810->timerclken[i].hw); WARN_ON(IS_ERR(sp810->timerclken[i].clk)); } of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810); instance++; } CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
gpl-2.0
webore/lenovo
drivers/infiniband/hw/qib/qib_iba7322.c
739
253375
/* * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* * This file contains all of the code that is specific to the * InfiniPath 7322 chip */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/jiffies.h> #include <rdma/ib_verbs.h> #include <rdma/ib_smi.h> #include "qib.h" #include "qib_7322_regs.h" #include "qib_qsfp.h" #include "qib_mad.h" static void qib_setup_7322_setextled(struct qib_pportdata *, u32); static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op); static irqreturn_t qib_7322intr(int irq, void *data); static irqreturn_t qib_7322bufavail(int irq, void *data); static irqreturn_t sdma_intr(int irq, void *data); static irqreturn_t sdma_idle_intr(int irq, void *data); static irqreturn_t sdma_progress_intr(int irq, void *data); static irqreturn_t sdma_cleanup_intr(int irq, void *data); static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32, struct qib_ctxtdata *rcd); static u8 qib_7322_phys_portstate(u64); static u32 qib_7322_iblink_state(u64); static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, u16 linitcmd); static void force_h1(struct qib_pportdata *); static void adj_tx_serdes(struct qib_pportdata *); static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8); static void qib_7322_mini_pcs_reset(struct qib_pportdata *); static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); static void serdes_7322_los_enable(struct qib_pportdata *, int); static int serdes_7322_init_old(struct qib_pportdata *); static int serdes_7322_init_new(struct qib_pportdata *); #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) /* LE2 serdes values for different cases */ #define LE2_DEFAULT 5 #define LE2_5m 4 #define LE2_QME 0 /* Below is special-purpose, so only really works for the IB SerDes blocks. 
*/ #define IBSD(hw_pidx) (hw_pidx + 2) /* these are variables for documentation and experimentation purposes */ static const unsigned rcv_int_timeout = 375; static const unsigned rcv_int_count = 16; static const unsigned sdma_idle_cnt = 64; /* Time to stop altering Rx Equalization parameters, after link up. */ #define RXEQ_DISABLE_MSECS 2500 /* * Number of VLs we are configured to use (to allow for more * credits per vl, etc.) */ ushort qib_num_cfg_vls = 2; module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO); MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); static ushort qib_chase = 1; module_param_named(chase, qib_chase, ushort, S_IRUGO); MODULE_PARM_DESC(chase, "Enable state chase handling"); static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); MODULE_PARM_DESC(long_attenuation, \ "attenuation cutoff (dB) for long copper cable setup"); static ushort qib_singleport; module_param_named(singleport, qib_singleport, ushort, S_IRUGO); MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); /* * Receive header queue sizes */ static unsigned qib_rcvhdrcnt; module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO); MODULE_PARM_DESC(rcvhdrcnt, "receive header count"); static unsigned qib_rcvhdrsize; module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO); MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words"); static unsigned qib_rcvhdrentsize; module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO); MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words"); #define MAX_ATTEN_LEN 64 /* plenty for any real system */ /* for read back, default index is ~5m copper cable */ static char txselect_list[MAX_ATTEN_LEN] = "10"; static struct kparam_string kp_txselect = { .string = txselect_list, .maxlen = MAX_ATTEN_LEN }; static int setup_txselect(const char *, struct kernel_param *); 
module_param_call(txselect, setup_txselect, param_get_string, &kp_txselect, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(txselect, \ "Tx serdes indices (for no QSFP or invalid QSFP data)"); #define BOARD_QME7342 5 #define BOARD_QMH7342 6 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ BOARD_QMH7342) #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ BOARD_QME7342) #define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64)) #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64))) #define MASK_ACROSS(lsb, msb) \ (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb)) #define SYM_RMASK(regname, fldname) ((u64) \ QIB_7322_##regname##_##fldname##_RMASK) #define SYM_MASK(regname, fldname) ((u64) \ QIB_7322_##regname##_##fldname##_RMASK << \ QIB_7322_##regname##_##fldname##_LSB) #define SYM_FIELD(value, regname, fldname) ((u64) \ (((value) >> SYM_LSB(regname, fldname)) & \ SYM_RMASK(regname, fldname))) /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */ #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \ (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits)) #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask) #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask) #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port) /* Below because most, but not all, fields of IntMask have that full suffix */ #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port) #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB) /* * the size bits give us 2^N, in KB units. 0 marks as invalid, * and 7 is reserved. 
We currently use only 2KB and 4KB */ #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */ #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */ #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ #define SendIBSLIDAssignMask \ QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK #define SendIBSLMCMask \ QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn) #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn) #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn) #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn) #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN) #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN) #define _QIB_GPIO_SDA_NUM 1 #define _QIB_GPIO_SCL_NUM 0 #define QIB_EEPROM_WEN_NUM 14 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */ /* HW counter clock is at 4nsec */ #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000 /* full speed IB port 1 only */ #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR) #define PORT_SPD_CAP_SHIFT 3 /* full speed featuremask, both ports */ #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT)) /* * This file contains almost all the chip-specific register information and * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip. 
*/ /* Use defines to tie machine-generated names to lower-case names */ #define kr_contextcnt KREG_IDX(ContextCnt) #define kr_control KREG_IDX(Control) #define kr_counterregbase KREG_IDX(CntrRegBase) #define kr_errclear KREG_IDX(ErrClear) #define kr_errmask KREG_IDX(ErrMask) #define kr_errstatus KREG_IDX(ErrStatus) #define kr_extctrl KREG_IDX(EXTCtrl) #define kr_extstatus KREG_IDX(EXTStatus) #define kr_gpio_clear KREG_IDX(GPIOClear) #define kr_gpio_mask KREG_IDX(GPIOMask) #define kr_gpio_out KREG_IDX(GPIOOut) #define kr_gpio_status KREG_IDX(GPIOStatus) #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) #define kr_debugportval KREG_IDX(DebugPortValueReg) #define kr_fmask KREG_IDX(feature_mask) #define kr_act_fmask KREG_IDX(active_feature_mask) #define kr_hwerrclear KREG_IDX(HwErrClear) #define kr_hwerrmask KREG_IDX(HwErrMask) #define kr_hwerrstatus KREG_IDX(HwErrStatus) #define kr_intclear KREG_IDX(IntClear) #define kr_intmask KREG_IDX(IntMask) #define kr_intredirect KREG_IDX(IntRedirect0) #define kr_intstatus KREG_IDX(IntStatus) #define kr_pagealign KREG_IDX(PageAlign) #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0) #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */ #define kr_rcvegrbase KREG_IDX(RcvEgrBase) #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) #define kr_rcvtidbase KREG_IDX(RcvTIDBase) #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) #define kr_revision KREG_IDX(Revision) #define kr_scratch KREG_IDX(Scratch) #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */ #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */ #define kr_sendctrl KREG_IDX(SendCtrl) #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */ #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */ #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) #define kr_sendpiobufbase 
KREG_IDX(SendBufBase) #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) #define kr_sendpiosize KREG_IDX(SendBufSize) #define kr_sendregbase KREG_IDX(SendRegBase) #define kr_sendbufavail0 KREG_IDX(SendBufAvail0) #define kr_userregbase KREG_IDX(UserRegBase) #define kr_intgranted KREG_IDX(Int_Granted) #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int) #define kr_intblocked KREG_IDX(IntBlocked) #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG) /* * per-port kernel registers. Access only with qib_read_kreg_port() * or qib_write_kreg_port() */ #define krp_errclear KREG_IBPORT_IDX(ErrClear) #define krp_errmask KREG_IBPORT_IDX(ErrMask) #define krp_errstatus KREG_IBPORT_IDX(ErrStatus) #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0) #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit) #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID) #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig) #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA) #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB) #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC) #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA) #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB) #define krp_txestatus KREG_IBPORT_IDX(TXEStatus) #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0) #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl) #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey) #define krp_psinterval KREG_IBPORT_IDX(PSInterval) #define krp_psstart KREG_IBPORT_IDX(PSStart) #define krp_psstat KREG_IBPORT_IDX(PSStat) #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP) #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl) #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt) #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA) #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0) #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15) #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl) #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl) #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase) #define 
krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0) #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1) #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2) #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0) #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1) #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2) #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt) #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead) #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr) #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt) #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen) #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld) #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt) #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus) #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail) #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom) #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign) #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask) #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX) #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD) #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE) #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) /* * Per-context kernel registers. Access only with qib_read_kreg_ctxt() * or qib_write_kreg_ctxt() */ #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0) #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) /* * TID Flow table, per context. Reduces * number of hdrq updates to one per flow (or on errors). * context 0 and 1 share same memory, but have distinct * addresses. Since for now, we never use expected sends * on kernel contexts, we don't worry about that (we initialize * those entries for ctxt 0/1 on driver load twice, for example). 
*/ #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */ #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0)) /* these are the error bits in the tid flows, and are W1C */ #define TIDFLOW_ERRBITS ( \ (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \ SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \ (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \ SYM_LSB(RcvTIDFlowTable0, SeqMismatch))) /* Most (not all) Counters are per-IBport. * Requires LBIntCnt is at offset 0 in the group */ #define CREG_IDX(regname) \ ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64)) #define crp_badformat CREG_IDX(RxVersionErrCnt) #define crp_err_rlen CREG_IDX(RxLenErrCnt) #define crp_erricrc CREG_IDX(RxICRCErrCnt) #define crp_errlink CREG_IDX(RxLinkMalformCnt) #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt) #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt) #define crp_errvcrc CREG_IDX(RxVCRCErrCnt) #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt) #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt) #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt) #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) #define crp_pktrcv CREG_IDX(RxDataPktCnt) #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) #define crp_pktsend CREG_IDX(TxDataPktCnt) #define crp_pktsendflow CREG_IDX(TxFlowPktCnt) #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount) #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount) #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount) #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount) #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount) #define crp_rcvebp CREG_IDX(RxEBPCnt) #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt) #define crp_rcvovfl CREG_IDX(RxBufOvflCnt) #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt) #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt) 
/* per-port counter offsets, continued */
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
/*
 * Note: the original had this define duplicated verbatim; the redundant
 * second definition has been removed (identical expansion, no behavior
 * change).
 */
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

/*
 * Inter-packet delay (in some internal unit) indexed by IB rate enum;
 * higher rates need less delay.  Rates above 40G are left at 0
 * (presumably meaning "no delay" -- TODO confirm against callers).
 */
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

/*
 * Map the 5-bit IBC link-training state to the generic physical-port
 * state reported to the IB core.  Unlisted entries default to 0.
 */
static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

/* per-device (not per-port) chip-specific state, mostly register shadows */
struct qib_chip_specific {
	u64 __iomem *cregbase;	/* mapped base of counter registers */
	u64 *cntrs;		/* counter snapshot buffer */
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask;      /* clear bits which have dedicated handlers */
	u64 int_enable_mask;  /* for per port interrupts in single port mode */
	u64 errormask;		/* shadow of error-mask register */
	u64 hwerrmask;		/* shadow of hw-error-mask register */
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;		/* nonzero on rev-1 silicon; gates workarounds */
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
	struct msix_entry *msix_entries;
	void **msix_arg;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of entries in "human readable" form Tx Emphasis. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

/* per-vendor/part QSFP serdes Tx settings, one entry per IB speed */
struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

/* per-port chip-specific state */
struct qib_chippport_specific {
	u64 __iomem *kpregbase;	/* mapped per-port kernel registers */
	u64 __iomem *cpregbase;	/* mapped per-port counter registers */
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors. They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	u64 qdr_dfe_time;
	u64 chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep;  /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
};

/*
 * MSI-X vector table: name, handler, IntStatus bit, and owning port
 * (0 = device-wide).  The first entry (lsb == -1) is the catch-all.
 */
static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
} irq_table[] = {
	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0 },
	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1 },
	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
};

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

/* forward declarations for routines used before their definitions */
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	/* returns 0 (not -1) if device is absent or not yet mapped */
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	/* returns 0 (not -1) if device is absent or not yet mapped */
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;
	/*
	 * Per-context registers live either in a separately mapped user
	 * region (userbase) or at uregbase inside the kernel mapping.
	 */
	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

/* read a 32-bit kernel register; -1 if the device is absent */
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
}

/* read a 64-bit kernel register; -1 if the device is absent */
static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

/* write a 64-bit kernel register; silently ignored if device absent */
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
/* read a per-port kernel register; 0 if port registers are not mapped */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

/* write a per-port kernel register; no-op if port registers not mapped */
static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	/* per-context registers are laid out consecutively after regno */
	qib_write_kreg(dd, regno + ctxt, value);
}

/* read a 64-bit device counter register */
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

/* read a 32-bit device counter register */
static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

/* write a per-port counter register; no-op if not mapped */
static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

/* read a 64-bit per-port counter register; 0 if not mapped */
static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

/* read a 32-bit per-port counter register; 0 if not mapped */
static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	INT_MASK_P(SDmaProgress, pidx) | \
	INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	QIB_I_SPIOSENT | \
	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

/* all interrupt bits the driver knows about, both ports plus device */
#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or because they indicate errors with no recovery,
 * but we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error Bits that Packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
		ERR_MASK_N(SendUnsupportedVLErr) | \
		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received.  This doesn't
 * cover things like EBP or VCRC that can be the result of a sending
 * having the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

/* build { mask, message } entries for hw-error decode tables */
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0 }	/* terminator */
};

/* build { mask, message } entries for device / per-port error tables */
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(RcvEgrFullErr),
	{ .mask = 0 }	/* terminator */
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, make it clearer;
	 */
	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0 }	/* terminator */
};

/*
 * Below generates "auto-message" for interrupts not specific to any port or
 * context
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P" }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P" }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C"}

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt),      /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0 }	/* terminator */
};

/* send-header-check symptom decode entries */
#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0 }	/* terminator */
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
 * because we don't need to force the update of pioavail
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	/* NOTE(review): sbuf[4] assumes regcnt <= 4 -- TODO confirm */
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		/* read the buffer-error bitmap, then write-1-to-clear it */
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}

/* No txe_recover yet, if ever */

/* No decode__errors yet */
/*
 * err_decode - render the set bits of @errs as a comma-separated list of
 * names from table @msp into @msg (at most @len bytes).  Multi-bit masks
 * get a "_<bitindex>" suffix; any bits not covered by the table are
 * appended in hex as "MORE:<hex>".
 */
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (msp && msp->mask) {
		/* nonzero iff this table entry covers more than one bit */
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			/* isolate the lowest set bit of "these" */
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					/* separate the strings */
					*msg++ = ',';
					len--;
				}
				took = scnprintf(msg, len, "%s", msp->msg);
				len -= took;
				msg += took;
			}
			errs &= ~lmask;
			if (len && multi) {
				/* More than one bit this mask */
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}
	/* If some bits are left, show in hex. */
	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}

/* only called if r1 set */
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	/* minimal UD SEND-only header to the permissive LID */
	static struct qib_ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	/*
	 * Send a dummy VL15 packet to flush the launch FIFO.
	 * This will not actually be sent since the TxeBypassIbc bit is set.
	 */
	pbc = PBC_7322_VL15_SEND |
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
		(hdrwords + SIZE_OF_CRC);
	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		/*
		 * Write-combining flush: copy all but the last word,
		 * flush, write the last word separately, flush again.
		 */
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
/*
 * Translate the QIB_SDMA_SENDCTRL_OP_* bits in @op into set/clear masks
 * for the per-port SendCtrl register and apply them under sendctrl_lock.
 * DRAIN additionally blocks sends around the update; each register write
 * is followed by a scratch write to flush it to the chip.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	/* If we are draining everything, block sends first */
	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		/* SDmaCleanup is written to the chip but not kept in shadow */
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		/* re-enable sends after the drain settings took effect */
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	/* rev1 silicon needs the launch FIFO flushed after a drain */
	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}

/* Notify the SDMA state machine that hardware cleanup has finished. */
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and clear and set
	 * the
	 * MSB of the generation count to enable generation checking
	 * and load the internal generation counter.
	 */
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt |
		(1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg_port(ppd, krp_senddmatail, tail);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	/*
	 * Drain all FIFOs.
	 * The hardware doesn't require this but we do it so that verbs
	 * and user applications don't wait for link active to send stale
	 * data.
	 */
	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
	qib_7322_sdma_sendctrl(ppd,
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}

/* per-port error bits that halt/disable the SDMA engine */
#define DISABLES_SDMA ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * Feed SDMA-related per-port error bits into the SDMA state machine,
 * according to the machine's current state.
 */
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;

	errs &= QIB_E_P_SDMAERRS;

	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case
	     qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * handle per-device errors (not per-port errors)
 */
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
	char *msg;
	/*
	 * NOTE(review): iserr is initialized to 0 and never assigned in
	 * this function, so the "if (*msg && iserr)" report below can
	 * never fire — confirm whether an assignment (as in the per-port
	 * handler) was intended here.
	 */
	u64 iserr = 0;
	u64 errs;
	u64 mask;
	int log_idx;

	qib_stats.sps_errints++;
	errs = qib_read_kreg64(dd, kr_errstatus);
	if (!errs) {
		qib_devinfo(dd->pcidev, "device error interrupt, "
			 "but no error bits set!\n");
		goto done;
	}

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & QIB_E_HARDWARE) {
		*msg = '\0';
		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	} else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QIB_E_SPKTERRS) {
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_stats.sps_txerrs++;
	} else if (errs & QIB_E_INVALIDADDR)
		qib_stats.sps_txerrs++;
	else if (errs & QIB_E_ARMLAUNCH) {
		qib_stats.sps_txerrs++;
		qib_disarm_7322_senderrbufs(dd->pport);
	}
	qib_write_kreg(dd, kr_errclear, errs);

	/*
	 * The ones we mask off are handled specially below
	 * or above.  Also mask SDMADISABLED by default as it
	 * is too chatty.
	 * NOTE(review): mask only contains QIB_E_HARDWARE here; the
	 * SDMADISABLED masking described above is not visible in this
	 * expression — verify against the comment.
	 */
	mask = QIB_E_HARDWARE;
	*msg = '\0';

	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
		   qib_7322error_msgs);

	/*
	 * Getting reset is a tragedy for all ports. Mark the device
	 * _and_ the ports as "offline" in way meaningful to each.
	 */
	if (errs & QIB_E_RESET) {
		int pidx;

		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_err(dd, "%s error\n", msg);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll.  We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}

done:
	return;
}

/* timer callback: chase-disable period expired, go back to POLL */
static void reenable_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

/* disable the link and arm a timer to re-enable it (state-chase escape) */
static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
{
	ppd->cpspec->chase_end = 0;

	if (!qib_chase)
		return;

	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
}

static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	u64 tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link. If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7322_LT_STATE_CFGRCVFCFG:
	case IB_7322_LT_STATE_CFGWAITRMT:
	case IB_7322_LT_STATE_TXREVLANES:
	case IB_7322_LT_STATE_CFGENH:
		tnow = get_jiffies_64();
		if (ppd->cpspec->chase_end &&
		    time_after64(tnow, ppd->cpspec->chase_end))
			disable_chase(ppd, tnow, ibclt);
		else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;
	default:
		ppd->cpspec->chase_end = 0;
		break;
	}

	/* entering a QDR training state: force H1 and disable LOS (non-r1) */
	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
	     ibclt == IB_7322_LT_STATE_LINKUP) &&
	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
		force_h1(ppd);
		ppd->cpspec->qdr_reforce = 1;
		if (!ppd->dd->cspec->r1)
			serdes_7322_los_enable(ppd, 0);
	} else if (ppd->cpspec->qdr_reforce &&
		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
		   (ibclt == IB_7322_LT_STATE_CFGENH ||
		    ibclt == IB_7322_LT_STATE_CFGIDLE ||
		    ibclt == IB_7322_LT_STATE_LINKUP))
		force_h1(ppd);

	/* mezzanine boards need TX serdes adjustment in some QDR states */
	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
	    ppd->link_speed_enabled == QIB_IB_QDR &&
	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
	     ibclt == IB_7322_LT_STATE_CFGENH ||
	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
		adj_tx_serdes(ppd);

	if (ibclt != IB_7322_LT_STATE_LINKUP) {
		u8 ltstate = qib_7322_phys_portstate(ibcst);
		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
					  LinkTrainingState);
		if (!ppd->dd->cspec->r1 &&
		    pibclt == IB_7322_LT_STATE_LINKUP &&
		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
			serdes_7322_los_enable(ppd, 1);
		if (!ppd->cpspec->qdr_dfe_on &&
		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
			ppd->cpspec->qdr_dfe_on = 1;
			ppd->cpspec->qdr_dfe_time = 0;
			/* On link down, reenable QDR adaptation */
			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_DOWN_R1 :
					    QDR_STATIC_ADAPT_DOWN);
			printk(KERN_INFO QIB_DRV_NAME
				" IB%u:%u re-enabled QDR adaptation "
				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
		}
	}
}

static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);

/*
 * This is per-pport error handling.
 * will likely get it's own MSIx interrupt (one for each port,
 * although just a single handler).
 */
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
	char *msg;
	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
	struct qib_devdata *dd = ppd->dd;

	/* do this as soon as possible */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask)
		check_7322_rxe_status(ppd);

	errs = qib_read_kreg_port(ppd, krp_errstatus);
	if (!errs)
		qib_devinfo(dd->pcidev,
			 "Port%d error interrupt, but no error bits set!\n",
			 ppd->port);
	if (!fmask)
		errs &= ~QIB_E_P_IBSTATUSCHANGED;
	if (!errs)
		goto done;

	msg = ppd->cpspec->epmsgbuf;
	*msg = '\0';

	if (errs & ~QIB_E_P_BITSEXTANT) {
		/* unexpected bits: decode, complain, then keep going */
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
		if (!*msg)
			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
				 "no others");
		qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
				" errors 0x%016Lx set (and %s)\n",
				(errs & ~QIB_E_P_BITSEXTANT), msg);
		*msg = '\0';
	}

	if (errs & QIB_E_P_SHDR) {
		u64 symptom;

		/* determine cause, then write to clear */
		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
			   hdrchk_msgs);
		*msg = '\0';
		/* senderrbuf cleared in SPKTERRS below */
	}

	if (errs & QIB_E_P_SPKTERRS) {
		if ((errs & QIB_E_P_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time. The IB logic then complains that the packet
			 * isn't valid.
			 * We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
				   (errs & QIB_E_P_LINK_PKTERRS),
				   qib_7322p_error_msgs);
			*msg = '\0';
			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		}
		qib_disarm_7322_senderrbufs(ppd);
	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
			   qib_7322p_error_msgs);
		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		*msg = '\0';
	}

	qib_write_kreg_port(ppd, krp_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	if (errs & QIB_E_P_RPKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & QIB_E_P_SPKTERRS)
		qib_stats.sps_txerrs++;

	/* anything other than plain packet errors is worth logging */
	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);

	if (errs & QIB_E_P_SDMAERRS)
		sdma_7322_p_errors(ppd, errs);

	if (errs & QIB_E_P_IBSTATUSCHANGED) {
		u64 ibcs;
		u8 ltstate;

		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		ltstate = qib_7322_phys_portstate(ibcs);

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_serdes_issues(ppd, ibcs);
		if (!(ppd->cpspec->ibcctrl_a &
		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
			/*
			 * We got our interrupt, so init code should be
			 * happy and not try alternatives. Now squelch
			 * other "chatter" from link-negotiation (pre Init)
			 */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
		}

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
			    IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
			QIB_IB_DDR : QIB_IB_SDR;

		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
		    IB_PHYSPORTSTATE_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		else
			/*
			 * Since going into a recovery state causes the link
			 * state to go down and since recovery is transitory,
			 * it is better if we "miss" ever seeing the link
			 * training state go into recovery (i.e., ignore this
			 * transition for link state special handling purposes)
			 * without updating lastibcstat.
			 */
			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
				qib_handle_e_ibstatuschanged(ppd, ibcs);
	}
	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);
done:
	return;
}

/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
			u64 val = qib_read_kreg64(dd, kr_intgranted);
			if (val)
				qib_write_kreg(dd, kr_intgranted, val);
		}
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
* Make sure that we don't lose any important interrupts by using the chip * feature that says that writing 0 to a bit in *clear that is set in * *status will cause an interrupt to be generated again (if allowed by * the *mask value). * This is in chip-specific code because of all of the register accesses, * even though the details are similar on most chips. */ static void qib_7322_clear_freeze(struct qib_devdata *dd) { int pidx; /* disable error interrupts, to avoid confusion */ qib_write_kreg(dd, kr_errmask, 0ULL); for (pidx = 0; pidx < dd->num_pports; ++pidx) if (dd->pport[pidx].link_speed_supported) qib_write_kreg_port(dd->pport + pidx, krp_errmask, 0ULL); /* also disable interrupts; errormask is sometimes overwriten */ qib_7322_set_intr_state(dd, 0); /* clear the freeze, and be sure chip saw it */ qib_write_kreg(dd, kr_control, dd->control); qib_read_kreg32(dd, kr_scratch); /* * Force new interrupt if any hwerr, error or interrupt bits are * still set, and clear "safe" send packet errors related to freeze * and cancelling sends. Re-enable error interrupts before possible * force of re-interrupt on pending interrupts. */ qib_write_kreg(dd, kr_hwerrclear, 0ULL); qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); /* We need to purge per-port errs and reset mask, too */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { if (!dd->pport[pidx].link_speed_supported) continue; qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); } qib_7322_set_intr_state(dd, 1); } /* no error handling to speak of */ /** * qib_7322_handle_hwerrors - display hardware errors. * @dd: the qlogic_ib device * @msg: the output buffer * @msgl: the size of the output buffer * * Use same msg buffer as regular errors to avoid excessive stack * use. Most hardware errors are catastrophic, but for right now, * we'll print them and continue. 
 * We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack usage.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		/* all-ones read: register access itself failed */
		qib_dev_err(dd, "Read of hardware error status failed "
			    "(all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
			 "(cleared)\n", (unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any set that we aren't ignoring only make the
			 * complaint once, in case it's stuck or recurring,
			 * and we get here multiple times
			 * Force link down, so switch knows, and
			 * LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg, "[Memory BIST test failed, "
			"InfiniPath hardware unusable]", msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd, "Fatal Hardware Error, no longer"
			    " usable, SN %.16s\n", dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}

/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those errors bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	/* shadow what the chip actually accepted, not what we asked for */
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		/* clear any stale armlaunch error before re-enabling it */
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

}

/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers are in 32 byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))

/*
 * Distribute receive-buffer credits over the operational VLs, then tell
 * the IBC to recompute its credit state.
 */
static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 17 credits, for two max-plausible packets.
* 3) Give VL0-N the rest, with any rounding excess used for VL0 */ /* 2 VL15 packets @ 288 bytes each (including IB headers) */ totcred = NUM_RCV_BUF_UNITS(dd); cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ; totcred -= cred_vl; qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl); cred_vl = totcred / numvls; vl0extra = totcred - cred_vl * numvls; qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra); for (i = 1; i < numvls; i++) qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl); for (; i < 8; i++) /* no buffer space for other VLs */ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); /* Notify IBC that credits need to be recalculated */ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); qib_write_kreg(dd, kr_scratch, 0ULL); val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); for (i = 0; i < numvls; i++) val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i); val = qib_read_kreg_port(ppd, krp_rxcreditvl15); /* Change the number of operational VLs */ ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a & ~SYM_MASK(IBCCtrlA_0, NumVLane)) | ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane)); qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); qib_write_kreg(dd, kr_scratch, 0ULL); } /* * The code that deals with actual SerDes is in serdes_7322_init(). * Compared to the code for iba7220, it is minimal. */ static int serdes_7322_init(struct qib_pportdata *ppd); /** * qib_7322_bringup_serdes - bring up the serdes * @ppd: physical port on the qlogic_ib device */ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) { struct qib_devdata *dd = ppd->dd; u64 val, guid, ibc; unsigned long flags; int ret = 0; /* * SerDes model not in Pd, but still need to * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere * eventually. 
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (qib_compat_ddr_negotiate) {
		/* snapshot error counters so deltas can be applied later */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3usec.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/* initially come up waiting for TS1, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
				krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_SPEED_DDR |
				IBA7322_IBC_SPEED_SDR |
				IBA7322_IBC_WIDTH_AUTONEG |
				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Muliple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				 IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
			IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		/* derive the port GUID from the device's base GUID */
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* be paranoid against later code motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Hold the link state machine for mezz boards */
	if (IS_QMH(dd) || IS_QME(dd))
		qib_set_ib_7322_lstate(ppd, 0,
			    QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}

/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pci_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				/*
				 * NOTE(review): "val -= val - snap" is just
				 * "val = snap"; presumably intentional (roll
				 * back to the bring-up snapshot) — confirm.
				 */
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}

/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state),
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
* Mellanox further blinks the amber LED to indicate data packet * activity, but we have no hardware support for that, so it would * require waking up every 10-20 msecs and checking the counters * on the chip, and then turning the LED off if appropriate. That's * visible overhead, so not something we will do. */ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) { struct qib_devdata *dd = ppd->dd; u64 extctl, ledblink = 0, val; unsigned long flags; int yel, grn; /* * The diags use the LED to indicate diag info, so we leave * the external LED alone when the diags are running. */ if (dd->diag_client) return; /* Allow override of LED display for, e.g. Locating system in rack */ if (ppd->led_override) { grn = (ppd->led_override & QIB_LED_PHYS); yel = (ppd->led_override & QIB_LED_LOG); } else if (on) { val = qib_read_kreg_port(ppd, krp_ibcstatus_a); grn = qib_7322_phys_portstate(val) == IB_PHYSPORTSTATE_LINKUP; yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE; } else { grn = 0; yel = 0; } spin_lock_irqsave(&dd->cspec->gpio_lock, flags); extctl = dd->cspec->extctrl & (ppd->port == 1 ? ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK); if (grn) { extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN; /* * Counts are in chip clock (4ns) periods. * This is 1/16 sec (66.6ms) on, * 3/16 sec (187.5 ms) off, with packets rcvd. */ ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) | ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT); } if (yel) extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL; dd->cspec->extctrl = extctl; qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); if (ledblink) /* blink the LED on packet receive */ qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); } /* * Disable MSIx interrupt if enabled, call generic MSIx code * to cleanup, and clear pending MSIx interrupts. * Used for fallback to INTx, after reset, and when MSIx setup fails. 
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	/* route everything through the single (INTx) handler from now on */
	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		/* zero the count first so no new use of the vectors races us */
		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++)
			free_irq(dd->cspec->msix_entries[i].vector,
				 dd->cspec->msix_arg[i]);
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}

/*
 * Release the INTx irq (if any), then tear down MSIx.
 * Safe to call even if neither was set up.
 */
static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}

/*
 * Chip-specific cleanup: free irqs, per-device and per-port allocations,
 * mask QSFP gpio interrupts, and destroy the cached SMI address handles.
 */
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	kfree(dd->cspec->msix_arg);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		/* MOD_PRS_N bits for both ports */
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}

/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	/* per-port summary of the three "normal" SDMA interrupt causes */
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);

	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if
(istat & INT_MASK_PM(SDmaCleanupDone, 1)) qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started); } /* * Set or clear the Send buffer available interrupt enable bit. */ static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) { unsigned long flags; spin_lock_irqsave(&dd->sendctrl_lock, flags); if (needint) dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); else dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); qib_write_kreg(dd, kr_scratch, 0ULL); spin_unlock_irqrestore(&dd->sendctrl_lock, flags); } /* * Somehow got an interrupt with reserved bits set in interrupt status. * Print a message so we know it happened, then clear them. * keep mainline interrupt handler cache-friendly */ static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) { u64 kills; char msg[128]; kills = istat & ~QIB_I_BITSEXTANT; qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:" " %s\n", (unsigned long long) kills, msg); qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); } /* keep mainline interrupt handler cache-friendly */ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) { u32 gpiostatus; int handled = 0; int pidx; /* * Boards for this chip currently don't use GPIO interrupts, * so clear by writing GPIOstatus to GPIOclear, and complain * to developer. To avoid endless repeats, clear * the bits in the mask, since there is some kind of * programming error or chip problem. */ gpiostatus = qib_read_kreg32(dd, kr_gpio_status); /* * In theory, writing GPIOstatus to GPIOclear could * have a bad side-effect on some diagnostic that wanted * to poll for a status-change, but the various shadows * make that problematic at best. Diags will just suppress * all GPIO interrupts during such tests. 
 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);

	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			/* MOD_PRS_N is active-low: 0 means module present */
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = get_jiffies_64();
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}

/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR)
		handle_7322_errors(dd);
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}

/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.  Halve the timeout when traffic is light, double it (up to
 * rcv_int_timeout) when heavy, write back only when it actually changes.
 */
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}

/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	/* all-ones read typically means the device fell off the bus */
	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	/* (u32)-1 appears to be a "don't count" sentinel — verify */
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" know the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i]) {
					qib_kreceive(dd->rcd[i], NULL, &npkts);
				}
			}
			rmask <<= 1;
		}
		/* whatever remains belongs to user contexts */
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		/* not fully initted yet; just turn the interrupt back off */
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}

/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
* If we are using MSIx interrupts, we may fall back to * INTx later, if the interrupt handler doesn't get called * within 1/2 second (see verify_interrupt()). */ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) { int ret, i, msixnum; u64 redirect[6]; u64 mask; if (!dd->num_pports) return; if (clearpend) { /* * if not switching interrupt types, be sure interrupts are * disabled, and then clear anything pending at this point, * because we are starting clean. */ qib_7322_set_intr_state(dd, 0); /* clear the reset error, init error/hwerror mask */ qib_7322_init_hwerrors(dd); /* clear any interrupt bits that might be set */ qib_write_kreg(dd, kr_intclear, ~0ULL); /* make sure no pending MSIx intr, and clear diag reg */ qib_write_kreg(dd, kr_intgranted, ~0ULL); qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); } if (!dd->cspec->num_msix_entries) { /* Try to get INTx interrupt */ try_intx: if (!dd->pcidev->irq) { qib_dev_err(dd, "irq is 0, BIOS error? " "Interrupts won't work\n"); goto bail; } ret = request_irq(dd->pcidev->irq, qib_7322intr, IRQF_SHARED, QIB_DRV_NAME, dd); if (ret) { qib_dev_err(dd, "Couldn't setup INTx " "interrupt (irq=%d): %d\n", dd->pcidev->irq, ret); goto bail; } dd->cspec->irq = dd->pcidev->irq; dd->cspec->main_int_mask = ~0ULL; goto bail; } /* Try to get MSIx interrupts */ memset(redirect, 0, sizeof redirect); mask = ~0ULL; msixnum = 0; for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { irq_handler_t handler; const char *name; void *arg; u64 val; int lsb, reg, sh; if (i < ARRAY_SIZE(irq_table)) { if (irq_table[i].port) { /* skip if for a non-configured port */ if (irq_table[i].port > dd->num_pports) continue; arg = dd->pport + irq_table[i].port - 1; } else arg = dd; lsb = irq_table[i].lsb; handler = irq_table[i].handler; name = irq_table[i].name; } else { unsigned ctxt; ctxt = i - ARRAY_SIZE(irq_table); /* per krcvq context receive interrupt */ arg = dd->rcd[ctxt]; if (!arg) continue; lsb = QIB_I_RCVAVAIL_LSB + ctxt; 
handler = qib_7322pintr; name = QIB_DRV_NAME " (kctx)"; } ret = request_irq(dd->cspec->msix_entries[msixnum].vector, handler, 0, name, arg); if (ret) { /* * Shouldn't happen since the enable said we could * have as many as we are trying to setup here. */ qib_dev_err(dd, "Couldn't setup MSIx " "interrupt (vec=%d, irq=%d): %d\n", msixnum, dd->cspec->msix_entries[msixnum].vector, ret); qib_7322_nomsix(dd); goto try_intx; } dd->cspec->msix_arg[msixnum] = arg; if (lsb >= 0) { reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * SYM_LSB(IntRedirect0, vec1); mask &= ~(1ULL << lsb); redirect[reg] |= ((u64) msixnum) << sh; } val = qib_read_kreg64(dd, 2 * msixnum + 1 + (QIB_7322_MsixTable_OFFS / sizeof(u64))); msixnum++; } /* Initialize the vector mapping */ for (i = 0; i < ARRAY_SIZE(redirect); i++) qib_write_kreg(dd, kr_intredirect + i, redirect[i]); dd->cspec->main_int_mask = mask; bail:; } /** * qib_7322_boardname - fill in the board name and note features * @dd: the qlogic_ib device * * info will be based on the board revision register */ static unsigned qib_7322_boardname(struct qib_devdata *dd) { /* Will need enumeration of board-types here */ char *n; u32 boardid, namelen; unsigned features = DUAL_PORT_CAP; boardid = SYM_FIELD(dd->revision, Revision, BoardID); switch (boardid) { case 0: n = "InfiniPath_QLE7342_Emulation"; break; case 1: n = "InfiniPath_QLE7340"; dd->flags |= QIB_HAS_QSFP; features = PORT_SPD_CAP; break; case 2: n = "InfiniPath_QLE7342"; dd->flags |= QIB_HAS_QSFP; break; case 3: n = "InfiniPath_QMI7342"; break; case 4: n = "InfiniPath_Unsupported7342"; qib_dev_err(dd, "Unsupported version of QMH7342\n"); features = 0; break; case BOARD_QMH7342: n = "InfiniPath_QMH7342"; features = 0x24; break; case BOARD_QME7342: n = "InfiniPath_QME7342"; break; case 8: n = "InfiniPath_QME7362"; dd->flags |= QIB_HAS_QSFP; break; case 15: n = "InfiniPath_QLE7342_TEST"; dd->flags |= QIB_HAS_QSFP; break; default: n = 
"InfiniPath_QLE73xy_UNKNOWN"; qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); break; } dd->board_atten = 1; /* index into txdds_Xdr */ namelen = strlen(n) + 1; dd->boardname = kmalloc(namelen, GFP_KERNEL); if (!dd->boardname) qib_dev_err(dd, "Failed allocation for board name: %s\n", n); else snprintf(dd->boardname, namelen, "%s", n); snprintf(dd->boardversion, sizeof(dd->boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), dd->majrev, dd->minrev, (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode" " by module parameter\n", dd->unit); features &= PORT_SPD_CAP; } return features; } /* * This routine sleeps, so it can only be called from user context, not * from interrupt context. */ static int qib_do_7322_reset(struct qib_devdata *dd) { u64 val; u64 *msix_vecsave; int i, msix_entries, ret = 1; u16 cmdval; u8 int_line, clinesz; unsigned long flags; /* Use dev_err so it shows up in logs, etc. */ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); msix_entries = dd->cspec->num_msix_entries; /* no interrupts till re-initted */ qib_7322_set_intr_state(dd, 0); if (msix_entries) { qib_7322_nomsix(dd); /* can be up to 512 bytes, too big for stack */ msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries * sizeof(u64), GFP_KERNEL); if (!msix_vecsave) qib_dev_err(dd, "No mem to save MSIx data\n"); } else msix_vecsave = NULL; /* * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector * info that is set up by the BIOS, so we have to save and restore * it ourselves. There is some risk something could change it, * after we save it, but since we have disabled the MSIx, it * shouldn't be touched... 
*/ for (i = 0; i < msix_entries; i++) { u64 vecaddr, vecdata; vecaddr = qib_read_kreg64(dd, 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64))); vecdata = qib_read_kreg64(dd, 1 + 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64))); if (msix_vecsave) { msix_vecsave[2 * i] = vecaddr; /* save it without the masked bit set */ msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL; } } dd->pport->cpspec->ibdeltainprog = 0; dd->pport->cpspec->ibsymdelta = 0; dd->pport->cpspec->iblnkerrdelta = 0; dd->pport->cpspec->ibmalfdelta = 0; dd->int_counter = 0; /* so we check interrupts work again */ /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT * isn't set. */ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); dd->flags |= QIB_DOING_RESET; val = dd->control | QLOGIC_IB_C_RESET; writeq(val, &dd->kregbase[kr_control]); for (i = 1; i <= 5; i++) { /* * Allow MBIST, etc. to complete; longer on each retry. * We sometimes get machine checks from bus timeout if no * response, so for now, make it *really* long. */ msleep(1000 + (1 + i) * 3000); qib_pcie_reenable(dd, cmdval, int_line, clinesz); /* * Use readq directly, so we don't need to mark it as PRESENT * until we get a successful indication that all is well. */ val = readq(&dd->kregbase[kr_revision]); if (val == dd->revision) break; if (i == 5) { qib_dev_err(dd, "Failed to initialize after reset, " "unusable\n"); ret = 0; goto bail; } } dd->flags |= QIB_PRESENT; /* it's back */ if (msix_entries) { /* restore the MSIx vector address and data if saved above */ for (i = 0; i < msix_entries; i++) { dd->cspec->msix_entries[i].entry = i; if (!msix_vecsave || !msix_vecsave[2 * i]) continue; qib_write_kreg(dd, 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64)), msix_vecsave[2 * i]); qib_write_kreg(dd, 1 + 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64)), msix_vecsave[1 + 2 * i]); } } /* initialize the remaining registers. 
 */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);

	if (qib_pcie_params(dd, dd->lbus_width,
			    &dd->cspec->num_msix_entries,
			    dd->cspec->msix_entries))
		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
				"continuing anyway\n");

	qib_setup_7322_interrupt(dd, 1);

	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}

/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @tidtype: 0 for eager, 1 for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (!(dd->flags & QIB_PRESENT))
		return;
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;

		/* paranoia checks */
		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				"larger than supported\n", pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7322_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	mmiowb();
}

/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @ctxt: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
static void qib_7322_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	/* expected TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	/* eager TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}

/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7322_tidtemplate(struct qib_devdata *dd)
{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7322_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7322_TID_SZ_4K;
	/* note: any other rcvegrbufsize leaves tidtemplate unset */
	dd->tidinvalid = 0;
}

/**
 * qib_init_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kbase: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithims.
 */
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
	if (rcd->dd->cspec->r1)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;

	return 0;
}

/*
 * Locate the qib_message_header within the receive header flit stream;
 * offset comes from the RHF entry itself.
 */
static struct qib_message_header *
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = qib_hdrget_offset(rhf_addr);

	return (struct qib_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

/*
 * Configure number of contexts.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}

	if (!qib_cfgctxts) {
		/* auto-size: one context per online CPU, rounded to 6/10/max */
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
*/ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); if (dd->ctxtcnt > 10) dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); else if (dd->ctxtcnt > 6) dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); /* else configure for default 6 receive ctxts */ /* The XRC opcode is 5. */ dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); /* * RcvCtrl *must* be written here so that the * chip understands how to change rcvegrcnt below. */ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); /* kr_rcvegrcnt changes based on the number of contexts enabled */ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); if (qib_rcvhdrcnt) dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); else dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, dd->num_pports > 1 ? 1024U : 2048U); } static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) { int lsb, ret = 0; u64 maskr; /* right-justified mask */ switch (which) { case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ ret = ppd->link_width_enabled; goto done; case QIB_IB_CFG_LWID: /* Get currently active Link-width */ ret = ppd->link_width_active; goto done; case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ ret = ppd->link_speed_enabled; goto done; case QIB_IB_CFG_SPD: /* Get current Link spd */ ret = ppd->link_speed_active; goto done; case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); break; case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); break; case QIB_IB_CFG_LINKLATENCY: ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) & SYM_MASK(IBCStatusB_0, LinkRoundTripLatency); goto done; case QIB_IB_CFG_OP_VLS: ret = ppd->vls_operational; goto done; case QIB_IB_CFG_VL_HIGH_CAP: ret = 16; goto done; case QIB_IB_CFG_VL_LOW_CAP: ret = 16; goto done; 
case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	/* common path: extract the selected field from the ibcctrl_b shadow */
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}

/*
 * Below again cribbed liberally from older version. Do not lean
 * heavily on it.
 */
#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))

/*
 * qib_7322_set_ib_cfg - set an IB port configuration item on the 7322
 * @ppd: port-specific data
 * @which: QIB_IB_CFG_* selector of the item to change
 * @val: new value; encoding depends on @which
 *
 * Most cases compute an (lsb, maskr) pair and fall through to the common
 * read-modify-write of the ibcctrl_b shadow at the bottom; cases that
 * touch other registers (ibcctrl_a, pkeys, link state, ...) write those
 * directly and jump to bail.  Returns 0, or -EINVAL for unrecognized
 * selectors or out-of-range values.
 *
 * NOTE(review): most register writes here are followed by a write to
 * kr_scratch; this looks like the chip's required flush idiom (stated
 * explicitly for sendctrl elsewhere in this file) — kept as-is.
 */
static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid possible hazard
		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
		 */
		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
		maskr = IBA7322_IBC_DLIDLMC_MASK;
		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask. Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
		qib_write_kreg_port(ppd, krp_sendslid,
				    val & (val >> 16) & SendIBSLIDAssignMask);
		qib_write_kreg_port(ppd, krp_sendslidmask,
				    (val >> 16) & SendIBSLMCMask);
		break;

	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		/* convert IB value to chip register value */
		if (val == IB_WIDTH_1X)
			val = 0;
		else if (val == IB_WIDTH_4X)
			val = 1;
		else
			val = 3;	/* 1X + 4X (auto) */
		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
		break;

	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change.  Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		val <<= IBA7322_IBC_SPEED_LSB;
		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
		if (val & (val - 1)) {
			/* Multiple speeds enabled */
			val |= IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else if (val & IBA7322_IBC_SPEED_QDR)
			val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
		break;

	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/* maskr reused here as "current value"; write only on change */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  OverrunThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  PhyerrThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PKEYS: /* update pkeys */
		/* pack the four 16-bit pkeys into one 64-bit register image */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
		goto bail;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
			SYM_LSB(IBCCtrlA_0, MaxPktLen);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		/* high 16 bits of val = link command, low 16 = init command */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			ppd->cpspec->ibmalfusesnap = 1;
			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				/* snapshot error counters for later delta */
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7322_creg32_port(ppd,
							      crp_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7322_creg32_port(ppd,
						      crp_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			if (ppd->cpspec->ibmalfusesnap) {
				ppd->cpspec->ibmalfusesnap = 0;
				ppd->cpspec->ibmalfdelta +=
					read_7322_creg32_port(ppd,
							      crp_errlink) -
					ppd->cpspec->ibmalfsnap;
			}
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * stop state chase counter and timer, if running.
			 * wait for pending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			set_vls(ppd);
		}
		goto bail;

	case QIB_IB_CFG_VL_HIGH_LIMIT:
		qib_write_kreg_port(ppd, krp_highprio_limit, val);
		goto bail;

	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > 3) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PORT:
		/* val is the port number of the switch we are connected to. */
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		}
		goto bail;

	default:
		ret = -EINVAL;
		goto bail;
	}
	/* common path: read-modify-write the selected ibcctrl_b field */
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(dd, kr_scratch, 0);
bail:
	return ret;
}

/*
 * qib_7322_set_loopback - enable or disable IBC loopback on a port
 * @ppd: port-specific data
 * @what: "ibc" to enable IBC loopback, "off" to disable
 *
 * Enabling loopback also clears the heartbeat field in ibcctrl_b (so the
 * link can come up); disabling restores it.  Returns 0 or -EINVAL for an
 * unrecognized @what.
 */
static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ctrlb;

	/* only IBC loopback, may add serdes and xgxs loopbacks later */
	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
						   Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			 ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, Loopback);
		/* enable heart beat again */
		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
			    "(normal)\n", ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK <<
IBA7322_IBC_HRTBT_LSB);
		ppd->cpspec->ibcctrl_b = ctrlb | val;
		qib_write_kreg_port(ppd, krp_ibcctrl_b,
				    ppd->cpspec->ibcctrl_b);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}

/*
 * get_vl_weights - read 16 VL-arbitration (vl, weight) pairs from the chip
 * @ppd: port-specific data
 * @regno: first of 16 consecutive per-port arbitration registers
 * @vl: output array of 16 entries, filled from the register fields
 */
static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u32 val = qib_read_kreg_port(ppd, regno);

		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
			SYM_RMASK(LowPriority0_0, VirtualLane);
		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
			SYM_RMASK(LowPriority0_0, Weight);
	}
}

/*
 * set_vl_weights - write 16 VL-arbitration (vl, weight) pairs to the chip
 * @ppd: port-specific data
 * @regno: first of 16 consecutive per-port arbitration registers
 * @vl: input array of 16 entries to program
 *
 * Also turns on the VL arbiter (IBVLArbiterEn in the sendctrl shadow)
 * the first time a table is written, under sendctrl_lock.
 */
static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u64 val;

		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
			SYM_LSB(LowPriority0_0, VirtualLane)) |
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
			SYM_LSB(LowPriority0_0, Weight));
		qib_write_kreg_port(ppd, regno, val);
	}
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		unsigned long flags;

		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
}

/*
 * qib_7322_get_ib_table - read a VL arbitration table
 * @ppd: port-specific data
 * @which: QIB_IB_TBL_VL_HIGH_ARB or QIB_IB_TBL_VL_LOW_ARB
 * @t: output buffer (array of struct ib_vl_weight_elem)
 *
 * Returns 0, or -EINVAL for an unknown table selector.
 */
static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		get_vl_weights(ppd, krp_highprio_0, t);
		break;
	case QIB_IB_TBL_VL_LOW_ARB:
		get_vl_weights(ppd, krp_lowprio_0, t);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * qib_7322_set_ib_table - write a VL arbitration table
 * @ppd: port-specific data
 * @which: QIB_IB_TBL_VL_HIGH_ARB or QIB_IB_TBL_VL_LOW_ARB
 * @t: input buffer (array of struct ib_vl_weight_elem)
 *
 * Returns 0, or -EINVAL for an unknown table selector.
 */
static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	switch (which) {
	case QIB_IB_TBL_VL_HIGH_ARB:
		set_vl_weights(ppd, krp_highprio_0, t);
		break;
	case QIB_IB_TBL_VL_LOW_ARB:
		set_vl_weights(ppd, krp_lowprio_0, t);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * qib_update_7322_usrhead - update receive header-queue head for a context
 * @rcd: the receive context
 * @hd: new rcvhdrhead value (may carry the packet-interrupt count in the
 *	bits above IBA7322_HDRHEAD_PKTINT_SHIFT)
 * @updegr: nonzero to also update the eager-buffer index head
 * @egrhd: new eager index head (used only when @updegr is set)
 * @npkts: packet count used to adjust the receive interrupt timeout
 */
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to ensure
	 * that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	/*
	 * NOTE(review): rcvhdrhead is deliberately written twice here in the
	 * original code — presumably a chip quirk/flush requirement; confirm
	 * against the hardware documentation before "de-duplicating".
	 */
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
}

/*
 * qib_7322_hdrqempty - check whether a context's receive header queue is empty
 * @rcd: the receive context
 *
 * Returns nonzero when head == tail (queue empty).  Tail comes from the
 * DMA'd shadow when available, otherwise from the chip register.
 */
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

/* rcvctrl bits that live in the device-common register */
#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_TIDFLOW_ENB | \
	QIB_RCVCTRL_TIDFLOW_DIS | \
	QIB_RCVCTRL_TAILUPD_ENB | \
	QIB_RCVCTRL_TAILUPD_DIS | \
	QIB_RCVCTRL_INTRAVAIL_ENB | \
	QIB_RCVCTRL_INTRAVAIL_DIS | \
	QIB_RCVCTRL_BP_ENB | \
	QIB_RCVCTRL_BP_DIS)

/* rcvctrl bits that live in the per-port register */
#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_PKEY_DIS | \
	QIB_RCVCTRL_PKEY_ENB)

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	/* device-common shadow updates */
	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
	/* per-port shadow updates; note PKEY "enable" clears the Disable bit */
	if (op & QIB_RCVCTRL_PKEY_ENB)
		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	/* ctxt < 0 means "all contexts" */
	if (ctxt < 0) {
		mask = (1ULL << dd->ctxtcnt) - 1;
		rcd = NULL;
	} else {
		mask = (1ULL << ctxt);
		rcd = dd->rcd[ctxt];
	}
	/*
	 * NOTE(review): the CTXT_ENB paths below index dd->rcd[ctxt] without
	 * re-checking ctxt >= 0; callers that pass CTXT_ENB appear to always
	 * supply a valid context number — confirm before changing call sites.
	 */
	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
		ppd->p_rcvctrl |=
			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
		}
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
				    rcd->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
				    rcd->rcvhdrq_phys);
		rcd->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		ppd->p_rcvctrl &=
			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
	if (op & QIB_RCVCTRL_BP_ENB)
		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
	if (op & QIB_RCVCTRL_BP_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
	/*
	 * Decide which registers to write depending on the ops enabled.
	 * Special case is "flush" (no bits set at all)
	 * which needs to write both.
	 */
	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if (op == 0 || (op & RCVCTRL_PORT_MODS))
		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		/* be sure enabling write seen; hd/tl should be 0 */
		(void) qib_read_kreg32(dd, kr_scratch);
		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		unsigned f;

		/* Now that the context is disabled, clear these registers. */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
				qib_write_ureg(dd, ur_rcvflowtable + f,
					       TIDFLOW_ERRBITS, ctxt);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
					qib_write_ureg(dd, ur_rcvflowtable + f,
						       TIDFLOW_ERRBITS, i);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}

/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function where there are multiple such registers with
 * slightly different layouts.
 * The chip doesn't allow back-to-back sendctrl writes, so write
 * the scratch register after writing sendctrl.
 *
 * Which register is written depends on the operation.
 * Most operate on the common register, while
 * SEND_ENB and SEND_DIS operate on the per-port ones.
 * SEND_ENB is included in common because it can change SPCL_TRIG
 */
#define SENDCTRL_COMMON_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_AVAIL_DIS | \
	QIB_SENDCTRL_AVAIL_ENB | \
	QIB_SENDCTRL_AVAIL_BLIP | \
	QIB_SENDCTRL_DISARM | \
	QIB_SENDCTRL_DISARM_ALL | \
	QIB_SENDCTRL_SEND_ENB)

#define SENDCTRL_PORT_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_SEND_ENB | \
	QIB_SENDCTRL_SEND_DIS | \
	QIB_SENDCTRL_FLUSH)

static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* First the dd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
		if (dd->flags & QIB_USE_SPCL_TRIG)
			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
	}

	/* Then the ppd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_SEND_DIS)
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
		/*
		 * Disarm any buffers that are not yet launched,
		 * disabling updates until done.
		 */
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl,
				       tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	if (op & QIB_SENDCTRL_FLUSH) {
		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

		/*
		 * Now drain all the fifos.  The Abort bit should never be
		 * needed, so for now, at least, we don't use it.
		 */
		tmp_ppd_sendctrl |=
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
			SYM_MASK(SendCtrl_0, TxeBypassIbc);
		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	tmp_dd_sendctrl = dd->sendctrl;

	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmSendBuf));
	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);

	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		/* restore the shadow value, completing the "blip" */
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;

		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip.  Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}

#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */

/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
	struct qib_devdata *dd = ppd->dd;
	u64 ret = 0ULL;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u32 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = crp_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so still get accessed as
		 * though they were counters from this code.
		 */
		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
		[QIBPORTCNTR_PSSTART] = krp_psstart,
		[QIBPORTCNTR_PSSTAT] = krp_psstat,
		/* pseudo-counter, summed for all ports */
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			 "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg] & _PORT_CNTR_IDXMASK;

	/* handle non-counters and special cases first */
	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts (skip if mini_init) */
		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			if (!rcd || rcd->ppd != ppd)
				continue;
			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
		}
		goto done;
	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
		/*
		 * Used as part of the synthesis of port_rcv_errors
		 * in the verbs code for IBTA counters.  Not needed for 7322,
		 * because all the errors are already counted by other cntrs.
		 */
		goto done;
	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
		/* were counters in older chips, now per-port kernel regs */
		ret = qib_read_kreg_port(ppd, creg);
		goto done;
	}

	/*
	 * Only fast increment counters are 64 bits; use 32 bit reads to
	 * avoid two independent reads when on Opteron.
	 */
	if (xlator[reg] & _PORT_64BIT_FLAG)
		ret = read_7322_creg_port(ppd, creg);
	else
		ret = read_7322_creg32_port(ppd, creg);
	if (creg == crp_ibsymbolerr) {
		/* while a delta is in progress, report the snapshot value */
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= ppd->cpspec->ibsymdelta;
	} else if (creg == crp_iblinkerrrecov) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= ppd->cpspec->iblnkerrdelta;
	} else if (creg == crp_errlink)
		ret -= ppd->cpspec->ibmalfdelta;
	else if (creg == crp_iblinkdown)
		ret += ppd->cpspec->iblnkdowndelta;
done:
	return ret;
}

/*
 * Device counter names (not port-specific), one line per stat,
 * single string.  Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
static const char cntr7322names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"RxTIDFloDrop\n" /* 7322 only */
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n"
	"Ctx17EgrOvfl\n"
	;

/* register index for each line of cntr7322names, in the same order */
static const u32 cntr7322indices[] = {
	cr_lbint | _PORT_64BIT_FLAG,
	cr_lbstall | _PORT_64BIT_FLAG,
	cr_tidfull,
	cr_tidinvalid,
	cr_rxtidflowdrop,
	cr_base_egrovfl + 0,
	cr_base_egrovfl + 1,
	cr_base_egrovfl + 2,
	cr_base_egrovfl + 3,
	cr_base_egrovfl + 4,
	cr_base_egrovfl + 5,
	cr_base_egrovfl + 6,
	cr_base_egrovfl + 7,
	cr_base_egrovfl + 8,
	cr_base_egrovfl + 9,
	cr_base_egrovfl + 10,
	cr_base_egrovfl + 11,
	cr_base_egrovfl + 12,
	cr_base_egrovfl + 13,
	cr_base_egrovfl + 14,
	cr_base_egrovfl + 15,
	cr_base_egrovfl + 16,
	cr_base_egrovfl + 17,
};

/*
 * same as cntr7322names and cntr7322indices, but for port-specific counters.
 * portcntr7322indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr7322names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
	"RxVL15Drop\n"
	"RxVlErr\n"
	"XcessBufOvfl\n"
	"RxQPBadCtxt\n" /* 7322-only from here down */
	"TXBadHeader\n"
	;

static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};

/* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	/*
	 * Count name-list entries, truncating the EgrOvfl tail at the
	 * configured context count (j counts contexts once "Ctxt0EgrOvfl"
	 * is seen).
	 */
	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	/*
	 * NOTE(review): the port counter count ends up as i - 1 here while
	 * the device counter count above is i — presumably compensating for
	 * the loop structure; confirm against the name list before changing.
	 */
	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
	for (i = 0; i < dd->num_pports; ++i) {
		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
			* sizeof(u64), GFP_KERNEL);
		if (!dd->pport[i].cpspec->portcntrs)
			qib_dev_err(dd, "Failed allocation for"
				    " portcounters\n");
	}
}

/*
 * qib_read_7322cntrs - read device counter names or values
 * @dd: the qlogic_ib device
 * @pos: read offset; a pos past the data signals "done"
 * @namep: when non-NULL, receive the name string instead of values
 * @cntrp: when names not requested, receives a pointer to the value array
 *
 * Returns the number of bytes available, or 0 when everything was read.
 */
static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *) cntr7322names;
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg(dd,
							 cntr7322indices[i] &
							 _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32(dd,
							   cntr7322indices[i]);
	}
done:
	return ret;
}

/*
 * qib_read_7322portcntrs - read port counter names or values for one port
 * @dd: the qlogic_ib device
 * @pos: read offset; a pos past the data signals "done"
 * @port: port index into dd->pport
 * @namep: when non-NULL, receive the name string instead of values
 * @cntrp: when names not requested, receives a pointer to the value array
 *
 * Returns the number of bytes available, or 0 when everything was read.
 */
static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr7322names;
	} else {
		struct qib_pportdata *ppd = &dd->pport[port];
		u64 *cntr = ppd->cpspec->portcntrs;
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7322(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg_port(ppd,
					   portcntr7322indices[i] &
					   _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32_port(ppd,
					   portcntr7322indices[i]);
		}
	}
done:
	return ret;
}

/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque - contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/*
		 * If port isn't enabled or not operational ports, or
		 * diags is running (can cause memory diags to fail)
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;

		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		/* turn QDR DFE adaptation back off once its window expires */
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;

			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we were using MSIx, try to fallback to INTx.
 */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->cspec->num_msix_entries)
		return 0; /* already using INTx */

	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
		 " trying INTx interrupts\n");
	qib_7322_nomsix(dd);
	qib_enable_intx(dd->pcidev);
	qib_setup_7322_interrupt(dd, 0);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
 * check all callers.
*/ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd) { u64 val; struct qib_devdata *dd = ppd->dd; const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) | SYM_MASK(IBPCSConfig_0, xcv_treset) | SYM_MASK(IBPCSConfig_0, tx_rx_reset); val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a & ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits); qib_read_kreg32(dd, kr_scratch); qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); qib_write_kreg(dd, kr_scratch, 0ULL); qib_write_kreg(dd, kr_hwerrclear, SYM_MASK(HwErrClear, statusValidNoEopClear)); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); } /* * This code for non-IBTA-compliant IB speed negotiation is only known to * work for the SDR to DDR transition, and only between an HCA and a switch * with recent firmware. It is based on observed heuristics, rather than * actual knowledge of the non-compliant speed negotiation. * It has a number of hard-coded fields, since the hope is to rewrite this * when a spec is available on how the negoation is intended to work. 
 */
/*
 * Build and send one raw autoneg packet from a PIO send buffer.
 * hdr is a 7-dword header, data/dcnt the payload; one extra dword
 * is accounted for the ICRC.  Header checking is disabled for the
 * buffer around the send, since the packet is deliberately
 * non-compliant.
 */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	control = qib_7322_setpbc_control(ppd, len, 0, 15);
	pbc = ((u64) control << 32) | len;
	/* poll briefly for a free send buffer; give up after ~32 usec */
	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
		if (i++ > 15)
			return;
		udelay(2);
	}
	/* disable header check on this packet, since it can't be valid */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		/* boards needing a special trigger write to launch the send */
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
	/* and re-enable hdr check */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
}

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	/* one-time flag: payload tables are byteswapped in place on first use */
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
		};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
		};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	/* send twice, with a scratch read + small delay between sends */
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When link has gone down, and autoneg enabled, or autoneg has
 * failed and we give up until next time we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	/* clear speed, IBTA-1.2, and max-speed fields before re-setting */
	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
				    IBA7322_IBC_IBTA_1_2_MASK |
				    IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	/* avoid the register write when nothing changed */
	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	/* mark autoneg in progress, kick off the first "start" packets */
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	/* NOTE(review): startms is set but appears unused below — confirm */
	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
		msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		/* still in progress here means negotiation did not succeed */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	/* create (and cache) the SMI address handle on first use */
	if (!ibp->smi_ah) {
		struct ib_ah_attr attr;
		struct ib_ah *ah;

		memset(&attr, 0, sizeof attr);
		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
		attr.port_num = ppd->port;
		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
		if (IS_ERR(ah))
			ret = -EINVAL;
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	/* vendor-specific directed-route SMP carrying the IPG request */
	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
	/* exponential backoff between attempts */
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}

/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	/* keep retrying while the link is up, capped at 10 tries */
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}

/* map the chip's IBC link state field to the IBTA logical port state */
static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}

/*
 * Handle a link up/down transition for one port.  Updates the cached
 * active width/speed from the IBC status, drives the non-compliant DDR
 * autoneg state machine, and maintains the symbol/linkerr counter
 * snapshots used to hide autoneg-induced errors.
 * Returns nonzero to suppress further IB status change processing.
 */
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];

	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		/* heartbeat status bits are write-1-to-clear */
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		/* Link came up; possibly advance the DDR autoneg sequence */
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR) &&
		    ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			/* came back up at SDR mid-autoneg: send "done" pair */
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			/* autoneg succeeded: we reached DDR */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies + msecs_to_jiffies(
				QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
	}
	if (symadj) {
		/* close out a pending error-counter delta accumulation */
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta +=
				read_7322_creg32_port(ppd, crp_ibsymbolerr) -
				ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta +=
				read_7322_creg32_port(ppd, crp_iblinkerrrecov) -
				ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		/* start a new delta accumulation across the link bounce */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
			crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
			crp_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function.  We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}

/* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	int prev_wen;
	u32 mask;

	mask = 1 << QIB_EEPROM_WEN_NUM;
	/* WEN pin is active-low: invert the GPIO input to get enable state */
	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);

	return prev_wen & 1;
}

/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	/* counts/sizes pack the 2K value in the low half, 4K in the high */
	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocate
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	/* number of pioavail registers: 32 buffers tracked per u64 */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}

/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

/*
 * Diag observer hook for SendCtrl: keeps the software shadow of the
 * port SendCtrl register coherent when diags read or write it.
 * Returns the access width in bytes (4 or 8).
 */
static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed.  We need to hunt for the ppd that corresponds
	 * to the offset we got.  And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read.  The judgement call is whether from
		 * reg or shadow.  First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0Ull);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}

static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};

static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");

/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);

/*
 * Deferred handler for a QSFP module insertion event: wait out the
 * module power-up time, refresh the cached QSFP EEPROM data, then
 * program the serdes LE2 setting and TxDDS values accordingly.
 */
static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	u64 pwrup;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);

	/*
	 * Some QSFP's not only do not respond until the full power-up
	 * time, but may behave badly if we try.  So hold off responding
	 * to insertion.
	 */
	while (1) {
		u64 now = get_jiffies_64();

		if (time_after64(now, pwrup))
			break;
		msleep(20);
	}
	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
	/*
	 * Need to change LE2 back to defaults if we couldn't
	 * read the cable type (to handle cable swaps), so do this
	 * even on failure to read cable information.  We don't
	 * get here for QME, so IS_QME check not needed here.
	 */
	if (!ret && !ppd->dd->cspec->r1) {
		if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
			le2 = LE2_QME;
		else if (qd->cache.atten[1] >= qib_long_atten &&
			 QSFP_IS_CU(qd->cache.tech))
			le2 = LE2_5m;
		else
			le2 = LE2_DEFAULT;
	} else
		le2 = LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
	init_txdds_table(ppd, 0);
}

/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	/* per-port module-present GPIO bit */
	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}

/*
 * called at device initialization time, and also if the txselect
 * module parameter is changed.  This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character.  A newline terminates.
 * The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	/* parse space-separated "unit,port=val[,h1]" tuples */
	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitted */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		/* apply the tuple to matching port(s) of this unit */
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port ||
				!ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set. */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}

/* handle the txselect parameter changing */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	char *n;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
		       "too long\n");
		return -ENOSPC;
	}
	/* validate the leading default index before accepting the string */
	val = simple_strtoul(str, &n, 0);
	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		printk(KERN_INFO QIB_DRV_NAME
		       "txselect_values must start with a number < %d\n",
		       TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return -EINVAL;
	}
	strcpy(txselect_list, str);

	/* re-apply to every 7322 device currently registered */
	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}

/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	/* verify the DMA address the chip will use for pioavail updates */
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd, "Catastrophic software error, "
			    "SendPIOAvailAddr written as %lx, "
			    "read back as %llx\n",
			    (unsigned long) dd->pioavailregs_phys,
			    (unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	/* fetch-priority must be disabled while the per-port values change */
	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}

/* per IB port errors.  */
/* bit masks for the per-IB-port portions of the SendCtrl/RcvCtrl/Err regs */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
	MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))

/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
			    SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits.  If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}

/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		/* 32 map entries, packed 6 per register, 5 bits each */
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Setup up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
*/ for (i = 0; i < dd->first_user_ctxt; i++) { dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); } /* * Initialize as (disabled) rcvflow tables. Application code * will setup each flow as it uses the flow. * Doesn't clear any of the error bits that might be set. */ val = TIDFLOW_ERRBITS; /* these are W1C */ for (i = 0; i < dd->cfgctxts; i++) { int flow; for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); } /* * dual cards init to dual port recovery, single port cards to * the one port. Dual port cards may later adjust to 1 port, * and then back to dual port if both ports are connected * */ if (dd->num_pports) setup_7322_link_recovery(dd->pport, dd->num_pports > 1); } static int qib_init_7322_variables(struct qib_devdata *dd) { struct qib_pportdata *ppd; unsigned features, pidx, sbufcnt; int ret, mtu; u32 sbufs, updthresh; /* pport structs are contiguous, allocated after devdata */ ppd = (struct qib_pportdata *)(dd + 1); dd->pport = ppd; ppd[0].dd = dd; ppd[1].dd = dd; dd->cspec = (struct qib_chip_specific *)(ppd + 2); ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); ppd[1].cpspec = &ppd[0].cpspec[1]; ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ spin_lock_init(&dd->cspec->rcvmod_lock); spin_lock_init(&dd->cspec->gpio_lock); /* we haven't yet set QIB_PRESENT, so use read directly */ dd->revision = readq(&dd->kregbase[kr_revision]); if ((dd->revision & 0xffffffffU) == 0xffffffffU) { qib_dev_err(dd, "Revision register read failure, " "giving up initialization\n"); ret = -ENODEV; goto bail; } dd->flags |= QIB_PRESENT; /* now register routines work */ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); dd->cspec->r1 = dd->minrev == 1; get_7322_chip_params(dd); features = 
qib_7322_boardname(dd); /* now that piobcnt2k and 4k set, we can allocate these */ sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS + BITS_PER_LONG - 1; sbufcnt /= BITS_PER_LONG; dd->cspec->sendchkenable = kmalloc(sbufcnt * sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); dd->cspec->sendgrhchk = kmalloc(sbufcnt * sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); dd->cspec->sendibchk = kmalloc(sbufcnt * sizeof(*dd->cspec->sendibchk), GFP_KERNEL); if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || !dd->cspec->sendibchk) { qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n"); ret = -ENOMEM; goto bail; } ppd = dd->pport; /* * GPIO bits for TWSI data and clock, * used for serial EEPROM. */ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP | QIB_HAS_THRESH_UPDATE | (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0); dd->flags |= qib_special_trigger ? QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; /* * Setup initial values. These may change when PAT is enabled, but * we need these to do initial chip register accesses. 
*/ qib_7322_set_baseaddrs(dd); mtu = ib_mtu_enum_to_int(qib_ibmtu); if (mtu == -1) mtu = QIB_DEFAULT_MTU; dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; /* all hwerrors become interrupts, unless special purposed */ dd->cspec->hwerrmask = ~0ULL; /* link_recovery setup causes these errors, so ignore them, * other than clearing them when they occur */ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) | SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) | HWE_MASK(LATriggered)); for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { struct qib_chippport_specific *cp = ppd->cpspec; ppd->link_speed_supported = features & PORT_SPD_CAP; features >>= PORT_SPD_CAP_SHIFT; if (!ppd->link_speed_supported) { /* single port mode (7340, or configured) */ dd->skip_kctxt_mask |= 1 << pidx; if (pidx == 0) { /* Make sure port is disabled. */ qib_write_kreg_port(ppd, krp_rcvctrl, 0); qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); ppd[0] = ppd[1]; dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) | SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)); dd->cspec->int_enable_mask &= ~( SYM_MASK(IntMask, SDmaCleanupDoneMask_0) | SYM_MASK(IntMask, SDmaIdleIntMask_0) | SYM_MASK(IntMask, SDmaProgressIntMask_0) | SYM_MASK(IntMask, SDmaIntMask_0) | SYM_MASK(IntMask, ErrIntMask_0) | SYM_MASK(IntMask, SendDoneIntMask_0)); } else { /* Make sure port is disabled. 
*/ qib_write_kreg_port(ppd, krp_rcvctrl, 0); qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) | SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)); dd->cspec->int_enable_mask &= ~( SYM_MASK(IntMask, SDmaCleanupDoneMask_1) | SYM_MASK(IntMask, SDmaIdleIntMask_1) | SYM_MASK(IntMask, SDmaProgressIntMask_1) | SYM_MASK(IntMask, SDmaIntMask_1) | SYM_MASK(IntMask, ErrIntMask_1) | SYM_MASK(IntMask, SendDoneIntMask_1)); } continue; } dd->num_pports++; qib_init_pportdata(ppd, dd, pidx, dd->num_pports); ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; ppd->link_width_enabled = IB_WIDTH_4X; ppd->link_speed_enabled = ppd->link_speed_supported; /* * Set the initial values to reasonable default, will be set * for real when link is up. */ ppd->link_width_active = IB_WIDTH_4X; ppd->link_speed_active = QIB_IB_SDR; ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS]; switch (qib_num_cfg_vls) { case 1: ppd->vls_supported = IB_VL_VL0; break; case 2: ppd->vls_supported = IB_VL_VL0_1; break; default: qib_devinfo(dd->pcidev, "Invalid num_vls %u, using 4 VLs\n", qib_num_cfg_vls); qib_num_cfg_vls = 4; /* fall through */ case 4: ppd->vls_supported = IB_VL_VL0_3; break; case 8: if (mtu <= 2048) ppd->vls_supported = IB_VL_VL0_7; else { qib_devinfo(dd->pcidev, "Invalid num_vls %u for MTU %d " ", using 4 VLs\n", qib_num_cfg_vls, mtu); ppd->vls_supported = IB_VL_VL0_3; qib_num_cfg_vls = 4; } break; } ppd->vls_operational = ppd->vls_supported; init_waitqueue_head(&cp->autoneg_wait); INIT_DELAYED_WORK(&cp->autoneg_work, autoneg_7322_work); if (ppd->dd->cspec->r1) INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work); /* * For Mez and similar cards, no qsfp info, so do * the "cable info" setup here. Can be overridden * in adapter-specific routines. */ if (!(dd->flags & QIB_HAS_QSFP)) { if (!IS_QMH(dd) && !IS_QME(dd)) qib_devinfo(dd->pcidev, "IB%u:%u: " "Unknown mezzanine card type\n", dd->unit, ppd->port); cp->h1_val = IS_QMH(dd) ? 
H1_FORCE_QMH : H1_FORCE_QME; /* * Choose center value as default tx serdes setting * until changed through module parameter. */ ppd->cpspec->no_eep = IS_QMH(dd) ? TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; } else cp->h1_val = H1_FORCE_VAL; /* Avoid writes to chip for mini_init */ if (!qib_mini_init) write_7322_init_portregs(ppd); init_timer(&cp->chase_timer); cp->chase_timer.function = reenable_chase; cp->chase_timer.data = (unsigned long)ppd; ppd++; } dd->rcvhdrentsize = qib_rcvhdrentsize ? qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE; dd->rcvhdrsize = qib_rcvhdrsize ? qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE; dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); /* we always allocate at least 2048 bytes for eager buffers */ dd->rcvegrbufsize = max(mtu, 2048); qib_7322_tidtemplate(dd); /* * We can request a receive interrupt for 1 or * more packets from current offset. */ dd->rhdrhead_intr_off = (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT; /* setup the stats timer; the add_timer is done at end of init */ init_timer(&dd->stats_timer); dd->stats_timer.function = qib_get_7322_faststats; dd->stats_timer.data = (unsigned long) dd; dd->ureg_align = 0x10000; /* 64KB alignment */ dd->piosize2kmax_dwords = dd->piosize2k >> 2; qib_7322_config_ctxts(dd); qib_set_ctxtcnt(dd); if (qib_wc_pat) { resource_size_t vl15off; /* * We do not set WC on the VL15 buffers to avoid * a rare problem with unaligned writes from * interrupt-flushed store buffers, so we need * to map those separately here. We can't solve * this for the rarely used mtrr case. 
*/ ret = init_chip_wc_pat(dd, 0); if (ret) goto bail; /* vl15 buffers start just after the 4k buffers */ vl15off = dd->physaddr + (dd->piobufbase >> 32) + dd->piobcnt4k * dd->align4k; dd->piovl15base = ioremap_nocache(vl15off, NUM_VL15_BUFS * dd->align4k); if (!dd->piovl15base) goto bail; } qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ ret = 0; if (qib_mini_init) goto bail; if (!dd->num_pports) { qib_dev_err(dd, "No ports enabled, giving up initialization\n"); goto bail; /* no error, so can still figure out why err */ } write_7322_initregs(dd); ret = qib_create_ctxts(dd); init_7322_cntrnames(dd); updthresh = 8U; /* update threshold */ /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA. * reserve the update threshold amount for other kernel use, such * as sending SMI, MAD, and ACKs, or 3, whichever is greater, * unless we aren't enabling SDMA, in which case we want to use * all the 4k bufs for the kernel. * if this was less than the update threshold, we could wait * a long time for an update. Coded this way because we * sometimes change the update threshold for various reasons, * and we want this to remain robust. */ if (dd->flags & QIB_HAS_SEND_DMA) { dd->cspec->sdmabufcnt = dd->piobcnt4k; sbufs = updthresh > 3 ? updthresh : 3; } else { dd->cspec->sdmabufcnt = 0; sbufs = dd->piobcnt4k; } dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - dd->cspec->sdmabufcnt; dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; /* * If we have 16 user contexts, we will have 7 sbufs * per context, so reduce the update threshold to match. We * want to update before we actually run out, at low pbufs/ctxt * so give ourselves some margin. 
 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	/* even on failure, leave a sane context count for later code */
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}

/*
 * Choose the PIO send-buffer range for this packet and allocate one
 * buffer from it.  VL15 packets use the dedicated per-port buffer
 * placed just past the 2k and 4k buffer pools; other packets start
 * in the 2k pool when the payload fits, else at the 4k pool.
 */
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;

	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
	if (pbc & PBC_7322_VL15_SEND) {
		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
		last = first;
	} else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		last = dd->cspec->lastbuf_for_pio;
	}
	return qib_getsendbuf_range(dd, pbufnum, first, last);
}

/* Program the port's counter-sampling interval and start point. */
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}

/*
 * Per-state operation flags for the generic SDMA state machine;
 * installed via sdma_state.set_state_action in
 * qib_7322_sdma_init_early() below.
 */
static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};

/* Hook the 7322-specific action table into the generic SDMA state. */
static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}

/*
 * Program the per-port SDMA registers (descriptor queue base/length,
 * head address, idle count) and carve out this port's share of the
 * SDMA-capable send buffers, reporting it to the chip via the three
 * SendDmaBufMask registers (one bit per buffer).
 */
static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned lastbuf, erstbuf;
	u64 senddmabufmask[3] = { 0 };
	int n, ret = 0;

	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

	/* n buffers per port; the SDMA pool divides evenly */
	if (dd->num_pports)
		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
	else
		n = dd->cspec->sdmabufcnt; /* failsafe for init */
	/*
	 * The SDMA pool occupies the top of the buffer space.  A single
	 * port (or port 2 of a dual) starts n buffers from the top;
	 * port 1 of a dual starts a full pool down, taking the lower half.
	 */
	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
		((dd->num_pports == 1 || ppd->port == 2) ?
		 n : dd->cspec->sdmabufmask);
	lastbuf = erstbuf + n;

	ppd->sdma_state.first_sendbuf = erstbuf;
	ppd->sdma_state.last_sendbuf = lastbuf;
	for (; erstbuf < lastbuf; ++erstbuf) {
		unsigned word = erstbuf / BITS_PER_LONG;
		unsigned bit = erstbuf & (BITS_PER_LONG - 1);

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
	return ret;
}

/* sdma_lock must be held */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	/* prefer the DMA'd shadow copy of the head only while SDMA is
	 * running and the timeout feature is enabled */
	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
		(u16) qib_read_kreg_port(ppd, krp_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	/* sanity-check hwhead against the software view of the ring */
	if (swhead < swtail)
		/* not wrapped; '&' on 0/1 comparison results == '&&' here */
		sane = (hwhead >= swhead) & (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* proceed as if no progress */
		hwhead = swhead;
	}

	return hwhead;
}

/* Nonzero while the engine is draining, halting, or not yet idle. */
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
* The delay affects the next packet and the amount of the delay is * based on the length of the this packet. */ static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen, u8 srate, u8 vl) { u8 snd_mult = ppd->delay_mult; u8 rcv_mult = ib_rate_to_delay[srate]; u32 ret; ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0; /* Indicate VL15, else set the VL in the control word */ if (vl == 15) ret |= PBC_7322_VL15_SEND_CTRL; else ret |= vl << PBC_VL_NUM_LSB; ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB; return ret; } /* * Enable the per-port VL15 send buffers for use. * They follow the rest of the buffers, without a config parameter. * This was in initregs, but that is done before the shadow * is set up, and this has to be done after the shadow is * set up. */ static void qib_7322_initvl15_bufs(struct qib_devdata *dd) { unsigned vl15bufs; vl15bufs = dd->piobcnt2k + dd->piobcnt4k; qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, TXCHK_CHG_TYPE_KERN, NULL); } static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd) { if (rcd->ctxt < NUM_IB_PORTS) { if (rcd->dd->num_pports > 1) { rcd->rcvegrcnt = KCTXT0_EGRCNT / 2; rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; } else { rcd->rcvegrcnt = KCTXT0_EGRCNT; rcd->rcvegr_tid_base = 0; } } else { rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; rcd->rcvegr_tid_base = KCTXT0_EGRCNT + (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; } } #define QTXSLEEPS 5000 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, u32 len, u32 which, struct qib_ctxtdata *rcd) { int i; const int last = start + len - 1; const int lastr = last / BITS_PER_LONG; u32 sleeps = 0; int wait = rcd != NULL; unsigned long flags; while (wait) { unsigned long shadow; int cstart, previ = -1; /* * when flipping from kernel to user, we can't change * the checking type if the buffer is allocated to the * driver. It's OK the other direction, because it's * from close, and we have just disarm'ed all the * buffers. 
All the kernel to kernel changes are also * OK. */ for (cstart = start; cstart <= last; cstart++) { i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) / BITS_PER_LONG; if (i != previ) { shadow = (unsigned long) le64_to_cpu(dd->pioavailregs_dma[i]); previ = i; } if (test_bit(((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) % BITS_PER_LONG, &shadow)) break; } if (cstart > last) break; if (sleeps == QTXSLEEPS) break; /* make sure we see an updated copy next time around */ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); sleeps++; msleep(20); } switch (which) { case TXCHK_CHG_TYPE_DIS1: /* * disable checking on a range; used by diags; just * one buffer, but still written generically */ for (i = start; i <= last; i++) clear_bit(i, dd->cspec->sendchkenable); break; case TXCHK_CHG_TYPE_ENAB1: /* * (re)enable checking on a range; used by diags; just * one buffer, but still written generically; read * scratch to be sure buffer actually triggered, not * just flushed from processor. */ qib_read_kreg32(dd, kr_scratch); for (i = start; i <= last; i++) set_bit(i, dd->cspec->sendchkenable); break; case TXCHK_CHG_TYPE_KERN: /* usable by kernel */ for (i = start; i <= last; i++) { set_bit(i, dd->cspec->sendibchk); clear_bit(i, dd->cspec->sendgrhchk); } spin_lock_irqsave(&dd->uctxt_lock, flags); /* see if we need to raise avail update threshold */ for (i = dd->first_user_ctxt; dd->cspec->updthresh != dd->cspec->updthresh_dflt && i < dd->cfgctxts; i++) if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) < dd->cspec->updthresh_dflt) break; spin_unlock_irqrestore(&dd->uctxt_lock, flags); if (i == dd->cfgctxts) { spin_lock_irqsave(&dd->sendctrl_lock, flags); dd->cspec->updthresh = dd->cspec->updthresh_dflt; dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); dd->sendctrl |= (dd->cspec->updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) << SYM_LSB(SendCtrl, AvailUpdThld); spin_unlock_irqrestore(&dd->sendctrl_lock, flags); 
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); } break; case TXCHK_CHG_TYPE_USER: /* for user process */ for (i = start; i <= last; i++) { clear_bit(i, dd->cspec->sendibchk); set_bit(i, dd->cspec->sendgrhchk); } spin_lock_irqsave(&dd->sendctrl_lock, flags); if (rcd && rcd->subctxt_cnt && ((rcd->piocnt / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { dd->cspec->updthresh = (rcd->piocnt / rcd->subctxt_cnt) - 1; dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); dd->sendctrl |= (dd->cspec->updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) << SYM_LSB(SendCtrl, AvailUpdThld); spin_unlock_irqrestore(&dd->sendctrl_lock, flags); sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); } else spin_unlock_irqrestore(&dd->sendctrl_lock, flags); break; default: break; } for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i) qib_write_kreg(dd, kr_sendcheckmask + i, dd->cspec->sendchkenable[i]); for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) { qib_write_kreg(dd, kr_sendgrhcheckmask + i, dd->cspec->sendgrhchk[i]); qib_write_kreg(dd, kr_sendibpktmask + i, dd->cspec->sendibchk[i]); } /* * Be sure whatever we did was seen by the chip and acted upon, * before we return. Mostly important for which >= 2. */ qib_read_kreg32(dd, kr_scratch); } /* useful for trigger analyzers, etc. */ static void writescratch(struct qib_devdata *dd, u32 val) { qib_write_kreg(dd, kr_scratch, val); } /* Dummy for now, use chip regs soon */ static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) { return -ENXIO; } /** * qib_init_iba7322_funcs - set up the chip-specific function pointers * @dev: the pci_dev for qlogic_ib device * @ent: pci_device_id struct for this dev * * Also allocates, inits, and returns the devdata struct for this * device instance * * This is global, and is called directly at init to set up the * chip-specific function pointers for later use. 
*/ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, const struct pci_device_id *ent) { struct qib_devdata *dd; int ret, i; u32 tabsize, actual_cnt = 0; dd = qib_alloc_devdata(pdev, NUM_IB_PORTS * sizeof(struct qib_pportdata) + sizeof(struct qib_chip_specific) + NUM_IB_PORTS * sizeof(struct qib_chippport_specific)); if (IS_ERR(dd)) goto bail; dd->f_bringup_serdes = qib_7322_bringup_serdes; dd->f_cleanup = qib_setup_7322_cleanup; dd->f_clear_tids = qib_7322_clear_tids; dd->f_free_irq = qib_7322_free_irq; dd->f_get_base_info = qib_7322_get_base_info; dd->f_get_msgheader = qib_7322_get_msgheader; dd->f_getsendbuf = qib_7322_getsendbuf; dd->f_gpio_mod = gpio_7322_mod; dd->f_eeprom_wen = qib_7322_eeprom_wen; dd->f_hdrqempty = qib_7322_hdrqempty; dd->f_ib_updown = qib_7322_ib_updown; dd->f_init_ctxt = qib_7322_init_ctxt; dd->f_initvl15_bufs = qib_7322_initvl15_bufs; dd->f_intr_fallback = qib_7322_intr_fallback; dd->f_late_initreg = qib_late_7322_initreg; dd->f_setpbc_control = qib_7322_setpbc_control; dd->f_portcntr = qib_portcntr_7322; dd->f_put_tid = qib_7322_put_tid; dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; dd->f_rcvctrl = rcvctrl_7322_mod; dd->f_read_cntrs = qib_read_7322cntrs; dd->f_read_portcntrs = qib_read_7322portcntrs; dd->f_reset = qib_do_7322_reset; dd->f_init_sdma_regs = init_sdma_7322_regs; dd->f_sdma_busy = qib_sdma_7322_busy; dd->f_sdma_gethead = qib_sdma_7322_gethead; dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; dd->f_sdma_update_tail = qib_sdma_update_7322_tail; dd->f_sendctrl = sendctrl_7322_mod; dd->f_set_armlaunch = qib_set_7322_armlaunch; dd->f_set_cntr_sample = qib_set_cntr_7322_sample; dd->f_iblink_state = qib_7322_iblink_state; dd->f_ibphys_portstate = qib_7322_phys_portstate; dd->f_get_ib_cfg = qib_7322_get_ib_cfg; dd->f_set_ib_cfg = qib_7322_set_ib_cfg; dd->f_set_ib_loopback = qib_7322_set_loopback; dd->f_get_ib_table = qib_7322_get_ib_table; dd->f_set_ib_table = 
qib_7322_set_ib_table; dd->f_set_intr_state = qib_7322_set_intr_state; dd->f_setextled = qib_setup_7322_setextled; dd->f_txchk_change = qib_7322_txchk_change; dd->f_update_usrhead = qib_update_7322_usrhead; dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; dd->f_xgxs_reset = qib_7322_mini_pcs_reset; dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; dd->f_sdma_init_early = qib_7322_sdma_init_early; dd->f_writescratch = writescratch; dd->f_tempsense_rd = qib_7322_tempsense_rd; /* * Do remaining PCIe setup and save PCIe values in dd. * Any error printing is already done by the init code. * On return, we have the chip mapped, but chip registers * are not set up until start of qib_init_7322_variables. */ ret = qib_pcie_ddinit(dd, pdev, ent); if (ret < 0) goto bail_free; /* initialize chip-specific variables */ ret = qib_init_7322_variables(dd); if (ret) goto bail_cleanup; if (qib_mini_init || !dd->num_pports) goto bail; /* * Determine number of vectors we want; depends on port count * and number of configured kernel receive queues actually used. * Should also depend on whether sdma is enabled or not, but * that's such a rare testing case it's not worth worrying about. 
*/ tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); for (i = 0; i < tabsize; i++) if ((i < ARRAY_SIZE(irq_table) && irq_table[i].port <= dd->num_pports) || (i >= ARRAY_SIZE(irq_table) && dd->rcd[i - ARRAY_SIZE(irq_table)])) actual_cnt++; tabsize = actual_cnt; dd->cspec->msix_entries = kmalloc(tabsize * sizeof(struct msix_entry), GFP_KERNEL); dd->cspec->msix_arg = kmalloc(tabsize * sizeof(void *), GFP_KERNEL); if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) { qib_dev_err(dd, "No memory for MSIx table\n"); tabsize = 0; } for (i = 0; i < tabsize; i++) dd->cspec->msix_entries[i].entry = i; if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; " "continuing anyway\n"); /* may be less than we wanted, if not enough available */ dd->cspec->num_msix_entries = tabsize; /* setup interrupt handler */ qib_setup_7322_interrupt(dd, 1); /* clear diagctrl register, in case diags were running and crashed */ qib_write_kreg(dd, kr_hwdiagctrl, 0); goto bail; bail_cleanup: qib_pcie_ddcleanup(dd); bail_free: qib_free_devdata(dd); dd = ERR_PTR(ret); bail: return dd; } /* * Set the table entry at the specified index from the table specifed. * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR. * 'idx' below addresses the correct entry, while its 4 LSBs select the * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table. */ #define DDS_ENT_AMP_LSB 14 #define DDS_ENT_MAIN_LSB 9 #define DDS_ENT_POST_LSB 5 #define DDS_ENT_PRE_XTRA_LSB 3 #define DDS_ENT_PRE_LSB 0 /* * Set one entry in the TxDDS table for spec'd port * ridx picks one of the entries, while tp points * to the appropriate table entry. 
*/ static void set_txdds(struct qib_pportdata *ppd, int ridx, const struct txdds_ent *tp) { struct qib_devdata *dd = ppd->dd; u32 pack_ent; int regidx; /* Get correct offset in chip-space, and in source table */ regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx; /* * We do not use qib_write_kreg_port() because it was intended * only for registers in the lower "port specific" pages. * So do index calculation by hand. */ if (ppd->hw_pidx) regidx += (dd->palign / sizeof(u64)); pack_ent = tp->amp << DDS_ENT_AMP_LSB; pack_ent |= tp->main << DDS_ENT_MAIN_LSB; pack_ent |= tp->pre << DDS_ENT_PRE_LSB; pack_ent |= tp->post << DDS_ENT_POST_LSB; qib_write_kreg(dd, regidx, pack_ent); /* Prevent back-to-back writes by hitting scratch */ qib_write_kreg(ppd->dd, kr_scratch, 0); } static const struct vendor_txdds_ent vendor_txdds[] = { { /* Amphenol 1m 30awg NoEq */ { 0x41, 0x50, 0x48 }, "584470002 ", { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 }, }, { /* Amphenol 3m 28awg NoEq */ { 0x41, 0x50, 0x48 }, "584470004 ", { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 }, }, { /* Finisar 3m OM2 Optical */ { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL", { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 }, }, { /* Finisar 30m OM2 Optical */ { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL", { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 }, }, { /* Finisar Default OM2 Optical */ { 0x00, 0x90, 0x65 }, NULL, { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 }, }, { /* Gore 1m 30awg NoEq */ { 0x00, 0x21, 0x77 }, "QSN3300-1 ", { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 }, }, { /* Gore 2m 30awg NoEq */ { 0x00, 0x21, 0x77 }, "QSN3300-2 ", { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 }, }, { /* Gore 1m 28awg NoEq */ { 0x00, 0x21, 0x77 }, "QSN3800-1 ", { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 }, }, { /* Gore 3m 28awg NoEq */ { 0x00, 0x21, 0x77 }, "QSN3800-3 ", { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 }, }, { /* Gore 5m 24awg Eq */ { 0x00, 0x21, 0x77 }, "QSN7000-5 ", { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 
0, 1, 3, 15 }, }, { /* Gore 7m 24awg Eq */ { 0x00, 0x21, 0x77 }, "QSN7000-7 ", { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 }, }, { /* Gore 5m 26awg Eq */ { 0x00, 0x21, 0x77 }, "QSN7600-5 ", { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 }, }, { /* Gore 7m 26awg Eq */ { 0x00, 0x21, 0x77 }, "QSN7600-7 ", { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 }, }, { /* Intersil 12m 24awg Active */ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224", { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 }, }, { /* Intersil 10m 28awg Active */ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028", { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 }, }, { /* Intersil 7m 30awg Active */ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730", { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 }, }, { /* Intersil 5m 32awg Active */ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532", { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 }, }, { /* Intersil Default Active */ { 0x00, 0x30, 0xB4 }, NULL, { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 }, }, { /* Luxtera 20m Active Optical */ { 0x00, 0x25, 0x63 }, NULL, { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 }, }, { /* Molex 1M Cu loopback */ { 0x00, 0x09, 0x3A }, "74763-0025 ", { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, }, { /* Molex 2m 28awg NoEq */ { 0x00, 0x09, 0x3A }, "74757-2201 ", { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 }, }, }; static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = { /* amp, pre, main, post */ { 2, 2, 15, 6 }, /* Loopback */ { 0, 0, 0, 1 }, /* 2 dB */ { 0, 0, 0, 2 }, /* 3 dB */ { 0, 0, 0, 3 }, /* 4 dB */ { 0, 0, 0, 4 }, /* 5 dB */ { 0, 0, 0, 5 }, /* 6 dB */ { 0, 0, 0, 6 }, /* 7 dB */ { 0, 0, 0, 7 }, /* 8 dB */ { 0, 0, 0, 8 }, /* 9 dB */ { 0, 0, 0, 9 }, /* 10 dB */ { 0, 0, 0, 10 }, /* 11 dB */ { 0, 0, 0, 11 }, /* 12 dB */ { 0, 0, 0, 12 }, /* 13 dB */ { 0, 0, 0, 13 }, /* 14 dB */ { 0, 0, 0, 14 }, /* 15 dB */ { 0, 0, 0, 15 }, /* 16 dB */ }; static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { /* amp, pre, main, post */ { 2, 2, 15, 6 }, /* Loopback */ { 0, 0, 
0, 8 }, /* 2 dB */ { 0, 0, 0, 8 }, /* 3 dB */ { 0, 0, 0, 9 }, /* 4 dB */ { 0, 0, 0, 9 }, /* 5 dB */ { 0, 0, 0, 10 }, /* 6 dB */ { 0, 0, 0, 10 }, /* 7 dB */ { 0, 0, 0, 11 }, /* 8 dB */ { 0, 0, 0, 11 }, /* 9 dB */ { 0, 0, 0, 12 }, /* 10 dB */ { 0, 0, 0, 12 }, /* 11 dB */ { 0, 0, 0, 13 }, /* 12 dB */ { 0, 0, 0, 13 }, /* 13 dB */ { 0, 0, 0, 14 }, /* 14 dB */ { 0, 0, 0, 14 }, /* 15 dB */ { 0, 0, 0, 15 }, /* 16 dB */ }; static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { /* amp, pre, main, post */ { 2, 2, 15, 6 }, /* Loopback */ { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ { 0, 1, 0, 11 }, /* 4 dB */ { 0, 1, 0, 13 }, /* 5 dB */ { 0, 1, 0, 15 }, /* 6 dB */ { 0, 1, 3, 15 }, /* 7 dB */ { 0, 1, 7, 15 }, /* 8 dB */ { 0, 1, 7, 15 }, /* 9 dB */ { 0, 1, 8, 15 }, /* 10 dB */ { 0, 1, 9, 15 }, /* 11 dB */ { 0, 1, 10, 15 }, /* 12 dB */ { 0, 2, 6, 15 }, /* 13 dB */ { 0, 2, 7, 15 }, /* 14 dB */ { 0, 2, 8, 15 }, /* 15 dB */ { 0, 2, 9, 15 }, /* 16 dB */ }; /* * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. * These are mostly used for mez cards going through connectors * and backplane traces, but can be used to add other "unusual" * table values as well. 
*/ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { /* amp, pre, main, post */ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 3 }, /* QMH7342 backplane settings */ { 0, 0, 0, 4 }, /* QMH7342 backplane settings */ }; static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { /* amp, pre, main, post */ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 9 }, /* QMH7342 backplane settings */ { 0, 0, 0, 10 }, /* QMH7342 backplane settings */ }; static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { /* amp, pre, main, post */ { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ { 0, 1, 12, 10 }, /* QME7342 backplane setting */ { 0, 1, 12, 11 }, /* QME7342 backplane setting */ { 0, 1, 12, 12 }, /* QME7342 backplane setting */ { 0, 1, 12, 14 }, /* QME7342 backplane setting */ { 0, 1, 12, 6 }, /* 
QME7342 backplane setting */
	{ 0, 1, 12, 7 },	/* QME7342 backplane setting */
	{ 0, 1, 12, 8 },	/* QME7342 backplane setting */
	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
};

/*
 * Map a cable attenuation value (in dB) to the matching entry of a
 * TxDDS table.  The result is always clamped to a valid index in
 * [1, TXDDS_TABLE_SZ - 1].
 */
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */
	if (atten <= 2)
		atten = 1;
	else if (atten > TXDDS_TABLE_SZ)
		atten = TXDDS_TABLE_SZ - 1;
	else
		atten--;
	return txdds + atten;
}

/*
 * Select SDR/DDR/QDR serdes settings for a port.  Precedence (as
 * implemented below): exact match in the known-cable vendor table,
 * then board attenuation for active cables, then the attenuation
 * advertised in the cable EEPROM, then the txselect index.
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		/* match on OUI, and on partnum when the entry has one */
		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/* Lookup serdes setting by cable type and attenuation */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	/* atten[0] covers SDR/DDR, atten[1] covers QDR */
	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ)
{ /* * If we have no (or incomplete) data from the cable * EEPROM, or no QSFP, or override is set, use the * module parameter value to index into the attentuation * table. */ idx = ppd->cpspec->no_eep; *sdr_dds = &txdds_sdr[idx]; *ddr_dds = &txdds_ddr[idx]; *qdr_dds = &txdds_qdr[idx]; } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { /* similar to above, but index into the "extra" table. */ idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; *sdr_dds = &txdds_extra_sdr[idx]; *ddr_dds = &txdds_extra_ddr[idx]; *qdr_dds = &txdds_extra_qdr[idx]; } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)) { idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); printk(KERN_INFO QIB_DRV_NAME " IB%u:%u use idx %u into txdds_mfg\n", ppd->dd->unit, ppd->port, idx); *sdr_dds = &txdds_extra_mfg[idx]; *ddr_dds = &txdds_extra_mfg[idx]; *qdr_dds = &txdds_extra_mfg[idx]; } else { /* this shouldn't happen, it's range checked */ *sdr_dds = txdds_sdr + qib_long_atten; *ddr_dds = txdds_ddr + qib_long_atten; *qdr_dds = txdds_qdr + qib_long_atten; } } static void init_txdds_table(struct qib_pportdata *ppd, int override) { const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; struct txdds_ent *dds; int idx; int single_ent = 0; find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); /* for mez cards or override, use the selected value for all entries */ if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) single_ent = 1; /* Fill in the first entry with the best entry found. */ set_txdds(ppd, 0, sdr_dds); set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) { dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? ddr_dds : sdr_dds)); write_tx_serdes_param(ppd, dds); } /* Fill in the remaining entries with the default table values. 
*/ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx); set_txdds(ppd, idx + TXDDS_TABLE_SZ, single_ent ? ddr_dds : txdds_ddr + idx); set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ, single_ent ? qdr_dds : txdds_qdr + idx); } } #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl) #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg) #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy) #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address) #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data) #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read) #define AHB_TRANS_TRIES 10 /* * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4, * 5=subsystem which is why most calls have "chan + chan >> 1" * for the channel argument. */ static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, u32 data, u32 mask) { u32 rd_data, wr_data, sz_mask; u64 trans, acc, prev_acc; u32 ret = 0xBAD0BAD; int tries; prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); /* From this point on, make sure we return access */ acc = (quad << 1) | 1; qib_write_kreg(dd, KR_AHB_ACC, acc); for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { trans = qib_read_kreg64(dd, KR_AHB_TRANS); if (trans & AHB_TRANS_RDY) break; } if (tries >= AHB_TRANS_TRIES) { qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); goto bail; } /* If mask is not all 1s, we need to read, but different SerDes * entities have different sizes */ sz_mask = (1UL << ((quad == 1) ? 
32 : 16)) - 1; wr_data = data & mask & sz_mask; if ((~mask & sz_mask) != 0) { trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); qib_write_kreg(dd, KR_AHB_TRANS, trans); for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { trans = qib_read_kreg64(dd, KR_AHB_TRANS); if (trans & AHB_TRANS_RDY) break; } if (tries >= AHB_TRANS_TRIES) { qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", AHB_TRANS_TRIES); goto bail; } /* Re-read in case host split reads and read data first */ trans = qib_read_kreg64(dd, KR_AHB_TRANS); rd_data = (uint32_t)(trans >> AHB_DATA_LSB); wr_data |= (rd_data & ~mask & sz_mask); } /* If mask is not zero, we need to write. */ if (mask & sz_mask) { trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); trans |= ((uint64_t)wr_data << AHB_DATA_LSB); trans |= AHB_WR; qib_write_kreg(dd, KR_AHB_TRANS, trans); for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { trans = qib_read_kreg64(dd, KR_AHB_TRANS); if (trans & AHB_TRANS_RDY) break; } if (tries >= AHB_TRANS_TRIES) { qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", AHB_TRANS_TRIES); goto bail; } } ret = wr_data; bail: qib_write_kreg(dd, KR_AHB_ACC, prev_acc); return ret; } static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, unsigned mask) { struct qib_devdata *dd = ppd->dd; int chan; u32 rbc; for (chan = 0; chan < SERDES_CHANS; ++chan) { ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, data, mask); rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, 0, 0); } } static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) { u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); if (enable && !state) { printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n", ppd->dd->unit, ppd->port); data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); } else if (!enable && state) { printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n", ppd->dd->unit, ppd->port); data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); } 
qib_write_kreg_port(ppd, krp_serdesctrl, data); } static int serdes_7322_init(struct qib_pportdata *ppd) { int ret = 0; if (ppd->dd->cspec->r1) ret = serdes_7322_init_old(ppd); else ret = serdes_7322_init_new(ppd); return ret; } static int serdes_7322_init_old(struct qib_pportdata *ppd) { u32 le_val; /* * Initialize the Tx DDS tables. Also done every QSFP event, * for adapters with QSFP */ init_txdds_table(ppd, 0); /* ensure no tx overrides from earlier driver loads */ qib_write_kreg_port(ppd, krp_tx_deemph_override, SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, reset_tx_deemphasis_override)); /* Patch some SerDes defaults to "Better for IB" */ /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */ ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6)); /* May be overridden in qsfp_7322_event */ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); /* enable LE1 adaptation for all but QME, which is disabled */ le_val = IS_QME(ppd->dd) ? 
0 : 1; ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5)); /* Clear cmode-override, may be set from older driver */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */ ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8)); /* setup LoS params; these are subsystem, so chan == 5 */ /* LoS filter threshold_count on, ch 0-3, set to 8 */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); /* LoS filter threshold_count off, ch 0-3, set to 4 */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); /* LoS filter select enabled */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); /* LoS target data: SDR=4, DDR=2, QDR=1 */ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ serdes_7322_los_enable(ppd, 1); /* rxbistena; set 0 to avoid effects of it switch later */ ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); /* Configure 4 DFE taps, and only they adapt */ ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0)); /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); /* * Set receive adaptation mode. SDR and DDR adaptation are * always on, and QDR is initially enabled; later disabled. 
*/ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); qib_write_kreg_port(ppd, krp_static_adapt_dis(2), ppd->dd->cspec->r1 ? QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); ppd->cpspec->qdr_dfe_on = 1; /* FLoop LOS gate: PPM filter enabled */ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); /* rx offset center enabled */ ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4); if (!ppd->dd->cspec->r1) { ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12); ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8); } /* Set the frequency loop bandwidth to 15 */ ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5)); return 0; } static int serdes_7322_init_new(struct qib_pportdata *ppd) { u64 tstart; u32 le_val, rxcaldone; int chan, chan_done = (1 << SERDES_CHANS) - 1; /* * Initialize the Tx DDS tables. Also done every QSFP event, * for adapters with QSFP */ init_txdds_table(ppd, 0); /* Clear cmode-override, may be set from older driver */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); /* ensure no tx overrides from earlier driver loads */ qib_write_kreg_port(ppd, krp_tx_deemph_override, SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, reset_tx_deemphasis_override)); /* START OF LSI SUGGESTED SERDES BRINGUP */ /* Reset - Calibration Setup */ /* Stop DFE adaptaion */ ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); /* Disable LE1 */ ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); /* Disable autoadapt for LE1 */ ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); /* Disable LE2 */ ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); /* Disable VGA */ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); /* Disable AFE Offset Cancel */ ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); /* Disable Timing Loop */ ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); /* Disable Frequency Loop */ ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); /* Disable Baseline Wander Correction */ ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); /* Disable RX Calibration */ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); /* Disable RX 
Offset Calibration */ ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); /* Select BB CDR */ ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); /* CDR Step Size */ ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); /* Enable phase Calibration */ ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); /* DFE Bandwidth [2:14-12] */ ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); /* DFE Config (4 taps only) */ ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); /* Gain Loop Bandwidth */ if (!ppd->dd->cspec->r1) { ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); } else { ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); } /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ /* Data Rate Select [5:7-6] (leave as default) */ /* RX Parallel Word Width [3:10-8] (leave as default) */ /* RX REST */ /* Single- or Multi-channel reset */ /* RX Analog reset */ /* RX Digital reset */ ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); msleep(20); /* RX Analog reset */ ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); msleep(20); /* RX Digital reset */ ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); msleep(20); /* setup LoS params; these are subsystem, so chan == 5 */ /* LoS filter threshold_count on, ch 0-3, set to 8 */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); /* LoS filter threshold_count off, ch 0-3, set to 4 */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); /* LoS filter select enabled */ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 
<< 15); /* LoS target data: SDR=4, DDR=2, QDR=1 */ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ /* Turn on LOS on initial SERDES init */ serdes_7322_los_enable(ppd, 1); /* FLoop LOS gate: PPM filter enabled */ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); /* RX LATCH CALIBRATION */ /* Enable Eyefinder Phase Calibration latch */ ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); /* Enable RX Offset Calibration latch */ ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); msleep(20); /* Start Calibration */ ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); tstart = get_jiffies_64(); while (chan_done && !time_after64(get_jiffies_64(), tstart + msecs_to_jiffies(500))) { msleep(20); for (chan = 0; chan < SERDES_CHANS; ++chan) { rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 25, 0, 0); if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && (~chan_done & (1 << chan)) == 0) chan_done &= ~(1 << chan); } } if (chan_done) { printk(KERN_INFO QIB_DRV_NAME " Serdes %d calibration not done after .5 sec: 0x%x\n", IBSD(ppd->hw_pidx), chan_done); } else { for (chan = 0; chan < SERDES_CHANS; ++chan) { rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 25, 0, 0); if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) printk(KERN_INFO QIB_DRV_NAME " Serdes %d chan %d calibration " "failed\n", IBSD(ppd->hw_pidx), chan); } } /* Turn off Calibration */ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); msleep(20); /* BRING RX UP */ /* Set LE2 value (May be overridden in qsfp_7322_event) */ le_val = IS_QME(ppd->dd) ? 
LE2_QME : LE2_DEFAULT; ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); /* Set LE2 Loop bandwidth */ ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); /* Enable LE2 */ ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); msleep(20); /* Enable H0 only */ ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); /* Enable VGA */ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); msleep(20); /* Set Frequency Loop Bandwidth */ ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5)); /* Enable Frequency Loop */ ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); /* Set Timing Loop Bandwidth */ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); /* Enable Timing Loop */ ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); msleep(50); /* Enable DFE * Set receive adaptation mode. SDR and DDR adaptation are * always on, and QDR is initially enabled; later disabled. */ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); qib_write_kreg_port(ppd, krp_static_adapt_dis(2), ppd->dd->cspec->r1 ? 
QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); ppd->cpspec->qdr_dfe_on = 1; /* Disable LE1 */ ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); /* Disable auto adapt for LE1 */ ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); msleep(20); /* Enable AFE Offset Cancel */ ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); /* Enable Baseline Wander Correction */ ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); /* VGA output common mode */ ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); return 0; } /* start adjust QMH serdes parameters */ static void set_man_code(struct qib_pportdata *ppd, int chan, int code) { ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 9, code << 9, 0x3f << 9); } static void set_man_mode_h1(struct qib_pportdata *ppd, int chan, int enable, u32 tapenable) { if (enable) ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 1, 3 << 10, 0x1f << 10); else ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 1, 0, 0x1f << 10); } /* Set clock to 1, 0, 1, 0 */ static void clock_man(struct qib_pportdata *ppd, int chan) { ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 4, 0x4000, 0x4000); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 4, 0, 0x4000); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 4, 0x4000, 0x4000); ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), 4, 0, 0x4000); } /* * write the current Tx serdes pre,post,main,amp settings into the serdes. * The caller must pass the settings appropriate for the current speed, * or not care if they are correct for the current speed. 
*/ static void write_tx_serdes_param(struct qib_pportdata *ppd, struct txdds_ent *txdds) { u64 deemph; deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override); /* field names for amp, main, post, pre, respectively */ deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, tx_override_deemphasis_select); deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a); deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena); deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena); deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena); qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); } /* * Set the parameters for mez cards on link bounce, so they are * always exactly what was requested. Similar logic to init_txdds * but does just the serdes. */ static void adj_tx_serdes(struct qib_pportdata *ppd) { const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; struct txdds_ent *dds; find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1); dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? 
ddr_dds : sdr_dds)); write_tx_serdes_param(ppd, dds); } /* set QDR forced value for H1, if needed */ static void force_h1(struct qib_pportdata *ppd) { int chan; ppd->cpspec->qdr_reforce = 0; if (!ppd->dd->cspec->r1) return; for (chan = 0; chan < SERDES_CHANS; chan++) { set_man_mode_h1(ppd, chan, 1, 0); set_man_code(ppd, chan, ppd->cpspec->h1_val); clock_man(ppd, chan); set_man_mode_h1(ppd, chan, 0, 0); } } #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) #define R_OPCODE_LSB 3 #define R_OP_NOP 0 #define R_OP_SHIFT 2 #define R_OP_UPDATE 3 #define R_TDI_LSB 2 #define R_TDO_LSB 1 #define R_RDY 1 static int qib_r_grab(struct qib_devdata *dd) { u64 val; val = SJA_EN; qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); return 0; } /* qib_r_wait_for_rdy() not only waits for the ready bit, it * returns the current state of R_TDO */ static int qib_r_wait_for_rdy(struct qib_devdata *dd) { u64 val; int timeout; for (timeout = 0; timeout < 100 ; ++timeout) { val = qib_read_kreg32(dd, kr_r_access); if (val & R_RDY) return (val >> R_TDO_LSB) & 1; } return -1; } static int qib_r_shift(struct qib_devdata *dd, int bisten, int len, u8 *inp, u8 *outp) { u64 valbase, val; int ret, pos; valbase = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_SHIFT << R_OPCODE_LSB); ret = qib_r_wait_for_rdy(dd); if (ret < 0) goto bail; for (pos = 0; pos < len; ++pos) { val = valbase; if (outp) { outp[pos >> 3] &= ~(1 << (pos & 7)); outp[pos >> 3] |= (ret << (pos & 7)); } if (inp) { int tdi = inp[pos >> 3] >> (pos & 7); val |= ((tdi & 1) << R_TDI_LSB); } qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); ret = qib_r_wait_for_rdy(dd); if (ret < 0) break; } /* Restore to NOP between operations. 
*/ val = SJA_EN | (bisten << BISTEN_LSB); qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); ret = qib_r_wait_for_rdy(dd); if (ret >= 0) ret = pos; bail: return ret; } static int qib_r_update(struct qib_devdata *dd, int bisten) { u64 val; int ret; val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB); ret = qib_r_wait_for_rdy(dd); if (ret >= 0) { qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); } return ret; } #define BISTEN_PORT_SEL 15 #define LEN_PORT_SEL 625 #define BISTEN_AT 17 #define LEN_AT 156 #define BISTEN_ETM 16 #define LEN_ETM 632 #define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) /* these are common for all IB port use cases. */ static u8 reset_at[BIT2BYTE(LEN_AT)] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, }; static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e, 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7, 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70, 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00, 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, }; static u8 at[BIT2BYTE(LEN_AT)] = { 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, }; /* used for IB1 or IB2, only one in use */ static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 
0xc8, 0x03, 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00, }; /* used when both IB1 and IB2 are in use */ static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05, 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07, 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, }; /* used when only IB1 is in use */ static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = { 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, }; /* used when only IB2 is in use */ static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = { 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32, 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32, 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, }; /* used when both IB1 and IB2 are in use */ static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = { 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, 0x13, 0x13, 0x13, 
0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a, 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, }; /* * Do setup to properly handle IB link recovery; if port is zero, we * are initializing to cover both ports; otherwise we are initializing * to cover a single port card, or the port has reached INIT and we may * need to switch coverage types. */ static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both) { u8 *portsel, *etm; struct qib_devdata *dd = ppd->dd; if (!ppd->dd->cspec->r1) return; if (!both) { dd->cspec->recovery_ports_initted++; ppd->cpspec->recovery_init = 1; } if (!both && dd->cspec->recovery_ports_initted == 1) { portsel = ppd->port == 1 ? portsel_port1 : portsel_port2; etm = atetm_1port; } else { portsel = portsel_2port; etm = atetm_2port; } if (qib_r_grab(dd) < 0 || qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || qib_r_update(dd, BISTEN_ETM) < 0 || qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || qib_r_update(dd, BISTEN_AT) < 0 || qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, portsel, NULL) < 0 || qib_r_update(dd, BISTEN_PORT_SEL) < 0 || qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || qib_r_update(dd, BISTEN_AT) < 0 || qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || qib_r_update(dd, BISTEN_ETM) < 0) qib_dev_err(dd, "Failed IB link recovery setup\n"); } static void check_7322_rxe_status(struct qib_pportdata *ppd) { struct qib_devdata *dd = ppd->dd; u64 fmask; if (dd->cspec->recovery_ports_initted != 1) return; /* rest doesn't apply to dualport */ qib_write_kreg(dd, kr_control, dd->control | SYM_MASK(Control, FreezeMode)); (void)qib_read_kreg64(dd, kr_scratch); udelay(3); /* ibcreset asserted 400ns, be sure 
that's over */ fmask = qib_read_kreg64(dd, kr_act_fmask); if (!fmask) { /* * require a powercycle before we'll work again, and make * sure we get no more interrupts, and don't turn off * freeze. */ ppd->dd->cspec->stay_in_freeze = 1; qib_7322_set_intr_state(ppd->dd, 0); qib_write_kreg(dd, kr_fmask, 0ULL); qib_dev_err(dd, "HCA unusable until powercycled\n"); return; /* eventually reset */ } qib_write_kreg(ppd->dd, kr_hwerrclear, SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1)); /* don't do the full clear_freeze(), not needed for this */ qib_write_kreg(dd, kr_control, dd->control); qib_read_kreg32(dd, kr_scratch); /* take IBC out of reset */ if (ppd->link_speed_supported) { ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); qib_read_kreg32(dd, kr_scratch); if (ppd->lflags & QIBL_IB_LINK_DISABLED) qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); } }
gpl-2.0
Angor00/linux-aura-hd-android-2.6.35.3
drivers/acpi/acpica/hwxface.c
995
17462
/****************************************************************************** * * Module Name: hwxface - Public ACPICA hardware interfaces * *****************************************************************************/ /* * Copyright (C) 2000 - 2010, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwxface") /****************************************************************************** * * FUNCTION: acpi_reset * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Set reset register in memory or IO space. Note: Does not * support reset register in PCI config space, this must be * handled separately. * ******************************************************************************/ acpi_status acpi_reset(void) { struct acpi_generic_address *reset_reg; acpi_status status; ACPI_FUNCTION_TRACE(acpi_reset); reset_reg = &acpi_gbl_FADT.reset_register; /* Check if the reset register is supported */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || !reset_reg->address) { return_ACPI_STATUS(AE_NOT_EXIST); } if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { /* * For I/O space, write directly to the OSL. This bypasses the port * validation mechanism, which may block a valid write to the reset * register. 
*/ status = acpi_os_write_port((acpi_io_address) reset_reg->address, acpi_gbl_FADT.reset_value, reset_reg->bit_width); } else { /* Write the reset value to the reset register */ status = acpi_hw_write(acpi_gbl_FADT.reset_value, reset_reg); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_reset) /****************************************************************************** * * FUNCTION: acpi_read * * PARAMETERS: Value - Where the value is returned * Reg - GAS register structure * * RETURN: Status * * DESCRIPTION: Read from either memory or IO space. * * LIMITATIONS: <These limitations also apply to acpi_write> * bit_width must be exactly 8, 16, 32, or 64. * space_iD must be system_memory or system_iO. * bit_offset and access_width are currently ignored, as there has * not been a need to implement these. * ******************************************************************************/ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) { u32 value; u32 width; u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_read); if (!return_value) { return (AE_BAD_PARAMETER); } /* Validate contents of the GAS register. Allow 64-bit transfers */ status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } width = reg->bit_width; if (width == 64) { width = 32; /* Break into two 32-bit transfers */ } /* Initialize entire 64-bit return value to zero */ *return_value = 0; value = 0; /* * Two address spaces supported: Memory or IO. 
PCI_Config is * not supported here because the GAS structure is insufficient */ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { status = acpi_os_read_memory((acpi_physical_address) address, &value, width); if (ACPI_FAILURE(status)) { return (status); } *return_value = value; if (reg->bit_width == 64) { /* Read the top 32 bits */ status = acpi_os_read_memory((acpi_physical_address) (address + 4), &value, 32); if (ACPI_FAILURE(status)) { return (status); } *return_value |= ((u64)value << 32); } } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ status = acpi_hw_read_port((acpi_io_address) address, &value, width); if (ACPI_FAILURE(status)) { return (status); } *return_value = value; if (reg->bit_width == 64) { /* Read the top 32 bits */ status = acpi_hw_read_port((acpi_io_address) (address + 4), &value, 32); if (ACPI_FAILURE(status)) { return (status); } *return_value |= ((u64)value << 32); } } ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", ACPI_FORMAT_UINT64(*return_value), reg->bit_width, ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); } ACPI_EXPORT_SYMBOL(acpi_read) /****************************************************************************** * * FUNCTION: acpi_write * * PARAMETERS: Value - Value to be written * Reg - GAS register structure * * RETURN: Status * * DESCRIPTION: Write to either memory or IO space. * ******************************************************************************/ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg) { u32 width; u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_write); /* Validate contents of the GAS register. Allow 64-bit transfers */ status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } width = reg->bit_width; if (width == 64) { width = 32; /* Break into two 32-bit transfers */ } /* * Two address spaces supported: Memory or IO. 
PCI_Config is * not supported here because the GAS structure is insufficient */ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { status = acpi_os_write_memory((acpi_physical_address) address, ACPI_LODWORD(value), width); if (ACPI_FAILURE(status)) { return (status); } if (reg->bit_width == 64) { status = acpi_os_write_memory((acpi_physical_address) (address + 4), ACPI_HIDWORD(value), 32); if (ACPI_FAILURE(status)) { return (status); } } } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ status = acpi_hw_write_port((acpi_io_address) address, ACPI_LODWORD(value), width); if (ACPI_FAILURE(status)) { return (status); } if (reg->bit_width == 64) { status = acpi_hw_write_port((acpi_io_address) (address + 4), ACPI_HIDWORD(value), 32); if (ACPI_FAILURE(status)) { return (status); } } } ACPI_DEBUG_PRINT((ACPI_DB_IO, "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", ACPI_FORMAT_UINT64(value), reg->bit_width, ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); } ACPI_EXPORT_SYMBOL(acpi_write) /******************************************************************************* * * FUNCTION: acpi_read_bit_register * * PARAMETERS: register_id - ID of ACPI Bit Register to access * return_value - Value that was read from the register, * normalized to bit position zero. * * RETURN: Status and the value read from the specified Register. Value * returned is normalized to bit0 (is shifted all the way right) * * DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock. * * SUPPORTS: Bit fields in PM1 Status, PM1 Enable, PM1 Control, and * PM2 Control. * * Note: The hardware lock is not required when reading the ACPI bit registers * since almost all of them are single bit and it does not matter that * the parent hardware register can be split across two physical * registers. 
The only multi-bit field is SLP_TYP in the PM1 control * register, but this field does not cross an 8-bit boundary (nor does * it make much sense to actually read this field.) * ******************************************************************************/ acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value) { struct acpi_bit_register_info *bit_reg_info; u32 register_value; u32 value; acpi_status status; ACPI_FUNCTION_TRACE_U32(acpi_read_bit_register, register_id); /* Get the info structure corresponding to the requested ACPI Register */ bit_reg_info = acpi_hw_get_bit_register_info(register_id); if (!bit_reg_info) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Read the entire parent register */ status = acpi_hw_register_read(bit_reg_info->parent_register, &register_value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Normalize the value that was read, mask off other bits */ value = ((register_value & bit_reg_info->access_bit_mask) >> bit_reg_info->bit_position); ACPI_DEBUG_PRINT((ACPI_DB_IO, "BitReg %X, ParentReg %X, Actual %8.8X, ReturnValue %8.8X\n", register_id, bit_reg_info->parent_register, register_value, value)); *return_value = value; return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_read_bit_register) /******************************************************************************* * * FUNCTION: acpi_write_bit_register * * PARAMETERS: register_id - ID of ACPI Bit Register to access * Value - Value to write to the register, in bit * position zero. The bit is automaticallly * shifted to the correct position. * * RETURN: Status * * DESCRIPTION: ACPI Bit Register write function. Acquires the hardware lock * since most operations require a read/modify/write sequence. * * SUPPORTS: Bit fields in PM1 Status, PM1 Enable, PM1 Control, and * PM2 Control. * * Note that at this level, the fact that there may be actually two * hardware registers (A and B - and B may not exist) is abstracted. 
* ******************************************************************************/ acpi_status acpi_write_bit_register(u32 register_id, u32 value) { struct acpi_bit_register_info *bit_reg_info; acpi_cpu_flags lock_flags; u32 register_value; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE_U32(acpi_write_bit_register, register_id); /* Get the info structure corresponding to the requested ACPI Register */ bit_reg_info = acpi_hw_get_bit_register_info(register_id); if (!bit_reg_info) { return_ACPI_STATUS(AE_BAD_PARAMETER); } lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); /* * At this point, we know that the parent register is one of the * following: PM1 Status, PM1 Enable, PM1 Control, or PM2 Control */ if (bit_reg_info->parent_register != ACPI_REGISTER_PM1_STATUS) { /* * 1) Case for PM1 Enable, PM1 Control, and PM2 Control * * Perform a register read to preserve the bits that we are not * interested in */ status = acpi_hw_register_read(bit_reg_info->parent_register, &register_value); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* * Insert the input bit into the value that was just read * and write the register */ ACPI_REGISTER_INSERT_VALUE(register_value, bit_reg_info->bit_position, bit_reg_info->access_bit_mask, value); status = acpi_hw_register_write(bit_reg_info->parent_register, register_value); } else { /* * 2) Case for PM1 Status * * The Status register is different from the rest. Clear an event * by writing 1, writing 0 has no effect. So, the only relevant * information is the single bit we're interested in, all others * should be written as 0 so they will be left unchanged. 
*/ register_value = ACPI_REGISTER_PREPARE_BITS(value, bit_reg_info-> bit_position, bit_reg_info-> access_bit_mask); /* No need to write the register if value is all zeros */ if (register_value) { status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, register_value); } } ACPI_DEBUG_PRINT((ACPI_DB_IO, "BitReg %X, ParentReg %X, Value %8.8X, Actual %8.8X\n", register_id, bit_reg_info->parent_register, value, register_value)); unlock_and_exit: acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_write_bit_register) /******************************************************************************* * * FUNCTION: acpi_get_sleep_type_data * * PARAMETERS: sleep_state - Numeric sleep state * *sleep_type_a - Where SLP_TYPa is returned * *sleep_type_b - Where SLP_TYPb is returned * * RETURN: Status - ACPI status * * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep * state. * ******************************************************************************/ acpi_status acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) { acpi_status status = AE_OK; struct acpi_evaluate_info *info; ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data); /* Validate parameters */ if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Allocate the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->pathname = ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); /* Evaluate the namespace object containing the values for this state */ status = acpi_ns_evaluate(info); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s while evaluating SleepState [%s]\n", acpi_format_exception(status), info->pathname)); goto cleanup; } /* Must have a return object */ if (!info->return_object) { ACPI_ERROR((AE_INFO, "No Sleep State object 
returned from [%s]", info->pathname)); status = AE_NOT_EXIST; } /* It must be of type Package */ else if (info->return_object->common.type != ACPI_TYPE_PACKAGE) { ACPI_ERROR((AE_INFO, "Sleep State return object is not a Package")); status = AE_AML_OPERAND_TYPE; } /* * The package must have at least two elements. NOTE (March 2005): This * goes against the current ACPI spec which defines this object as a * package with one encoded DWORD element. However, existing practice * by BIOS vendors seems to be to have 2 or more elements, at least * one per sleep type (A/B). */ else if (info->return_object->package.count < 2) { ACPI_ERROR((AE_INFO, "Sleep State return package does not have at least two elements")); status = AE_AML_NO_OPERAND; } /* The first two elements must both be of type Integer */ else if (((info->return_object->package.elements[0])->common.type != ACPI_TYPE_INTEGER) || ((info->return_object->package.elements[1])->common.type != ACPI_TYPE_INTEGER)) { ACPI_ERROR((AE_INFO, "Sleep State return package elements are not both Integers " "(%s, %s)", acpi_ut_get_object_type_name(info->return_object-> package.elements[0]), acpi_ut_get_object_type_name(info->return_object-> package.elements[1]))); status = AE_AML_OPERAND_TYPE; } else { /* Valid _Sx_ package size, type, and value */ *sleep_type_a = (u8) (info->return_object->package.elements[0])->integer.value; *sleep_type_b = (u8) (info->return_object->package.elements[1])->integer.value; } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While evaluating SleepState [%s], bad Sleep object %p type %s", info->pathname, info->return_object, acpi_ut_get_object_type_name(info-> return_object))); } acpi_ut_remove_reference(info->return_object); cleanup: ACPI_FREE(info); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
gpl-2.0
SOKP/kernel_lge_hammerhead
arch/arm/mach-msm/qdsp6/routing.c
2019
1852
/* arch/arm/mach-msm/qdsp6/routing.c * * Copyright (C) 2009 Google, Inc. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <mach/debug_mm.h> extern int q6audio_set_route(const char *name); static int q6_open(struct inode *inode, struct file *file) { pr_debug("[%s:%s]\n", __MM_FILE__, __func__); return 0; } static ssize_t q6_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { char cmd[32]; pr_debug("[%s:%s] count = %d", __MM_FILE__, __func__, count); if (count >= sizeof(cmd)) { pr_err("[%s:%s] invalid count %d\n", __MM_FILE__, __func__, count); return -EINVAL; } if (copy_from_user(cmd, buf, count)) return -EFAULT; cmd[count] = 0; if ((count > 1) && (cmd[count-1] == '\n')) cmd[count-1] = 0; q6audio_set_route(cmd); return count; } static int q6_release(struct inode *inode, struct file *file) { pr_debug("[%s:%s]\n", __MM_FILE__, __func__); return 0; } static struct file_operations q6_fops = { .owner = THIS_MODULE, .open = q6_open, .write = q6_write, .release = q6_release, }; static struct miscdevice q6_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_audio_route", .fops = &q6_fops, }; static int __init q6_init(void) { return misc_register(&q6_misc); } device_initcall(q6_init);
gpl-2.0
netwolfuk/Kernel_Unico
drivers/watchdog/sp805_wdt.c
2531
8725
/* * drivers/char/watchdog/sp805-wdt.c * * Watchdog driver for ARM SP805 watchdog module * * Copyright (C) 2010 ST Microelectronics * Viresh Kumar<viresh.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2 or later. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/device.h> #include <linux/resource.h> #include <linux/amba/bus.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/math64.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/watchdog.h> /* default timeout in seconds */ #define DEFAULT_TIMEOUT 60 #define MODULE_NAME "sp805-wdt" /* watchdog register offsets and masks */ #define WDTLOAD 0x000 #define LOAD_MIN 0x00000001 #define LOAD_MAX 0xFFFFFFFF #define WDTVALUE 0x004 #define WDTCONTROL 0x008 /* control register masks */ #define INT_ENABLE (1 << 0) #define RESET_ENABLE (1 << 1) #define WDTINTCLR 0x00C #define WDTRIS 0x010 #define WDTMIS 0x014 #define INT_MASK (1 << 0) #define WDTLOCK 0xC00 #define UNLOCK 0x1ACCE551 #define LOCK 0x00000001 /** * struct sp805_wdt: sp805 wdt device structure * * lock: spin lock protecting dev structure and io access * base: base address of wdt * clk: clock structure of wdt * dev: amba device structure of wdt * status: current status of wdt * load_val: load value to be set for current timeout * timeout: current programmed timeout */ struct sp805_wdt { spinlock_t lock; void __iomem *base; struct clk *clk; struct amba_device *adev; unsigned long status; #define WDT_BUSY 0 #define WDT_CAN_BE_CLOSED 1 unsigned int load_val; unsigned int timeout; }; /* local variables */ static struct sp805_wdt *wdt; static int nowayout = 
WATCHDOG_NOWAYOUT; /* This routine finds load value that will reset system in required timout */ static void wdt_setload(unsigned int timeout) { u64 load, rate; rate = clk_get_rate(wdt->clk); /* * sp805 runs counter with given value twice, after the end of first * counter it gives an interrupt and then starts counter again. If * interrupt already occurred then it resets the system. This is why * load is half of what should be required. */ load = div_u64(rate, 2) * timeout - 1; load = (load > LOAD_MAX) ? LOAD_MAX : load; load = (load < LOAD_MIN) ? LOAD_MIN : load; spin_lock(&wdt->lock); wdt->load_val = load; /* roundup timeout to closest positive integer value */ wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate); spin_unlock(&wdt->lock); } /* returns number of seconds left for reset to occur */ static u32 wdt_timeleft(void) { u64 load, rate; rate = clk_get_rate(wdt->clk); spin_lock(&wdt->lock); load = readl(wdt->base + WDTVALUE); /*If the interrupt is inactive then time left is WDTValue + WDTLoad. 
*/ if (!(readl(wdt->base + WDTRIS) & INT_MASK)) load += wdt->load_val + 1; spin_unlock(&wdt->lock); return div_u64(load, rate); } /* enables watchdog timers reset */ static void wdt_enable(void) { spin_lock(&wdt->lock); writel(UNLOCK, wdt->base + WDTLOCK); writel(wdt->load_val, wdt->base + WDTLOAD); writel(INT_MASK, wdt->base + WDTINTCLR); writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); writel(LOCK, wdt->base + WDTLOCK); spin_unlock(&wdt->lock); } /* disables watchdog timers reset */ static void wdt_disable(void) { spin_lock(&wdt->lock); writel(UNLOCK, wdt->base + WDTLOCK); writel(0, wdt->base + WDTCONTROL); writel(0, wdt->base + WDTLOAD); writel(LOCK, wdt->base + WDTLOCK); spin_unlock(&wdt->lock); } static ssize_t sp805_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; clear_bit(WDT_CAN_BE_CLOSED, &wdt->status); for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; /* Check for Magic Close character */ if (c == 'V') { set_bit(WDT_CAN_BE_CLOSED, &wdt->status); break; } } } wdt_enable(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = MODULE_NAME, }; static long sp805_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; unsigned int timeout; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)) ? 
-EFAULT : 0; break; case WDIOC_GETSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_KEEPALIVE: wdt_enable(); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(timeout, (unsigned int *)arg); if (ret) break; wdt_setload(timeout); wdt_enable(); /* Fall through */ case WDIOC_GETTIMEOUT: ret = put_user(wdt->timeout, (unsigned int *)arg); break; case WDIOC_GETTIMELEFT: ret = put_user(wdt_timeleft(), (unsigned int *)arg); break; } return ret; } static int sp805_wdt_open(struct inode *inode, struct file *file) { int ret = 0; if (test_and_set_bit(WDT_BUSY, &wdt->status)) return -EBUSY; ret = clk_enable(wdt->clk); if (ret) { dev_err(&wdt->adev->dev, "clock enable fail"); goto err; } wdt_enable(); /* can not be closed, once enabled */ clear_bit(WDT_CAN_BE_CLOSED, &wdt->status); return nonseekable_open(inode, file); err: clear_bit(WDT_BUSY, &wdt->status); return ret; } static int sp805_wdt_release(struct inode *inode, struct file *file) { if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) { clear_bit(WDT_BUSY, &wdt->status); dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n"); return 0; } wdt_disable(); clk_disable(wdt->clk); clear_bit(WDT_BUSY, &wdt->status); return 0; } static const struct file_operations sp805_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = sp805_wdt_write, .unlocked_ioctl = sp805_wdt_ioctl, .open = sp805_wdt_open, .release = sp805_wdt_release, }; static struct miscdevice sp805_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &sp805_wdt_fops, }; static int __devinit sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; if (!request_mem_region(adev->res.start, resource_size(&adev->res), "sp805_wdt")) { dev_warn(&adev->dev, "Failed to get memory region resource\n"); ret = -ENOENT; goto err; } wdt = kzalloc(sizeof(*wdt), GFP_KERNEL); if (!wdt) { dev_warn(&adev->dev, "Kzalloc failed\n"); ret = -ENOMEM; goto err_kzalloc; } wdt->clk = clk_get(&adev->dev, NULL); if (IS_ERR(wdt->clk)) { 
dev_warn(&adev->dev, "Clock not found\n"); ret = PTR_ERR(wdt->clk); goto err_clk_get; } wdt->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!wdt->base) { ret = -ENOMEM; dev_warn(&adev->dev, "ioremap fail\n"); goto err_ioremap; } wdt->adev = adev; spin_lock_init(&wdt->lock); wdt_setload(DEFAULT_TIMEOUT); ret = misc_register(&sp805_wdt_miscdev); if (ret < 0) { dev_warn(&adev->dev, "cannot register misc device\n"); goto err_misc_register; } dev_info(&adev->dev, "registration successful\n"); return 0; err_misc_register: iounmap(wdt->base); err_ioremap: clk_put(wdt->clk); err_clk_get: kfree(wdt); wdt = NULL; err_kzalloc: release_mem_region(adev->res.start, resource_size(&adev->res)); err: dev_err(&adev->dev, "Probe Failed!!!\n"); return ret; } static int __devexit sp805_wdt_remove(struct amba_device *adev) { misc_deregister(&sp805_wdt_miscdev); iounmap(wdt->base); clk_put(wdt->clk); kfree(wdt); release_mem_region(adev->res.start, resource_size(&adev->res)); return 0; } static struct amba_id sp805_wdt_ids[] __initdata = { { .id = 0x00141805, .mask = 0x00ffffff, }, { 0, 0 }, }; static struct amba_driver sp805_wdt_driver = { .drv = { .name = MODULE_NAME, }, .id_table = sp805_wdt_ids, .probe = sp805_wdt_probe, .remove = __devexit_p(sp805_wdt_remove), }; static int __init sp805_wdt_init(void) { return amba_driver_register(&sp805_wdt_driver); } module_init(sp805_wdt_init); static void __exit sp805_wdt_exit(void) { amba_driver_unregister(&sp805_wdt_driver); } module_exit(sp805_wdt_exit); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Set to 1 to keep watchdog running after device release"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
sktjdgns1189/android_kernel_pantech_ef56s
arch/powerpc/kernel/pci_32.c
4579
7647
/* * Common pmac/prep/chrp pci routines. -- Cort */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <asm/machdep.h> #undef DEBUG unsigned long isa_io_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; void pcibios_make_OF_bus_map(void); static void fixup_cpc710_pci64(struct pci_dev* dev); static u8* pci_to_OF_bus_map; /* By default, we don't re-assign bus numbers. We do this only on * some pmacs */ static int pci_assign_all_buses; static int pci_bus_count; /* This will remain NULL for now, until isa-bridge.c is made common * to both 32-bit and 64-bit. */ struct pci_dev *isa_bridge_pcidev; EXPORT_SYMBOL_GPL(isa_bridge_pcidev); static void fixup_cpc710_pci64(struct pci_dev* dev) { /* Hide the PCI64 BARs from the kernel as their content doesn't * fit well in the resource management */ dev->resource[0].start = dev->resource[0].end = 0; dev->resource[0].flags = 0; dev->resource[1].start = dev->resource[1].end = 0; dev->resource[1].flags = 0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); /* * Functions below are used on OpenFirmware machines. 
*/ static void make_one_node_map(struct device_node* node, u8 pci_bus) { const int *bus_range; int len; if (pci_bus >= pci_bus_count) return; bus_range = of_get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, " "assuming it starts at 0\n", node->full_name); pci_to_OF_bus_map[pci_bus] = 0; } else pci_to_OF_bus_map[pci_bus] = bus_range[0]; for_each_child_of_node(node, node) { struct pci_dev* dev; const unsigned int *class_code, *reg; class_code = of_get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; reg = of_get_property(node, "reg", NULL); if (!reg) continue; dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); if (!dev || !dev->subordinate) { pci_dev_put(dev); continue; } make_one_node_map(node, dev->subordinate->number); pci_dev_put(dev); } } void pcibios_make_OF_bus_map(void) { int i; struct pci_controller *hose, *tmp; struct property *map_prop; struct device_node *dn; pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); if (!pci_to_OF_bus_map) { printk(KERN_ERR "Can't allocate OF bus map !\n"); return; } /* We fill the bus map with invalid values, that helps * debugging. 
*/ for (i=0; i<pci_bus_count; i++) pci_to_OF_bus_map[i] = 0xff; /* For each hose, we begin searching bridges */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { struct device_node* node = hose->dn; if (!node) continue; make_one_node_map(node, hose->first_busno); } dn = of_find_node_by_path("/"); map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); if (map_prop) { BUG_ON(pci_bus_count > map_prop->length); memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); } of_node_put(dn); #ifdef DEBUG printk("PCI->OF bus map:\n"); for (i=0; i<pci_bus_count; i++) { if (pci_to_OF_bus_map[i] == 0xff) continue; printk("%d -> %d\n", i, pci_to_OF_bus_map[i]); } #endif } /* * Returns the PCI device matching a given OF node */ int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) { struct pci_dev *dev = NULL; const __be32 *reg; int size; /* Check if it might have a chance to be a PCI device */ if (!pci_find_hose_for_OF_device(node)) return -ENODEV; reg = of_get_property(node, "reg", &size); if (!reg || size < 5 * sizeof(u32)) return -ENODEV; *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff; *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff; /* Ok, here we need some tweak. If we have already renumbered * all busses, we can't rely on the OF bus number any more. * the pci_to_OF_bus_map is not enough as several PCI busses * may match the same OF bus number. 
*/ if (!pci_to_OF_bus_map) return 0; for_each_pci_dev(dev) if (pci_to_OF_bus_map[dev->bus->number] == *bus && dev->devfn == *devfn) { *bus = dev->bus->number; pci_dev_put(dev); return 0; } return -ENODEV; } EXPORT_SYMBOL(pci_device_from_OF_node); /* We create the "pci-OF-bus-map" property now so it appears in the * /proc device tree */ void __init pci_create_OF_bus_map(void) { struct property* of_prop; struct device_node *dn; of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); if (!of_prop) return; dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = &of_prop[1]; prom_add_property(dn, of_prop); of_node_put(dn); } } void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) { unsigned long io_offset; struct resource *res = &hose->io_resource; /* Fixup IO space offset */ io_offset = pcibios_io_space_offset(hose); res->start += io_offset; res->end += io_offset; } static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; int next_busno = 0; printk(KERN_INFO "PCI: Probing PCI hardware\n"); if (pci_has_flag(PCI_REASSIGN_ALL_BUS)) pci_assign_all_buses = 1; /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; } pci_bus_count = next_busno; /* OpenFirmware based machines need a map of OF bus * numbers vs. kernel bus numbers since we may have to * remap them. 
*/ if (pci_assign_all_buses) pcibios_make_OF_bus_map(); /* Call common code to handle resource allocation */ pcibios_resource_survey(); /* Call machine dependent post-init code */ if (ppc_md.pcibios_after_init) ppc_md.pcibios_after_init(); return 0; } subsys_initcall(pcibios_init); static struct pci_controller* pci_bus_to_hose(int bus) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) if (bus >= hose->first_busno && bus <= hose->last_busno) return hose; return NULL; } /* Provide information on locations of various I/O regions in physical * memory. Do this on a per-card basis so that we choose the right * root bridge. * Note that the returned IO or memory base is a physical address */ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) { struct pci_controller* hose; long result = -EOPNOTSUPP; hose = pci_bus_to_hose(bus); if (!hose) return -ENODEV; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return (long)isa_mem_base; } return result; }
gpl-2.0
Ki113R/android_kernel_samsung_golden
drivers/net/bonding/bond_debugfs.c
6115
3002
#include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/netdevice.h> #include "bonding.h" #include "bond_alb.h" #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *bonding_debug_root; /* * Show RLB hash table */ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) { struct bonding *bond = m->private; struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; if (bond->params.mode != BOND_MODE_ALB) return 0; seq_printf(m, "SourceIP DestinationIP " "Destination MAC DEV\n"); spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); hash_index = bond_info->rx_hashtbl_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", &client_info->ip_src, &client_info->ip_dst, &client_info->mac_dst, client_info->slave->dev->name); } spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); return 0; } static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file) { return single_open(file, bond_debug_rlb_hash_show, inode->i_private); } static const struct file_operations bond_debug_rlb_hash_fops = { .owner = THIS_MODULE, .open = bond_debug_rlb_hash_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void bond_debug_register(struct bonding *bond) { if (!bonding_debug_root) return; bond->debug_dir = debugfs_create_dir(bond->dev->name, bonding_debug_root); if (!bond->debug_dir) { pr_warning("%s: Warning: failed to register to debugfs\n", bond->dev->name); return; } debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir, bond, &bond_debug_rlb_hash_fops); } void bond_debug_unregister(struct bonding *bond) { if (!bonding_debug_root) return; debugfs_remove_recursive(bond->debug_dir); } void bond_debug_reregister(struct bonding *bond) { struct dentry *d; if (!bonding_debug_root) return; d = 
debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); if (d) { bond->debug_dir = d; } else { pr_warning("%s: Warning: failed to reregister, " "so just unregister old one\n", bond->dev->name); bond_debug_unregister(bond); } } void bond_create_debugfs(void) { bonding_debug_root = debugfs_create_dir("bonding", NULL); if (!bonding_debug_root) { pr_warning("Warning: Cannot create bonding directory" " in debugfs\n"); } } void bond_destroy_debugfs(void) { debugfs_remove_recursive(bonding_debug_root); bonding_debug_root = NULL; } #else /* !CONFIG_DEBUG_FS */ void bond_debug_register(struct bonding *bond) { } void bond_debug_unregister(struct bonding *bond) { } void bond_debug_reregister(struct bonding *bond) { } void bond_create_debugfs(void) { } void bond_destroy_debugfs(void) { } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
garwynn/D710VMUB_FL26_Kernel
arch/mips/lasat/lasat_board.c
14051
7154
/* * Thomas Horsten <thh@lasat.com> * Copyright (C) 2000 LASAT Networks A/S. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Routines specific to the LASAT boards */ #include <linux/types.h> #include <linux/crc32.h> #include <asm/lasat/lasat.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <asm/addrspace.h> #include "at93c.h" /* New model description table */ #include "lasat_models.h" static DEFINE_MUTEX(lasat_eeprom_mutex); #define EEPROM_CRC(data, len) (~crc32(~0, data, len)) struct lasat_info lasat_board_info; int EEPROMRead(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) *data++ = at93c_read(pos++); return 0; } int EEPROMWrite(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) at93c_write(pos++, *data++); return 0; } static void init_flash_sizes(void) { unsigned long *lb = lasat_board_info.li_flashpart_base; unsigned long *ls = lasat_board_info.li_flashpart_size; int i; ls[LASAT_MTD_BOOTLOADER] = 0x40000; ls[LASAT_MTD_SERVICE] = 0xC0000; ls[LASAT_MTD_NORMAL] = 0x100000; if (!IS_LASAT_200()) { lasat_board_info.li_flash_base = 0x1e000000; lb[LASAT_MTD_BOOTLOADER] = 0x1e400000; if (lasat_board_info.li_flash_size > 0x200000) { ls[LASAT_MTD_CONFIG] = 0x100000; ls[LASAT_MTD_FS] = 0x500000; } } else { lasat_board_info.li_flash_base = 0x10000000; if 
(lasat_board_info.li_flash_size < 0x1000000) { lb[LASAT_MTD_BOOTLOADER] = 0x10000000; ls[LASAT_MTD_CONFIG] = 0x100000; if (lasat_board_info.li_flash_size >= 0x400000) ls[LASAT_MTD_FS] = lasat_board_info.li_flash_size - 0x300000; } } for (i = 1; i < LASAT_MTD_LAST; i++) lb[i] = lb[i-1] + ls[i-1]; } int lasat_init_board_info(void) { int c; unsigned long crc; unsigned long cfg0, cfg1; const struct product_info *ppi; int i_n_base_models = N_BASE_MODELS; const char * const * i_txt_base_models = txt_base_models; int i_n_prids = N_PRIDS; memset(&lasat_board_info, 0, sizeof(lasat_board_info)); /* First read the EEPROM info */ EEPROMRead(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); /* Check the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); if (crc != lasat_board_info.li_eeprom_info.crc32) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does " "not match calculated, attempting to soldier on...\n"); } if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version " "%d, wanted version %d, attempting to soldier on...\n", (unsigned int)lasat_board_info.li_eeprom_info.version, LASAT_EEPROM_VERSION); } cfg0 = lasat_board_info.li_eeprom_info.cfg[0]; cfg1 = lasat_board_info.li_eeprom_info.cfg[1]; if (LASAT_W0_DSCTYPE(cfg0) != 1) { printk(KERN_WARNING "WARNING...\nWARNING...\n" "Invalid configuration read from EEPROM, attempting to " "soldier on..."); } /* We have a valid configuration */ switch (LASAT_W0_SDRAMBANKSZ(cfg0)) { case 0: lasat_board_info.li_memsize = 0x0800000; break; case 1: lasat_board_info.li_memsize = 0x1000000; break; case 2: lasat_board_info.li_memsize = 0x2000000; break; case 3: lasat_board_info.li_memsize = 0x4000000; break; case 4: lasat_board_info.li_memsize = 0x8000000; break; default: lasat_board_info.li_memsize = 0; } switch (LASAT_W0_SDRAMBANKS(cfg0)) { case 0: 
break; case 1: lasat_board_info.li_memsize *= 2; break; default: break; } switch (LASAT_W0_BUSSPEED(cfg0)) { case 0x0: lasat_board_info.li_bus_hz = 60000000; break; case 0x1: lasat_board_info.li_bus_hz = 66000000; break; case 0x2: lasat_board_info.li_bus_hz = 66666667; break; case 0x3: lasat_board_info.li_bus_hz = 80000000; break; case 0x4: lasat_board_info.li_bus_hz = 83333333; break; case 0x5: lasat_board_info.li_bus_hz = 100000000; break; } switch (LASAT_W0_CPUCLK(cfg0)) { case 0x0: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz; break; case 0x1: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x2: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; case 0x3: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x4: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; } /* Flash size */ switch (LASAT_W1_FLASHSIZE(cfg1)) { case 0: lasat_board_info.li_flash_size = 0x200000; break; case 1: lasat_board_info.li_flash_size = 0x400000; break; case 2: lasat_board_info.li_flash_size = 0x800000; break; case 3: lasat_board_info.li_flash_size = 0x1000000; break; case 4: lasat_board_info.li_flash_size = 0x2000000; break; } init_flash_sizes(); lasat_board_info.li_bmid = LASAT_W0_BMID(cfg0); lasat_board_info.li_prid = lasat_board_info.li_eeprom_info.prid; if (lasat_board_info.li_prid == 0xffff || lasat_board_info.li_prid == 0) lasat_board_info.li_prid = lasat_board_info.li_bmid; /* Base model stuff */ if (lasat_board_info.li_bmid > i_n_base_models) lasat_board_info.li_bmid = i_n_base_models; strcpy(lasat_board_info.li_bmstr, i_txt_base_models[lasat_board_info.li_bmid]); /* Product ID dependent values */ c = lasat_board_info.li_prid; if (c >= i_n_prids) { strcpy(lasat_board_info.li_namestr, "Unknown Model"); 
strcpy(lasat_board_info.li_typestr, "Unknown Type"); } else { ppi = &vendor_info_table[0].vi_product_info[c]; strcpy(lasat_board_info.li_namestr, ppi->pi_name); if (ppi->pi_type) strcpy(lasat_board_info.li_typestr, ppi->pi_type); else sprintf(lasat_board_info.li_typestr, "%d", 10 * c); } return 0; } void lasat_write_eeprom_info(void) { unsigned long crc; mutex_lock(&lasat_eeprom_mutex); /* Generate the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); lasat_board_info.li_eeprom_info.crc32 = crc; /* Write the EEPROM info */ EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); mutex_unlock(&lasat_eeprom_mutex); }
gpl-2.0
embeddedarm/linux-vanilla-imx6
drivers/of/platform.c
228
16488
/* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * and Arnd Bergmann, IBM Corp. * Merged from powerpc/kernel/of_platform.c and * sparc{,64}/kernel/of_device.c by Stephen Rothwell * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/errno.h> #include <linux/module.h> #include <linux/amba/bus.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> const struct of_device_id of_default_bus_match_table[] = { { .compatible = "simple-bus", }, { .compatible = "simple-mfd", }, #ifdef CONFIG_ARM_AMBA { .compatible = "arm,amba-bus", }, #endif /* CONFIG_ARM_AMBA */ {} /* Empty terminated list */ }; static int of_dev_node_match(struct device *dev, void *data) { return dev->of_node == data; } /** * of_find_device_by_node - Find the platform_device associated with a node * @np: Pointer to device tree node * * Returns platform_device pointer, or NULL if not found */ struct platform_device *of_find_device_by_node(struct device_node *np) { struct device *dev; dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match); return dev ? to_platform_device(dev) : NULL; } EXPORT_SYMBOL(of_find_device_by_node); #ifdef CONFIG_OF_ADDRESS /* * The following routines scan a subtree and registers a device for * each applicable node. * * Note: sparc doesn't use these routines because it has a different * mechanism for creating devices from device tree nodes. 
*/ /** * of_device_make_bus_id - Use the device node data to assign a unique name * @dev: pointer to device structure that is linked to a device tree node * * This routine will first try using the translated bus address to * derive a unique name. If it cannot, then it will prepend names from * parent nodes until a unique name can be derived. */ void of_device_make_bus_id(struct device *dev) { struct device_node *node = dev->of_node; const __be32 *reg; u64 addr; /* Construct the name, using parent nodes if necessary to ensure uniqueness */ while (node->parent) { /* * If the address can be translated, then that is as much * uniqueness as we need. Make it the first component and return */ reg = of_get_property(node, "reg", NULL); if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s", (unsigned long long)addr, node->name, dev_name(dev)); return; } /* format arguments only used if dev_name() resolves to NULL */ dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", strrchr(node->full_name, '/') + 1, dev_name(dev)); node = node->parent; } } /** * of_device_alloc - Allocate and initialize an of_device * @np: device node to assign to device * @bus_id: Name to assign to the device. May be null to use default name. * @parent: Parent device. 
*/ struct platform_device *of_device_alloc(struct device_node *np, const char *bus_id, struct device *parent) { struct platform_device *dev; int rc, i, num_reg = 0, num_irq; struct resource *res, temp_res; dev = platform_device_alloc("", -1); if (!dev) return NULL; /* count the io and irq resources */ while (of_address_to_resource(np, num_reg, &temp_res) == 0) num_reg++; num_irq = of_irq_count(np); /* Populate the resource table */ if (num_irq || num_reg) { res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL); if (!res) { platform_device_put(dev); return NULL; } dev->num_resources = num_reg + num_irq; dev->resource = res; for (i = 0; i < num_reg; i++, res++) { rc = of_address_to_resource(np, i, res); WARN_ON(rc); } if (of_irq_to_resource_table(np, res, num_irq) != num_irq) pr_debug("not all legacy IRQ resources mapped for %s\n", np->name); } dev->dev.of_node = of_node_get(np); dev->dev.parent = parent ? : &platform_bus; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); return dev; } EXPORT_SYMBOL(of_device_alloc); static void of_dma_deconfigure(struct device *dev) { arch_teardown_dma_ops(dev); } /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for * @bus_id: name to assign device * @platform_data: pointer to populate platform_data pointer with * @parent: Linux device model parent device. * * Returns pointer to created platform device, or NULL if a device was not * registered. Unavailable devices will not get registered. 
*/ static struct platform_device *of_platform_device_create_pdata( struct device_node *np, const char *bus_id, void *platform_data, struct device *parent) { struct platform_device *dev; if (!of_device_is_available(np) || of_node_test_and_set_flag(np, OF_POPULATED)) return NULL; dev = of_device_alloc(np, bus_id, parent); if (!dev) goto err_clear_flag; dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; of_dma_configure(&dev->dev, dev->dev.of_node); if (of_device_add(dev) != 0) { of_dma_deconfigure(&dev->dev); platform_device_put(dev); goto err_clear_flag; } return dev; err_clear_flag: of_node_clear_flag(np, OF_POPULATED); return NULL; } /** * of_platform_device_create - Alloc, initialize and register an of_device * @np: pointer to node to create device for * @bus_id: name to assign device * @parent: Linux device model parent device. * * Returns pointer to created platform device, or NULL if a device was not * registered. Unavailable devices will not get registered. */ struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent) { return of_platform_device_create_pdata(np, bus_id, NULL, parent); } EXPORT_SYMBOL(of_platform_device_create); #ifdef CONFIG_ARM_AMBA static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { struct amba_device *dev; const void *prop; int i, ret; pr_debug("Creating amba device %s\n", node->full_name); if (!of_device_is_available(node) || of_node_test_and_set_flag(node, OF_POPULATED)) return NULL; dev = amba_device_alloc(NULL, 0, 0); if (!dev) { pr_err("%s(): amba_device_alloc() failed for %s\n", __func__, node->full_name); goto err_clear_flag; } /* setup generic device info */ dev->dev.of_node = of_node_get(node); dev->dev.parent = parent ? 
: &platform_bus; dev->dev.platform_data = platform_data; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); of_dma_configure(&dev->dev, dev->dev.of_node); /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); if (prop) dev->periphid = of_read_ulong(prop, 1); /* Decode the IRQs and address ranges */ for (i = 0; i < AMBA_NR_IRQS; i++) dev->irq[i] = irq_of_parse_and_map(node, i); ret = of_address_to_resource(node, 0, &dev->res); if (ret) { pr_err("%s(): of_address_to_resource() failed (%d) for %s\n", __func__, ret, node->full_name); goto err_free; } ret = amba_device_add(dev, &iomem_resource); if (ret) { pr_err("%s(): amba_device_add() failed (%d) for %s\n", __func__, ret, node->full_name); goto err_free; } return dev; err_free: amba_device_put(dev); err_clear_flag: of_node_clear_flag(node, OF_POPULATED); return NULL; } #else /* CONFIG_ARM_AMBA */ static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { return NULL; } #endif /* CONFIG_ARM_AMBA */ /** * of_devname_lookup() - Given a device node, lookup the preferred Linux name */ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup, struct device_node *np) { struct resource res; if (!lookup) return NULL; for(; lookup->compatible != NULL; lookup++) { if (!of_device_is_compatible(np, lookup->compatible)) continue; if (!of_address_to_resource(np, 0, &res)) if (res.start != lookup->phys_addr) continue; pr_debug("%s: devname=%s\n", np->full_name, lookup->name); return lookup; } return NULL; } /** * of_platform_bus_create() - Create a device for a node and its children. * @bus: device node of the bus to instantiate * @matches: match table for bus nodes * @lookup: auxdata table for matching id and platform_data with device nodes * @parent: parent for new device, or NULL for top level. 
* @strict: require compatible property * * Creates a platform_device for the provided device_node, and optionally * recursively create devices for all the child nodes. */ static int of_platform_bus_create(struct device_node *bus, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent, bool strict) { const struct of_dev_auxdata *auxdata; struct device_node *child; struct platform_device *dev; const char *bus_id = NULL; void *platform_data = NULL; int rc = 0; /* Make sure it has a compatible property */ if (strict && (!of_get_property(bus, "compatible", NULL))) { pr_debug("%s() - skipping %s, no compatible prop\n", __func__, bus->full_name); return 0; } auxdata = of_dev_lookup(lookup, bus); if (auxdata) { bus_id = auxdata->name; platform_data = auxdata->platform_data; } if (of_device_is_compatible(bus, "arm,primecell")) { /* * Don't return an error here to keep compatibility with older * device tree files. */ of_amba_device_create(bus, bus_id, platform_data, parent); return 0; } dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent); if (!dev || !of_match_node(matches, bus)) return 0; for_each_child_of_node(bus, child) { pr_debug(" create child: %s\n", child->full_name); rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict); if (rc) { of_node_put(child); break; } } of_node_set_flag(bus, OF_POPULATED_BUS); return rc; } /** * of_platform_bus_probe() - Probe the device-tree for platform buses * @root: parent of the first level to probe or NULL for the root of the tree * @matches: match table for bus nodes * @parent: parent to hook devices from, NULL for toplevel * * Note that children of the provided root are not instantiated as devices * unless the specified root itself matches the bus list and is not NULL. */ int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent) { struct device_node *child; int rc = 0; root = root ? 
of_node_get(root) : of_find_node_by_path("/"); if (!root) return -EINVAL; pr_debug("of_platform_bus_probe()\n"); pr_debug(" starting at: %s\n", root->full_name); /* Do a self check of bus type, if there's a match, create children */ if (of_match_node(matches, root)) { rc = of_platform_bus_create(root, matches, NULL, parent, false); } else for_each_child_of_node(root, child) { if (!of_match_node(matches, child)) continue; rc = of_platform_bus_create(child, matches, NULL, parent, false); if (rc) break; } of_node_put(root); return rc; } EXPORT_SYMBOL(of_platform_bus_probe); /** * of_platform_populate() - Populate platform_devices from device tree data * @root: parent of the first level to probe or NULL for the root of the tree * @matches: match table, NULL to use the default * @lookup: auxdata table for matching id and platform_data with device nodes * @parent: parent to hook devices from, NULL for toplevel * * Similar to of_platform_bus_probe(), this function walks the device tree * and creates devices from nodes. It differs in that it follows the modern * convention of requiring all device nodes to have a 'compatible' property, * and it is suitable for creating devices which are children of the root * node (of_platform_bus_probe will only create children of the root which * are selected by the @matches argument). * * New board support should be using this function instead of * of_platform_bus_probe(). * * Returns 0 on success, < 0 on failure. */ int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent) { struct device_node *child; int rc = 0; root = root ? 
of_node_get(root) : of_find_node_by_path("/"); if (!root) return -EINVAL; for_each_child_of_node(root, child) { rc = of_platform_bus_create(child, matches, lookup, parent, true); if (rc) break; } of_node_set_flag(root, OF_POPULATED_BUS); of_node_put(root); return rc; } EXPORT_SYMBOL_GPL(of_platform_populate); static int of_platform_device_destroy(struct device *dev, void *data) { /* Do not touch devices not populated from the device tree */ if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) return 0; /* Recurse for any nodes that were treated as busses */ if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS)) device_for_each_child(dev, NULL, of_platform_device_destroy); if (dev->bus == &platform_bus_type) platform_device_unregister(to_platform_device(dev)); #ifdef CONFIG_ARM_AMBA else if (dev->bus == &amba_bustype) amba_device_unregister(to_amba_device(dev)); #endif of_dma_deconfigure(dev); of_node_clear_flag(dev->of_node, OF_POPULATED); of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } /** * of_platform_depopulate() - Remove devices populated from device tree * @parent: device which children will be removed * * Complementary to of_platform_populate(), this function removes children * of the given device (and, recurrently, their children) that have been * created from their respective device tree nodes (and only those, * leaving others - eg. manually created - unharmed). * * Returns 0 when all children devices have been removed or * -EBUSY when some children remained. 
*/ void of_platform_depopulate(struct device *parent) { if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) { device_for_each_child(parent, NULL, of_platform_device_destroy); of_node_clear_flag(parent->of_node, OF_POPULATED_BUS); } } EXPORT_SYMBOL_GPL(of_platform_depopulate); #ifdef CONFIG_OF_DYNAMIC static int of_platform_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; struct platform_device *pdev_parent, *pdev; bool children_left; switch (of_reconfig_get_state_change(action, rd)) { case OF_RECONFIG_CHANGE_ADD: /* verify that the parent is a bus */ if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) return NOTIFY_OK; /* not for us */ /* already populated? (driver using of_populate manually) */ if (of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; /* pdev_parent may be NULL when no bus platform device */ pdev_parent = of_find_device_by_node(rd->dn->parent); pdev = of_platform_device_create(rd->dn, NULL, pdev_parent ? &pdev_parent->dev : NULL); of_dev_put(pdev_parent); if (pdev == NULL) { pr_err("%s: failed to create for '%s'\n", __func__, rd->dn->full_name); /* of_platform_device_create tosses the error code */ return notifier_from_errno(-EINVAL); } break; case OF_RECONFIG_CHANGE_REMOVE: /* already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; /* find our device by node */ pdev = of_find_device_by_node(rd->dn); if (pdev == NULL) return NOTIFY_OK; /* no? not meant for us */ /* unregister takes one ref away */ of_platform_device_destroy(&pdev->dev, &children_left); /* and put the reference of the find */ of_dev_put(pdev); break; } return NOTIFY_OK; } static struct notifier_block platform_of_notifier = { .notifier_call = of_platform_notify, }; void of_platform_register_reconfig_notifier(void) { WARN_ON(of_reconfig_notifier_register(&platform_of_notifier)); } #endif /* CONFIG_OF_DYNAMIC */ #endif /* CONFIG_OF_ADDRESS */
gpl-2.0
markfasheh/btrfs-stuff
arch/mips/lantiq/clk.c
228
3098
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/io.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/list.h> #include <asm/time.h> #include <asm/irq.h> #include <asm/div64.h> #include <lantiq_soc.h> #include "clk.h" #include "prom.h" /* lantiq socs have 3 static clocks */ static struct clk cpu_clk_generic[3]; void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io) { cpu_clk_generic[0].rate = cpu; cpu_clk_generic[1].rate = fpi; cpu_clk_generic[2].rate = io; } struct clk *clk_get_cpu(void) { return &cpu_clk_generic[0]; } struct clk *clk_get_fpi(void) { return &cpu_clk_generic[1]; } EXPORT_SYMBOL_GPL(clk_get_fpi); struct clk *clk_get_io(void) { return &cpu_clk_generic[2]; } static inline int clk_good(struct clk *clk) { return clk && !IS_ERR(clk); } unsigned long clk_get_rate(struct clk *clk) { if (unlikely(!clk_good(clk))) return 0; if (clk->rate != 0) return clk->rate; if (clk->get_rate != NULL) return clk->get_rate(); return 0; } EXPORT_SYMBOL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { if (unlikely(!clk_good(clk))) return 0; if (clk->rates && *clk->rates) { unsigned long *r = clk->rates; while (*r && (*r != rate)) r++; if (!*r) { pr_err("clk %s.%s: trying to set invalid rate %ld\n", clk->cl.dev_id, clk->cl.con_id, rate); return -1; } } clk->rate = rate; return 0; } EXPORT_SYMBOL(clk_set_rate); int clk_enable(struct clk *clk) { if (unlikely(!clk_good(clk))) return -1; if (clk->enable) return clk->enable(clk); return -1; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { if 
(unlikely(!clk_good(clk))) return; if (clk->disable) clk->disable(clk); } EXPORT_SYMBOL(clk_disable); int clk_activate(struct clk *clk) { if (unlikely(!clk_good(clk))) return -1; if (clk->activate) return clk->activate(clk); return -1; } EXPORT_SYMBOL(clk_activate); void clk_deactivate(struct clk *clk) { if (unlikely(!clk_good(clk))) return; if (clk->deactivate) clk->deactivate(clk); } EXPORT_SYMBOL(clk_deactivate); struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) { return NULL; } static inline u32 get_counter_resolution(void) { u32 res; __asm__ __volatile__( ".set push\n" ".set mips32r2\n" "rdhwr %0, $3\n" ".set pop\n" : "=&r" (res) : /* no input */ : "memory"); return res; } void __init plat_time_init(void) { struct clk *clk; ltq_soc_init(); clk = clk_get_cpu(); mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution(); write_c0_compare(read_c0_count()); pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000); clk_put(clk); }
gpl-2.0
tony0924/itri
arch/arm64/kernel/asm-offsets.c
228
6737
/* * Based on arch/arm/kernel/asm-offsets.c * * Copyright (C) 1995-2003 Russell King * 2001-2002 Keith Owens * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/kvm_host.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/cputable.h> #include <asm/vdso_datapage.h> #include <linux/kbuild.h> int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); DEFINE(S_X1, offsetof(struct pt_regs, regs[1])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); DEFINE(S_X3, offsetof(struct pt_regs, regs[3])); DEFINE(S_X4, offsetof(struct pt_regs, regs[4])); DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); #ifdef 
CONFIG_COMPAT DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); #endif DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); BLANK(); DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); BLANK(); DEFINE(VM_EXEC, VM_EXEC); BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info)); DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup)); BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); BLANK(); DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); DEFINE(VDSO_CS_MULT, offsetof(struct vdso_data, cs_mult)); DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(VDSO_TZ_DSTTIME, 
offsetof(struct vdso_data, tz_dsttime)); DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); BLANK(); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); BLANK(); #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1)); DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 
DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); #endif return 0; }
gpl-2.0
jashasweejena/VibeKernel
drivers/md/raid1.c
228
85403
/* * raid1.c : Multiple Devices driver for Linux * * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat * * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman * * RAID-1 management functions. * * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 * * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> * * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support * bitmapped intelligence in resync: * * - bitmap marked during normal i/o * - bitmap used to skip nondirty blocks during sync * * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: * - persistent bitmap code * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> #include "md.h" #include "raid1.h" #include "bitmap.h" /* * Number of guaranteed r1bios in case of extreme VM load: */ #define NR_RAID1_BIOS 256 /* when we get a read error on a read-only array, we redirect to another * device without failing the first device, or trying to over-write to * correct the read error. To keep track of bad blocks on a per-bio * level, we store IO_BLOCKED in the appropriate 'bios' pointer */ #define IO_BLOCKED ((struct bio *)1) /* When we successfully write to a known bad-block, we need to remove the * bad-block marking which must be done from process context. 
So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* 'bios' slots holding IO_BLOCKED/IO_MADE_GOOD are markers, not real bios */
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queue to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf);
static void lower_barrier(struct r1conf *conf);

/*
 * mempool allocator for r1bio descriptors: sized so the trailing 'bios'
 * array has room for pi->raid_disks entries.
 */
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

/* mempool destructor counterpart of r1bio_pool_alloc() */
static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

/*
 * Allocate an r1bio for resync/recovery, together with one bio per disk
 * and the data pages those bios will use.  On any failure everything
 * allocated so far is released and NULL is returned.
 */
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < need_pages; j++) {
		bio = r1_bio->bios[j];
		bio->bi_vcnt = RESYNC_PAGES;

		if (bio_alloc_pages(bio, gfp_flags))
			goto out_free_pages;
	}
	/* If not user-requests, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	/* j is the index of the bio whose page allocation failed */
	while (--j >= 0) {
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, r1_bio->bios[j], i)
			__free_page(bv->bv_page);
	}

out_free_bio:
	/* free the bios from j+1 upward (0..j were never allocated) */
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

/*
 * Free a resync r1bio: release data pages (shared pages are owned by
 * bios[0] and freed only once) and drop each bio.
 */
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

/*
 * Drop every real bio attached to this r1bio and clear the slots;
 * the IO_BLOCKED/IO_MADE_GOOD sentinels are skipped (see BIO_SPECIAL).
 */
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

/* Return a normal-IO r1bio (and its bios) to the r1bio mempool. */
static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

/*
 * Return a resync/recovery r1bio to the buffer pool, dropping the rdev
 * references taken for each active bio, then lower the resync barrier.
 */
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

/*
 * Hand an r1bio to the raid1d thread for retry/cleanup.  The request is
 * accounted in conf->nr_queued so freeze_array() can wait for it.
 */
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;

	/* bi_phys_segments counts outstanding sub-requests when the master
	 * bio had to be split across several r1bios; only the last one
	 * completing may end the master bio.
	 */
	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_sector,
			 (unsigned long long) bio->bi_sector +
			 bio_sectors(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

/* bio completion callback for a READ submitted to one mirror */
static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

/*
 * Final bookkeeping once every write of a request has completed:
 * release any write-behind page copies, clear the bitmap bits and
 * tell md the write is done.
 */
static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

/*
 * Called for each completed mirror write; the last completion either
 * ends the request or hands it to raid1d when error/bad-block handling
 * must run in process context.
 */
static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

/* bio completion callback for a WRITE submitted to one mirror */
static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code for to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have felt
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.
		 */
		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_sector,
					 (unsigned long long) mbio->bi_sector +
					 bio_sectors(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;

	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is idle disk. If yes, choose
			 * the idle disk. read_balance could already choose an
			 * idle disk before noticing it's a sequential IO in
			 * this disk. This doesn't matter because this disk
			 * will idle, next time it will be utilized after the
			 * first disk has IO size exceeds optimal iosize. In
			 * this way, iosize of the first disk will be optimal
			 * iosize at least. iosize of the second disk might be
			 * small, but not a big deal since when the second disk
			 * starts IO, the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}
		/* If device is idle, use it */
		if (pending == 0) {
			best_disk = disk;
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with less pending request even the
	 * disk is rotational, which might/might not be optimal for raids with
	 * mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

/*
 * Limit a merge candidate to what every mirror's underlying queue will
 * accept, by consulting each device's merge_bvec_fn when present.
 */
static int raid1_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;

}

/*
 * Report congestion: immediately when the pending-write queue is over
 * max_queued_requests, otherwise by polling the member devices.
 */
int md_raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.
This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening,  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r1conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending match nr_queued+extra
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

/* duplicate the data pages for behind I/O */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment_all(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	/* on allocation failure the caller simply falls back to sync I/O */
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

/*
 * Main entry point for READ/WRITE requests arriving from the block
 * layer.  Reads are routed to one mirror chosen by read_balance();
 * writes are cloned to every usable mirror.  Bad blocks may force the
 * request to be split across several r1bios (tracked via
 * bio->bi_phys_segments).
 */
static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio_end_sector(bio) <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
			    max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_sector + sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets it's own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0;  i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)
		    || test_bit(Unmerged, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged*/
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/*
			 * We trimmed the bio, so _all is legit
			 */
			bio_for_each_segment_all(bvec, mbio, j)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_sector	= (r1_bio->sector +
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw =
			WRITE | do_flush_fua | do_sync | do_discard | do_same;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!plug)
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < bio_sectors(bio)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_sector + sectors_handled;
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

/* Emit the "[n/m] [UU_]" per-device summary for /proc/mdstat. */
static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

/* Mark a member device Faulty, unless it is the last working disk. */
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disks, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		set_bit(Faulty, &rdev->flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

/* Dump the current mirror configuration to the kernel log (debug). */
static void print_conf(struct r1conf *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

/* Tear down the resync buffer pool once resync has finished. */
static void close_sync(struct r1conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors+mirror;
		if (!p->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync,
&rdev->flags); set_bit(Replacement, &rdev->flags); rdev->raid_disk = mirror; err = 0; conf->fullsync = 1; rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); break; } } if (err == 0 && test_bit(Unmerged, &rdev->flags)) { /* Some requests might not have seen this new * merge_bvec_fn. We must wait for them to complete * before merging the device fully. * First we make sure any code which has tested * our function has submitted the request, then * we wait for all outstanding requests to complete. */ synchronize_sched(); freeze_array(conf, 0); unfreeze_array(conf); clear_bit(Unmerged, &rdev->flags); } md_integrity_add_rdev(rdev, mddev); if (blk_queue_discard(bdev_get_queue(rdev->bdev))) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; } static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) { struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; struct raid1_info *p = conf->mirrors + number; if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; print_conf(conf); if (rdev == p->rdev) { if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; } /* Only remove non-faulty devices if recovery * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && mddev->recovery_disabled != conf->recovery_disabled && mddev->degraded < conf->raid_disks) { err = -EBUSY; goto abort; } p->rdev = NULL; synchronize_rcu(); if (atomic_read(&rdev->nr_pending)) { /* lost the race, try later */ err = -EBUSY; p->rdev = rdev; goto abort; } else if (conf->mirrors[conf->raid_disks + number].rdev) { /* We just removed a device that is being replaced. * Move down the replacement. We drain all IO before * doing this to avoid confusion. 
*/ struct md_rdev *repl = conf->mirrors[conf->raid_disks + number].rdev; freeze_array(conf, 0); clear_bit(Replacement, &repl->flags); p->rdev = repl; conf->mirrors[conf->raid_disks + number].rdev = NULL; unfreeze_array(conf); clear_bit(WantReplacement, &rdev->flags); } else clear_bit(WantReplacement, &rdev->flags); err = md_integrity_register(mddev); } abort: print_conf(conf); return err; } static void end_sync_read(struct bio *bio, int error) { struct r1bio *r1_bio = bio->bi_private; update_head_pos(r1_bio->read_disk, r1_bio); /* * we have read a block, now it needs to be re-written, * or re-read if the read failed. * We don't do much here, just schedule handling by raid1d */ if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R1BIO_Uptodate, &r1_bio->state); if (atomic_dec_and_test(&r1_bio->remaining)) reschedule_retry(r1_bio); } static void end_sync_write(struct bio *bio, int error) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct r1bio *r1_bio = bio->bi_private; struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; int mirror=0; sector_t first_bad; int bad_sectors; mirror = find_bio_disk(r1_bio, bio); if (!uptodate) { sector_t sync_blocks = 0; sector_t s = r1_bio->sector; long sectors_to_go = r1_bio->sectors; /* make sure these bits doesn't get cleared. 
*/ do { bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); s += sync_blocks; sectors_to_go -= sync_blocks; } while (sectors_to_go > 0); set_bit(WriteErrorSeen, &conf->mirrors[mirror].rdev->flags); if (!test_and_set_bit(WantReplacement, &conf->mirrors[mirror].rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & mddev->recovery); set_bit(R1BIO_WriteError, &r1_bio->state); } else if (is_badblock(conf->mirrors[mirror].rdev, r1_bio->sector, r1_bio->sectors, &first_bad, &bad_sectors) && !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, r1_bio->sector, r1_bio->sectors, &first_bad, &bad_sectors) ) set_bit(R1BIO_MadeGood, &r1_bio->state); if (atomic_dec_and_test(&r1_bio->remaining)) { int s = r1_bio->sectors; if (test_bit(R1BIO_MadeGood, &r1_bio->state) || test_bit(R1BIO_WriteError, &r1_bio->state)) reschedule_retry(r1_bio); else { put_buf(r1_bio); md_done_sync(mddev, s, uptodate); } } } static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) { if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) /* success */ return 1; if (rw == WRITE) { set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & rdev->mddev->recovery); } /* need to record an error - either for the block or the device */ if (!rdev_set_badblocks(rdev, sector, sectors, 0)) md_error(rdev->mddev, rdev); return 0; } static int fix_sync_read_error(struct r1bio *r1_bio) { /* Try some synchronous reads of other devices to get * good data, much like with normal read errors. Only * read into the pages we already have so we don't * need to re-issue the read request. * We don't need to freeze the array, because being in an * active sync request, there is no normal IO, and * no overlapping syncs. * We don't need to check is_badblock() again as we * made sure that anything with a bad block in range * will have bi_end_io clear. 
*/ struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; struct bio *bio = r1_bio->bios[r1_bio->read_disk]; sector_t sect = r1_bio->sector; int sectors = r1_bio->sectors; int idx = 0; while(sectors) { int s = sectors; int d = r1_bio->read_disk; int success = 0; struct md_rdev *rdev; int start; if (s > (PAGE_SIZE>>9)) s = PAGE_SIZE >> 9; do { if (r1_bio->bios[d]->bi_end_io == end_sync_read) { /* No rcu protection needed here devices * can only be removed when no resync is * active, and resync is currently active */ rdev = conf->mirrors[d].rdev; if (sync_page_io(rdev, sect, s<<9, bio->bi_io_vec[idx].bv_page, READ, false)) { success = 1; break; } } d++; if (d == conf->raid_disks * 2) d = 0; } while (!success && d != r1_bio->read_disk); if (!success) { char b[BDEVNAME_SIZE]; int abort = 0; /* Cannot read from anywhere, this block is lost. * Record a bad block on each device. If that doesn't * work just disable and interrupt the recovery. * Don't fail devices as that won't really help. 
*/ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" " for block %llu\n", mdname(mddev), bdevname(bio->bi_bdev, b), (unsigned long long)r1_bio->sector); for (d = 0; d < conf->raid_disks * 2; d++) { rdev = conf->mirrors[d].rdev; if (!rdev || test_bit(Faulty, &rdev->flags)) continue; if (!rdev_set_badblocks(rdev, sect, s, 0)) abort = 1; } if (abort) { conf->recovery_disabled = mddev->recovery_disabled; set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_done_sync(mddev, r1_bio->sectors, 0); put_buf(r1_bio); return 0; } /* Try next page */ sectors -= s; sect += s; idx++; continue; } start = d; /* write it back and re-read */ while (d != r1_bio->read_disk) { if (d == 0) d = conf->raid_disks * 2; d--; if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; if (r1_sync_page_io(rdev, sect, s, bio->bi_io_vec[idx].bv_page, WRITE) == 0) { r1_bio->bios[d]->bi_end_io = NULL; rdev_dec_pending(rdev, mddev); } } d = start; while (d != r1_bio->read_disk) { if (d == 0) d = conf->raid_disks * 2; d--; if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; if (r1_sync_page_io(rdev, sect, s, bio->bi_io_vec[idx].bv_page, READ) != 0) atomic_add(s, &rdev->corrected_errors); } sectors -= s; sect += s; idx ++; } set_bit(R1BIO_Uptodate, &r1_bio->state); set_bit(BIO_UPTODATE, &bio->bi_flags); return 1; } static int process_checks(struct r1bio *r1_bio) { /* We have read all readable devices. If we haven't * got the block, then there is no hope left. * If we have, then we want to do a comparison * and skip the write if everything is the same. 
* If any blocks failed to read, then we need to * attempt an over-write */ struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; int primary; int i; int vcnt; /* Fix variable parts of all bios */ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); for (i = 0; i < conf->raid_disks * 2; i++) { int j; int size; int uptodate; struct bio *b = r1_bio->bios[i]; if (b->bi_end_io != end_sync_read) continue; /* fixup the bio for reuse, but preserve BIO_UPTODATE */ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); bio_reset(b); if (!uptodate) clear_bit(BIO_UPTODATE, &b->bi_flags); b->bi_vcnt = vcnt; b->bi_size = r1_bio->sectors << 9; b->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; b->bi_end_io = end_sync_read; b->bi_private = r1_bio; size = b->bi_size; for (j = 0; j < vcnt ; j++) { struct bio_vec *bi; bi = &b->bi_io_vec[j]; bi->bv_offset = 0; if (size > PAGE_SIZE) bi->bv_len = PAGE_SIZE; else bi->bv_len = size; size -= PAGE_SIZE; } } for (primary = 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io == end_sync_read && test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { r1_bio->bios[primary]->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[primary].rdev, mddev); break; } r1_bio->read_disk = primary; for (i = 0; i < conf->raid_disks * 2; i++) { int j; struct bio *pbio = r1_bio->bios[primary]; struct bio *sbio = r1_bio->bios[i]; int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); if (sbio->bi_end_io != end_sync_read) continue; /* Now we can 'fixup' the BIO_UPTODATE flag */ set_bit(BIO_UPTODATE, &sbio->bi_flags); if (uptodate) { for (j = vcnt; j-- ; ) { struct page *p, *s; p = pbio->bi_io_vec[j].bv_page; s = sbio->bi_io_vec[j].bv_page; if (memcmp(page_address(p), page_address(s), sbio->bi_io_vec[j].bv_len)) break; } } else j = 0; if (j >= 0) atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); if (j < 0 || (test_bit(MD_RECOVERY_CHECK, 
&mddev->recovery) && uptodate)) { /* No need to write to this device. */ sbio->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[i].rdev, mddev); continue; } bio_copy_data(sbio, pbio); } return 0; } static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) { struct r1conf *conf = mddev->private; int i; int disks = conf->raid_disks * 2; struct bio *bio, *wbio; bio = r1_bio->bios[r1_bio->read_disk]; if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) /* ouch - failed to read all of that. */ if (!fix_sync_read_error(r1_bio)) return; if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) if (process_checks(r1_bio) < 0) return; /* * schedule writes */ atomic_set(&r1_bio->remaining, 1); for (i = 0; i < disks ; i++) { wbio = r1_bio->bios[i]; if (wbio->bi_end_io == NULL || (wbio->bi_end_io == end_sync_read && (i == r1_bio->read_disk || !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; wbio->bi_rw = WRITE; wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); generic_make_request(wbio); } if (atomic_dec_and_test(&r1_bio->remaining)) { /* if we're here, all write(s) have completed, so clean up */ int s = r1_bio->sectors; if (test_bit(R1BIO_MadeGood, &r1_bio->state) || test_bit(R1BIO_WriteError, &r1_bio->state)) reschedule_retry(r1_bio); else { put_buf(r1_bio); md_done_sync(mddev, s, 1); } } } /* * This is a kernel thread which: * * 1. Retries failed read operations on working mirrors. * 2. Updates the raid superblock when problems encounter. * 3. Performs writes following reads for array synchronising. 
 */

/*
 * Attempt to repair a read error on @read_disk by re-reading each
 * page-sized chunk from some other usable mirror, writing the good
 * data back to the other mirrors, and re-reading to verify.  If no
 * mirror can supply a chunk, the block is marked bad (or the device
 * failed).  Runs synchronously in the raid1d thread.
 */
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks * 2)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags))
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
						   rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}

/*
 * After a write error on device @i, retry the write one badblock-sized
 * chunk at a time so only the genuinely bad chunks get recorded as bad
 * blocks.  Returns 1 if every failed chunk could at least be recorded,
 * 0 if bad blocks are not supported or recording failed.
 */
static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;
	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r1_bio->sector;
	/* first chunk may be short so later chunks are block-aligned */
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			unsigned vcnt = r1_bio->behind_page_count;
			struct bio_vec *vec = r1_bio->behind_bvecs;

			while (!vec->bv_page) {
				vec++;
				vcnt--;
			}

			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));

			wbio->bi_vcnt = vcnt;
		} else {
			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO,
					       mddev);
		}

		wbio->bi_rw = WRITE;
		wbio->bi_sector = r1_bio->sector;
		wbio->bi_size = r1_bio->sectors << 9;

		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

/*
 * raid1d helper: finish a sync r1_bio that saw MadeGood/WriteError —
 * clear or record bad blocks per device, then complete the sync chunk.
 */
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

/*
 * raid1d helper: finish a normal write r1_bio — clear bad blocks where
 * the write succeeded over a known-bad range (IO_MADE_GOOD), narrow and
 * record bad blocks where it failed, then end the master bio.
 */
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}

/*
 * raid1d helper: retry a failed normal read.  First try in-place
 * repair (array frozen), then redirect the read to another mirror via
 * read_balance(), splitting the r1_bio if the new mirror can only
 * serve part of the range.
 */
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	int max_sectors;
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check it that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */
	if (mddev->ro == 0) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);

	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
read_more:
	disk = read_balance(conf, r1_bio, &max_sectors);
	if (disk == -1) {
		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
			= r1_bio->master_bio->bi_rw & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;
			bio_put(bio);
		}
		r1_bio->read_disk = disk;
		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
		r1_bio->bios[r1_bio->read_disk] = bio;
		rdev = conf->mirrors[disk].rdev;
		printk_ratelimited(KERN_ERR
				   "md/raid1:%s: redirecting sector %llu"
				   " to other mirror: %s\n",
				   mdname(mddev),
				   (unsigned long long)r1_bio->sector,
				   bdevname(rdev->bdev, b));
		bio->bi_sector = r1_bio->sector + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_end_io = raid1_end_read_request;
		bio->bi_rw = READ | do_sync;
		bio->bi_private = r1_bio;
		if (max_sectors < r1_bio->sectors) {
			/* Drat - have to split this up more */
			struct bio *mbio = r1_bio->master_bio;
			int sectors_handled = (r1_bio->sector + max_sectors
					       - mbio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (mbio->bi_phys_segments == 0)
				mbio->bi_phys_segments = 2;
			else
				mbio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			generic_make_request(bio);
			bio = NULL;

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = mbio;
			r1_bio->sectors = bio_sectors(mbio) -
					  sectors_handled;
			r1_bio->state = 0;
			set_bit(R1BIO_ReadError, &r1_bio->state);
			r1_bio->mddev = mddev;
			r1_bio->sector = mbio->bi_sector + sectors_handled;

			goto read_more;
		} else
			generic_make_request(bio);
	}
}

/*
 * Main per-array daemon: flushes pending writes and drains the retry
 * list, dispatching each r1_bio to the appropriate completion/retry
 * handler.
 */
static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

/*
 * Allocate the resync buffer pool.  Returns 0 or -ENOMEM.
 */
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync.
		 * We can find the current addess in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call the bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it though before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	raise_barrier(conf);

	conf->next_resync = sector_nr;

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];
		bio_reset(bio);

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets ++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					/* WriteMostly devices are read only
					 * as a last resort */
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio->bi_rw = WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	/* attach pages to every active bio, one page-sized chunk at a
	 * time, until the bios are full or we hit max_sector */
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io==NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);

	}
	return nr_sectors;
}

/*
 * Report the usable size of the array: an explicit @sectors if given,
 * otherwise the per-device data size (RAID1 capacity equals one
 * device).
 */
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

/*
 * Allocate and populate the r1conf for @mddev: mirror table (2 *
 * raid_disks slots, replacements in the upper half), scratch page,
 * r1bio mempool, locks/lists, and the raid1d thread.  Returns the conf
 * or an ERR_PTR; all partial allocations are released on failure.
 */
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct raid1_info)
				* mddev->raid_disks * 2,
				 GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		struct request_queue *q;
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;

		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static int stop(struct mddev *mddev);
/*
 * md personality 'run' entry point: validate level/reshape, build (or
 * adopt) the conf, compute degraded count, publish array size and
 * queue settings, and register for integrity.  On integrity failure
 * the array is stopped again.
 */
static int run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue)
		blk_queue_max_write_same_sectors(mddev->queue, 0);

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i=0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);

		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	ret =  md_integrity_register(mddev);
	if (ret)
		stop(mddev);
	return ret;
}

static int stop(struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
		       mdname(mddev));
		/* need to kick something here to make sure I/O goes?
*/ wait_event(bitmap->behind_wait, atomic_read(&bitmap->behind_writes) == 0); } raise_barrier(conf); lower_barrier(conf); md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); kfree(conf); mddev->private = NULL; return 0; } static int raid1_resize(struct mddev *mddev, sector_t sectors) { /* no resync is happening, and there is enough space * on all devices, so we can resize. * We need to make sure resync covers any new space. * If the array is shrinking we should possibly wait until * any io in the removed space completes, but it hardly seems * worth it. */ sector_t newsize = raid1_size(mddev, sectors, 0); if (mddev->external_size && mddev->array_sectors > newsize) return -EINVAL; if (mddev->bitmap) { int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); if (ret) return ret; } md_set_array_sectors(mddev, newsize); set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { mddev->recovery_cp = mddev->dev_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } mddev->dev_sectors = sectors; mddev->resync_max_sectors = sectors; return 0; } static int raid1_reshape(struct mddev *mddev) { /* We need to: * 1/ resize the r1bio_pool * 2/ resize conf->mirrors * * We allocate a new r1bio_pool if we can. * Then raise a device barrier and wait until all IO stops. * Then resize conf->mirrors and swap in the new r1bio pool. * * At the same time, we "pack" the devices so that all the missing * devices have the higher raid_disk numbers. 
*/ mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; int d, d2, err; /* Cannot change chunk_size, layout, or level */ if (mddev->chunk_sectors != mddev->new_chunk_sectors || mddev->layout != mddev->new_layout || mddev->level != mddev->new_level) { mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->new_layout = mddev->layout; mddev->new_level = mddev->level; return -EINVAL; } err = md_allow_write(mddev); if (err) return err; raid_disks = mddev->raid_disks + mddev->delta_disks; if (raid_disks < conf->raid_disks) { cnt=0; for (d= 0; d < conf->raid_disks; d++) if (conf->mirrors[d].rdev) cnt++; if (cnt > raid_disks) return -EBUSY; } newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); if (!newpoolinfo) return -ENOMEM; newpoolinfo->mddev = mddev; newpoolinfo->raid_disks = raid_disks * 2; newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, r1bio_pool_free, newpoolinfo); if (!newpool) { kfree(newpoolinfo); return -ENOMEM; } newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); mempool_destroy(newpool); return -ENOMEM; } freeze_array(conf, 0); /* ok, everything is stopped */ oldpool = conf->r1bio_pool; conf->r1bio_pool = newpool; for (d = d2 = 0; d < conf->raid_disks; d++) { struct md_rdev *rdev = conf->mirrors[d].rdev; if (rdev && rdev->raid_disk != d2) { sysfs_unlink_rdev(mddev, rdev); rdev->raid_disk = d2; sysfs_unlink_rdev(mddev, rdev); if (sysfs_link_rdev(mddev, rdev)) printk(KERN_WARNING "md/raid1:%s: cannot register rd%d\n", mdname(mddev), rdev->raid_disk); } if (rdev) newmirrors[d2++].rdev = rdev; } kfree(conf->mirrors); conf->mirrors = newmirrors; kfree(conf->poolinfo); conf->poolinfo = newpoolinfo; spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded += (raid_disks - conf->raid_disks); spin_unlock_irqrestore(&conf->device_lock, flags); conf->raid_disks = 
mddev->raid_disks = raid_disks; mddev->delta_disks = 0; unfreeze_array(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); mempool_destroy(oldpool); return 0; } static void raid1_quiesce(struct mddev *mddev, int state) { struct r1conf *conf = mddev->private; switch(state) { case 2: /* wake for suspend */ wake_up(&conf->wait_barrier); break; case 1: raise_barrier(conf); break; case 0: lower_barrier(conf); break; } } static void *raid1_takeover(struct mddev *mddev) { /* raid1 can take over: * raid5 with 2 devices, any layout or chunk size */ if (mddev->level == 5 && mddev->raid_disks == 2) { struct r1conf *conf; mddev->new_level = 1; mddev->new_layout = 0; mddev->new_chunk_sectors = 0; conf = setup_conf(mddev); if (!IS_ERR(conf)) conf->barrier = 1; return conf; } return ERR_PTR(-EINVAL); } static struct md_personality raid1_personality = { .name = "raid1", .level = 1, .owner = THIS_MODULE, .make_request = make_request, .run = run, .stop = stop, .status = status, .error_handler = error, .hot_add_disk = raid1_add_disk, .hot_remove_disk= raid1_remove_disk, .spare_active = raid1_spare_active, .sync_request = sync_request, .resize = raid1_resize, .size = raid1_size, .check_reshape = raid1_reshape, .quiesce = raid1_quiesce, .takeover = raid1_takeover, }; static int __init raid_init(void) { return register_md_personality(&raid1_personality); } static void raid_exit(void) { unregister_md_personality(&raid1_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); MODULE_ALIAS("md-personality-3"); /* RAID1 */ MODULE_ALIAS("md-raid1"); MODULE_ALIAS("md-level-1"); module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
gpl-2.0
dandel/linux-2.6.32.y
fs/xattr.c
484
16016
/* File: fs/xattr.c Extended attribute handling. Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org> Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com> Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/module.h> #include <linux/fsnotify.h> #include <linux/audit.h> #include <asm/uaccess.h> /* * Check permissions for extended attribute access. This is a bit complicated * because different namespaces have very different rules. */ static int xattr_permission(struct inode *inode, const char *name, int mask) { /* * We can never set or remove an extended attribute on a read-only * filesystem or on an immutable / append-only inode. */ if (mask & MAY_WRITE) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } /* * No restriction for security.* and system.* from the VFS. Decision * on these is left to the underlying filesystem / security module. */ if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return 0; /* * The trusted.* namespace can only be accessed by a privileged user. */ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM); /* In user.* namespace, only regular files and directories can have * extended attributes. For sticky directories, only the owner and * privileged user can write attributes. 
*/ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) { if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) return -EPERM; if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) && (mask & MAY_WRITE) && !is_owner_or_cap(inode)) return -EPERM; } return inode_permission(inode, mask); } /** * __vfs_setxattr_noperm - perform setxattr operation without performing * permission checks. * * @dentry - object to perform setxattr on * @name - xattr name to set * @value - value to set @name to * @size - size of @value * @flags - flags to pass into filesystem operations * * returns the result of the internal setxattr or setsecurity operations. * * This function requires the caller to lock the inode's i_mutex before it * is executed. It also assumes that the caller will make the appropriate * permission checks. */ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = dentry->d_inode; int error = -EOPNOTSUPP; if (inode->i_op->setxattr) { error = inode->i_op->setxattr(dentry, name, value, size, flags); if (!error) { fsnotify_xattr(dentry); security_inode_post_setxattr(dentry, name, value, size, flags); } } else if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) { const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; error = security_inode_setsecurity(inode, suffix, value, size, flags); if (!error) fsnotify_xattr(dentry); } return error; } int vfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = dentry->d_inode; int error; error = xattr_permission(inode, name, MAY_WRITE); if (error) return error; mutex_lock(&inode->i_mutex); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: mutex_unlock(&inode->i_mutex); return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); ssize_t xattr_getsecurity(struct inode 
*inode, const char *name, void *value, size_t size) { void *buffer = NULL; ssize_t len; if (!value || !size) { len = security_inode_getsecurity(inode, name, &buffer, false); goto out_noalloc; } len = security_inode_getsecurity(inode, name, &buffer, true); if (len < 0) return len; if (size < len) { len = -ERANGE; goto out; } memcpy(value, buffer, len); out: security_release_secctx(buffer, len); out_noalloc: return len; } EXPORT_SYMBOL_GPL(xattr_getsecurity); ssize_t vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { struct inode *inode = dentry->d_inode; int error; error = xattr_permission(inode, name, MAY_READ); if (error) return error; error = security_inode_getxattr(dentry, name); if (error) return error; if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) { const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; int ret = xattr_getsecurity(inode, suffix, value, size); /* * Only overwrite the return value if a security module * is actually active. 
*/ if (ret == -EOPNOTSUPP) goto nolsm; return ret; } nolsm: if (inode->i_op->getxattr) error = inode->i_op->getxattr(dentry, name, value, size); else error = -EOPNOTSUPP; return error; } EXPORT_SYMBOL_GPL(vfs_getxattr); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size) { ssize_t error; error = security_inode_listxattr(d); if (error) return error; error = -EOPNOTSUPP; if (d->d_inode->i_op->listxattr) { error = d->d_inode->i_op->listxattr(d, list, size); } else { error = security_inode_listsecurity(d->d_inode, list, size); if (size && error > size) error = -ERANGE; } return error; } EXPORT_SYMBOL_GPL(vfs_listxattr); int vfs_removexattr(struct dentry *dentry, const char *name) { struct inode *inode = dentry->d_inode; int error; if (!inode->i_op->removexattr) return -EOPNOTSUPP; error = xattr_permission(inode, name, MAY_WRITE); if (error) return error; error = security_inode_removexattr(dentry, name); if (error) return error; mutex_lock(&inode->i_mutex); error = inode->i_op->removexattr(dentry, name); mutex_unlock(&inode->i_mutex); if (!error) fsnotify_xattr(dentry); return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); /* * Extended attribute SET operations */ static long setxattr(struct dentry *d, const char __user *name, const void __user *value, size_t size, int flags) { int error; void *kvalue = NULL; char kname[XATTR_NAME_MAX + 1]; if (flags & ~(XATTR_CREATE|XATTR_REPLACE)) return -EINVAL; error = strncpy_from_user(kname, name, sizeof(kname)); if (error == 0 || error == sizeof(kname)) error = -ERANGE; if (error < 0) return error; if (size) { if (size > XATTR_SIZE_MAX) return -E2BIG; kvalue = memdup_user(value, size); if (IS_ERR(kvalue)) return PTR_ERR(kvalue); } error = vfs_setxattr(d, kname, kvalue, size, flags); kfree(kvalue); return error; } SYSCALL_DEFINE5(setxattr, const char __user *, pathname, const char __user *, name, const void __user *, value, size_t, size, int, flags) { struct path path; int error; error = user_path(pathname, &path); if 
(error) return error; error = mnt_want_write(path.mnt); if (!error) { error = setxattr(path.dentry, name, value, size, flags); mnt_drop_write(path.mnt); } path_put(&path); return error; } SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, const char __user *, name, const void __user *, value, size_t, size, int, flags) { struct path path; int error; error = user_lpath(pathname, &path); if (error) return error; error = mnt_want_write(path.mnt); if (!error) { error = setxattr(path.dentry, name, value, size, flags); mnt_drop_write(path.mnt); } path_put(&path); return error; } SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, const void __user *,value, size_t, size, int, flags) { struct file *f; struct dentry *dentry; int error = -EBADF; f = fget(fd); if (!f) return error; dentry = f->f_path.dentry; audit_inode(NULL, dentry); error = mnt_want_write_file(f); if (!error) { error = setxattr(dentry, name, value, size, flags); mnt_drop_write(f->f_path.mnt); } fput(f); return error; } /* * Extended attribute GET operations */ static ssize_t getxattr(struct dentry *d, const char __user *name, void __user *value, size_t size) { ssize_t error; void *kvalue = NULL; char kname[XATTR_NAME_MAX + 1]; error = strncpy_from_user(kname, name, sizeof(kname)); if (error == 0 || error == sizeof(kname)) error = -ERANGE; if (error < 0) return error; if (size) { if (size > XATTR_SIZE_MAX) size = XATTR_SIZE_MAX; kvalue = kzalloc(size, GFP_KERNEL); if (!kvalue) return -ENOMEM; } error = vfs_getxattr(d, kname, kvalue, size); if (error > 0) { if (size && copy_to_user(value, kvalue, error)) error = -EFAULT; } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) { /* The file system tried to returned a value bigger than XATTR_SIZE_MAX bytes. Not possible. 
*/ error = -E2BIG; } kfree(kvalue); return error; } SYSCALL_DEFINE4(getxattr, const char __user *, pathname, const char __user *, name, void __user *, value, size_t, size) { struct path path; ssize_t error; error = user_path(pathname, &path); if (error) return error; error = getxattr(path.dentry, name, value, size); path_put(&path); return error; } SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname, const char __user *, name, void __user *, value, size_t, size) { struct path path; ssize_t error; error = user_lpath(pathname, &path); if (error) return error; error = getxattr(path.dentry, name, value, size); path_put(&path); return error; } SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size) { struct file *f; ssize_t error = -EBADF; f = fget(fd); if (!f) return error; audit_inode(NULL, f->f_path.dentry); error = getxattr(f->f_path.dentry, name, value, size); fput(f); return error; } /* * Extended attribute LIST operations */ static ssize_t listxattr(struct dentry *d, char __user *list, size_t size) { ssize_t error; char *klist = NULL; if (size) { if (size > XATTR_LIST_MAX) size = XATTR_LIST_MAX; klist = kmalloc(size, GFP_KERNEL); if (!klist) return -ENOMEM; } error = vfs_listxattr(d, klist, size); if (error > 0) { if (size && copy_to_user(list, klist, error)) error = -EFAULT; } else if (error == -ERANGE && size >= XATTR_LIST_MAX) { /* The file system tried to returned a list bigger than XATTR_LIST_MAX bytes. Not possible. 
*/ error = -E2BIG; } kfree(klist); return error; } SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list, size_t, size) { struct path path; ssize_t error; error = user_path(pathname, &path); if (error) return error; error = listxattr(path.dentry, list, size); path_put(&path); return error; } SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list, size_t, size) { struct path path; ssize_t error; error = user_lpath(pathname, &path); if (error) return error; error = listxattr(path.dentry, list, size); path_put(&path); return error; } SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) { struct file *f; ssize_t error = -EBADF; f = fget(fd); if (!f) return error; audit_inode(NULL, f->f_path.dentry); error = listxattr(f->f_path.dentry, list, size); fput(f); return error; } /* * Extended attribute REMOVE operations */ static long removexattr(struct dentry *d, const char __user *name) { int error; char kname[XATTR_NAME_MAX + 1]; error = strncpy_from_user(kname, name, sizeof(kname)); if (error == 0 || error == sizeof(kname)) error = -ERANGE; if (error < 0) return error; return vfs_removexattr(d, kname); } SYSCALL_DEFINE2(removexattr, const char __user *, pathname, const char __user *, name) { struct path path; int error; error = user_path(pathname, &path); if (error) return error; error = mnt_want_write(path.mnt); if (!error) { error = removexattr(path.dentry, name); mnt_drop_write(path.mnt); } path_put(&path); return error; } SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname, const char __user *, name) { struct path path; int error; error = user_lpath(pathname, &path); if (error) return error; error = mnt_want_write(path.mnt); if (!error) { error = removexattr(path.dentry, name); mnt_drop_write(path.mnt); } path_put(&path); return error; } SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) { struct file *f; struct dentry *dentry; int error = -EBADF; f = fget(fd); if (!f) return 
error; dentry = f->f_path.dentry; audit_inode(NULL, dentry); error = mnt_want_write_file(f); if (!error) { error = removexattr(dentry, name); mnt_drop_write(f->f_path.mnt); } fput(f); return error; } static const char * strcmp_prefix(const char *a, const char *a_prefix) { while (*a_prefix && *a == *a_prefix) { a++; a_prefix++; } return *a_prefix ? NULL : a; } /* * In order to implement different sets of xattr operations for each xattr * prefix with the generic xattr API, a filesystem should create a * null-terminated array of struct xattr_handler (one for each prefix) and * hang a pointer to it off of the s_xattr field of the superblock. * * The generic_fooxattr() functions will use this list to dispatch xattr * operations to the correct xattr_handler. */ #define for_each_xattr_handler(handlers, handler) \ for ((handler) = *(handlers)++; \ (handler) != NULL; \ (handler) = *(handlers)++) /* * Find the xattr_handler with the matching prefix. */ static struct xattr_handler * xattr_resolve_name(struct xattr_handler **handlers, const char **name) { struct xattr_handler *handler; if (!*name) return NULL; for_each_xattr_handler(handlers, handler) { const char *n = strcmp_prefix(*name, handler->prefix); if (n) { *name = n; break; } } return handler; } /* * Find the handler for the prefix and dispatch its get() operation. */ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { struct xattr_handler *handler; struct inode *inode = dentry->d_inode; handler = xattr_resolve_name(inode->i_sb->s_xattr, &name); if (!handler) return -EOPNOTSUPP; return handler->get(inode, name, buffer, size); } /* * Combine the results of the list() operation from every xattr_handler in the * list. 
*/ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = dentry->d_inode; struct xattr_handler *handler, **handlers = inode->i_sb->s_xattr; unsigned int size = 0; if (!buffer) { for_each_xattr_handler(handlers, handler) size += handler->list(inode, NULL, 0, NULL, 0); } else { char *buf = buffer; for_each_xattr_handler(handlers, handler) { size = handler->list(inode, buf, buffer_size, NULL, 0); if (size > buffer_size) return -ERANGE; buf += size; buffer_size -= size; } size = buf - buffer; } return size; } /* * Find the handler for the prefix and dispatch its set() operation. */ int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct xattr_handler *handler; struct inode *inode = dentry->d_inode; if (size == 0) value = ""; /* empty EA, do not remove */ handler = xattr_resolve_name(inode->i_sb->s_xattr, &name); if (!handler) return -EOPNOTSUPP; return handler->set(inode, name, value, size, flags); } /* * Find the handler for the prefix and dispatch its set() operation to remove * any associated extended attribute. */ int generic_removexattr(struct dentry *dentry, const char *name) { struct xattr_handler *handler; struct inode *inode = dentry->d_inode; handler = xattr_resolve_name(inode->i_sb->s_xattr, &name); if (!handler) return -EOPNOTSUPP; return handler->set(inode, name, NULL, 0, XATTR_REPLACE); } EXPORT_SYMBOL(generic_getxattr); EXPORT_SYMBOL(generic_listxattr); EXPORT_SYMBOL(generic_setxattr); EXPORT_SYMBOL(generic_removexattr);
gpl-2.0
AICP/kernel_motorola_msm8992
drivers/input/touchscreen/msg21xx_ts.c
740
54780
/* * MStar MSG21XX touchscreen driver * * Copyright (c) 2006-2012 MStar Semiconductor, Inc. * * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com> * * Copyright (c) 2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/sysfs.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/debugfs.h> #include <linux/regulator/consumer.h> #if defined(CONFIG_FB) #include <linux/notifier.h> #include <linux/fb.h> #endif #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR #include <linux/input/vir_ps.h> #endif /* Constant Value & Variable Definition*/ #define MSTAR_VTG_MIN_UV 2800000 #define MSTAR_VTG_MAX_UV 3300000 #define MSTAR_I2C_VTG_MIN_UV 1800000 #define MSTAR_I2C_VTG_MAX_UV 1800000 #define MAX_BUTTONS 4 #define FT_COORDS_ARR_SIZE 4 #define MSTAR_FW_NAME_MAX_LEN 50 #define MSTAR_CHIPTOP_REGISTER_BANK 0x1E #define MSTAR_CHIPTOP_REGISTER_ICTYPE 0xCC #define MSTAR_INIT_SW_ID 0x7FF #define MSTAR_DEBUG_DIR_NAME "ts_debug" #define MSG_FW_FILE_MAJOR_VERSION(x) \ (((x)->data[0x7f4f] << 8) + ((x)->data[0x7f4e])) #define MSG_FW_FILE_MINOR_VERSION(x) \ (((x)->data[0x7f51] << 8) + ((x)->data[0x7f50])) /* * Note. * Please do not change the below setting. 
*/ #define TPD_WIDTH (2048) #define TPD_HEIGHT (2048) #define PINCTRL_STATE_ACTIVE "pmx_ts_active" #define PINCTRL_STATE_SUSPEND "pmx_ts_suspend" #define PINCTRL_STATE_RELEASE "pmx_ts_release" #define SLAVE_I2C_ID_DBBUS (0xC4>>1) #define DEMO_MODE_PACKET_LENGTH (8) #define TP_PRINT /*store the frimware binary data*/ static unsigned char fw_bin_data[94][1024]; static unsigned int crc32_table[256]; static unsigned short fw_file_major, fw_file_minor; static unsigned short main_sw_id = MSTAR_INIT_SW_ID; static unsigned short info_sw_id = MSTAR_INIT_SW_ID; static unsigned int bin_conf_crc32; struct msg21xx_ts_platform_data { const char *name; char fw_name[MSTAR_FW_NAME_MAX_LEN]; u8 fw_version_major; u8 fw_version_minor; u32 irq_gpio; u32 irq_gpio_flags; u32 reset_gpio; u32 reset_gpio_flags; u32 x_max; u32 y_max; u32 x_min; u32 y_min; u32 panel_minx; u32 panel_miny; u32 panel_maxx; u32 panel_maxy; u32 num_max_touches; u8 ic_type; u32 button_map[MAX_BUTTONS]; u32 num_buttons; u32 hard_reset_delay_ms; u32 post_hard_reset_delay_ms; bool updating_fw; }; /* Touch Data Type Definition */ struct touchPoint_t { unsigned short x; unsigned short y; }; struct touchInfo_t { struct touchPoint_t *point; unsigned char count; unsigned char keycode; }; struct msg21xx_ts_data { struct i2c_client *client; struct input_dev *input_dev; struct msg21xx_ts_platform_data *pdata; struct regulator *vdd; struct regulator *vcc_i2c; bool suspended; #if defined(CONFIG_FB) struct notifier_block fb_notif; #endif struct pinctrl *ts_pinctrl; struct pinctrl_state *pinctrl_state_active; struct pinctrl_state *pinctrl_state_suspend; struct pinctrl_state *pinctrl_state_release; struct mutex ts_mutex; struct touchInfo_t info; }; #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); #endif #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR static unsigned char bEnableTpProximity; static unsigned char bFaceClosingTp; #endif #ifdef TP_PRINT static int 
tp_print_proc_read(struct msg21xx_ts_data *ts_data); static void tp_print_create_entry(struct msg21xx_ts_data *ts_data); #endif static void _ReadBinConfig(struct msg21xx_ts_data *ts_data); static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data); static struct mutex msg21xx_mutex; enum EMEM_TYPE_t { EMEM_ALL = 0, EMEM_MAIN, EMEM_INFO, }; /* Function Definition */ static unsigned int _CRC_doReflect(unsigned int ref, signed char ch) { unsigned int value = 0; unsigned int i = 0; for (i = 1; i < (ch + 1); i++) { if (ref & 1) value |= 1 << (ch - i); ref >>= 1; } return value; } static unsigned int _CRC_getValue(unsigned int text, unsigned int prevCRC) { unsigned int ulCRC = prevCRC; ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text]; return ulCRC; } static void _CRC_initTable(void) { unsigned int magic_number = 0x04c11db7; unsigned int i, j; for (i = 0; i <= 0xFF; i++) { crc32_table[i] = _CRC_doReflect(i, 8) << 24; for (j = 0; j < 8; j++) crc32_table[i] = (crc32_table[i] << 1) ^ (crc32_table[i] & (0x80000000L) ? magic_number : 0); crc32_table[i] = _CRC_doReflect(crc32_table[i], 32); } } static void msg21xx_reset_hw(struct msg21xx_ts_platform_data *pdata) { gpio_direction_output(pdata->reset_gpio, 1); gpio_set_value_cansleep(pdata->reset_gpio, 0); /* Note that the RST must be in LOW 10ms at least */ usleep(pdata->hard_reset_delay_ms * 1000); gpio_set_value_cansleep(pdata->reset_gpio, 1); /* Enable the interrupt service thread/routine for INT after 50ms */ usleep(pdata->post_hard_reset_delay_ms * 1000); } static int read_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr, unsigned char *buf, unsigned short size) { int rc = 0; struct i2c_msg msgs[] = { { .addr = addr, .flags = I2C_M_RD, /* read flag */ .len = size, .buf = buf, }, }; /* If everything went ok (i.e. 1 msg transmitted), return #bytes transmitted, else error code. 
*/ if (ts_data->client != NULL) { rc = i2c_transfer(ts_data->client->adapter, msgs, 1); if (rc < 0) dev_err(&ts_data->client->dev, "%s error %d\n", __func__, rc); } else { dev_err(&ts_data->client->dev, "ts_data->client is NULL\n"); } return rc; } static int write_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr, unsigned char *buf, unsigned short size) { int rc = 0; struct i2c_msg msgs[] = { { .addr = addr, /* * if read flag is undefined, * then it means write flag. */ .flags = 0, .len = size, .buf = buf, }, }; /* * If everything went ok (i.e. 1 msg transmitted), return #bytes * transmitted, else error code. */ if (ts_data->client != NULL) { rc = i2c_transfer(ts_data->client->adapter, msgs, 1); if (rc < 0) dev_err(&ts_data->client->dev, "%s error %d\n", __func__, rc); } else { dev_err(&ts_data->client->dev, "ts_data->client is NULL\n"); } return rc; } static unsigned short read_reg(struct msg21xx_ts_data *ts_data, unsigned char bank, unsigned char addr) { unsigned char tx_data[3] = {0x10, bank, addr}; unsigned char rx_data[2] = {0}; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data)); read_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, rx_data, sizeof(rx_data)); return rx_data[1] << 8 | rx_data[0]; } static void write_reg(struct msg21xx_ts_data *ts_data, unsigned char bank, unsigned char addr, unsigned short data) { unsigned char tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8}; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data)); } static void write_reg_8bit(struct msg21xx_ts_data *ts_data, unsigned char bank, unsigned char addr, unsigned char data) { unsigned char tx_data[4] = {0x10, bank, addr, data}; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data)); } static void dbbusDWIICEnterSerialDebugMode(struct msg21xx_ts_data *ts_data) { unsigned char data[5]; /* Enter the Serial Debug Mode */ data[0] = 0x53; data[1] = 0x45; data[2] = 0x52; data[3] = 0x44; data[4] = 0x42; write_i2c_seq(ts_data, 
SLAVE_I2C_ID_DBBUS, data, sizeof(data)); } static void dbbusDWIICStopMCU(struct msg21xx_ts_data *ts_data) { unsigned char data[1]; /* Stop the MCU */ data[0] = 0x37; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data)); } static void dbbusDWIICIICUseBus(struct msg21xx_ts_data *ts_data) { unsigned char data[1]; /* IIC Use Bus */ data[0] = 0x35; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data)); } static void dbbusDWIICIICReshape(struct msg21xx_ts_data *ts_data) { unsigned char data[1]; /* IIC Re-shape */ data[0] = 0x71; write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data)); } static unsigned char msg21xx_get_ic_type(struct msg21xx_ts_data *ts_data) { unsigned char ic_type = 0; unsigned char bank; unsigned char addr; msg21xx_reset_hw(ts_data->pdata); dbbusDWIICEnterSerialDebugMode(ts_data); dbbusDWIICStopMCU(ts_data); dbbusDWIICIICUseBus(ts_data); dbbusDWIICIICReshape(ts_data); msleep(300); /* stop mcu */ write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01); /* disable watch dog */ write_reg(ts_data, 0x3C, 0x60, 0xAA55); /* get ic type */ bank = MSTAR_CHIPTOP_REGISTER_BANK; addr = MSTAR_CHIPTOP_REGISTER_ICTYPE; ic_type = (0xff)&(read_reg(ts_data, bank, addr)); if (ic_type != ts_data->pdata->ic_type) ic_type = 0; msg21xx_reset_hw(ts_data->pdata); return ic_type; } static int msg21xx_read_firmware_id(struct msg21xx_ts_data *ts_data) { unsigned char command[3] = { 0x53, 0x00, 0x2A}; unsigned char response[4] = { 0 }; mutex_lock(&msg21xx_mutex); write_i2c_seq(ts_data, ts_data->client->addr, command, sizeof(command)); read_i2c_seq(ts_data, ts_data->client->addr, response, sizeof(response)); mutex_unlock(&msg21xx_mutex); ts_data->pdata->fw_version_major = (response[1]<<8) + response[0]; ts_data->pdata->fw_version_minor = (response[3]<<8) + response[2]; dev_info(&ts_data->client->dev, "major num = %d, minor num = %d\n", ts_data->pdata->fw_version_major, ts_data->pdata->fw_version_minor); return 0; } static int firmware_erase_c33(struct msg21xx_ts_data 
				*ts_data, enum EMEM_TYPE_t emem_type)
{
	/* stop mcu */
	write_reg(ts_data, 0x0F, 0xE6, 0x0001);

	/* disable watch dog */
	write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
	write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);

	/* set PROGRAM password */
	write_reg_8bit(ts_data, 0x16, 0x1A, 0xBA);
	write_reg_8bit(ts_data, 0x16, 0x1B, 0xAB);

	write_reg_8bit(ts_data, 0x16, 0x18, 0x80);

	if (emem_type == EMEM_ALL)
		write_reg_8bit(ts_data, 0x16, 0x08, 0x10);

	write_reg_8bit(ts_data, 0x16, 0x18, 0x40);
	msleep(20);

	/* clear pce */
	write_reg_8bit(ts_data, 0x16, 0x18, 0x80);

	/* erase trigger */
	if (emem_type == EMEM_MAIN)
		write_reg_8bit(ts_data, 0x16, 0x0E, 0x04); /* erase main */
	else
		write_reg_8bit(ts_data, 0x16, 0x0E, 0x08); /* erase all block */

	return 0;
}

/*
 * Flash the firmware image held in fw_bin_data[][] (32 pages of 1 KB)
 * into the C33 controller, then verify the main-block CRC reported by
 * the TP against the CRC computed over the image.
 *
 * Returns @size on success (sysfs convention), 0 on CRC mismatch.
 *
 * NOTE(review): all of the do/while register-polling loops below have no
 * timeout; a wedged controller hangs this (sysfs-store) thread forever.
 */
static ssize_t firmware_update_c33(struct device *dev,
			struct device_attribute *attr, const char *buf,
			size_t size, enum EMEM_TYPE_t emem_type, bool isForce)
{
	unsigned int i, j;
	unsigned int crc_main, crc_main_tp;
	unsigned int crc_info, crc_info_tp;
	unsigned short reg_data = 0;
	int update_pass = 1;
	bool fw_upgrade = false;
	struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);

	crc_main = 0xffffffff;
	crc_info = 0xffffffff;

	msg21xx_reset_hw(ts_data->pdata);
	msg21xx_read_firmware_id(ts_data);
	_ReadBinConfig(ts_data);

	/* Upgrade only when the file belongs to the same SW id and is newer. */
	if ((main_sw_id == info_sw_id) &&
			(_CalMainCRC32(ts_data) == bin_conf_crc32) &&
			(fw_file_major == ts_data->pdata->fw_version_major) &&
			(fw_file_minor > ts_data->pdata->fw_version_minor)) {
		fw_upgrade = true;
	}

	if (!fw_upgrade && !isForce) {
		dev_dbg(dev, "no need to update\n");
		msg21xx_reset_hw(ts_data->pdata);
		return size;
	}

	msg21xx_reset_hw(ts_data->pdata);
	msleep(300);

	dbbusDWIICEnterSerialDebugMode(ts_data);
	dbbusDWIICStopMCU(ts_data);
	dbbusDWIICIICUseBus(ts_data);
	dbbusDWIICIICReshape(ts_data);
	msleep(300);

	/* erase main */
	firmware_erase_c33(ts_data, EMEM_MAIN);
	msleep(1000);

	msg21xx_reset_hw(ts_data->pdata);

	dbbusDWIICEnterSerialDebugMode(ts_data);
	dbbusDWIICStopMCU(ts_data);
	dbbusDWIICIICUseBus(ts_data);
	dbbusDWIICIICReshape(ts_data);
	msleep(300);

	/*
	 * Program
	 */

	/* polling 0x3CE4 is 0x1C70 */
	if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
		do {
			reg_data = read_reg(ts_data, 0x3C, 0xE4);
		} while (reg_data != 0x1C70);
	}

	/* select which e-memory blocks the programming command targets */
	switch (emem_type) {
	case EMEM_ALL:
		write_reg(ts_data, 0x3C, 0xE4, 0xE38F); /* for all-blocks */
		break;
	case EMEM_MAIN:
		write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for main block */
		break;
	case EMEM_INFO:
		write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for info block */

		write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);

		write_reg_8bit(ts_data, 0x3C, 0xE4, 0xC5);
		write_reg_8bit(ts_data, 0x3C, 0xE5, 0x78);

		write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
							0x04, 0x9F);
		write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
							0x05, 0x82);

		write_reg_8bit(ts_data, 0x0F, 0xE6, 0x00);
		msleep(100);
		break;
	}

	/* polling 0x3CE4 is 0x2F43 */
	do {
		reg_data = read_reg(ts_data, 0x3C, 0xE4);
	} while (reg_data != 0x2F43);

	/* calculate CRC 32 */
	_CRC_initTable();

	/* total 32 KB : 2 byte per R/W */
	for (i = 0; i < 32; i++) {
		if (i == 31) {
			/* last page carries the 0xA55A end marker */
			fw_bin_data[i][1014] = 0x5A;
			fw_bin_data[i][1015] = 0xA5;

			for (j = 0; j < 1016; j++)
				crc_main = _CRC_getValue(fw_bin_data[i][j],
								crc_main);
		} else {
			for (j = 0; j < 1024; j++)
				crc_main = _CRC_getValue(fw_bin_data[i][j],
								crc_main);
		}

		/* stream the 1 KB page in 128-byte I2C bursts */
		for (j = 0; j < 8; j++)
			write_i2c_seq(ts_data, ts_data->client->addr,
					&fw_bin_data[i][j * 128], 128);
		msleep(100);

		/* polling 0x3CE4 is 0xD0BC */
		do {
			reg_data = read_reg(ts_data, 0x3C, 0xE4);
		} while (reg_data != 0xD0BC);

		write_reg(ts_data, 0x3C, 0xE4, 0x2F43);
	}

	if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
		/* write file done and check crc */
		write_reg(ts_data, 0x3C, 0xE4, 0x1380);
	}
	msleep(20);

	if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
		/* polling 0x3CE4 is 0x9432 */
		do {
			reg_data = read_reg(ts_data, 0x3C, 0xE4);
		} while (reg_data != 0x9432);
	}

	/* finalize the CRC-32 accumulators */
	crc_main = crc_main ^ 0xffffffff;
	crc_info = crc_info ^ 0xffffffff;

	if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
		/* CRC Main from TP */
		crc_main_tp = read_reg(ts_data, 0x3C, 0x80);
		crc_main_tp = (crc_main_tp << 16) |
				read_reg(ts_data, 0x3C, 0x82);

		/* CRC Info from TP */
		crc_info_tp = read_reg(ts_data, 0x3C, 0xA0);
		crc_info_tp = (crc_info_tp << 16) |
				read_reg(ts_data, 0x3C, 0xA2);
	}

	/* NOTE(review): crc_info/crc_info_tp are computed but never compared */
	update_pass = 1;
	if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
		if (crc_main_tp != crc_main)
			update_pass = 0;
	}

	if (!update_pass) {
		dev_err(dev, "update_C33 failed\n");
		msg21xx_reset_hw(ts_data->pdata);
		return 0;
	}

	dev_dbg(dev, "update_C33 OK\n");
	msg21xx_reset_hw(ts_data->pdata);
	return size;
}

/*
 * Ask the controller to compute the CRC-32 of its main firmware block
 * and return it.  The controller is left reset + re-run; polling has no
 * timeout (see NOTE above).
 */
static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data)
{
	unsigned int ret = 0;
	unsigned short reg_data = 0;

	msg21xx_reset_hw(ts_data->pdata);

	dbbusDWIICEnterSerialDebugMode(ts_data);
	dbbusDWIICStopMCU(ts_data);
	dbbusDWIICIICUseBus(ts_data);
	dbbusDWIICIICReshape(ts_data);
	msleep(100);

	/* Stop MCU */
	write_reg(ts_data, 0x0F, 0xE6, 0x0001);

	/* Stop Watchdog */
	write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
	write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);

	/* cmd */
	write_reg(ts_data, 0x3C, 0xE4, 0xDF4C);
	write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);

	/* TP SW reset */
	write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);

	/* MCU run */
	write_reg(ts_data, 0x0F, 0xE6, 0x0000);

	/* polling 0x3CE4 */
	do {
		reg_data = read_reg(ts_data, 0x3C, 0xE4);
	} while (reg_data != 0x9432);

	/* Cal CRC Main from TP */
	ret = read_reg(ts_data, 0x3C, 0x80);
	ret = (ret << 16) | read_reg(ts_data, 0x3C, 0x82);

	dev_dbg(&ts_data->client->dev,
		"[21xxA]:Current main crc32=0x%x\n", ret);
	return ret;
}

/*
 * Read the SW ids and the config CRC-32 out of the firmware binary area
 * on the controller, filling the file-scope main_sw_id / info_sw_id /
 * bin_conf_crc32 used by the update decision.
 */
static void _ReadBinConfig(struct msg21xx_ts_data *ts_data)
{
	unsigned char dbbus_tx_data[5] = {0};
	unsigned char dbbus_rx_data[4] = {0};
	unsigned short reg_data = 0;

	msg21xx_reset_hw(ts_data->pdata);

	dbbusDWIICEnterSerialDebugMode(ts_data);
	dbbusDWIICStopMCU(ts_data);
	dbbusDWIICIICUseBus(ts_data);
	dbbusDWIICIICReshape(ts_data);
	msleep(100);

	/* Stop MCU */
	write_reg(ts_data, 0x0F, 0xE6, 0x0001);

	/* Stop Watchdog */
write_reg_8bit(ts_data, 0x3C, 0x60, 0x55); write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA); /* cmd */ write_reg(ts_data, 0x3C, 0xE4, 0xA4AB); write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60); /* TP SW reset */ write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F); /* MCU run */ write_reg(ts_data, 0x0F, 0xE6, 0x0000); /* polling 0x3CE4 */ do { reg_data = read_reg(ts_data, 0x3C, 0xE4); } while (reg_data != 0x5B58); dbbus_tx_data[0] = 0x72; dbbus_tx_data[1] = 0x7F; dbbus_tx_data[2] = 0x55; dbbus_tx_data[3] = 0x00; dbbus_tx_data[4] = 0x04; write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5); read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4); if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39) && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39) && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) { main_sw_id = (dbbus_rx_data[0] - 0x30) * 100 + (dbbus_rx_data[1] - 0x30) * 10 + (dbbus_rx_data[2] - 0x30); } dbbus_tx_data[0] = 0x72; dbbus_tx_data[1] = 0x7F; dbbus_tx_data[2] = 0xFC; dbbus_tx_data[3] = 0x00; dbbus_tx_data[4] = 0x04; write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5); read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4); bin_conf_crc32 = (dbbus_rx_data[0] << 24) | (dbbus_rx_data[1] << 16) | (dbbus_rx_data[2] << 8) | (dbbus_rx_data[3]); dbbus_tx_data[0] = 0x72; dbbus_tx_data[1] = 0x83; dbbus_tx_data[2] = 0x00; dbbus_tx_data[3] = 0x00; dbbus_tx_data[4] = 0x04; write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5); read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4); if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39) && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39) && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) { info_sw_id = (dbbus_rx_data[0] - 0x30) * 100 + (dbbus_rx_data[1] - 0x30) * 10 + (dbbus_rx_data[2] - 0x30); } dev_dbg(&ts_data->client->dev, "[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32 = 
0x%x\n", main_sw_id, info_sw_id, bin_conf_crc32); } static ssize_t firmware_update_show(struct device *dev, struct device_attribute *attr, char *buf) { struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); return snprintf(buf, 3, "%d\n", ts_data->pdata->updating_fw); } static ssize_t firmware_update_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); ts_data->pdata->updating_fw = true; disable_irq(ts_data->client->irq); size = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false); enable_irq(ts_data->client->irq); ts_data->pdata->updating_fw = false; return size; } static DEVICE_ATTR(update, (S_IRUGO | S_IWUSR), firmware_update_show, firmware_update_store); static int prepare_fw_data(struct device *dev) { int count; int i; int ret; const struct firmware *fw = NULL; struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); ret = request_firmware(&fw, ts_data->pdata->fw_name, dev); if (ret < 0) { dev_err(dev, "Request firmware failed - %s (%d)\n", ts_data->pdata->fw_name, ret); return ret; } count = fw->size / 1024; for (i = 0; i < count; i++) memcpy(fw_bin_data[i], fw->data + (i * 1024), 1024); fw_file_major = MSG_FW_FILE_MAJOR_VERSION(fw); fw_file_minor = MSG_FW_FILE_MINOR_VERSION(fw); dev_dbg(dev, "New firmware: %d.%d", fw_file_major, fw_file_minor); return fw->size; } static ssize_t firmware_update_smart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); ret = prepare_fw_data(dev); if (ret < 0) { dev_err(dev, "Request firmware failed -(%d)\n", ret); return ret; } ts_data->pdata->updating_fw = true; disable_irq(ts_data->client->irq); ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false); if (ret == 0) dev_err(dev, "firmware_update_c33 ret = %d\n", ret); enable_irq(ts_data->client->irq); ts_data->pdata->updating_fw = false; return ret; } static ssize_t 
firmware_force_update_smart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); ret = prepare_fw_data(dev); if (ret < 0) { dev_err(dev, "Request firmware failed -(%d)\n", ret); return ret; } ts_data->pdata->updating_fw = true; disable_irq(ts_data->client->irq); ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, true); if (ret == 0) dev_err(dev, "firmware_update_c33 et = %d\n", ret); enable_irq(ts_data->client->irq); ts_data->pdata->updating_fw = false; return ret; } static DEVICE_ATTR(update_fw, (S_IRUGO | S_IWUSR), firmware_update_show, firmware_update_smart_store); static DEVICE_ATTR(force_update_fw, (S_IRUGO | S_IWUSR), firmware_update_show, firmware_force_update_smart_store); static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); msg21xx_read_firmware_id(ts_data); return snprintf(buf, sizeof(char) * 8, "%03d%03d\n", ts_data->pdata->fw_version_major, ts_data->pdata->fw_version_minor); } static DEVICE_ATTR(version, S_IRUGO, firmware_version_show, NULL); static ssize_t msg21xx_fw_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); return snprintf(buf, MSTAR_FW_NAME_MAX_LEN - 1, "%s\n", ts_data->pdata->fw_name); } static ssize_t msg21xx_fw_name_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev); if (size > MSTAR_FW_NAME_MAX_LEN - 1) return -EINVAL; strlcpy(ts_data->pdata->fw_name, buf, size); if (ts_data->pdata->fw_name[size - 1] == '\n') ts_data->pdata->fw_name[size - 1] = 0; return size; } static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR), msg21xx_fw_name_show, msg21xx_fw_name_store); static ssize_t firmware_data_store(struct device *dev, struct device_attribute *attr, const char *buf, 
					size_t size)
{
	int count = size / 1024;
	int i;

	for (i = 0; i < count; i++)
		memcpy(fw_bin_data[i], buf + (i * 1024), 1024);

	/*
	 * NOTE(review): buf was already dereferenced by memcpy() above, so
	 * this NULL check is ineffective; also count is not bounded against
	 * the size of fw_bin_data.
	 */
	if (buf != NULL)
		dev_dbg(dev, "buf[0] = %c\n", buf[0]);

	return size;
}

static DEVICE_ATTR(data, S_IWUSR, NULL, firmware_data_store);

/* sysfs "tpp" show: dump TP debug info and report suspend state. */
static ssize_t tp_print_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);

	tp_print_proc_read(ts_data);

	return snprintf(buf, 3, "%d\n", ts_data->suspended);
}

/* sysfs "tpp" store: accepted but ignored. */
static ssize_t tp_print_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t size)
{
	return size;
}

static DEVICE_ATTR(tpp, (S_IRUGO | S_IWUSR),
				tp_print_show, tp_print_store);

#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
/*
 * NOTE(review): both proximity helpers call write_i2c_seq() without the
 * leading msg21xx_ts_data argument and no ts_data is in scope here, so
 * this branch cannot compile as written when
 * CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR is enabled — confirm against the
 * full file (a file-scope ts_data may exist) and fix the call signature.
 */
static void _msg_enable_proximity(void)
{
	unsigned char tx_data[4] = {0};

	/* 0x52 0x00 0x47 0xa0: enable proximity sensing */
	tx_data[0] = 0x52;
	tx_data[1] = 0x00;
	tx_data[2] = 0x47;
	tx_data[3] = 0xa0;
	mutex_lock(&msg21xx_mutex);
	write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
	mutex_unlock(&msg21xx_mutex);

	bEnableTpProximity = 1;
}

static void _msg_disable_proximity(void)
{
	unsigned char tx_data[4] = {0};

	/* 0x52 0x00 0x47 0xa1: disable proximity sensing */
	tx_data[0] = 0x52;
	tx_data[1] = 0x00;
	tx_data[2] = 0x47;
	tx_data[3] = 0xa1;
	mutex_lock(&msg21xx_mutex);
	write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
	mutex_unlock(&msg21xx_mutex);

	bEnableTpProximity = 0;
	bFaceClosingTp = 0;
}

/* Callback for the proximity-assist framework: toggle sensing. */
static void tsps_msg21xx_enable(int en)
{
	if (en)
		_msg_enable_proximity();
	else
		_msg_disable_proximity();
}

/* Callback: 1 when a face is close to the panel, else 0. */
static int tsps_msg21xx_data(void)
{
	return bFaceClosingTp;
}
#endif

/*
 * Look up the optional pinctrl handle and its active/suspend/release
 * states.  On failure ts_pinctrl is left NULL and a negative errno is
 * returned; the caller treats pinctrl as optional.
 */
static int msg21xx_pinctrl_init(struct msg21xx_ts_data *ts_data)
{
	int retval;

	/* Get pinctrl if target uses pinctrl */
	ts_data->ts_pinctrl = devm_pinctrl_get(&(ts_data->client->dev));
	if (IS_ERR_OR_NULL(ts_data->ts_pinctrl)) {
		retval = PTR_ERR(ts_data->ts_pinctrl);
		dev_dbg(&ts_data->client->dev,
			"Target does not use pinctrl %d\n", retval);
		goto err_pinctrl_get;
	}

	ts_data->pinctrl_state_active = pinctrl_lookup_state(
			ts_data->ts_pinctrl, PINCTRL_STATE_ACTIVE);
	if
	(IS_ERR_OR_NULL(ts_data->pinctrl_state_active)) {
		retval = PTR_ERR(ts_data->pinctrl_state_active);
		dev_dbg(&ts_data->client->dev,
			"Can't lookup %s pinstate %d\n",
			PINCTRL_STATE_ACTIVE, retval);
		goto err_pinctrl_lookup;
	}

	ts_data->pinctrl_state_suspend = pinctrl_lookup_state(
			ts_data->ts_pinctrl, PINCTRL_STATE_SUSPEND);
	if (IS_ERR_OR_NULL(ts_data->pinctrl_state_suspend)) {
		retval = PTR_ERR(ts_data->pinctrl_state_suspend);
		dev_dbg(&ts_data->client->dev,
			"Can't lookup %s pinstate %d\n",
			PINCTRL_STATE_SUSPEND, retval);
		goto err_pinctrl_lookup;
	}

	/* the release state is optional: failure is logged, not fatal */
	ts_data->pinctrl_state_release = pinctrl_lookup_state(
			ts_data->ts_pinctrl, PINCTRL_STATE_RELEASE);
	if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
		retval = PTR_ERR(ts_data->pinctrl_state_release);
		dev_dbg(&ts_data->client->dev,
			"Can't lookup %s pinstate %d\n",
			PINCTRL_STATE_RELEASE, retval);
	}

	return 0;

err_pinctrl_lookup:
	devm_pinctrl_put(ts_data->ts_pinctrl);
err_pinctrl_get:
	ts_data->ts_pinctrl = NULL;
	return retval;
}

/*
 * 8-bit additive checksum: the value that makes the byte-sum of
 * msg[0..length-1] plus the checksum equal 0 (mod 256).
 */
static unsigned char calculate_checksum(unsigned char *msg, int length)
{
	int checksum = 0, i;

	for (i = 0; i < length; i++)
		checksum += msg[i];
	return (unsigned char)((-checksum) & 0xFF);
}

/*
 * Read one demo-mode packet from the controller and decode it into
 * ts_data->info (key code or up to two touch points).  Returns 0 on a
 * valid packet, -EINVAL on checksum/header/key errors.
 */
static int parse_info(struct msg21xx_ts_data *ts_data)
{
	unsigned char data[DEMO_MODE_PACKET_LENGTH] = {0};
	unsigned char checksum = 0;
	unsigned int x = 0, y = 0;
	unsigned int x2 = 0, y2 = 0;
	unsigned int delta_x = 0, delta_y = 0;

	mutex_lock(&msg21xx_mutex);
	read_i2c_seq(ts_data, ts_data->client->addr, &data[0],
				DEMO_MODE_PACKET_LENGTH);
	mutex_unlock(&msg21xx_mutex);

	checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
	dev_dbg(&ts_data->client->dev, "check sum: [%x] == [%x]?\n",
				data[DEMO_MODE_PACKET_LENGTH-1], checksum);

	if (data[DEMO_MODE_PACKET_LENGTH-1] != checksum) {
		dev_err(&ts_data->client->dev, "WRONG CHECKSUM\n");
		return -EINVAL;
	}

	if (data[0] != 0x52) {
		dev_err(&ts_data->client->dev, "WRONG HEADER\n");
		return -EINVAL;
	}

	ts_data->info.keycode = 0xFF;
	/* all-0xFF coordinates mark a key / proximity packet */
	if ((data[1] == 0xFF) && (data[2] ==
		0xFF) && (data[3] == 0xFF) &&
		(data[4] == 0xFF) && (data[6] == 0xFF)) {
		if ((data[5] == 0xFF) || (data[5] == 0)) {
			/* no key pressed */
			ts_data->info.keycode = 0xFF;
		} else if ((data[5] == 1) || (data[5] == 2) ||
					(data[5] == 4) || (data[5] == 8)) {
			/*
			 * NOTE(review): >>1 maps 1/2/4 to indices 0/1/2 but
			 * maps 8 to 4 (not 3); a 4th button would be rejected
			 * by the num_buttons bound in the IRQ handler —
			 * confirm intended button mapping.
			 */
			ts_data->info.keycode = data[5] >> 1;

			dev_dbg(&ts_data->client->dev,
				"ts_data->info.keycode index %d\n",
				ts_data->info.keycode);
		}
#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
		else if (bEnableTpProximity && ((data[5] == 0x80) ||
					(data[5] == 0x40))) {
			/* proximity event: 0x80 = face close, 0x40 = away */
			if (data[5] == 0x80)
				bFaceClosingTp = 1;
			else if (data[5] == 0x40)
				bFaceClosingTp = 0;

			return -EINVAL;
		}
#endif
		else {
			dev_err(&ts_data->client->dev, "WRONG KEY\n");
			return -EINVAL;
		}
	} else {
		/* first point is absolute; second is delta-encoded */
		x = (((data[1] & 0xF0) << 4) | data[2]);
		y = (((data[1] & 0x0F) << 8) | data[3]);
		delta_x = (((data[4] & 0xF0) << 4) | data[5]);
		delta_y = (((data[4] & 0x0F) << 8) | data[6]);

		if ((delta_x == 0) && (delta_y == 0)) {
			/* single touch: scale panel units to screen units */
			ts_data->info.point[0].x =
					x * ts_data->pdata->x_max / TPD_WIDTH;
			ts_data->info.point[0].y =
					y * ts_data->pdata->y_max / TPD_HEIGHT;
			ts_data->info.count = 1;
		} else {
			/* deltas are 12-bit two's complement */
			if (delta_x > 2048)
				delta_x -= 4096;

			if (delta_y > 2048)
				delta_y -= 4096;

			x2 = (unsigned int)((signed short)x +
						(signed short)delta_x);
			y2 = (unsigned int)((signed short)y +
						(signed short)delta_y);
			ts_data->info.point[0].x =
					x * ts_data->pdata->x_max / TPD_WIDTH;
			ts_data->info.point[0].y =
					y * ts_data->pdata->y_max / TPD_HEIGHT;
			ts_data->info.point[1].x =
					x2 * ts_data->pdata->x_max / TPD_WIDTH;
			ts_data->info.point[1].y =
					y2 * ts_data->pdata->y_max / TPD_HEIGHT;
			ts_data->info.count = ts_data->pdata->num_max_touches;
		}
	}

	return 0;
}

/* Release every MT slot and the touch/finger keys (used on suspend). */
static void touch_driver_touch_released(struct msg21xx_ts_data *ts_data)
{
	int i;

	for (i = 0; i < ts_data->pdata->num_max_touches; i++) {
		input_mt_slot(ts_data->input_dev, i);
		input_mt_report_slot_state(ts_data->input_dev,
				MT_TOOL_FINGER, 0);
	}

	input_report_key(ts_data->input_dev, BTN_TOUCH, 0);
	input_report_key(ts_data->input_dev, BTN_TOOL_FINGER, 0);
	input_sync(ts_data->input_dev);
}

/* read data through
I2C then report data to input sub-system when interrupt occurred */
static irqreturn_t msg21xx_ts_interrupt(int irq, void *dev_id)
{
	int i = 0;
	/* IRQ state kept across invocations for key/slot release tracking */
	static int last_keycode = 0xFF;
	static int last_count;
	struct msg21xx_ts_data *ts_data = dev_id;

	ts_data->info.count = 0;
	if (0 == parse_info(ts_data)) {
		if (ts_data->info.keycode != 0xFF) {	/* key touch pressed */
			if (ts_data->info.keycode <
					ts_data->pdata->num_buttons) {
				if (ts_data->info.keycode != last_keycode) {
					dev_dbg(&ts_data->client->dev,
						"key touch pressed");

					input_report_key(ts_data->input_dev,
							BTN_TOUCH, 1);
					input_report_key(ts_data->input_dev,
						ts_data->pdata->button_map[
						ts_data->info.keycode], 1);

					last_keycode = ts_data->info.keycode;
				} else {
					/* pass duplicate key-pressing */
					dev_dbg(&ts_data->client->dev,
						"REPEATED KEY\n");
				}
			} else {
				dev_dbg(&ts_data->client->dev, "WRONG KEY\n");
			}
		} else {	/* key touch released */
			if (last_keycode != 0xFF) {
				dev_dbg(&ts_data->client->dev,
						"key touch released");

				input_report_key(ts_data->input_dev,
						BTN_TOUCH, 0);
				input_report_key(ts_data->input_dev,
					ts_data->pdata->button_map[last_keycode],
					0);

				last_keycode = 0xFF;
			}
		}

		if (ts_data->info.count > 0)	{ /* point touch pressed */
			for (i = 0; i < ts_data->info.count; i++) {
				input_mt_slot(ts_data->input_dev, i);
				input_mt_report_slot_state(ts_data->input_dev,
						MT_TOOL_FINGER, 1);
				input_report_abs(ts_data->input_dev,
						ABS_MT_TOUCH_MAJOR, 1);
				input_report_abs(ts_data->input_dev,
						ABS_MT_POSITION_X,
						ts_data->info.point[i].x);
				input_report_abs(ts_data->input_dev,
						ABS_MT_POSITION_Y,
						ts_data->info.point[i].y);
			}
		}

		/* release slots that were active last time but not now */
		if (last_count > ts_data->info.count) {
			for (i = ts_data->info.count;
					i < ts_data->pdata->num_max_touches;
					i++) {
				input_mt_slot(ts_data->input_dev, i);
				input_mt_report_slot_state(ts_data->input_dev,
						MT_TOOL_FINGER, 0);
			}
		}
		last_count = ts_data->info.count;

		input_report_key(ts_data->input_dev, BTN_TOUCH,
					ts_data->info.count > 0);
		input_report_key(ts_data->input_dev, BTN_TOOL_FINGER,
					ts_data->info.count > 0);
		input_sync(ts_data->input_dev);
	}

	return IRQ_HANDLED;
}

/*
 * Acquire (init=true) or release (init=false) the vdd/vcc_i2c regulators
 * and program their voltage ranges.  Returns 0 or a negative errno.
 */
static int msg21xx_ts_power_init(struct msg21xx_ts_data *ts_data, bool init)
{
	int rc;

	if (init) {
		ts_data->vdd = regulator_get(&ts_data->client->dev, "vdd");
		if (IS_ERR(ts_data->vdd)) {
			rc = PTR_ERR(ts_data->vdd);
			dev_err(&ts_data->client->dev,
				"Regulator get failed vdd rc=%d\n", rc);
			return rc;
		}

		if (regulator_count_voltages(ts_data->vdd) > 0) {
			rc = regulator_set_voltage(ts_data->vdd,
					MSTAR_VTG_MIN_UV, MSTAR_VTG_MAX_UV);
			if (rc) {
				dev_err(&ts_data->client->dev,
					"Regulator set_vtg failed vdd rc=%d\n",
					rc);
				goto reg_vdd_put;
			}
		}

		ts_data->vcc_i2c = regulator_get(&ts_data->client->dev,
							"vcc_i2c");
		if (IS_ERR(ts_data->vcc_i2c)) {
			rc = PTR_ERR(ts_data->vcc_i2c);
			dev_err(&ts_data->client->dev,
				"Regulator get failed vcc_i2c rc=%d\n", rc);
			goto reg_vdd_set_vtg;
		}

		if (regulator_count_voltages(ts_data->vcc_i2c) > 0) {
			rc = regulator_set_voltage(ts_data->vcc_i2c,
				MSTAR_I2C_VTG_MIN_UV, MSTAR_I2C_VTG_MAX_UV);
			if (rc) {
				dev_err(&ts_data->client->dev,
				"Regulator set_vtg failed vcc_i2c rc=%d\n",
				rc);
				goto reg_vcc_i2c_put;
			}
		}
	} else {
		/* teardown: drop voltage requests and release regulators */
		if (regulator_count_voltages(ts_data->vdd) > 0)
			regulator_set_voltage(ts_data->vdd, 0,
						MSTAR_VTG_MAX_UV);
		regulator_put(ts_data->vdd);
		if (regulator_count_voltages(ts_data->vcc_i2c) > 0)
			regulator_set_voltage(ts_data->vcc_i2c, 0,
						MSTAR_I2C_VTG_MAX_UV);
		regulator_put(ts_data->vcc_i2c);
	}
	return 0;

reg_vcc_i2c_put:
	regulator_put(ts_data->vcc_i2c);
reg_vdd_set_vtg:
	if (regulator_count_voltages(ts_data->vdd) > 0)
		regulator_set_voltage(ts_data->vdd, 0, MSTAR_VTG_MAX_UV);
reg_vdd_put:
	regulator_put(ts_data->vdd);
	return rc;
}

/* Enable (on=true) or disable both supplies; undoes partial enables. */
static int msg21xx_ts_power_on(struct msg21xx_ts_data *ts_data, bool on)
{
	int rc;

	if (!on)
		goto power_off;

	rc = regulator_enable(ts_data->vdd);
	if (rc) {
		dev_err(&ts_data->client->dev,
			"Regulator vdd enable failed rc=%d\n", rc);
		return rc;
	}

	rc = regulator_enable(ts_data->vcc_i2c);
	if (rc) {
		dev_err(&ts_data->client->dev,
			"Regulator vcc_i2c enable failed rc=%d\n", rc);
regulator_disable(ts_data->vdd); } return rc; power_off: rc = regulator_disable(ts_data->vdd); if (rc) { dev_err(&ts_data->client->dev, "Regulator vdd disable failed rc=%d\n", rc); return rc; } rc = regulator_disable(ts_data->vcc_i2c); if (rc) { dev_err(&ts_data->client->dev, "Regulator vcc_i2c disable failed rc=%d\n", rc); rc = regulator_enable(ts_data->vdd); } return rc; } static int msg21xx_ts_gpio_configure(struct msg21xx_ts_data *ts_data, bool on) { int ret = 0; if (!on) goto pwr_deinit; if (gpio_is_valid(ts_data->pdata->irq_gpio)) { ret = gpio_request(ts_data->pdata->irq_gpio, "msg21xx_irq_gpio"); if (ret) { dev_err(&ts_data->client->dev, "Failed to request GPIO[%d], %d\n", ts_data->pdata->irq_gpio, ret); goto err_irq_gpio_req; } ret = gpio_direction_input(ts_data->pdata->irq_gpio); if (ret) { dev_err(&ts_data->client->dev, "Failed to set direction for gpio[%d], %d\n", ts_data->pdata->irq_gpio, ret); goto err_irq_gpio_dir; } gpio_set_value_cansleep(ts_data->pdata->irq_gpio, 1); } else { dev_err(&ts_data->client->dev, "irq gpio not provided\n"); goto err_irq_gpio_req; } if (gpio_is_valid(ts_data->pdata->reset_gpio)) { ret = gpio_request(ts_data->pdata->reset_gpio, "msg21xx_reset_gpio"); if (ret) { dev_err(&ts_data->client->dev, "Failed to request GPIO[%d], %d\n", ts_data->pdata->reset_gpio, ret); goto err_reset_gpio_req; } /* power on TP */ ret = gpio_direction_output( ts_data->pdata->reset_gpio, 1); if (ret) { dev_err(&ts_data->client->dev, "Failed to set direction for GPIO[%d], %d\n", ts_data->pdata->reset_gpio, ret); goto err_reset_gpio_dir; } msleep(100); gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0); msleep(20); gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 1); msleep(200); } else { dev_err(&ts_data->client->dev, "reset gpio not provided\n"); goto err_reset_gpio_req; } return 0; err_reset_gpio_dir: if (gpio_is_valid(ts_data->pdata->reset_gpio)) gpio_free(ts_data->pdata->irq_gpio); err_reset_gpio_req: err_irq_gpio_dir: if 
	(gpio_is_valid(ts_data->pdata->irq_gpio))
		gpio_free(ts_data->pdata->irq_gpio);
err_irq_gpio_req:
	return ret;

pwr_deinit:
	/* release path: free IRQ gpio and park reset as an input */
	if (gpio_is_valid(ts_data->pdata->irq_gpio))
		gpio_free(ts_data->pdata->irq_gpio);
	if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
		gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
		ret = gpio_direction_input(ts_data->pdata->reset_gpio);
		if (ret)
			dev_err(&ts_data->client->dev,
				"Unable to set direction for gpio [%d]\n",
				ts_data->pdata->reset_gpio);
		gpio_free(ts_data->pdata->reset_gpio);
	}

	return 0;
}

#ifdef CONFIG_PM
/*
 * Resume: power on, restore pinctrl/gpio state and re-enable the IRQ.
 * No-op when not suspended.
 */
static int msg21xx_ts_resume(struct device *dev)
{
	int retval;
	struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);

	if (!ts_data->suspended) {
		dev_info(dev, "msg21xx_ts already in resume\n");
		return 0;
	}

	mutex_lock(&ts_data->ts_mutex);

	retval = msg21xx_ts_power_on(ts_data, true);
	if (retval) {
		dev_err(dev, "msg21xx_ts power on failed");
		mutex_unlock(&ts_data->ts_mutex);
		return retval;
	}

	if (ts_data->ts_pinctrl) {
		retval = pinctrl_select_state(ts_data->ts_pinctrl,
				ts_data->pinctrl_state_active);
		if (retval < 0) {
			dev_err(dev, "Cannot get active pinctrl state\n");
			mutex_unlock(&ts_data->ts_mutex);
			return retval;
		}
	}

	retval = msg21xx_ts_gpio_configure(ts_data, true);
	if (retval) {
		dev_err(dev, "Failed to put gpios in active state %d",
				retval);
		mutex_unlock(&ts_data->ts_mutex);
		return retval;
	}

	enable_irq(ts_data->client->irq);
	ts_data->suspended = false;

	mutex_unlock(&ts_data->ts_mutex);

	return 0;
}

/*
 * Suspend: release touches, park pinctrl/gpio and power down.  Skipped
 * while a firmware update is running or proximity sensing is active.
 */
static int msg21xx_ts_suspend(struct device *dev)
{
	int retval;
	struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);

	if (ts_data->pdata->updating_fw) {
		dev_info(dev, "Firmware loading in progress\n");
		return 0;
	}

	if (ts_data->suspended) {
		dev_info(dev, "msg21xx_ts already in suspend\n");
		return 0;
	}

#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
	if (bEnableTpProximity) {
		dev_dbg(dev, "suspend bEnableTpProximity=%d\n",
				bEnableTpProximity);
		return 0;
	}
#endif

	mutex_lock(&ts_data->ts_mutex);

	disable_irq(ts_data->client->irq);
	touch_driver_touch_released(ts_data);

	if (ts_data->ts_pinctrl) {
		retval = pinctrl_select_state(ts_data->ts_pinctrl,
				ts_data->pinctrl_state_suspend);
		if (retval < 0) {
			dev_err(dev, "Cannot get idle pinctrl state %d\n",
					retval);
			mutex_unlock(&ts_data->ts_mutex);
			return retval;
		}
	}

	retval = msg21xx_ts_gpio_configure(ts_data, false);
	if (retval) {
		dev_err(dev, "Failed to put gpios in idle state %d",
				retval);
		mutex_unlock(&ts_data->ts_mutex);
		return retval;
	}

	retval = msg21xx_ts_power_on(ts_data, false);
	if (retval) {
		dev_err(dev, "msg21xx_ts power off failed");
		mutex_unlock(&ts_data->ts_mutex);
		return retval;
	}

	ts_data->suspended = true;

	mutex_unlock(&ts_data->ts_mutex);

	return 0;
}
#else
/* CONFIG_PM disabled: PM callbacks are no-ops. */
static int msg21xx_ts_resume(struct device *dev)
{
	return 0;
}
static int msg21xx_ts_suspend(struct device *dev)
{
	return 0;
}
#endif

/* debugfs "suspend" write: 1 suspends the device, 0 resumes it. */
static int msg21xx_debug_suspend_set(void *_data, u64 val)
{
	struct msg21xx_ts_data *data = _data;

	mutex_lock(&data->input_dev->mutex);

	if (val)
		msg21xx_ts_suspend(&data->client->dev);
	else
		msg21xx_ts_resume(&data->client->dev);

	mutex_unlock(&data->input_dev->mutex);

	return 0;
}

/* debugfs "suspend" read: current suspended state. */
static int msg21xx_debug_suspend_get(void *_data, u64 *val)
{
	struct msg21xx_ts_data *data = _data;

	mutex_lock(&data->input_dev->mutex);
	*val = data->suspended;
	mutex_unlock(&data->input_dev->mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, msg21xx_debug_suspend_get,
			msg21xx_debug_suspend_set, "%lld\n");

#if defined(CONFIG_FB)
/* Framebuffer blank/unblank notifier: mirror display power state. */
static int fb_notifier_callback(struct notifier_block *self,
				unsigned long event, void *data)
{
	struct fb_event *evdata = data;
	int *blank;
	struct msg21xx_ts_data *ts_data =
		container_of(self, struct msg21xx_ts_data, fb_notif);

	if (evdata && evdata->data && event == FB_EVENT_BLANK) {
		blank = evdata->data;
		if (*blank == FB_BLANK_UNBLANK)
			msg21xx_ts_resume(&ts_data->client->dev);
		else if (*blank == FB_BLANK_POWERDOWN)
			msg21xx_ts_suspend(&ts_data->client->dev);
	}

	return 0;
}
#endif

static int msg21xx_get_dt_coords(struct device *dev, char
				*name, struct msg21xx_ts_platform_data *pdata)
{
	u32 coords[FT_COORDS_ARR_SIZE];
	struct property *prop;
	struct device_node *np = dev->of_node;
	int coords_size, rc;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -EINVAL;
	if (!prop->value)
		return -ENODATA;

	/* property must hold exactly FT_COORDS_ARR_SIZE u32 cells */
	coords_size = prop->length / sizeof(u32);
	if (coords_size != FT_COORDS_ARR_SIZE) {
		dev_err(dev, "invalid %s\n", name);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(np, name, coords, coords_size);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read %s\n", name);
		return rc;
	}

	if (!strcmp(name, "mstar,panel-coords")) {
		pdata->panel_minx = coords[0];
		pdata->panel_miny = coords[1];
		pdata->panel_maxx = coords[2];
		pdata->panel_maxy = coords[3];
	} else if (!strcmp(name, "mstar,display-coords")) {
		pdata->x_min = coords[0];
		pdata->y_min = coords[1];
		pdata->x_max = coords[2];
		pdata->y_max = coords[3];
	} else {
		dev_err(dev, "unsupported property %s\n", name);
		return -EINVAL;
	}

	return 0;
}

/*
 * Fill pdata from the device-tree node: coordinates, reset timing,
 * GPIOs, IC type, touch count and the optional key button map.
 */
static int msg21xx_parse_dt(struct device *dev,
				struct msg21xx_ts_platform_data *pdata)
{
	int rc;
	struct device_node *np = dev->of_node;
	struct property *prop;
	u32 temp_val;

	rc = msg21xx_get_dt_coords(dev, "mstar,panel-coords", pdata);
	if (rc && (rc != -EINVAL))
		return rc;

	rc = msg21xx_get_dt_coords(dev, "mstar,display-coords", pdata);
	if (rc)
		return rc;

	rc = of_property_read_u32(np, "mstar,hard-reset-delay-ms",
							&temp_val);
	if (!rc)
		pdata->hard_reset_delay_ms = temp_val;
	else
		return rc;

	rc = of_property_read_u32(np, "mstar,post-hard-reset-delay-ms",
							&temp_val);
	if (!rc)
		pdata->post_hard_reset_delay_ms = temp_val;
	else
		return rc;

	/* reset, irq gpio info */
	pdata->reset_gpio = of_get_named_gpio_flags(np, "mstar,reset-gpio",
				0, &pdata->reset_gpio_flags);
	if (pdata->reset_gpio < 0)
		return pdata->reset_gpio;

	pdata->irq_gpio = of_get_named_gpio_flags(np, "mstar,irq-gpio",
				0, &pdata->irq_gpio_flags);
	if (pdata->irq_gpio < 0)
		return pdata->irq_gpio;

	rc = of_property_read_u32(np, "mstar,ic-type", &temp_val);
	if (rc && (rc !=
-EINVAL)) return rc; pdata->ic_type = temp_val; rc = of_property_read_u32(np, "mstar,num-max-touches", &temp_val); if (!rc) pdata->num_max_touches = temp_val; else return rc; prop = of_find_property(np, "mstar,button-map", NULL); if (prop) { pdata->num_buttons = prop->length / sizeof(temp_val); if (pdata->num_buttons > MAX_BUTTONS) return -EINVAL; rc = of_property_read_u32_array(np, "mstar,button-map", pdata->button_map, pdata->num_buttons); if (rc) { dev_err(dev, "Unable to read key codes\n"); return rc; } } return 0; } /* probe function is used for matching and initializing input device */ static int msg21xx_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0, i; struct dentry *temp, *dir; struct input_dev *input_dev; struct msg21xx_ts_data *ts_data; struct msg21xx_ts_platform_data *pdata; if (client->dev.of_node) { pdata = devm_kzalloc(&client->dev, sizeof(struct msg21xx_ts_platform_data), GFP_KERNEL); if (!pdata) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } ret = msg21xx_parse_dt(&client->dev, pdata); if (ret) { dev_err(&client->dev, "DT parsing failed\n"); return ret; } } else pdata = client->dev.platform_data; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "I2C not supported\n"); return -ENODEV; } ts_data = devm_kzalloc(&client->dev, sizeof(struct msg21xx_ts_data), GFP_KERNEL); if (!ts_data) { dev_err(&client->dev, "Not enough memory\n"); return -ENOMEM; } ts_data->client = client; ts_data->info.point = devm_kzalloc(&client->dev, sizeof(struct touchPoint_t) * pdata->num_max_touches, GFP_KERNEL); if (!ts_data->info.point) { dev_err(&client->dev, "Not enough memory\n"); return -ENOMEM; } /* allocate an input device */ input_dev = input_allocate_device(); if (!input_dev) { ret = -ENOMEM; dev_err(&client->dev, "input device allocation failed\n"); goto err_input_allocate_dev; } input_dev->name = client->name; input_dev->phys = "I2C"; input_dev->dev.parent = 
&client->dev; input_dev->id.bustype = BUS_I2C; ts_data->input_dev = input_dev; ts_data->client = client; ts_data->pdata = pdata; input_set_drvdata(input_dev, ts_data); i2c_set_clientdata(client, ts_data); ret = msg21xx_ts_power_init(ts_data, true); if (ret) { dev_err(&client->dev, "Mstar power init failed\n"); return ret; } ret = msg21xx_ts_power_on(ts_data, true); if (ret) { dev_err(&client->dev, "Mstar power on failed\n"); goto exit_deinit_power; } ret = msg21xx_pinctrl_init(ts_data); if (!ret && ts_data->ts_pinctrl) { /* * Pinctrl handle is optional. If pinctrl handle is found * let pins to be configured in active state. If not * found continue further without error. */ ret = pinctrl_select_state(ts_data->ts_pinctrl, ts_data->pinctrl_state_active); if (ret < 0) dev_err(&client->dev, "Failed to select %s pinatate %d\n", PINCTRL_STATE_ACTIVE, ret); } ret = msg21xx_ts_gpio_configure(ts_data, true); if (ret) { dev_err(&client->dev, "Failed to configure gpio %d\n", ret); goto exit_gpio_config; } if (msg21xx_get_ic_type(ts_data) == 0) { dev_err(&client->dev, "The current IC is not Mstar\n"); ret = -1; goto err_wrong_ic_type; } mutex_init(&msg21xx_mutex); mutex_init(&ts_data->ts_mutex); /* set the supported event type for input device */ set_bit(EV_ABS, input_dev->evbit); set_bit(EV_SYN, input_dev->evbit); set_bit(EV_KEY, input_dev->evbit); set_bit(BTN_TOUCH, input_dev->keybit); set_bit(BTN_TOOL_FINGER, input_dev->keybit); set_bit(INPUT_PROP_DIRECT, input_dev->propbit); for (i = 0; i < pdata->num_buttons; i++) input_set_capability(input_dev, EV_KEY, pdata->button_map[i]); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0); ret = input_mt_init_slots(input_dev, pdata->num_max_touches, 0); if (ret) { dev_err(&client->dev, "Error %d initialising slots\n", ret); goto err_free_mem; } /* register the input device to 
input sub-system */ ret = input_register_device(input_dev); if (ret < 0) { dev_err(&client->dev, "Unable to register ms-touchscreen input device\n"); goto err_input_reg_dev; } /* version */ if (device_create_file(&client->dev, &dev_attr_version) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_version.attr.name); goto err_create_fw_ver_file; } /* update */ if (device_create_file(&client->dev, &dev_attr_update) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_update.attr.name); goto err_create_fw_update_file; } /* data */ if (device_create_file(&client->dev, &dev_attr_data) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_data.attr.name); goto err_create_fw_data_file; } /* fw name */ if (device_create_file(&client->dev, &dev_attr_fw_name) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_fw_name.attr.name); goto err_create_fw_name_file; } /* smart fw update */ if (device_create_file(&client->dev, &dev_attr_update_fw) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_update_fw.attr.name); goto err_create_update_fw_file; } /* smart fw force update */ if (device_create_file(&client->dev, &dev_attr_force_update_fw) < 0) { dev_err(&client->dev, "Failed to create device file(%s)!\n", dev_attr_force_update_fw.attr.name); goto err_create_force_update_fw_file; } dir = debugfs_create_dir(MSTAR_DEBUG_DIR_NAME, NULL); temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, dir, ts_data, &debug_suspend_fops); if (temp == NULL || IS_ERR(temp)) { dev_err(&client->dev, "debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp)); goto free_debug_dir; } #ifdef TP_PRINT tp_print_create_entry(ts_data); #endif ret = request_threaded_irq(client->irq, NULL, msg21xx_ts_interrupt, pdata->irq_gpio_flags | IRQF_ONESHOT, "msg21xx", ts_data); if (ret) goto err_req_irq; disable_irq(client->irq); #if defined(CONFIG_FB) ts_data->fb_notif.notifier_call = 
fb_notifier_callback; ret = fb_register_client(&ts_data->fb_notif); #endif #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable, &tsps_msg21xx_data); #endif dev_dbg(&client->dev, "mstar touch screen registered\n"); enable_irq(client->irq); return 0; err_req_irq: free_irq(client->irq, ts_data); device_remove_file(&client->dev, &dev_attr_data); free_debug_dir: debugfs_remove_recursive(dir); err_create_fw_data_file: device_remove_file(&client->dev, &dev_attr_update); err_create_fw_update_file: device_remove_file(&client->dev, &dev_attr_version); err_create_fw_name_file: device_remove_file(&client->dev, &dev_attr_fw_name); err_create_update_fw_file: device_remove_file(&client->dev, &dev_attr_update_fw); err_create_force_update_fw_file: device_remove_file(&client->dev, &dev_attr_force_update_fw); err_create_fw_ver_file: input_unregister_device(input_dev); err_input_reg_dev: input_free_device(input_dev); input_dev = NULL; err_input_allocate_dev: mutex_destroy(&msg21xx_mutex); mutex_destroy(&ts_data->ts_mutex); err_wrong_ic_type: msg21xx_ts_gpio_configure(ts_data, false); exit_gpio_config: if (ts_data->ts_pinctrl) { if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) { devm_pinctrl_put(ts_data->ts_pinctrl); ts_data->ts_pinctrl = NULL; } else { ret = pinctrl_select_state(ts_data->ts_pinctrl, ts_data->pinctrl_state_release); if (ret < 0) dev_err(&ts_data->client->dev, "Cannot get release pinctrl state\n"); } } msg21xx_ts_power_on(ts_data, false); exit_deinit_power: msg21xx_ts_power_init(ts_data, false); err_free_mem: input_free_device(input_dev); return ret; } /* remove function is triggered when the input device is removed from input sub-system */ static int touch_driver_remove(struct i2c_client *client) { int retval = 0; struct msg21xx_ts_data *ts_data = i2c_get_clientdata(client); free_irq(ts_data->client->irq, ts_data); gpio_free(ts_data->pdata->irq_gpio); gpio_free(ts_data->pdata->reset_gpio); if (ts_data->ts_pinctrl) 
{ if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) { devm_pinctrl_put(ts_data->ts_pinctrl); ts_data->ts_pinctrl = NULL; } else { retval = pinctrl_select_state(ts_data->ts_pinctrl, ts_data->pinctrl_state_release); if (retval < 0) dev_err(&ts_data->client->dev, "Cannot get release pinctrl state\n"); } } input_unregister_device(ts_data->input_dev); mutex_destroy(&msg21xx_mutex); mutex_destroy(&ts_data->ts_mutex); return retval; } /* The I2C device list is used for matching I2C device and I2C device driver. */ static const struct i2c_device_id touch_device_id[] = { {"msg21xx", 0}, {}, /* should not omitted */ }; static struct of_device_id msg21xx_match_table[] = { { .compatible = "mstar,msg21xx", }, { }, }; MODULE_DEVICE_TABLE(i2c, touch_device_id); static struct i2c_driver touch_device_driver = { .driver = { .name = "ms-msg21xx", .owner = THIS_MODULE, .of_match_table = msg21xx_match_table, }, .probe = msg21xx_ts_probe, .remove = touch_driver_remove, .id_table = touch_device_id, }; module_i2c_driver(touch_device_driver); #ifdef TP_PRINT #include <linux/proc_fs.h> static unsigned short InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256; static unsigned char row, units, cnt; static int tp_print_proc_read(struct msg21xx_ts_data *ts_data) { unsigned short i, j; unsigned short left, offset = 0; unsigned char dbbus_tx_data[3] = {0}; unsigned char u8Data; signed short s16Data; int s32Data; char *buf = NULL; left = cnt*row*units; if ((ts_data->suspended == 0) && (InfoAddr != 0x0F) && (PoolAddr != 0x10) && (left > 0)) { buf = kmalloc(left, GFP_KERNEL); if (buf != NULL) { while (left > 0) { dbbus_tx_data[0] = 0x53; dbbus_tx_data[1] = ((PoolAddr + offset) >> 8) & 0xFF; dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF; mutex_lock(&msg21xx_mutex); write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3); read_i2c_seq(ts_data, ts_data->client->addr, &buf[offset], left > TransLen ? 
TransLen : left); mutex_unlock(&msg21xx_mutex); if (left > TransLen) { left -= TransLen; offset += TransLen; } else { left = 0; } } for (i = 0; i < cnt; i++) { for (j = 0; j < row; j++) { if (units == 1) { u8Data = buf[i * row * units + j * units]; } else if (units == 2) { s16Data = buf[i * row * units + j * units] + (buf[i * row * units + j * units + 1] << 8); } else if (units == 4) { s32Data = buf[i * row * units + j * units] + (buf[i * row * units + j * units + 1] << 8) + (buf[i * row * units + j * units + 2] << 16) + (buf[i * row * units + j * units + 3] << 24); } } } kfree(buf); } } return 0; } static void tp_print_create_entry(struct msg21xx_ts_data *ts_data) { unsigned char dbbus_tx_data[3] = {0}; unsigned char dbbus_rx_data[8] = {0}; dbbus_tx_data[0] = 0x53; dbbus_tx_data[1] = 0x00; dbbus_tx_data[2] = 0x58; mutex_lock(&msg21xx_mutex); write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3); read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4); mutex_unlock(&msg21xx_mutex); InfoAddr = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0]; PoolAddr = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2]; if ((InfoAddr != 0x0F) && (PoolAddr != 0x10)) { msleep(20); dbbus_tx_data[0] = 0x53; dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF; dbbus_tx_data[2] = InfoAddr & 0xFF; mutex_lock(&msg21xx_mutex); write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3); read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 8); mutex_unlock(&msg21xx_mutex); units = dbbus_rx_data[0]; row = dbbus_rx_data[1]; cnt = dbbus_rx_data[2]; TransLen = (dbbus_rx_data[7]<<8) + dbbus_rx_data[6]; if (device_create_file(&ts_data->client->dev, &dev_attr_tpp) < 0) dev_err(&ts_data->client->dev, "Failed to create device file(%s)!\n", dev_attr_tpp.attr.name); } } #endif MODULE_AUTHOR("MStar Semiconductor, Inc."); MODULE_LICENSE("GPL v2");
gpl-2.0
jomeister15/SGH-I727-kernel
scripts/dtc/treesource.c
1508
5941
/* * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation. 2005. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include "dtc.h" #include "srcpos.h" extern FILE *yyin; extern int yyparse(void); struct boot_info *the_boot_info; int treesource_error; struct boot_info *dt_from_source(const char *fname) { the_boot_info = NULL; treesource_error = 0; srcpos_file = dtc_open_file(fname, NULL); yyin = srcpos_file->file; if (yyparse() != 0) die("Unable to parse input tree\n"); if (treesource_error) die("Syntax error parsing input tree\n"); return the_boot_info; } static void write_prefix(FILE *f, int level) { int i; for (i = 0; i < level; i++) fputc('\t', f); } static int isstring(char c) { return (isprint(c) || (c == '\0') || strchr("\a\b\t\n\v\f\r", c)); } static void write_propval_string(FILE *f, struct data val) { const char *str = val.val; int i; int newchunk = 1; struct marker *m = val.markers; assert(str[val.len-1] == '\0'); for (i = 0; i < (val.len-1); i++) { char c = str[i]; if (newchunk) { while (m && (m->offset <= i)) { if (m->type == LABEL) { assert(m->offset == i); fprintf(f, "%s: ", m->ref); } m = m->next; } fprintf(f, "\""); newchunk = 0; } switch (c) { case '\a': fprintf(f, "\\a"); break; case '\b': fprintf(f, "\\b"); break; case '\t': fprintf(f, "\\t"); break; case '\n': fprintf(f, "\\n"); break; case 
'\v': fprintf(f, "\\v"); break; case '\f': fprintf(f, "\\f"); break; case '\r': fprintf(f, "\\r"); break; case '\\': fprintf(f, "\\\\"); break; case '\"': fprintf(f, "\\\""); break; case '\0': fprintf(f, "\", "); newchunk = 1; break; default: if (isprint(c)) fprintf(f, "%c", c); else fprintf(f, "\\x%02hhx", c); } } fprintf(f, "\""); /* Wrap up any labels at the end of the value */ for_each_marker_of_type(m, LABEL) { assert (m->offset == val.len); fprintf(f, " %s:", m->ref); } } static void write_propval_cells(FILE *f, struct data val) { void *propend = val.val + val.len; cell_t *cp = (cell_t *)val.val; struct marker *m = val.markers; fprintf(f, "<"); for (;;) { while (m && (m->offset <= ((char *)cp - val.val))) { if (m->type == LABEL) { assert(m->offset == ((char *)cp - val.val)); fprintf(f, "%s: ", m->ref); } m = m->next; } fprintf(f, "0x%x", fdt32_to_cpu(*cp++)); if ((void *)cp >= propend) break; fprintf(f, " "); } /* Wrap up any labels at the end of the value */ for_each_marker_of_type(m, LABEL) { assert (m->offset == val.len); fprintf(f, " %s:", m->ref); } fprintf(f, ">"); } static void write_propval_bytes(FILE *f, struct data val) { void *propend = val.val + val.len; const char *bp = val.val; struct marker *m = val.markers; fprintf(f, "["); for (;;) { while (m && (m->offset == (bp-val.val))) { if (m->type == LABEL) fprintf(f, "%s: ", m->ref); m = m->next; } fprintf(f, "%02hhx", *bp++); if ((const void *)bp >= propend) break; fprintf(f, " "); } /* Wrap up any labels at the end of the value */ for_each_marker_of_type(m, LABEL) { assert (m->offset == val.len); fprintf(f, " %s:", m->ref); } fprintf(f, "]"); } static void write_propval(FILE *f, struct property *prop) { int len = prop->val.len; const char *p = prop->val.val; struct marker *m = prop->val.markers; int nnotstring = 0, nnul = 0; int nnotstringlbl = 0, nnotcelllbl = 0; int i; if (len == 0) { fprintf(f, ";\n"); return; } for (i = 0; i < len; i++) { if (! 
isstring(p[i])) nnotstring++; if (p[i] == '\0') nnul++; } for_each_marker_of_type(m, LABEL) { if ((m->offset > 0) && (prop->val.val[m->offset - 1] != '\0')) nnotstringlbl++; if ((m->offset % sizeof(cell_t)) != 0) nnotcelllbl++; } fprintf(f, " = "); if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul)) && (nnotstringlbl == 0)) { write_propval_string(f, prop->val); } else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) { write_propval_cells(f, prop->val); } else { write_propval_bytes(f, prop->val); } fprintf(f, ";\n"); } static void write_tree_source_node(FILE *f, struct node *tree, int level) { struct property *prop; struct node *child; write_prefix(f, level); if (tree->label) fprintf(f, "%s: ", tree->label); if (tree->name && (*tree->name)) fprintf(f, "%s {\n", tree->name); else fprintf(f, "/ {\n"); for_each_property(tree, prop) { write_prefix(f, level+1); if (prop->label) fprintf(f, "%s: ", prop->label); fprintf(f, "%s", prop->name); write_propval(f, prop); } for_each_child(tree, child) { fprintf(f, "\n"); write_tree_source_node(f, child, level+1); } write_prefix(f, level); fprintf(f, "};\n"); } void dt_to_source(FILE *f, struct boot_info *bi) { struct reserve_info *re; fprintf(f, "/dts-v1/;\n\n"); for (re = bi->reservelist; re; re = re->next) { if (re->label) fprintf(f, "%s: ", re->label); fprintf(f, "/memreserve/\t0x%016llx 0x%016llx;\n", (unsigned long long)re->re.address, (unsigned long long)re->re.size); } write_tree_source_node(f, bi->dt, 0); }
gpl-2.0
CyanogenMod/android_kernel_samsung_espresso10
arch/arm/plat-samsung/pm.c
1764
8668
/* linux/arch/arm/plat-s3c/pm.c * * Copyright 2008 Openmoko, Inc. * Copyright 2004-2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C common power management (suspend to ram) support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/serial_core.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <mach/hardware.h> #include <mach/map.h> #include <plat/regs-serial.h> #include <mach/regs-clock.h> #include <mach/regs-irq.h> #include <asm/irq.h> #include <plat/pm.h> #include <mach/pm-core.h> /* for external use */ unsigned long s3c_pm_flags; /* Debug code: * * This code supports debug output to the low level UARTs for use on * resume before the console layer is available. */ #ifdef CONFIG_SAMSUNG_PM_DEBUG extern void printascii(const char *); void s3c_pm_dbg(const char *fmt, ...) { va_list va; char buff[256]; va_start(va, fmt); vsprintf(buff, fmt, va); va_end(va); printascii(buff); } static inline void s3c_pm_debug_init(void) { /* restart uart clocks so we can use them to output */ s3c_pm_debug_init_uart(); } #else #define s3c_pm_debug_init() do { } while(0) #endif /* CONFIG_SAMSUNG_PM_DEBUG */ /* Save the UART configurations if we are configured for debug. 
*/ unsigned char pm_uart_udivslot; #ifdef CONFIG_SAMSUNG_PM_DEBUG struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) { void __iomem *regs = S3C_VA_UARTx(uart); save->ulcon = __raw_readl(regs + S3C2410_ULCON); save->ucon = __raw_readl(regs + S3C2410_UCON); save->ufcon = __raw_readl(regs + S3C2410_UFCON); save->umcon = __raw_readl(regs + S3C2410_UMCON); save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV); if (pm_uart_udivslot) save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT); S3C_PMDBG("UART[%d]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n", uart, save->ulcon, save->ucon, save->ufcon, save->ubrdiv); } static void s3c_pm_save_uarts(void) { struct pm_uart_save *save = uart_save; unsigned int uart; for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++) s3c_pm_save_uart(uart, save); } static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save) { void __iomem *regs = S3C_VA_UARTx(uart); s3c_pm_arch_update_uart(regs, save); __raw_writel(save->ulcon, regs + S3C2410_ULCON); __raw_writel(save->ucon, regs + S3C2410_UCON); __raw_writel(save->ufcon, regs + S3C2410_UFCON); __raw_writel(save->umcon, regs + S3C2410_UMCON); __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV); if (pm_uart_udivslot) __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT); } static void s3c_pm_restore_uarts(void) { struct pm_uart_save *save = uart_save; unsigned int uart; for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++) s3c_pm_restore_uart(uart, save); } #else static void s3c_pm_save_uarts(void) { } static void s3c_pm_restore_uarts(void) { } #endif /* The IRQ ext-int code goes here, it is too small to currently bother * with its own file. 
*/ unsigned long s3c_irqwake_intmask = 0xffffffffL; unsigned long s3c_irqwake_eintmask = 0xffffffffL; int s3c_irqext_wake(struct irq_data *data, unsigned int state) { unsigned long bit = 1L << IRQ_EINT_BIT(data->irq); if (!(s3c_irqwake_eintallow & bit)) return -ENOENT; printk(KERN_INFO "wake %s for irq %d\n", state ? "enabled" : "disabled", data->irq); if (!state) s3c_irqwake_eintmask |= bit; else s3c_irqwake_eintmask &= ~bit; return 0; } /* helper functions to save and restore register state */ /** * s3c_pm_do_save() - save a set of registers for restoration on resume. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Run through the list of registers given, saving their contents in the * array for later restoration when we wakeup. */ void s3c_pm_do_save(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) { ptr->val = __raw_readl(ptr->reg); S3C_PMDBG("saved %p value %08lx\n", ptr->reg, ptr->val); } } /** * s3c_pm_do_restore() - restore register values from the save list. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Restore the register values saved from s3c_pm_do_save(). * * Note, we do not use S3C_PMDBG() in here, as the system may not have * restore the UARTs state yet */ void s3c_pm_do_restore(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) { printk(KERN_DEBUG "restore %p (restore %08lx, was %08x)\n", ptr->reg, ptr->val, __raw_readl(ptr->reg)); __raw_writel(ptr->val, ptr->reg); } } /** * s3c_pm_do_restore_core() - early restore register values from save list. * * This is similar to s3c_pm_do_restore() except we try and minimise the * side effects of the function in case registers that hardware might need * to work has been restored. * * WARNING: Do not put any debug in here that may effect memory or use * peripherals, as things may be changing! 
*/ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) __raw_writel(ptr->val, ptr->reg); } /* s3c2410_pm_show_resume_irqs * * print any IRQs asserted at resume time (ie, we woke from) */ static void __maybe_unused s3c_pm_show_resume_irqs(int start, unsigned long which, unsigned long mask) { int i; which &= ~mask; for (i = 0; i <= 31; i++) { if (which & (1L<<i)) { S3C_PMDBG("IRQ %d asserted at resume\n", start+i); } } } void (*pm_cpu_prep)(void); void (*pm_cpu_sleep)(void); #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) /* s3c_pm_enter * * central control for sleep/resume process */ static int s3c_pm_enter(suspend_state_t state) { /* ensure the debug is initialised (if enabled) */ s3c_pm_debug_init(); S3C_PMDBG("%s(%d)\n", __func__, state); if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) { printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__); return -EINVAL; } /* check if we have anything to wake-up with... bad things seem * to happen if you suspend with no wakeup (system will often * require a full power-cycle) */ if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { printk(KERN_ERR "%s: No wake-up sources!\n", __func__); printk(KERN_ERR "%s: Aborting sleep\n", __func__); return -EINVAL; } /* save all necessary core registers not covered by the drivers */ s3c_pm_save_gpios(); s3c_pm_save_uarts(); s3c_pm_save_core(); /* set the irq configuration for wake */ s3c_pm_configure_extint(); S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n", s3c_irqwake_intmask, s3c_irqwake_eintmask); s3c_pm_arch_prepare_irqs(); /* call cpu specific preparation */ pm_cpu_prep(); /* flush cache back to ram */ flush_cache_all(); s3c_pm_check_store(); /* send the cpu to sleep... 
*/ s3c_pm_arch_stop_clocks(); /* s3c_cpu_save will also act as our return point from when * we resume as it saves its own register state and restores it * during the resume. */ s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); /* restore the cpu state using the kernel's cpu init code. */ cpu_init(); /* restore the system state */ s3c_pm_restore_core(); s3c_pm_restore_uarts(); s3c_pm_restore_gpios(); s3c_pm_debug_init(); /* check what irq (if any) restored the system */ s3c_pm_arch_show_resume_irqs(); S3C_PMDBG("%s: post sleep, preparing to return\n", __func__); /* LEDs should now be 1110 */ s3c_pm_debug_smdkled(1 << 1, 0); s3c_pm_check_restore(); /* ok, let's return from sleep */ S3C_PMDBG("S3C PM Resume (post-restore)\n"); return 0; } static int s3c_pm_prepare(void) { /* prepare check area if configured */ s3c_pm_check_prepare(); return 0; } static void s3c_pm_finish(void) { s3c_pm_check_cleanup(); } static const struct platform_suspend_ops s3c_pm_ops = { .enter = s3c_pm_enter, .prepare = s3c_pm_prepare, .finish = s3c_pm_finish, .valid = suspend_valid_only_mem, }; /* s3c_pm_init * * Attach the power management functions. This should be called * from the board specific initialisation if the board supports * it. */ int __init s3c_pm_init(void) { printk("S3C Power Management, Copyright 2004 Simtec Electronics\n"); suspend_set_ops(&s3c_pm_ops); return 0; }
gpl-2.0
174high/compat_wl18xx_origin_from_r8.6
drivers/media/platform/vivid/vivid-vbi-gen.c
1764
8824
/* * vivid-vbi-gen.c - vbi generator support functions. * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/string.h> #include <linux/videodev2.h> #include "vivid-vbi-gen.h" static void wss_insert(u8 *wss, u32 val, unsigned size) { while (size--) *wss++ = (val & (1 << size)) ? 0xc0 : 0x10; } static void vivid_vbi_gen_wss_raw(const struct v4l2_sliced_vbi_data *data, u8 *buf, unsigned sampling_rate) { const unsigned rate = 5000000; /* WSS has a 5 MHz transmission rate */ u8 wss[29 + 24 + 24 + 24 + 18 + 18] = { 0 }; const unsigned zero = 0x07; const unsigned one = 0x38; unsigned bit = 0; u16 wss_data; int i; wss_insert(wss + bit, 0x1f1c71c7, 29); bit += 29; wss_insert(wss + bit, 0x1e3c1f, 24); bit += 24; wss_data = (data->data[1] << 8) | data->data[0]; for (i = 0; i <= 13; i++, bit += 6) wss_insert(wss + bit, (wss_data & (1 << i)) ? 
one : zero, 6); for (i = 0, bit = 0; bit < sizeof(wss); bit++) { unsigned n = ((bit + 1) * sampling_rate) / rate; while (i < n) buf[i++] = wss[bit]; } } static void vivid_vbi_gen_teletext_raw(const struct v4l2_sliced_vbi_data *data, u8 *buf, unsigned sampling_rate) { const unsigned rate = 6937500 / 10; /* Teletext has a 6.9375 MHz transmission rate */ u8 teletext[45] = { 0x55, 0x55, 0x27 }; unsigned bit = 0; int i; memcpy(teletext + 3, data->data, sizeof(teletext) - 3); /* prevents 32 bit overflow */ sampling_rate /= 10; for (i = 0, bit = 0; bit < sizeof(teletext) * 8; bit++) { unsigned n = ((bit + 1) * sampling_rate) / rate; u8 val = (teletext[bit / 8] & (1 << (bit & 7))) ? 0xc0 : 0x10; while (i < n) buf[i++] = val; } } static void cc_insert(u8 *cc, u8 ch) { unsigned tot = 0; unsigned i; for (i = 0; i < 7; i++) { cc[2 * i] = cc[2 * i + 1] = (ch & (1 << i)) ? 1 : 0; tot += cc[2 * i]; } cc[14] = cc[15] = !(tot & 1); } #define CC_PREAMBLE_BITS (14 + 4 + 2) static void vivid_vbi_gen_cc_raw(const struct v4l2_sliced_vbi_data *data, u8 *buf, unsigned sampling_rate) { const unsigned rate = 1000000; /* CC has a 1 MHz transmission rate */ u8 cc[CC_PREAMBLE_BITS + 2 * 16] = { /* Clock run-in: 7 cycles */ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, /* 2 cycles of 0 */ 0, 0, 0, 0, /* Start bit of 1 (each bit is two cycles) */ 1, 1 }; unsigned bit, i; cc_insert(cc + CC_PREAMBLE_BITS, data->data[0]); cc_insert(cc + CC_PREAMBLE_BITS + 16, data->data[1]); for (i = 0, bit = 0; bit < sizeof(cc); bit++) { unsigned n = ((bit + 1) * sampling_rate) / rate; while (i < n) buf[i++] = cc[bit] ? 0xc0 : 0x10; } } void vivid_vbi_gen_raw(const struct vivid_vbi_gen_data *vbi, const struct v4l2_vbi_format *vbi_fmt, u8 *buf) { unsigned idx; for (idx = 0; idx < 25; idx++) { const struct v4l2_sliced_vbi_data *data = vbi->data + idx; unsigned start_2nd_field; unsigned line = data->line; u8 *linebuf = buf; start_2nd_field = (data->id & V4L2_SLICED_VBI_525) ? 
263 : 313; if (data->field) line += start_2nd_field; line -= vbi_fmt->start[data->field]; if (vbi_fmt->flags & V4L2_VBI_INTERLACED) linebuf += (line * 2 + data->field) * vbi_fmt->samples_per_line; else linebuf += (line + data->field * vbi_fmt->count[0]) * vbi_fmt->samples_per_line; if (data->id == V4L2_SLICED_CAPTION_525) vivid_vbi_gen_cc_raw(data, linebuf, vbi_fmt->sampling_rate); else if (data->id == V4L2_SLICED_WSS_625) vivid_vbi_gen_wss_raw(data, linebuf, vbi_fmt->sampling_rate); else if (data->id == V4L2_SLICED_TELETEXT_B) vivid_vbi_gen_teletext_raw(data, linebuf, vbi_fmt->sampling_rate); } } static const u8 vivid_cc_sequence1[30] = { 0x14, 0x20, /* Resume Caption Loading */ 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', 0x14, 0x2f, /* End of Caption */ }; static const u8 vivid_cc_sequence2[30] = { 0x14, 0x20, /* Resume Caption Loading */ 'C', 'l', 'o', 's', 'e', 'd', ' ', 'c', 'a', 'p', 't', 'i', 'o', 'n', 's', ' ', 't', 'e', 's', 't', 0x14, 0x2f, /* End of Caption */ }; static u8 calc_parity(u8 val) { unsigned i; unsigned tot = 0; for (i = 0; i < 7; i++) tot += (val & (1 << i)) ? 1 : 0; return val | ((tot & 1) ? 
0 : 0x80); } static void vivid_vbi_gen_set_time_of_day(u8 *packet) { struct tm tm; u8 checksum, i; time_to_tm(get_seconds(), 0, &tm); packet[0] = calc_parity(0x07); packet[1] = calc_parity(0x01); packet[2] = calc_parity(0x40 | tm.tm_min); packet[3] = calc_parity(0x40 | tm.tm_hour); packet[4] = calc_parity(0x40 | tm.tm_mday); if (tm.tm_mday == 1 && tm.tm_mon == 2 && sys_tz.tz_minuteswest > tm.tm_min + tm.tm_hour * 60) packet[4] = calc_parity(0x60 | tm.tm_mday); packet[5] = calc_parity(0x40 | (1 + tm.tm_mon)); packet[6] = calc_parity(0x40 | (1 + tm.tm_wday)); packet[7] = calc_parity(0x40 | ((tm.tm_year - 90) & 0x3f)); packet[8] = calc_parity(0x0f); for (checksum = i = 0; i <= 8; i++) checksum += packet[i] & 0x7f; packet[9] = calc_parity(0x100 - checksum); checksum = 0; packet[10] = calc_parity(0x07); packet[11] = calc_parity(0x04); if (sys_tz.tz_minuteswest >= 0) packet[12] = calc_parity(0x40 | ((sys_tz.tz_minuteswest / 60) & 0x1f)); else packet[12] = calc_parity(0x40 | ((24 + sys_tz.tz_minuteswest / 60) & 0x1f)); packet[13] = calc_parity(0); packet[14] = calc_parity(0x0f); for (checksum = 0, i = 10; i <= 14; i++) checksum += packet[i] & 0x7f; packet[15] = calc_parity(0x100 - checksum); } static const u8 hamming[16] = { 0x15, 0x02, 0x49, 0x5e, 0x64, 0x73, 0x38, 0x2f, 0xd0, 0xc7, 0x8c, 0x9b, 0xa1, 0xb6, 0xfd, 0xea }; static void vivid_vbi_gen_teletext(u8 *packet, unsigned line, unsigned frame) { unsigned offset = 2; unsigned i; packet[0] = hamming[1 + ((line & 1) << 3)]; packet[1] = hamming[line >> 1]; memset(packet + 2, 0x20, 40); if (line == 0) { /* subcode */ packet[2] = hamming[frame % 10]; packet[3] = hamming[frame / 10]; packet[4] = hamming[0]; packet[5] = hamming[0]; packet[6] = hamming[0]; packet[7] = hamming[0]; packet[8] = hamming[0]; packet[9] = hamming[1]; offset = 10; } packet += offset; memcpy(packet, "Page: 100 Row: 10", 17); packet[7] = '0' + frame / 10; packet[8] = '0' + frame % 10; packet[15] = '0' + line / 10; packet[16] = '0' + line % 10; for (i = 
0; i < 42 - offset; i++) packet[i] = calc_parity(packet[i]); } void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi, bool is_60hz, unsigned seqnr) { struct v4l2_sliced_vbi_data *data0 = vbi->data; struct v4l2_sliced_vbi_data *data1 = vbi->data + 1; unsigned frame = seqnr % 60; memset(vbi->data, 0, sizeof(vbi->data)); if (!is_60hz) { unsigned i; for (i = 0; i <= 11; i++) { data0->id = V4L2_SLICED_TELETEXT_B; data0->line = 7 + i; vivid_vbi_gen_teletext(data0->data, i, frame); data0++; } data0->id = V4L2_SLICED_WSS_625; data0->line = 23; /* 4x3 video aspect ratio */ data0->data[0] = 0x08; data0++; for (i = 0; i <= 11; i++) { data0->id = V4L2_SLICED_TELETEXT_B; data0->field = 1; data0->line = 7 + i; vivid_vbi_gen_teletext(data0->data, 12 + i, frame); data0++; } return; } data0->id = V4L2_SLICED_CAPTION_525; data0->line = 21; data1->id = V4L2_SLICED_CAPTION_525; data1->field = 1; data1->line = 21; if (frame < 15) { data0->data[0] = calc_parity(vivid_cc_sequence1[2 * frame]); data0->data[1] = calc_parity(vivid_cc_sequence1[2 * frame + 1]); } else if (frame >= 30 && frame < 45) { frame -= 30; data0->data[0] = calc_parity(vivid_cc_sequence2[2 * frame]); data0->data[1] = calc_parity(vivid_cc_sequence2[2 * frame + 1]); } else { data0->data[0] = calc_parity(0); data0->data[1] = calc_parity(0); } frame = seqnr % (30 * 60); switch (frame) { case 0: vivid_vbi_gen_set_time_of_day(vbi->time_of_day_packet); /* fall through */ case 1 ... 7: data1->data[0] = vbi->time_of_day_packet[frame * 2]; data1->data[1] = vbi->time_of_day_packet[frame * 2 + 1]; break; default: data1->data[0] = calc_parity(0); data1->data[1] = calc_parity(0); break; } }
gpl-2.0
Nick73/King_Kernel
drivers/net/fs_enet/mii-bitbang.c
2532
5710
/* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/platform_device.h> #include <linux/mdio-bitbang.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include "fs_enet.h" struct bb_info { struct mdiobb_ctrl ctrl; __be32 __iomem *dir; __be32 __iomem *dat; u32 mdio_msk; u32 mdc_msk; }; /* FIXME: If any other users of GPIO crop up, then these will have to * have some sort of global synchronization to avoid races with other * pins on the same port. The ideal solution would probably be to * bind the ports to a GPIO driver, and have this be a client of it. */ static inline void bb_set(u32 __iomem *p, u32 m) { out_be32(p, in_be32(p) | m); } static inline void bb_clr(u32 __iomem *p, u32 m) { out_be32(p, in_be32(p) & ~m); } static inline int bb_read(u32 __iomem *p, u32 m) { return (in_be32(p) & m) != 0; } static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (dir) bb_set(bitbang->dir, bitbang->mdio_msk); else bb_clr(bitbang->dir, bitbang->mdio_msk); /* Read back to flush the write. 
*/ in_be32(bitbang->dir); } static inline int mdio_read(struct mdiobb_ctrl *ctrl) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); return bb_read(bitbang->dat, bitbang->mdio_msk); } static inline void mdio(struct mdiobb_ctrl *ctrl, int what) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (what) bb_set(bitbang->dat, bitbang->mdio_msk); else bb_clr(bitbang->dat, bitbang->mdio_msk); /* Read back to flush the write. */ in_be32(bitbang->dat); } static inline void mdc(struct mdiobb_ctrl *ctrl, int what) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (what) bb_set(bitbang->dat, bitbang->mdc_msk); else bb_clr(bitbang->dat, bitbang->mdc_msk); /* Read back to flush the write. */ in_be32(bitbang->dat); } static struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = mdc, .set_mdio_dir = mdio_dir, .set_mdio_data = mdio, .get_mdio_data = mdio_read, }; static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) { struct resource res; const u32 *data; int mdio_pin, mdc_pin, len; struct bb_info *bitbang = bus->priv; int ret = of_address_to_resource(np, 0, &res); if (ret) return ret; if (res.end - res.start < 13) return -ENODEV; /* This should really encode the pin number as well, but all * we get is an int, and the odds of multiple bitbang mdio buses * is low enough that it's not worth going too crazy. 
*/ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); data = of_get_property(np, "fsl,mdio-pin", &len); if (!data || len != 4) return -ENODEV; mdio_pin = *data; data = of_get_property(np, "fsl,mdc-pin", &len); if (!data || len != 4) return -ENODEV; mdc_pin = *data; bitbang->dir = ioremap(res.start, res.end - res.start + 1); if (!bitbang->dir) return -ENOMEM; bitbang->dat = bitbang->dir + 4; bitbang->mdio_msk = 1 << (31 - mdio_pin); bitbang->mdc_msk = 1 << (31 - mdc_pin); return 0; } static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) { struct mii_bus *new_bus; struct bb_info *bitbang; int ret = -ENOMEM; bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); if (!bitbang) goto out; bitbang->ctrl.ops = &bb_ops; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) goto out_free_priv; new_bus->name = "CPM2 Bitbanged MII", ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); if (ret) goto out_free_bus; new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!new_bus->irq) goto out_unmap_regs; new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) goto out_free_irqs; return 0; out_free_irqs: dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); out_free_bus: free_mdio_bitbang(new_bus); out_free_priv: kfree(bitbang); out: return ret; } static int fs_enet_mdio_remove(struct platform_device *ofdev) { struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); return 0; } static struct of_device_id fs_enet_mdio_bb_match[] = { { .compatible = "fsl,cpm2-mdio-bitbang", }, {}, }; MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); static struct platform_driver fs_enet_bb_mdio_driver = { .driver = { .name = "fsl-bb-mdio", .owner = 
THIS_MODULE, .of_match_table = fs_enet_mdio_bb_match, }, .probe = fs_enet_mdio_probe, .remove = fs_enet_mdio_remove, }; static int fs_enet_mdio_bb_init(void) { return platform_driver_register(&fs_enet_bb_mdio_driver); } static void fs_enet_mdio_bb_exit(void) { platform_driver_unregister(&fs_enet_bb_mdio_driver); } module_init(fs_enet_mdio_bb_init); module_exit(fs_enet_mdio_bb_exit);
gpl-2.0
pgielda/vybrid-linux
drivers/media/video/omap3isp/ispvideo.c
2532
37969
/* * ispvideo.c * * TI OMAP3 ISP - Generic video node * * Copyright (C) 2009-2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <asm/cacheflush.h> #include <linux/clk.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <plat/iommu.h> #include <plat/iovmm.h> #include <plat/omap-pm.h> #include "ispvideo.h" #include "isp.h" /* ----------------------------------------------------------------------------- * Helper functions */ static struct isp_format_info formats[] = { { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, }, { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_Y10, 10, }, { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_Y12, 12, }, { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 8, }, { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 8, }, { 
V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 8, }, { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 8, }, { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_1X10, 0, V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR10, 10, }, { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG10, 10, }, { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG10, 10, }, { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB10, 10, }, { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR12, 12, }, { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG12, 12, }, { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG12, 12, }, { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB12, 12, }, { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, 0, V4L2_PIX_FMT_UYVY, 16, }, { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, 0, V4L2_PIX_FMT_YUYV, 16, }, }; const struct isp_format_info * omap3isp_video_format_info(enum v4l2_mbus_pixelcode code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == code) return &formats[i]; } return NULL; } /* * Decide whether desired output 
pixel code can be obtained with * the lane shifter by shifting the input pixel code. * @in: input pixelcode to shifter * @out: output pixelcode from shifter * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0] * * return true if the combination is possible * return false otherwise */ static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in, enum v4l2_mbus_pixelcode out, unsigned int additional_shift) { const struct isp_format_info *in_info, *out_info; if (in == out) return true; in_info = omap3isp_video_format_info(in); out_info = omap3isp_video_format_info(out); if ((in_info->flavor == 0) || (out_info->flavor == 0)) return false; if (in_info->flavor != out_info->flavor) return false; return in_info->bpp - out_info->bpp + additional_shift <= 6; } /* * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format * @video: ISP video instance * @mbus: v4l2_mbus_framefmt format (input) * @pix: v4l2_pix_format format (output) * * Fill the output pix structure with information from the input mbus format. * The bytesperline and sizeimage fields are computed from the requested bytes * per line value in the pix format and information from the video instance. * * Return the number of padding bytes at end of line. */ static unsigned int isp_video_mbus_to_pix(const struct isp_video *video, const struct v4l2_mbus_framefmt *mbus, struct v4l2_pix_format *pix) { unsigned int bpl = pix->bytesperline; unsigned int min_bpl; unsigned int i; memset(pix, 0, sizeof(*pix)); pix->width = mbus->width; pix->height = mbus->height; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == mbus->code) break; } if (WARN_ON(i == ARRAY_SIZE(formats))) return 0; min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8; /* Clamp the requested bytes per line value. If the maximum bytes per * line value is zero, the module doesn't support user configurable line * sizes. Override the requested value with the minimum in that case. 
*/ if (video->bpl_max) bpl = clamp(bpl, min_bpl, video->bpl_max); else bpl = min_bpl; if (!video->bpl_zero_padding || bpl != min_bpl) bpl = ALIGN(bpl, video->bpl_alignment); pix->pixelformat = formats[i].pixelformat; pix->bytesperline = bpl; pix->sizeimage = pix->bytesperline * pix->height; pix->colorspace = mbus->colorspace; pix->field = mbus->field; return bpl - min_bpl; } static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mbus) { unsigned int i; memset(mbus, 0, sizeof(*mbus)); mbus->width = pix->width; mbus->height = pix->height; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].pixelformat == pix->pixelformat) break; } if (WARN_ON(i == ARRAY_SIZE(formats))) return; mbus->code = formats[i].code; mbus->colorspace = pix->colorspace; mbus->field = pix->field; } static struct v4l2_subdev * isp_video_remote_subdev(struct isp_video *video, u32 *pad) { struct media_pad *remote; remote = media_entity_remote_source(&video->pad); if (remote == NULL || media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV) return NULL; if (pad) *pad = remote->index; return media_entity_to_v4l2_subdev(remote->entity); } /* Return a pointer to the ISP video instance at the far end of the pipeline. */ static struct isp_video * isp_video_far_end(struct isp_video *video) { struct media_entity_graph graph; struct media_entity *entity = &video->video.entity; struct media_device *mdev = entity->parent; struct isp_video *far_end = NULL; mutex_lock(&mdev->graph_mutex); media_entity_graph_walk_start(&graph, entity); while ((entity = media_entity_graph_walk_next(&graph))) { if (entity == &video->video.entity) continue; if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE) continue; far_end = to_isp_video(media_entity_to_video_device(entity)); if (far_end->type != video->type) break; far_end = NULL; } mutex_unlock(&mdev->graph_mutex); return far_end; } /* * Validate a pipeline by checking both ends of all links for format * discrepancies. 
* * Compute the minimum time per frame value as the maximum of time per frame * limits reported by every block in the pipeline. * * Return 0 if all formats match, or -EPIPE if at least one link is found with * different formats on its two ends. */ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) { struct isp_device *isp = pipe->output->isp; struct v4l2_subdev_format fmt_source; struct v4l2_subdev_format fmt_sink; struct media_pad *pad; struct v4l2_subdev *subdev; int ret; pipe->max_rate = pipe->l3_ick; subdev = isp_video_remote_subdev(pipe->output, NULL); if (subdev == NULL) return -EPIPE; while (1) { unsigned int shifter_link; /* Retrieve the sink format */ pad = &subdev->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; fmt_sink.pad = pad->index; fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; /* Update the maximum frame rate */ if (subdev == &isp->isp_res.subdev) omap3isp_resizer_max_rate(&isp->isp_res, &pipe->max_rate); /* Check ccdc maximum data rate when data comes from sensor * TODO: Include ccdc rate in pipe->max_rate and compare the * total pipe rate with the input data rate from sensor. */ if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) { unsigned int rate = UINT_MAX; omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate); if (isp->isp_ccdc.vpcfg.pixelclk > rate) return -ENOSPC; } /* If sink pad is on CCDC, the link has the lane shifter * in the middle of it. 
*/ shifter_link = subdev == &isp->isp_ccdc.subdev; /* Retrieve the source format */ pad = media_entity_remote_source(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; subdev = media_entity_to_v4l2_subdev(pad->entity); fmt_source.pad = pad->index; fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; /* Check if the two ends match */ if (fmt_source.format.width != fmt_sink.format.width || fmt_source.format.height != fmt_sink.format.height) return -EPIPE; if (shifter_link) { unsigned int parallel_shift = 0; if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) { struct isp_parallel_platform_data *pdata = &((struct isp_v4l2_subdevs_group *) subdev->host_priv)->bus.parallel; parallel_shift = pdata->data_lane_shift * 2; } if (!isp_video_is_shiftable(fmt_source.format.code, fmt_sink.format.code, parallel_shift)) return -EPIPE; } else if (fmt_source.format.code != fmt_sink.format.code) return -EPIPE; } return 0; } static int __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) { struct v4l2_subdev_format fmt; struct v4l2_subdev *subdev; u32 pad; int ret; subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; mutex_lock(&video->mutex); fmt.pad = pad; fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); if (ret == -ENOIOCTLCMD) ret = -EINVAL; mutex_unlock(&video->mutex); if (ret) return ret; format->type = video->type; return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); } static int isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh) { struct v4l2_format format; int ret; memcpy(&format, &vfh->format, sizeof(format)); ret = __isp_video_get_format(video, &format); if (ret < 0) return ret; if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat || vfh->format.fmt.pix.height != 
format.fmt.pix.height || vfh->format.fmt.pix.width != format.fmt.pix.width || vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline || vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage) return -EINVAL; return ret; } /* ----------------------------------------------------------------------------- * IOMMU management */ #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8) /* * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list * @dev: Device pointer specific to the OMAP3 ISP. * @sglist: Pointer to source Scatter gather list to allocate. * @sglen: Number of elements of the scatter-gatter list. * * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if * we ran out of memory. */ static dma_addr_t ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen) { struct sg_table *sgt; u32 da; sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (sgt == NULL) return -ENOMEM; sgt->sgl = (struct scatterlist *)sglist; sgt->nents = sglen; sgt->orig_nents = sglen; da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG); if (IS_ERR_VALUE(da)) kfree(sgt); return da; } /* * ispmmu_vunmap - Unmap a device address from the ISP MMU * @dev: Device pointer specific to the OMAP3 ISP. * @da: Device address generated from a ispmmu_vmap call. 
*/ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da) { struct sg_table *sgt; sgt = iommu_vunmap(isp->iommu, (u32)da); kfree(sgt); } /* ----------------------------------------------------------------------------- * Video queue operations */ static void isp_video_queue_prepare(struct isp_video_queue *queue, unsigned int *nbuffers, unsigned int *size) { struct isp_video_fh *vfh = container_of(queue, struct isp_video_fh, queue); struct isp_video *video = vfh->video; *size = vfh->format.fmt.pix.sizeimage; if (*size == 0) return; *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size)); } static void isp_video_buffer_cleanup(struct isp_video_buffer *buf) { struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); struct isp_buffer *buffer = to_isp_buffer(buf); struct isp_video *video = vfh->video; if (buffer->isp_addr) { ispmmu_vunmap(video->isp, buffer->isp_addr); buffer->isp_addr = 0; } } static int isp_video_buffer_prepare(struct isp_video_buffer *buf) { struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); struct isp_buffer *buffer = to_isp_buffer(buf); struct isp_video *video = vfh->video; unsigned long addr; addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen); if (IS_ERR_VALUE(addr)) return -EIO; if (!IS_ALIGNED(addr, 32)) { dev_dbg(video->isp->dev, "Buffer address must be " "aligned to 32 bytes boundary.\n"); ispmmu_vunmap(video->isp, buffer->isp_addr); return -EINVAL; } buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage; buffer->isp_addr = addr; return 0; } /* * isp_video_buffer_queue - Add buffer to streaming queue * @buf: Video buffer * * In memory-to-memory mode, start streaming on the pipeline if buffers are * queued on both the input and the output, if the pipeline isn't already busy. * If the pipeline is busy, it will be restarted in the output module interrupt * handler. 
*/ static void isp_video_buffer_queue(struct isp_video_buffer *buf) { struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); struct isp_buffer *buffer = to_isp_buffer(buf); struct isp_video *video = vfh->video; struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum isp_pipeline_state state; unsigned long flags; unsigned int empty; unsigned int start; empty = list_empty(&video->dmaqueue); list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue); if (empty) { if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_QUEUE_OUTPUT; else state = ISP_PIPELINE_QUEUE_INPUT; spin_lock_irqsave(&pipe->lock, flags); pipe->state |= state; video->ops->queue(video, buffer); video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; start = isp_pipeline_ready(pipe); if (start) pipe->state |= ISP_PIPELINE_STREAM; spin_unlock_irqrestore(&pipe->lock, flags); if (start) omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_SINGLESHOT); } } static const struct isp_video_queue_operations isp_video_queue_ops = { .queue_prepare = &isp_video_queue_prepare, .buffer_prepare = &isp_video_buffer_prepare, .buffer_queue = &isp_video_buffer_queue, .buffer_cleanup = &isp_video_buffer_cleanup, }; /* * omap3isp_video_buffer_next - Complete the current buffer and return the next * @video: ISP video object * @error: Whether an error occurred during capture * * Remove the current video buffer from the DMA queue and fill its timestamp, * field count and state fields before waking up its completion handler. * * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0) * or VIDEOBUF_ERROR otherwise (@error is non-zero). * * The DMA queue is expected to contain at least one buffer. * * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is * empty. 
*/ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video, unsigned int error) { struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); struct isp_video_queue *queue = video->queue; enum isp_pipeline_state state; struct isp_video_buffer *buf; unsigned long flags; struct timespec ts; spin_lock_irqsave(&queue->irqlock, flags); if (WARN_ON(list_empty(&video->dmaqueue))) { spin_unlock_irqrestore(&queue->irqlock, flags); return NULL; } buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, irqlist); list_del(&buf->irqlist); spin_unlock_irqrestore(&queue->irqlock, flags); ktime_get_ts(&ts); buf->vbuf.timestamp.tv_sec = ts.tv_sec; buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; /* Do frame number propagation only if this is the output video node. * Frame number either comes from the CSI receivers or it gets * incremented here if H3A is not active. * Note: There is no guarantee that the output buffer will finish * first, so the input number might lag behind by 1 in some cases. */ if (video == pipe->output && !pipe->do_propagation) buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number); else buf->vbuf.sequence = atomic_read(&pipe->frame_number); buf->state = error ? 
ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE; wake_up(&buf->wait); if (list_empty(&video->dmaqueue)) { if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_QUEUE_OUTPUT | ISP_PIPELINE_STREAM; else state = ISP_PIPELINE_QUEUE_INPUT | ISP_PIPELINE_STREAM; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~state; if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; spin_unlock_irqrestore(&pipe->lock, flags); return NULL; } if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) { spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~ISP_PIPELINE_STREAM; spin_unlock_irqrestore(&pipe->lock, flags); } buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, irqlist); buf->state = ISP_BUF_STATE_ACTIVE; return to_isp_buffer(buf); } /* * omap3isp_video_resume - Perform resume operation on the buffers * @video: ISP video object * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise * * This function is intended to be used on suspend/resume scenario. It * requests video queue layer to discard buffers marked as DONE if it's in * continuous mode and requests ISP modules to queue again the ACTIVE buffer * if there's any. 
*/ void omap3isp_video_resume(struct isp_video *video, int continuous) { struct isp_buffer *buf = NULL; if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) omap3isp_video_queue_discard_done(video->queue); if (!list_empty(&video->dmaqueue)) { buf = list_first_entry(&video->dmaqueue, struct isp_buffer, buffer.irqlist); video->ops->queue(video, buf); video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; } else { if (continuous) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; } } /* ----------------------------------------------------------------------------- * V4L2 ioctls */ static int isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct isp_video *video = video_drvdata(file); strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver)); strlcpy(cap->card, video->video.name, sizeof(cap->card)); strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); cap->version = ISP_VIDEO_DRIVER_VERSION; if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; else cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } static int isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (format->type != video->type) return -EINVAL; mutex_lock(&video->mutex); *format = vfh->format; mutex_unlock(&video->mutex); return 0; } static int isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); struct v4l2_mbus_framefmt fmt; if (format->type != video->type) return -EINVAL; mutex_lock(&video->mutex); /* Fill the bytesperline and sizeimage fields by converting to media bus * format and back to pixel format. 
*/ isp_video_pix_to_mbus(&format->fmt.pix, &fmt); isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix); vfh->format = *format; mutex_unlock(&video->mutex); return 0; } static int isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev_format fmt; struct v4l2_subdev *subdev; u32 pad; int ret; if (format->type != video->type) return -EINVAL; subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format); fmt.pad = pad; fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); if (ret) return ret == -ENOIOCTLCMD ? -EINVAL : ret; isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); return 0; } static int isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev *subdev; int ret; subdev = isp_video_remote_subdev(video, NULL); if (subdev == NULL) return -EINVAL; mutex_lock(&video->mutex); ret = v4l2_subdev_call(subdev, video, cropcap, cropcap); mutex_unlock(&video->mutex); return ret == -ENOIOCTLCMD ? -EINVAL : ret; } static int isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev_format format; struct v4l2_subdev *subdev; u32 pad; int ret; subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; /* Try the get crop operation first and fallback to get format if not * implemented. */ ret = v4l2_subdev_call(subdev, video, g_crop, crop); if (ret != -ENOIOCTLCMD) return ret; format.pad = pad; format.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret == -ENOIOCTLCMD ? 
-EINVAL : ret; crop->c.left = 0; crop->c.top = 0; crop->c.width = format.format.width; crop->c.height = format.format.height; return 0; } static int isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev *subdev; int ret; subdev = isp_video_remote_subdev(video, NULL); if (subdev == NULL) return -EINVAL; mutex_lock(&video->mutex); ret = v4l2_subdev_call(subdev, video, s_crop, crop); mutex_unlock(&video->mutex); return ret == -ENOIOCTLCMD ? -EINVAL : ret; } static int isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || video->type != a->type) return -EINVAL; memset(a, 0, sizeof(*a)); a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; a->parm.output.timeperframe = vfh->timeperframe; return 0; } static int isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || video->type != a->type) return -EINVAL; if (a->parm.output.timeperframe.denominator == 0) a->parm.output.timeperframe.denominator = 1; vfh->timeperframe = a->parm.output.timeperframe; return 0; } static int isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) { struct isp_video_fh *vfh = to_isp_video_fh(fh); return omap3isp_video_queue_reqbufs(&vfh->queue, rb); } static int isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); return omap3isp_video_queue_querybuf(&vfh->queue, b); } static int isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); return omap3isp_video_queue_qbuf(&vfh->queue, b); } static int 
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); return omap3isp_video_queue_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); } /* * Stream management * * Every ISP pipeline has a single input and a single output. The input can be * either a sensor or a video node. The output is always a video node. * * As every pipeline has an output video node, the ISP video objects at the * pipeline output stores the pipeline state. It tracks the streaming state of * both the input and output, as well as the availability of buffers. * * In sensor-to-memory mode, frames are always available at the pipeline input. * Starting the sensor usually requires I2C transfers and must be done in * interruptible context. The pipeline is started and stopped synchronously * to the stream on/off commands. All modules in the pipeline will get their * subdev set stream handler called. The module at the end of the pipeline must * delay starting the hardware until buffers are available at its output. * * In memory-to-memory mode, starting/stopping the stream requires * synchronization between the input and output. ISP modules can't be stopped * in the middle of a frame, and at least some of the modules seem to become * busy as soon as they're started, even if they don't receive a frame start * event. For that reason frames need to be processed in single-shot mode. The * driver needs to wait until a frame is completely processed and written to * memory before restarting the pipeline for the next frame. Pipelined * processing might be possible but requires more testing. * * Stream start must be delayed until buffers are available at both the input * and output. The pipeline must be started in the videobuf queue callback with * the buffers queue spinlock held. The modules subdev set stream operation must * not sleep. 
*/ static int isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); enum isp_pipeline_state state; struct isp_pipeline *pipe; struct isp_video *far_end; unsigned long flags; int ret; if (type != video->type) return -EINVAL; mutex_lock(&video->stream_lock); if (video->streaming) { mutex_unlock(&video->stream_lock); return -EBUSY; } /* Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ pipe = video->video.entity.pipe ? to_isp_pipeline(&video->video.entity) : &video->pipe; media_entity_pipeline_start(&video->video.entity, &pipe->pipe); /* Verify that the currently configured format matches the output of * the connected subdev. */ ret = isp_video_check_format(video, vfh); if (ret < 0) goto error; video->bpl_padding = ret; video->bpl_value = vfh->format.fmt.pix.bytesperline; /* Find the ISP video node connected at the far end of the pipeline and * update the pipeline. */ far_end = isp_video_far_end(video); if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT; pipe->input = far_end; pipe->output = video; } else { if (far_end == NULL) { ret = -EPIPE; goto error; } state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT; pipe->input = video; pipe->output = far_end; } if (video->isp->pdata->set_constraints) video->isp->pdata->set_constraints(video->isp, true); pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]); /* Validate the pipeline and update its state. */ ret = isp_video_validate_pipeline(pipe); if (ret < 0) goto error; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~ISP_PIPELINE_STREAM; pipe->state |= state; spin_unlock_irqrestore(&pipe->lock, flags); /* Set the maximum time per frame as the value requested by userspace. 
* This is a soft limit that can be overridden if the hardware doesn't * support the request limit. */ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) pipe->max_timeperframe = vfh->timeperframe; video->queue = &vfh->queue; INIT_LIST_HEAD(&video->dmaqueue); atomic_set(&pipe->frame_number, -1); ret = omap3isp_video_queue_streamon(&vfh->queue); if (ret < 0) goto error; /* In sensor-to-memory mode, the stream can be started synchronously * to the stream on command. In memory-to-memory mode, it will be * started when buffers are queued on both the input and output. */ if (pipe->input == NULL) { ret = omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_CONTINUOUS); if (ret < 0) goto error; spin_lock_irqsave(&video->queue->irqlock, flags); if (list_empty(&video->dmaqueue)) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; spin_unlock_irqrestore(&video->queue->irqlock, flags); } error: if (ret < 0) { omap3isp_video_queue_streamoff(&vfh->queue); if (video->isp->pdata->set_constraints) video->isp->pdata->set_constraints(video->isp, false); media_entity_pipeline_stop(&video->video.entity); video->queue = NULL; } if (!ret) video->streaming = 1; mutex_unlock(&video->stream_lock); return ret; } static int isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum isp_pipeline_state state; unsigned int streaming; unsigned long flags; if (type != video->type) return -EINVAL; mutex_lock(&video->stream_lock); /* Make sure we're not streaming yet. */ mutex_lock(&vfh->queue.lock); streaming = vfh->queue.streaming; mutex_unlock(&vfh->queue.lock); if (!streaming) goto done; /* Update the pipeline state. 
*/ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_QUEUE_OUTPUT; else state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_QUEUE_INPUT; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~state; spin_unlock_irqrestore(&pipe->lock, flags); /* Stop the stream. */ omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED); omap3isp_video_queue_streamoff(&vfh->queue); video->queue = NULL; video->streaming = 0; if (video->isp->pdata->set_constraints) video->isp->pdata->set_constraints(video->isp, false); media_entity_pipeline_stop(&video->video.entity); done: mutex_unlock(&video->stream_lock); return 0; } static int isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input) { if (input->index > 0) return -EINVAL; strlcpy(input->name, "camera", sizeof(input->name)); input->type = V4L2_INPUT_TYPE_CAMERA; return 0; } static int isp_video_g_input(struct file *file, void *fh, unsigned int *input) { *input = 0; return 0; } static int isp_video_s_input(struct file *file, void *fh, unsigned int input) { return input == 0 ? 
0 : -EINVAL; } static const struct v4l2_ioctl_ops isp_video_ioctl_ops = { .vidioc_querycap = isp_video_querycap, .vidioc_g_fmt_vid_cap = isp_video_get_format, .vidioc_s_fmt_vid_cap = isp_video_set_format, .vidioc_try_fmt_vid_cap = isp_video_try_format, .vidioc_g_fmt_vid_out = isp_video_get_format, .vidioc_s_fmt_vid_out = isp_video_set_format, .vidioc_try_fmt_vid_out = isp_video_try_format, .vidioc_cropcap = isp_video_cropcap, .vidioc_g_crop = isp_video_get_crop, .vidioc_s_crop = isp_video_set_crop, .vidioc_g_parm = isp_video_get_param, .vidioc_s_parm = isp_video_set_param, .vidioc_reqbufs = isp_video_reqbufs, .vidioc_querybuf = isp_video_querybuf, .vidioc_qbuf = isp_video_qbuf, .vidioc_dqbuf = isp_video_dqbuf, .vidioc_streamon = isp_video_streamon, .vidioc_streamoff = isp_video_streamoff, .vidioc_enum_input = isp_video_enum_input, .vidioc_g_input = isp_video_g_input, .vidioc_s_input = isp_video_s_input, }; /* ----------------------------------------------------------------------------- * V4L2 file operations */ static int isp_video_open(struct file *file) { struct isp_video *video = video_drvdata(file); struct isp_video_fh *handle; int ret = 0; handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (handle == NULL) return -ENOMEM; v4l2_fh_init(&handle->vfh, &video->video); v4l2_fh_add(&handle->vfh); /* If this is the first user, initialise the pipeline. 
*/ if (omap3isp_get(video->isp) == NULL) { ret = -EBUSY; goto done; } ret = omap3isp_pipeline_pm_use(&video->video.entity, 1); if (ret < 0) { omap3isp_put(video->isp); goto done; } omap3isp_video_queue_init(&handle->queue, video->type, &isp_video_queue_ops, video->isp->dev, sizeof(struct isp_buffer)); memset(&handle->format, 0, sizeof(handle->format)); handle->format.type = video->type; handle->timeperframe.denominator = 1; handle->video = video; file->private_data = &handle->vfh; done: if (ret < 0) { v4l2_fh_del(&handle->vfh); kfree(handle); } return ret; } static int isp_video_release(struct file *file) { struct isp_video *video = video_drvdata(file); struct v4l2_fh *vfh = file->private_data; struct isp_video_fh *handle = to_isp_video_fh(vfh); /* Disable streaming and free the buffers queue resources. */ isp_video_streamoff(file, vfh, video->type); mutex_lock(&handle->queue.lock); omap3isp_video_queue_cleanup(&handle->queue); mutex_unlock(&handle->queue.lock); omap3isp_pipeline_pm_use(&video->video.entity, 0); /* Release the file handle. 
*/ v4l2_fh_del(vfh); kfree(handle); file->private_data = NULL; omap3isp_put(video->isp); return 0; } static unsigned int isp_video_poll(struct file *file, poll_table *wait) { struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); struct isp_video_queue *queue = &vfh->queue; return omap3isp_video_queue_poll(queue, file, wait); } static int isp_video_mmap(struct file *file, struct vm_area_struct *vma) { struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); return omap3isp_video_queue_mmap(&vfh->queue, vma); } static struct v4l2_file_operations isp_video_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = isp_video_open, .release = isp_video_release, .poll = isp_video_poll, .mmap = isp_video_mmap, }; /* ----------------------------------------------------------------------------- * ISP video core */ static const struct isp_video_operations isp_video_dummy_ops = { }; int omap3isp_video_init(struct isp_video *video, const char *name) { const char *direction; int ret; switch (video->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: direction = "output"; video->pad.flags = MEDIA_PAD_FL_SINK; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: direction = "input"; video->pad.flags = MEDIA_PAD_FL_SOURCE; break; default: return -EINVAL; } ret = media_entity_init(&video->video.entity, 1, &video->pad, 0); if (ret < 0) return ret; mutex_init(&video->mutex); atomic_set(&video->active, 0); spin_lock_init(&video->pipe.lock); mutex_init(&video->stream_lock); /* Initialize the video device. 
*/ if (video->ops == NULL) video->ops = &isp_video_dummy_ops; video->video.fops = &isp_video_fops; snprintf(video->video.name, sizeof(video->video.name), "OMAP3 ISP %s %s", name, direction); video->video.vfl_type = VFL_TYPE_GRABBER; video->video.release = video_device_release_empty; video->video.ioctl_ops = &isp_video_ioctl_ops; video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED; video_set_drvdata(&video->video, video); return 0; } int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev) { int ret; video->video.v4l2_dev = vdev; ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1); if (ret < 0) printk(KERN_ERR "%s: could not register video device (%d)\n", __func__, ret); return ret; } void omap3isp_video_unregister(struct isp_video *video) { if (video_is_registered(&video->video)) { media_entity_cleanup(&video->video.entity); video_unregister_device(&video->video); } }
gpl-2.0
pitah81/android_kernel_elephone_p8000
drivers/net/ieee802154/fakelb.c
2788
6911
/* * Loopback IEEE 802.15.4 interface * * Copyright 2007-2012 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/module.h> #include <linux/timer.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <net/mac802154.h> #include <net/wpan-phy.h> static int numlbs = 1; struct fakelb_dev_priv { struct ieee802154_dev *dev; struct list_head list; struct fakelb_priv *fake; spinlock_t lock; bool working; }; struct fakelb_priv { struct list_head list; rwlock_t lock; }; static int fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) { might_sleep(); BUG_ON(!level); *level = 0xbe; return 0; } static int fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel) { pr_debug("set channel to %d\n", channel); might_sleep(); dev->phy->current_page = page; dev->phy->current_channel = channel; return 0; } static void fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) { struct sk_buff *newskb; spin_lock(&priv->lock); if (priv->working) { newskb = pskb_copy(skb, GFP_ATOMIC); ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc); } spin_unlock(&priv->lock); } static int fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) { struct fakelb_dev_priv 
*priv = dev->priv; struct fakelb_priv *fake = priv->fake; might_sleep(); read_lock_bh(&fake->lock); if (priv->list.next == priv->list.prev) { /* we are the only one device */ fakelb_hw_deliver(priv, skb); } else { struct fakelb_dev_priv *dp; list_for_each_entry(dp, &priv->fake->list, list) { if (dp != priv && (dp->dev->phy->current_channel == priv->dev->phy->current_channel)) fakelb_hw_deliver(dp, skb); } } read_unlock_bh(&fake->lock); return 0; } static int fakelb_hw_start(struct ieee802154_dev *dev) { struct fakelb_dev_priv *priv = dev->priv; int ret = 0; spin_lock(&priv->lock); if (priv->working) ret = -EBUSY; else priv->working = 1; spin_unlock(&priv->lock); return ret; } static void fakelb_hw_stop(struct ieee802154_dev *dev) { struct fakelb_dev_priv *priv = dev->priv; spin_lock(&priv->lock); priv->working = 0; spin_unlock(&priv->lock); } static struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, .xmit = fakelb_hw_xmit, .ed = fakelb_hw_ed, .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, .stop = fakelb_hw_stop, }; /* Number of dummy devices to be set up by this module. 
*/ module_param(numlbs, int, 0); MODULE_PARM_DESC(numlbs, " number of pseudo devices"); static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) { struct fakelb_dev_priv *priv; int err; struct ieee802154_dev *ieee; ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops); if (!ieee) return -ENOMEM; priv = ieee->priv; priv->dev = ieee; /* 868 MHz BPSK 802.15.4-2003 */ ieee->phy->channels_supported[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ ieee->phy->channels_supported[0] |= 0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ ieee->phy->channels_supported[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ ieee->phy->channels_supported[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ ieee->phy->channels_supported[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ ieee->phy->channels_supported[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ ieee->phy->channels_supported[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ ieee->phy->channels_supported[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ ieee->phy->channels_supported[4] |= 1; /* UWB Low band 802.15.4a-2007 */ ieee->phy->channels_supported[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ ieee->phy->channels_supported[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ ieee->phy->channels_supported[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ ieee->phy->channels_supported[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ ieee->phy->channels_supported[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ ieee->phy->channels_supported[6] |= 0x3ffc00; INIT_LIST_HEAD(&priv->list); priv->fake = fake; spin_lock_init(&priv->lock); ieee->parent = dev; err = ieee802154_register_device(ieee); if (err) goto err_reg; write_lock_bh(&fake->lock); list_add_tail(&priv->list, &fake->list); write_unlock_bh(&fake->lock); return 0; err_reg: ieee802154_free_device(priv->dev); return err; } static void fakelb_del(struct fakelb_dev_priv *priv) { write_lock_bh(&priv->fake->lock); list_del(&priv->list); write_unlock_bh(&priv->fake->lock); 
ieee802154_unregister_device(priv->dev); ieee802154_free_device(priv->dev); } static int fakelb_probe(struct platform_device *pdev) { struct fakelb_priv *priv; struct fakelb_dev_priv *dp; int err = -ENOMEM; int i; priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); if (!priv) goto err_alloc; INIT_LIST_HEAD(&priv->list); rwlock_init(&priv->lock); for (i = 0; i < numlbs; i++) { err = fakelb_add_one(&pdev->dev, priv); if (err < 0) goto err_slave; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "added ieee802154 hardware\n"); return 0; err_slave: list_for_each_entry(dp, &priv->list, list) fakelb_del(dp); kfree(priv); err_alloc: return err; } static int fakelb_remove(struct platform_device *pdev) { struct fakelb_priv *priv = platform_get_drvdata(pdev); struct fakelb_dev_priv *dp, *temp; list_for_each_entry_safe(dp, temp, &priv->list, list) fakelb_del(dp); kfree(priv); return 0; } static struct platform_device *ieee802154fake_dev; static struct platform_driver ieee802154fake_driver = { .probe = fakelb_probe, .remove = fakelb_remove, .driver = { .name = "ieee802154fakelb", .owner = THIS_MODULE, }, }; static __init int fakelb_init_module(void) { ieee802154fake_dev = platform_device_register_simple( "ieee802154fakelb", -1, NULL, 0); return platform_driver_register(&ieee802154fake_driver); } static __exit void fake_remove_module(void) { platform_driver_unregister(&ieee802154fake_driver); platform_device_unregister(ieee802154fake_dev); } module_init(fakelb_init_module); module_exit(fake_remove_module); MODULE_LICENSE("GPL");
gpl-2.0
unless/-Wind_iproj_JB_kernel_temp
arch/arm/mach-omap2/vc.c
4836
10399
/* * OMAP Voltage Controller (VC) interface * * Copyright (C) 2011 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/bug.h> #include <plat/cpu.h> #include "voltage.h" #include "vc.h" #include "prm-regbits-34xx.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" /** * struct omap_vc_channel_cfg - describe the cfg_channel bitfield * @sa: bit for slave address * @rav: bit for voltage configuration register * @rac: bit for command configuration register * @racen: enable bit for RAC * @cmd: bit for command value set selection * * Channel configuration bits, common for OMAP3+ * OMAP3 register: PRM_VC_CH_CONF * OMAP4 register: PRM_VC_CFG_CHANNEL * OMAP5 register: PRM_VC_SMPS_<voltdm>_CONFIG */ struct omap_vc_channel_cfg { u8 sa; u8 rav; u8 rac; u8 racen; u8 cmd; }; static struct omap_vc_channel_cfg vc_default_channel_cfg = { .sa = BIT(0), .rav = BIT(1), .rac = BIT(2), .racen = BIT(3), .cmd = BIT(4), }; /* * On OMAP3+, all VC channels have the above default bitfield * configuration, except the OMAP4 MPU channel. This appears * to be a freak accident as every other VC channel has the * default configuration, thus creating a mutant channel config. 
*/ static struct omap_vc_channel_cfg vc_mutant_channel_cfg = { .sa = BIT(0), .rav = BIT(2), .rac = BIT(3), .racen = BIT(4), .cmd = BIT(1), }; static struct omap_vc_channel_cfg *vc_cfg_bits; #define CFG_CHANNEL_MASK 0x1f /** * omap_vc_config_channel - configure VC channel to PMIC mappings * @voltdm: pointer to voltagdomain defining the desired VC channel * * Configures the VC channel to PMIC mappings for the following * PMIC settings * - i2c slave address (SA) * - voltage configuration address (RAV) * - command configuration address (RAC) and enable bit (RACEN) * - command values for ON, ONLP, RET and OFF (CMD) * * This function currently only allows flexible configuration of the * non-default channel. Starting with OMAP4, there are more than 2 * channels, with one defined as the default (on OMAP4, it's MPU.) * Only the non-default channel can be configured. */ static int omap_vc_config_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; /* * For default channel, the only configurable bit is RACEN. * All others must stay at zero (see function comment above.) */ if (vc->flags & OMAP_VC_CHANNEL_DEFAULT) vc->cfg_channel &= vc_cfg_bits->racen; voltdm->rmw(CFG_CHANNEL_MASK << vc->cfg_channel_sa_shift, vc->cfg_channel << vc->cfg_channel_sa_shift, vc->cfg_channel_reg); return 0; } /* Voltage scale and accessory APIs */ int omap_vc_pre_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 *target_vsel, u8 *current_vsel) { struct omap_vc_channel *vc = voltdm->vc; u32 vc_cmdval; /* Check if sufficient pmic info is available for this vdd */ if (!voltdm->pmic) { pr_err("%s: Insufficient pmic info to scale the vdd_%s\n", __func__, voltdm->name); return -EINVAL; } if (!voltdm->pmic->uv_to_vsel) { pr_err("%s: PMIC function to convert voltage in uV to" "vsel not registered. 
Hence unable to scale voltage" "for vdd_%s\n", __func__, voltdm->name); return -ENODATA; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return -EINVAL; } *target_vsel = voltdm->pmic->uv_to_vsel(target_volt); *current_vsel = voltdm->pmic->uv_to_vsel(voltdm->nominal_volt); /* Setting the ON voltage to the new target voltage */ vc_cmdval = voltdm->read(vc->cmdval_reg); vc_cmdval &= ~vc->common->cmd_on_mask; vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift); voltdm->write(vc_cmdval, vc->cmdval_reg); omap_vp_update_errorgain(voltdm, target_volt); return 0; } void omap_vc_post_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 target_vsel, u8 current_vsel) { u32 smps_steps = 0, smps_delay = 0; smps_steps = abs(target_vsel - current_vsel); /* SMPS slew rate / step size. 2us added as buffer. */ smps_delay = ((smps_steps * voltdm->pmic->step_size) / voltdm->pmic->slew_rate) + 2; udelay(smps_delay); } /* vc_bypass_scale - VC bypass method of voltage scaling */ int omap_vc_bypass_scale(struct voltagedomain *voltdm, unsigned long target_volt) { struct omap_vc_channel *vc = voltdm->vc; u32 loop_cnt = 0, retries_cnt = 0; u32 vc_valid, vc_bypass_val_reg, vc_bypass_value; u8 target_vsel, current_vsel; int ret; ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel, &current_vsel); if (ret) return ret; vc_valid = vc->common->valid; vc_bypass_val_reg = vc->common->bypass_val_reg; vc_bypass_value = (target_vsel << vc->common->data_shift) | (vc->volt_reg_addr << vc->common->regaddr_shift) | (vc->i2c_slave_addr << vc->common->slaveaddr_shift); voltdm->write(vc_bypass_value, vc_bypass_val_reg); voltdm->write(vc_bypass_value | vc_valid, vc_bypass_val_reg); vc_bypass_value = voltdm->read(vc_bypass_val_reg); /* * Loop till the bypass command is acknowledged from the SMPS. * NOTE: This is legacy code. The loop count and retry count needs * to be revisited. 
*/ while (!(vc_bypass_value & vc_valid)) { loop_cnt++; if (retries_cnt > 10) { pr_warning("%s: Retry count exceeded\n", __func__); return -ETIMEDOUT; } if (loop_cnt > 50) { retries_cnt++; loop_cnt = 0; udelay(10); } vc_bypass_value = voltdm->read(vc_bypass_val_reg); } omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel); return 0; } static void __init omap3_vfsm_init(struct voltagedomain *voltdm) { /* * Voltage Manager FSM parameters init * XXX This data should be passed in from the board file */ voltdm->write(OMAP3_CLKSETUP, OMAP3_PRM_CLKSETUP_OFFSET); voltdm->write(OMAP3_VOLTOFFSET, OMAP3_PRM_VOLTOFFSET_OFFSET); voltdm->write(OMAP3_VOLTSETUP2, OMAP3_PRM_VOLTSETUP2_OFFSET); } static void __init omap3_vc_init_channel(struct voltagedomain *voltdm) { static bool is_initialized; if (is_initialized) return; omap3_vfsm_init(voltdm); is_initialized = true; } /* OMAP4 specific voltage init functions */ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm) { static bool is_initialized; u32 vc_val; if (is_initialized) return; /* XXX These are magic numbers and do not belong! */ vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT); voltdm->write(vc_val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET); is_initialized = true; } /** * omap_vc_i2c_init - initialize I2C interface to PMIC * @voltdm: voltage domain containing VC data * * Use PMIC supplied settings for I2C high-speed mode and * master code (if set) and program the VC I2C configuration * register. * * The VC I2C configuration is common to all VC channels, * so this function only configures I2C for the first VC * channel registers. All other VC channels will use the * same configuration. 
*/ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; static bool initialized; static bool i2c_high_speed; u8 mcode; if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).", __func__, voltdm->name, i2c_high_speed); return; } i2c_high_speed = voltdm->pmic->i2c_high_speed; if (i2c_high_speed) voltdm->rmw(vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_reg); mcode = voltdm->pmic->i2c_mcode; if (mcode) voltdm->rmw(vc->common->i2c_mcode_mask, mcode << __ffs(vc->common->i2c_mcode_mask), vc->common->i2c_cfg_reg); initialized = true; } void __init omap_vc_init_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; u8 on_vsel, onlp_vsel, ret_vsel, off_vsel; u32 val; if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); return; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return; } vc->cfg_channel = 0; if (vc->flags & OMAP_VC_CHANNEL_CFG_MUTANT) vc_cfg_bits = &vc_mutant_channel_cfg; else vc_cfg_bits = &vc_default_channel_cfg; /* get PMIC/board specific settings */ vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr; vc->volt_reg_addr = voltdm->pmic->volt_reg_addr; vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr; vc->setup_time = voltdm->pmic->volt_setup_time; /* Configure the i2c slave address for this VC */ voltdm->rmw(vc->smps_sa_mask, vc->i2c_slave_addr << __ffs(vc->smps_sa_mask), vc->smps_sa_reg); vc->cfg_channel |= vc_cfg_bits->sa; /* * Configure the PMIC register addresses. 
*/ voltdm->rmw(vc->smps_volra_mask, vc->volt_reg_addr << __ffs(vc->smps_volra_mask), vc->smps_volra_reg); vc->cfg_channel |= vc_cfg_bits->rav; if (vc->cmd_reg_addr) { voltdm->rmw(vc->smps_cmdra_mask, vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask), vc->smps_cmdra_reg); vc->cfg_channel |= vc_cfg_bits->rac | vc_cfg_bits->racen; } /* Set up the on, inactive, retention and off voltage */ on_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->on_volt); onlp_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->onlp_volt); ret_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->ret_volt); off_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->off_volt); val = ((on_vsel << vc->common->cmd_on_shift) | (onlp_vsel << vc->common->cmd_onlp_shift) | (ret_vsel << vc->common->cmd_ret_shift) | (off_vsel << vc->common->cmd_off_shift)); voltdm->write(val, vc->cmdval_reg); vc->cfg_channel |= vc_cfg_bits->cmd; /* Channel configuration */ omap_vc_config_channel(voltdm); /* Configure the setup times */ voltdm->rmw(voltdm->vfsm->voltsetup_mask, vc->setup_time << __ffs(voltdm->vfsm->voltsetup_mask), voltdm->vfsm->voltsetup_reg); omap_vc_i2c_init(voltdm); if (cpu_is_omap34xx()) omap3_vc_init_channel(voltdm); else if (cpu_is_omap44xx()) omap4_vc_init_channel(voltdm); }
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_lge_mako
net/irda/timer.c
5092
6440
/********************************************************************* * * Filename: timer.c * Version: * Description: * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Aug 16 00:59:29 1997 * Modified at: Wed Dec 8 12:50:34 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/delay.h> #include <net/irda/timer.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include <net/irda/irlap.h> #include <net/irda/irlmp.h> extern int sysctl_slot_timeout; static void irlap_slot_timer_expired(void* data); static void irlap_query_timer_expired(void* data); static void irlap_final_timer_expired(void* data); static void irlap_wd_timer_expired(void* data); static void irlap_backoff_timer_expired(void* data); static void irlap_media_busy_expired(void* data); void irlap_start_slot_timer(struct irlap_cb *self, int timeout) { irda_start_timer(&self->slot_timer, timeout, (void *) self, irlap_slot_timer_expired); } void irlap_start_query_timer(struct irlap_cb *self, int S, int s) { int timeout; /* Calculate when the peer discovery should end. Normally, we * get the end-of-discovery frame, so this is just in case * we miss it. 
* Basically, we multiply the number of remaining slots by our * slot time, plus add some extra time to properly receive the last * discovery packet (which is longer due to extra discovery info), * to avoid messing with for incomming connections requests and * to accommodate devices that perform discovery slower than us. * Jean II */ timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s) + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT); /* Set or re-set the timer. We reset the timer for each received * discovery query, which allow us to automatically adjust to * the speed of the peer discovery (faster or slower). Jean II */ irda_start_timer( &self->query_timer, timeout, (void *) self, irlap_query_timer_expired); } void irlap_start_final_timer(struct irlap_cb *self, int timeout) { irda_start_timer(&self->final_timer, timeout, (void *) self, irlap_final_timer_expired); } void irlap_start_wd_timer(struct irlap_cb *self, int timeout) { irda_start_timer(&self->wd_timer, timeout, (void *) self, irlap_wd_timer_expired); } void irlap_start_backoff_timer(struct irlap_cb *self, int timeout) { irda_start_timer(&self->backoff_timer, timeout, (void *) self, irlap_backoff_timer_expired); } void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout) { irda_start_timer(&self->media_busy_timer, timeout, (void *) self, irlap_media_busy_expired); } void irlap_stop_mbusy_timer(struct irlap_cb *self) { /* If timer is activated, kill it! */ del_timer(&self->media_busy_timer); /* If we are in NDM, there is a bunch of events in LAP that * that be pending due to the media_busy condition, such as * CONNECT_REQUEST and SEND_UI_FRAME. If we don't generate * an event, they will wait forever... 
* Jean II */ if (self->state == LAP_NDM) irlap_do_event(self, MEDIA_BUSY_TIMER_EXPIRED, NULL, NULL); } void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout) { irda_start_timer(&self->watchdog_timer, timeout, (void *) self, irlmp_watchdog_timer_expired); } void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout) { irda_start_timer(&self->discovery_timer, timeout, (void *) self, irlmp_discovery_timer_expired); } void irlmp_start_idle_timer(struct lap_cb *self, int timeout) { irda_start_timer(&self->idle_timer, timeout, (void *) self, irlmp_idle_timer_expired); } void irlmp_stop_idle_timer(struct lap_cb *self) { /* If timer is activated, kill it! */ del_timer(&self->idle_timer); } /* * Function irlap_slot_timer_expired (data) * * IrLAP slot timer has expired * */ static void irlap_slot_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, SLOT_TIMER_EXPIRED, NULL, NULL); } /* * Function irlap_query_timer_expired (data) * * IrLAP query timer has expired * */ static void irlap_query_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, QUERY_TIMER_EXPIRED, NULL, NULL); } /* * Function irda_final_timer_expired (data) * * * */ static void irlap_final_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, FINAL_TIMER_EXPIRED, NULL, NULL); } /* * Function irda_wd_timer_expired (data) * * * */ static void irlap_wd_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, WD_TIMER_EXPIRED, NULL, NULL); } /* * Function irda_backoff_timer_expired 
(data) * * * */ static void irlap_backoff_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, BACKOFF_TIMER_EXPIRED, NULL, NULL); } /* * Function irtty_media_busy_expired (data) * * */ static void irlap_media_busy_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); irda_device_set_media_busy(self->netdev, FALSE); /* Note : the LAP event will be send in irlap_stop_mbusy_timer(), * to catch other cases where the flag is cleared (for example * after a discovery) - Jean II */ }
gpl-2.0
AOKPSaber/kernel_samsung_p4
arch/sparc/mm/tlb.c
5604
1956
/* arch/sparc64/mm/tlb.c * * Copyright (C) 2004 David S. Miller <davem@redhat.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/preempt.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/tlb.h> /* Heavily inspired by the ppc64 code. */ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); if (tb->tlb_nr) { flush_tsb_user(tb); if (CTX_VALID(tb->mm->context)) { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); #else __flush_tlb_pending(CTX_HWBITS(tb->mm->context), tb->tlb_nr, &tb->vaddrs[0]); #endif } tb->tlb_nr = 0; } put_cpu_var(tlb_batch); } void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; vaddr &= PAGE_MASK; if (pte_exec(orig)) vaddr |= 0x1UL; if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) goto no_cache_flush; page = pfn_to_page(pfn); if (PageReserved(page)) goto no_cache_flush; /* A real file page? */ mapping = page_mapping(page); if (!mapping) goto no_cache_flush; paddr = (unsigned long) page_address(page); if ((paddr ^ vaddr) & (1 << 13)) flush_dcache_page_all(mm, page); } no_cache_flush: if (fullmm) { put_cpu_var(tlb_batch); return; } nr = tb->tlb_nr; if (unlikely(nr != 0 && mm != tb->mm)) { flush_tlb_pending(); nr = 0; } if (nr == 0) tb->mm = mm; tb->vaddrs[nr] = vaddr; tb->tlb_nr = ++nr; if (nr >= TLB_BATCH_NR) flush_tlb_pending(); put_cpu_var(tlb_batch); }
gpl-2.0
Ateeq72/android_kernel_samsung_n1
net/netfilter/nf_conntrack_sip.c
5604
45115
/* SIP extension for IP connection tracking. * * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> * based on RR's ip_conntrack_ftp.c and other modules. * (C) 2007 United Security Providers * (C) 2007, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/in.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_sip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); MODULE_DESCRIPTION("SIP connection tracking helper"); MODULE_ALIAS("ip_conntrack_sip"); MODULE_ALIAS_NFCT_HELPER("sip"); #define MAX_PORTS 8 static unsigned short ports[MAX_PORTS]; static unsigned int ports_c; module_param_array(ports, ushort, &ports_c, 0400); MODULE_PARM_DESC(ports, "port numbers of SIP servers"); static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT; module_param(sip_timeout, uint, 0600); MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session"); static int sip_direct_signalling __read_mostly = 1; module_param(sip_direct_signalling, int, 0600); MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar " "only (default 1)"); static int sip_direct_media __read_mostly = 1; module_param(sip_direct_media, int, 0600); MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " "endpoints only (default 1)"); unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) 
__read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sip_hook); void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook); unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *exp, unsigned int matchoff, unsigned int matchlen) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, const union nf_inet_addr *addr) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, u_int16_t port) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int sdpoff, const union nf_inet_addr *addr) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *rtp_exp, struct nf_conntrack_expect *rtcp_exp, unsigned int mediaoff, unsigned int medialen, union nf_inet_addr *rtp_addr) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook); static int string_len(const struct nf_conn *ct, const char *dptr, const char *limit, int *shift) { int len = 0; while (dptr < limit && isalpha(*dptr)) { dptr++; len++; } return len; } static int digits_len(const struct nf_conn *ct, const char *dptr, const char *limit, int *shift) { int len = 0; while (dptr < limit && isdigit(*dptr)) { dptr++; len++; } return len; } static int iswordc(const char c) { if (isalnum(c) || c == '!' 
|| c == '"' || c == '%' || (c >= '(' && c <= '/') || c == ':' ||
	    c == '<' || c == '>' || c == '?' || (c >= '[' && c <= ']') ||
	    c == '_' || c == '`' || c == '{' || c == '}' || c == '~')
		return 1;
	return 0;
}

/* Count the run of RFC 3261 "word" characters starting at dptr,
 * stopping at limit. Returns the run length (possibly 0). */
static int word_len(const char *dptr, const char *limit)
{
	int len = 0;
	while (dptr < limit && iswordc(*dptr)) {
		dptr++;
		len++;
	}
	return len;
}

/* Match length of a Call-ID value: a word, optionally followed by '@'
 * and a second word (the host part). Returns 0 when the part after '@'
 * is empty, since that is not a valid Call-ID. */
static int callid_len(const struct nf_conn *ct, const char *dptr,
		      const char *limit, int *shift)
{
	int len, domain_len;

	len = word_len(dptr, limit);
	dptr += len;
	if (!len || dptr == limit || *dptr != '@')
		return len;
	dptr++;
	len++;

	domain_len = word_len(dptr, limit);
	if (!domain_len)
		return 0;
	return len + domain_len;
}

/* get media type + port length: an alphabetic media-type token, one
 * space, then the port digits (e.g. "audio 49170"). Returns 0 when the
 * separating space is missing. */
static int media_len(const struct nf_conn *ct, const char *dptr,
		     const char *limit, int *shift)
{
	int len = string_len(ct, dptr, limit, shift);

	dptr += len;
	if (dptr >= limit || *dptr != ' ')
		return 0;
	len++;
	dptr++;

	return len + digits_len(ct, dptr, limit, shift);
}

/* Parse an IPv4/IPv6 address (family chosen by the conntrack's layer-3
 * protocol) from cp into *addr; when endp is non-NULL, *endp is set to
 * the first character past the address. Returns 1 on success, 0 on
 * parse failure or when ct is NULL. */
static int parse_addr(const struct nf_conn *ct, const char *cp,
		      const char **endp, union nf_inet_addr *addr,
		      const char *limit)
{
	const char *end;
	int ret = 0;

	if (!ct)
		return 0;

	memset(addr, 0, sizeof(*addr));
	switch (nf_ct_l3num(ct)) {
	case AF_INET:
		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
		break;
	case AF_INET6:
		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
		break;
	default:
		BUG();
	}

	if (ret == 0 || end == cp)
		return 0;
	if (endp)
		*endp = end;
	return 1;
}

/* skip ip address. returns its length (0 on parse failure). A ':'
 * directly after the address is taken as a port separator and the port
 * digits are included in the returned length. */
static int epaddr_len(const struct nf_conn *ct, const char *dptr,
		      const char *limit, int *shift)
{
	union nf_inet_addr addr;
	const char *aux = dptr;

	if (!parse_addr(ct, dptr, &dptr, &addr, limit)) {
		pr_debug("ip: %s parse failed.!\n", dptr);
		return 0;
	}

	/* Port number */
	if (*dptr == ':') {
		dptr++;
		dptr += digits_len(ct, dptr, limit, shift);
	}
	return dptr - aux;
}

/* get address length, skipping user info ("user@" prefix, if present).
*/ static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr, const char *limit, int *shift) { const char *start = dptr; int s = *shift; /* Search for @, but stop at the end of the line. * We are inside a sip: URI, so we don't need to worry about * continuation lines. */ while (dptr < limit && *dptr != '@' && *dptr != '\r' && *dptr != '\n') { (*shift)++; dptr++; } if (dptr < limit && *dptr == '@') { dptr++; (*shift)++; } else { dptr = start; *shift = s; } return epaddr_len(ct, dptr, limit, shift); } /* Parse a SIP request line of the form: * * Request-Line = Method SP Request-URI SP SIP-Version CRLF * * and return the offset and length of the address contained in the Request-URI. */ int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr, unsigned int datalen, unsigned int *matchoff, unsigned int *matchlen, union nf_inet_addr *addr, __be16 *port) { const char *start = dptr, *limit = dptr + datalen, *end; unsigned int mlen; unsigned int p; int shift = 0; /* Skip method and following whitespace */ mlen = string_len(ct, dptr, limit, NULL); if (!mlen) return 0; dptr += mlen; if (++dptr >= limit) return 0; /* Find SIP URI */ for (; dptr < limit - strlen("sip:"); dptr++) { if (*dptr == '\r' || *dptr == '\n') return -1; if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) { dptr += strlen("sip:"); break; } } if (!skp_epaddr_len(ct, dptr, limit, &shift)) return 0; dptr += shift; if (!parse_addr(ct, dptr, &end, addr, limit)) return -1; if (end < limit && *end == ':') { end++; p = simple_strtoul(end, (char **)&end, 10); if (p < 1024 || p > 65535) return -1; *port = htons(p); } else *port = htons(SIP_PORT); if (end == dptr) return 0; *matchoff = dptr - start; *matchlen = end - dptr; return 1; } EXPORT_SYMBOL_GPL(ct_sip_parse_request); /* SIP header parsing: SIP headers are located at the beginning of a line, but * may span several lines, in which case the continuation lines begin with a * whitespace character. 
RFC 2543 allows lines to be terminated with CR, LF or * CRLF, RFC 3261 allows only CRLF, we support both. * * Headers are followed by (optionally) whitespace, a colon, again (optionally) * whitespace and the values. Whitespace in this context means any amount of * tabs, spaces and continuation lines, which are treated as a single whitespace * character. * * Some headers may appear multiple times. A comma separated list of values is * equivalent to multiple headers. */ static const struct sip_header ct_sip_hdrs[] = { [SIP_HDR_CSEQ] = SIP_HDR("CSeq", NULL, NULL, digits_len), [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len), [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len), [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len), [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len), [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len), [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len), [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len), [SIP_HDR_CALL_ID] = SIP_HDR("Call-Id", "i", NULL, callid_len), }; static const char *sip_follow_continuation(const char *dptr, const char *limit) { /* Walk past newline */ if (++dptr >= limit) return NULL; /* Skip '\n' in CR LF */ if (*(dptr - 1) == '\r' && *dptr == '\n') { if (++dptr >= limit) return NULL; } /* Continuation line? 
*/
	if (*dptr != ' ' && *dptr != '\t')
		return NULL;

	/* skip leading whitespace */
	for (; dptr < limit; dptr++) {
		if (*dptr != ' ' && *dptr != '\t')
			break;
	}
	return dptr;
}

/* Skip spaces and line breaks; a line break is only skipped when it
 * turns out to begin a continuation line, otherwise scanning stops.
 * Returns NULL when a bare line break terminates the value. */
static const char *sip_skip_whitespace(const char *dptr, const char *limit)
{
	for (; dptr < limit; dptr++) {
		if (*dptr == ' ')
			continue;
		if (*dptr != '\r' && *dptr != '\n')
			break;
		dptr = sip_follow_continuation(dptr, limit);
		if (dptr == NULL)
			return NULL;
	}
	return dptr;
}

/* Search within a SIP header value, dealing with continuation lines */
static const char *ct_sip_header_search(const char *dptr, const char *limit,
					const char *needle, unsigned int len)
{
	for (limit -= len; dptr < limit; dptr++) {
		/* Line break inside the value: follow the continuation
		 * line if there is one, otherwise give up. */
		if (*dptr == '\r' || *dptr == '\n') {
			dptr = sip_follow_continuation(dptr, limit);
			if (dptr == NULL)
				break;
			continue;
		}

		if (strnicmp(dptr, needle, len) == 0)
			return dptr;
	}
	return NULL;
}

/* Locate a SIP header of the given type, optionally search within its
 * value, and hand the value to the header's match_len() callback.
 * Returns 1 with *matchoff/*matchlen set on success, 0 when no header
 * of this type is found, -1 on a parse error. */
int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
		      unsigned int dataoff, unsigned int datalen,
		      enum sip_header_types type,
		      unsigned int *matchoff, unsigned int *matchlen)
{
	const struct sip_header *hdr = &ct_sip_hdrs[type];
	const char *start = dptr, *limit = dptr + datalen;
	int shift = 0;

	for (dptr += dataoff; dptr < limit; dptr++) {
		/* Find beginning of line */
		if (*dptr != '\r' && *dptr != '\n')
			continue;
		if (++dptr >= limit)
			break;
		/* Step over the '\n' of a CRLF pair. */
		if (*(dptr - 1) == '\r' && *dptr == '\n') {
			if (++dptr >= limit)
				break;
		}

		/* Skip continuation lines */
		if (*dptr == ' ' || *dptr == '\t')
			continue;

		/* Find header. Compact headers must be followed by a
		 * non-alphabetic character to avoid mismatches.
*/ if (limit - dptr >= hdr->len && strnicmp(dptr, hdr->name, hdr->len) == 0) dptr += hdr->len; else if (hdr->cname && limit - dptr >= hdr->clen + 1 && strnicmp(dptr, hdr->cname, hdr->clen) == 0 && !isalpha(*(dptr + hdr->clen))) dptr += hdr->clen; else continue; /* Find and skip colon */ dptr = sip_skip_whitespace(dptr, limit); if (dptr == NULL) break; if (*dptr != ':' || ++dptr >= limit) break; /* Skip whitespace after colon */ dptr = sip_skip_whitespace(dptr, limit); if (dptr == NULL) break; *matchoff = dptr - start; if (hdr->search) { dptr = ct_sip_header_search(dptr, limit, hdr->search, hdr->slen); if (!dptr) return -1; dptr += hdr->slen; } *matchlen = hdr->match_len(ct, dptr, limit, &shift); if (!*matchlen) return -1; *matchoff = dptr - start + shift; return 1; } return 0; } EXPORT_SYMBOL_GPL(ct_sip_get_header); /* Get next header field in a list of comma separated values */ static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, enum sip_header_types type, unsigned int *matchoff, unsigned int *matchlen) { const struct sip_header *hdr = &ct_sip_hdrs[type]; const char *start = dptr, *limit = dptr + datalen; int shift = 0; dptr += dataoff; dptr = ct_sip_header_search(dptr, limit, ",", strlen(",")); if (!dptr) return 0; dptr = ct_sip_header_search(dptr, limit, hdr->search, hdr->slen); if (!dptr) return 0; dptr += hdr->slen; *matchoff = dptr - start; *matchlen = hdr->match_len(ct, dptr, limit, &shift); if (!*matchlen) return -1; *matchoff += shift; return 1; } /* Walk through headers until a parsable one is found or no header of the * given type is left. 
*/ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, enum sip_header_types type, int *in_header, unsigned int *matchoff, unsigned int *matchlen) { int ret; if (in_header && *in_header) { while (1) { ret = ct_sip_next_header(ct, dptr, dataoff, datalen, type, matchoff, matchlen); if (ret > 0) return ret; if (ret == 0) break; dataoff += *matchoff; } *in_header = 0; } while (1) { ret = ct_sip_get_header(ct, dptr, dataoff, datalen, type, matchoff, matchlen); if (ret > 0) break; if (ret == 0) return ret; dataoff += *matchoff; } if (in_header) *in_header = 1; return 1; } /* Locate a SIP header, parse the URI and return the offset and length of * the address as well as the address and port themselves. A stream of * headers can be parsed by handing in a non-NULL datalen and in_header * pointer. */ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, unsigned int *dataoff, unsigned int datalen, enum sip_header_types type, int *in_header, unsigned int *matchoff, unsigned int *matchlen, union nf_inet_addr *addr, __be16 *port) { const char *c, *limit = dptr + datalen; unsigned int p; int ret; ret = ct_sip_walk_headers(ct, dptr, dataoff ? 
*dataoff : 0, datalen, type, in_header, matchoff, matchlen); WARN_ON(ret < 0); if (ret == 0) return ret; if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) return -1; if (*c == ':') { c++; p = simple_strtoul(c, (char **)&c, 10); if (p < 1024 || p > 65535) return -1; *port = htons(p); } else *port = htons(SIP_PORT); if (dataoff) *dataoff = c - dptr; return 1; } EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen) { const char *limit = dptr + datalen; const char *start; const char *end; limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); if (!limit) limit = dptr + datalen; start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); if (!start) return 0; start += strlen(name); end = ct_sip_header_search(start, limit, ";", strlen(";")); if (!end) end = limit; *matchoff = start - dptr; *matchlen = end - start; return 1; } /* Parse address from header parameter and return address, offset and length */ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen, union nf_inet_addr *addr) { const char *limit = dptr + datalen; const char *start, *end; limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); if (!limit) limit = dptr + datalen; start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); if (!start) return 0; start += strlen(name); if (!parse_addr(ct, start, &end, addr, limit)) return 0; *matchoff = start - dptr; *matchlen = end - start; return 1; } EXPORT_SYMBOL_GPL(ct_sip_parse_address_param); /* Parse numerical header parameter and return value, offset and length */ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, const char 
*name, unsigned int *matchoff,
				 unsigned int *matchlen, unsigned int *val)
{
	const char *limit = dptr + datalen;
	const char *start;
	char *end;

	/* Restrict the search to the current comma-separated list
	 * element. */
	limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
	if (!limit)
		limit = dptr + datalen;

	start = ct_sip_header_search(dptr + dataoff, limit, name,
				     strlen(name));
	if (!start)
		return 0;
	start += strlen(name);

	*val = simple_strtoul(start, &end, 0);
	if (start == end)
		return 0;
	if (matchoff && matchlen) {
		*matchoff = start - dptr;
		*matchlen = end - start;
	}
	/* NOTE(review): this helper only ever returns 0 or 1, yet some
	 * callers test its result with "< 0" — confirm whether a -1
	 * error path was intended. */
	return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);

/* Extract the "transport=" URI parameter and map it to an IP protocol
 * number; defaults to the connection's own protocol when the parameter
 * is absent. Returns 0 when the transport is unknown or does not match
 * the connection's protocol, 1 otherwise. */
static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
				  unsigned int dataoff, unsigned int datalen,
				  u8 *proto)
{
	unsigned int matchoff, matchlen;

	if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
			       &matchoff, &matchlen)) {
		if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
			*proto = IPPROTO_TCP;
		else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
			*proto = IPPROTO_UDP;
		else
			return 0;

		if (*proto != nf_ct_protonum(ct))
			return 0;
	} else
		*proto = nf_ct_protonum(ct);
	return 1;
}

/* SDP header parsing: a SDP session description contains an ordered set of
 * headers, starting with a section containing general session parameters,
 * optionally followed by multiple media descriptions.
 *
 * SDP headers always start at the beginning of a line. According to RFC 2327:
 * "The sequence CRLF (0x0d0a) is used to end a record, although parsers should
 * be tolerant and also accept records terminated with a single newline
 * character". We handle both cases.
*/ static const struct sip_header ct_sdp_hdrs[] = { [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len), [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len), [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len), [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len), [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), }; /* Linear string search within SDP header values */ static const char *ct_sdp_header_search(const char *dptr, const char *limit, const char *needle, unsigned int len) { for (limit -= len; dptr < limit; dptr++) { if (*dptr == '\r' || *dptr == '\n') break; if (strncmp(dptr, needle, len) == 0) return dptr; } return NULL; } /* Locate a SDP header (optionally a substring within the header value), * optionally stopping at the first occurrence of the term header, parse * it and return the offset and length of the data we're interested in. */ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, enum sdp_header_types type, enum sdp_header_types term, unsigned int *matchoff, unsigned int *matchlen) { const struct sip_header *hdr = &ct_sdp_hdrs[type]; const struct sip_header *thdr = &ct_sdp_hdrs[term]; const char *start = dptr, *limit = dptr + datalen; int shift = 0; for (dptr += dataoff; dptr < limit; dptr++) { /* Find beginning of line */ if (*dptr != '\r' && *dptr != '\n') continue; if (++dptr >= limit) break; if (*(dptr - 1) == '\r' && *dptr == '\n') { if (++dptr >= limit) break; } if (term != SDP_HDR_UNSPEC && limit - dptr >= thdr->len && strnicmp(dptr, thdr->name, thdr->len) == 0) break; else if (limit - dptr >= hdr->len && strnicmp(dptr, hdr->name, hdr->len) == 0) dptr += hdr->len; else continue; *matchoff = dptr - start; if (hdr->search) { dptr = ct_sdp_header_search(dptr, limit, hdr->search, hdr->slen); if (!dptr) return -1; dptr += hdr->slen; } *matchlen = hdr->match_len(ct, dptr, limit, &shift); if 
(!*matchlen) return -1; *matchoff = dptr - start + shift; return 1; } return 0; } EXPORT_SYMBOL_GPL(ct_sip_get_sdp_header); static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, enum sdp_header_types type, enum sdp_header_types term, unsigned int *matchoff, unsigned int *matchlen, union nf_inet_addr *addr) { int ret; ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term, matchoff, matchlen); if (ret <= 0) return ret; if (!parse_addr(ct, dptr + *matchoff, NULL, addr, dptr + *matchoff + *matchlen)) return -1; return 1; } static int refresh_signalling_expectation(struct nf_conn *ct, union nf_inet_addr *addr, u8 proto, __be16 port, unsigned int expires) { struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_expect *exp; struct hlist_node *n, *next; int found = 0; spin_lock_bh(&nf_conntrack_lock); hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { if (exp->class != SIP_EXPECT_SIGNALLING || !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || exp->tuple.dst.protonum != proto || exp->tuple.dst.u.udp.port != port) continue; if (!del_timer(&exp->timeout)) continue; exp->flags &= ~NF_CT_EXPECT_INACTIVE; exp->timeout.expires = jiffies + expires * HZ; add_timer(&exp->timeout); found = 1; break; } spin_unlock_bh(&nf_conntrack_lock); return found; } static void flush_expectations(struct nf_conn *ct, bool media) { struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_expect *exp; struct hlist_node *n, *next; spin_lock_bh(&nf_conntrack_lock); hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media) continue; if (!del_timer(&exp->timeout)) continue; nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); if (!media) break; } spin_unlock_bh(&nf_conntrack_lock); } static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, union nf_inet_addr *daddr, __be16 
port, enum sip_expectation_classes class, unsigned int mediaoff, unsigned int medialen) { struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp; enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct net *net = nf_ct_net(ct); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); union nf_inet_addr *saddr; struct nf_conntrack_tuple tuple; int direct_rtp = 0, skip_expect = 0, ret = NF_DROP; u_int16_t base_port; __be16 rtp_port, rtcp_port; typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port; typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media; saddr = NULL; if (sip_direct_media) { if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3)) return NF_ACCEPT; saddr = &ct->tuplehash[!dir].tuple.src.u3; } /* We need to check whether the registration exists before attempting * to register it since we can see the same media description multiple * times on different connections in case multiple endpoints receive * the same call. * * RTP optimization: if we find a matching media channel expectation * and both the expectation and this connection are SNATed, we assume * both sides can reach each other directly and use the final * destination address from the expectation. We still need to keep * the NATed expectations for media that might arrive from the * outside, and additionally need to expect the direct RTP stream * in case it passes through us even without NAT. 
*/ memset(&tuple, 0, sizeof(tuple)); if (saddr) tuple.src.u3 = *saddr; tuple.src.l3num = nf_ct_l3num(ct); tuple.dst.protonum = IPPROTO_UDP; tuple.dst.u3 = *daddr; tuple.dst.u.udp.port = port; rcu_read_lock(); do { exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); if (!exp || exp->master == ct || nfct_help(exp->master)->helper != nfct_help(ct)->helper || exp->class != class) break; #ifdef CONFIG_NF_NAT_NEEDED if (exp->tuple.src.l3num == AF_INET && !direct_rtp && (exp->saved_ip != exp->tuple.dst.u3.ip || exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) && ct->status & IPS_NAT_MASK) { daddr->ip = exp->saved_ip; tuple.dst.u3.ip = exp->saved_ip; tuple.dst.u.udp.port = exp->saved_proto.udp.port; direct_rtp = 1; } else #endif skip_expect = 1; } while (!skip_expect); rcu_read_unlock(); base_port = ntohs(tuple.dst.u.udp.port) & ~1; rtp_port = htons(base_port); rtcp_port = htons(base_port + 1); if (direct_rtp) { nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); if (nf_nat_sdp_port && !nf_nat_sdp_port(skb, dataoff, dptr, datalen, mediaoff, medialen, ntohs(rtp_port))) goto err1; } if (skip_expect) return NF_ACCEPT; rtp_exp = nf_ct_expect_alloc(ct); if (rtp_exp == NULL) goto err1; nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr, IPPROTO_UDP, NULL, &rtp_port); rtcp_exp = nf_ct_expect_alloc(ct); if (rtcp_exp == NULL) goto err2; nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr, IPPROTO_UDP, NULL, &rtcp_port); nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen, rtp_exp, rtcp_exp, mediaoff, medialen, daddr); else { if (nf_ct_expect_related(rtp_exp) == 0) { if (nf_ct_expect_related(rtcp_exp) != 0) nf_ct_unexpect_related(rtp_exp); else ret = NF_ACCEPT; } } nf_ct_expect_put(rtcp_exp); err2: nf_ct_expect_put(rtp_exp); err1: return ret; } static const struct sdp_media_type sdp_media_types[] = { 
SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO), SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO), SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE), }; static const struct sdp_media_type *sdp_media_type(const char *dptr, unsigned int matchoff, unsigned int matchlen) { const struct sdp_media_type *t; unsigned int i; for (i = 0; i < ARRAY_SIZE(sdp_media_types); i++) { t = &sdp_media_types[i]; if (matchlen < t->len || strncmp(dptr + matchoff, t->name, t->len)) continue; return t; } return NULL; } static int process_sdp(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen; unsigned int mediaoff, medialen; unsigned int sdpoff; unsigned int caddr_len, maddr_len; unsigned int i; union nf_inet_addr caddr, maddr, rtp_addr; unsigned int port; enum sdp_header_types c_hdr; const struct sdp_media_type *t; int ret = NF_ACCEPT; typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr; typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session; nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook); c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 : SDP_HDR_CONNECTION_IP6; /* Find beginning of session description */ if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen, SDP_HDR_VERSION, SDP_HDR_UNSPEC, &matchoff, &matchlen) <= 0) return NF_ACCEPT; sdpoff = matchoff; /* The connection information is contained in the session description * and/or once per media description. The first media description marks * the end of the session description. */ caddr_len = 0; if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen, c_hdr, SDP_HDR_MEDIA, &matchoff, &matchlen, &caddr) > 0) caddr_len = matchlen; mediaoff = sdpoff; for (i = 0; i < ARRAY_SIZE(sdp_media_types); ) { if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen, SDP_HDR_MEDIA, SDP_HDR_UNSPEC, &mediaoff, &medialen) <= 0) break; /* Get media type and port number. 
A media port value of zero * indicates an inactive stream. */ t = sdp_media_type(*dptr, mediaoff, medialen); if (!t) { mediaoff += medialen; continue; } mediaoff += t->len; medialen -= t->len; port = simple_strtoul(*dptr + mediaoff, NULL, 10); if (port == 0) continue; if (port < 1024 || port > 65535) return NF_DROP; /* The media description overrides the session description. */ maddr_len = 0; if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen, c_hdr, SDP_HDR_MEDIA, &matchoff, &matchlen, &maddr) > 0) { maddr_len = matchlen; memcpy(&rtp_addr, &maddr, sizeof(rtp_addr)); } else if (caddr_len) memcpy(&rtp_addr, &caddr, sizeof(rtp_addr)); else return NF_DROP; ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen, &rtp_addr, htons(port), t->class, mediaoff, medialen); if (ret != NF_ACCEPT) return ret; /* Update media connection address if present */ if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen, mediaoff, c_hdr, SDP_HDR_MEDIA, &rtp_addr); if (ret != NF_ACCEPT) return ret; } i++; } /* Update session connection and owner addresses */ nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff, &rtp_addr); return ret; } static int process_invite_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } static int process_update_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { enum 
ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } static int process_prack_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } static int process_invite_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); unsigned int ret; flush_expectations(ct, true); ret = process_sdp(skb, dataoff, dptr, datalen, cseq); if (ret == NF_ACCEPT) help->help.ct_sip_info.invite_cseq = cseq; return ret; } static int process_bye_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); flush_expectations(ct, true); return NF_ACCEPT; } /* Parse a REGISTER request and create a permanent expectation for incoming * signalling connections. The expectation is marked inactive and is activated * when receiving a response indicating success from the registrar. 
*/ static int process_register_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned int matchoff, matchlen; struct nf_conntrack_expect *exp; union nf_inet_addr *saddr, daddr; __be16 port; u8 proto; unsigned int expires = 0; int ret; typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect; /* Expected connections can not register again. */ if (ct->status & IPS_EXPECTED) return NF_ACCEPT; /* We must check the expiration time: a value of zero signals the * registrar to release the binding. We'll remove our expectation * when receiving the new bindings in the response, but we don't * want to create new ones. * * The expiration time may be contained in Expires: header, the * Contact: header parameters or the URI parameters. */ if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES, &matchoff, &matchlen) > 0) expires = simple_strtoul(*dptr + matchoff, NULL, 10); ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, SIP_HDR_CONTACT, NULL, &matchoff, &matchlen, &daddr, &port); if (ret < 0) return NF_DROP; else if (ret == 0) return NF_ACCEPT; /* We don't support third-party registrations */ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) return NF_ACCEPT; if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen, &proto) == 0) return NF_ACCEPT; if (ct_sip_parse_numerical_param(ct, *dptr, matchoff + matchlen, *datalen, "expires=", NULL, NULL, &expires) < 0) return NF_DROP; if (expires == 0) { ret = NF_ACCEPT; goto store_cseq; } exp = nf_ct_expect_alloc(ct); if (!exp) return NF_DROP; saddr = NULL; if (sip_direct_signalling) saddr = &ct->tuplehash[!dir].tuple.src.u3; nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), saddr, &daddr, proto, NULL, &port); exp->timeout.expires = sip_timeout * HZ; 
exp->helper = nfct_help(ct)->helper; exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp, matchoff, matchlen); else { if (nf_ct_expect_related(exp) != 0) ret = NF_DROP; else ret = NF_ACCEPT; } nf_ct_expect_put(exp); store_cseq: if (ret == NF_ACCEPT) help->help.ct_sip_info.register_cseq = cseq; return ret; } static int process_register_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conn_help *help = nfct_help(ct); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); union nf_inet_addr addr; __be16 port; u8 proto; unsigned int matchoff, matchlen, coff = 0; unsigned int expires = 0; int in_contact = 0, ret; /* According to RFC 3261, "UAs MUST NOT send a new registration until * they have received a final response from the registrar for the * previous one or the previous REGISTER request has timed out". * * However, some servers fail to detect retransmissions and send late * responses, so we store the sequence number of the last valid * request and compare it here. 
*/ if (help->help.ct_sip_info.register_cseq != cseq) return NF_ACCEPT; if (code >= 100 && code <= 199) return NF_ACCEPT; if (code < 200 || code > 299) goto flush; if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES, &matchoff, &matchlen) > 0) expires = simple_strtoul(*dptr + matchoff, NULL, 10); while (1) { unsigned int c_expires = expires; ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, SIP_HDR_CONTACT, &in_contact, &matchoff, &matchlen, &addr, &port); if (ret < 0) return NF_DROP; else if (ret == 0) break; /* We don't support third-party registrations */ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) continue; if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen, &proto) == 0) continue; ret = ct_sip_parse_numerical_param(ct, *dptr, matchoff + matchlen, *datalen, "expires=", NULL, NULL, &c_expires); if (ret < 0) return NF_DROP; if (c_expires == 0) break; if (refresh_signalling_expectation(ct, &addr, proto, port, c_expires)) return NF_ACCEPT; } flush: flush_expectations(ct, false); return NF_ACCEPT; } static const struct sip_handler sip_handlers[] = { SIP_HANDLER("INVITE", process_invite_request, process_invite_response), SIP_HANDLER("UPDATE", process_sdp, process_update_response), SIP_HANDLER("ACK", process_sdp, NULL), SIP_HANDLER("PRACK", process_sdp, process_prack_response), SIP_HANDLER("BYE", process_bye_request, NULL), SIP_HANDLER("REGISTER", process_register_request, process_register_response), }; static int process_sip_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen, matchend; unsigned int code, cseq, i; if (*datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10); if (!code) return NF_DROP; if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, &matchoff, &matchlen) <= 0) return NF_DROP; cseq = 
simple_strtoul(*dptr + matchoff, NULL, 10); if (!cseq) return NF_DROP; matchend = matchoff + matchlen + 1; for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { const struct sip_handler *handler; handler = &sip_handlers[i]; if (handler->response == NULL) continue; if (*datalen < matchend + handler->len || strnicmp(*dptr + matchend, handler->method, handler->len)) continue; return handler->response(skb, dataoff, dptr, datalen, cseq, code); } return NF_ACCEPT; } static int process_sip_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen; unsigned int cseq, i; for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { const struct sip_handler *handler; handler = &sip_handlers[i]; if (handler->request == NULL) continue; if (*datalen < handler->len || strnicmp(*dptr, handler->method, handler->len)) continue; if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, &matchoff, &matchlen) <= 0) return NF_DROP; cseq = simple_strtoul(*dptr + matchoff, NULL, 10); if (!cseq) return NF_DROP; return handler->request(skb, dataoff, dptr, datalen, cseq); } return NF_ACCEPT; } static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct, unsigned int dataoff, const char **dptr, unsigned int *datalen) { typeof(nf_nat_sip_hook) nf_nat_sip; int ret; if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) ret = process_sip_request(skb, dataoff, dptr, datalen); else ret = process_sip_response(skb, dataoff, dptr, datalen); if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { nf_nat_sip = rcu_dereference(nf_nat_sip_hook); if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen)) ret = NF_DROP; } return ret; } static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct tcphdr *th, _tcph; unsigned int dataoff, datalen; unsigned int matchoff, matchlen, clen; unsigned int msglen, 
origlen; const char *dptr, *end; s16 diff, tdiff = 0; int ret = NF_ACCEPT; bool term; typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; /* No Data ? */ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; dataoff = protoff + th->doff * 4; if (dataoff >= skb->len) return NF_ACCEPT; nf_ct_refresh(ct, skb, sip_timeout * HZ); if (unlikely(skb_linearize(skb))) return NF_DROP; dptr = skb->data + dataoff; datalen = skb->len - dataoff; if (datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; while (1) { if (ct_sip_get_header(ct, dptr, 0, datalen, SIP_HDR_CONTENT_LENGTH, &matchoff, &matchlen) <= 0) break; clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); if (dptr + matchoff == end) break; term = false; for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) { if (end[0] == '\r' && end[1] == '\n' && end[2] == '\r' && end[3] == '\n') { term = true; break; } } if (!term) break; end += strlen("\r\n\r\n") + clen; msglen = origlen = end - dptr; if (msglen > datalen) return NF_DROP; ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); if (ret != NF_ACCEPT) break; diff = msglen - origlen; tdiff += diff; dataoff += msglen; dptr += msglen; datalen = datalen + diff - msglen; } if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook); if (nf_nat_sip_seq_adjust) nf_nat_sip_seq_adjust(skb, tdiff); } return ret; } static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { unsigned int dataoff, datalen; const char *dptr; /* No Data ? 
*/ dataoff = protoff + sizeof(struct udphdr); if (dataoff >= skb->len) return NF_ACCEPT; nf_ct_refresh(ct, skb, sip_timeout * HZ); if (unlikely(skb_linearize(skb))) return NF_DROP; dptr = skb->data + dataoff; datalen = skb->len - dataoff; if (datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; return process_sip_msg(skb, ct, dataoff, &dptr, &datalen); } static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly; static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { [SIP_EXPECT_SIGNALLING] = { .name = "signalling", .max_expected = 1, .timeout = 3 * 60, }, [SIP_EXPECT_AUDIO] = { .name = "audio", .max_expected = 2 * IP_CT_DIR_MAX, .timeout = 3 * 60, }, [SIP_EXPECT_VIDEO] = { .name = "video", .max_expected = 2 * IP_CT_DIR_MAX, .timeout = 3 * 60, }, [SIP_EXPECT_IMAGE] = { .name = "image", .max_expected = IP_CT_DIR_MAX, .timeout = 3 * 60, }, }; static void nf_conntrack_sip_fini(void) { int i, j; for (i = 0; i < ports_c; i++) { for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { if (sip[i][j].me == NULL) continue; nf_conntrack_helper_unregister(&sip[i][j]); } } } static int __init nf_conntrack_sip_init(void) { int i, j, ret; char *tmpname; if (ports_c == 0) ports[ports_c++] = SIP_PORT; for (i = 0; i < ports_c; i++) { memset(&sip[i], 0, sizeof(sip[i])); sip[i][0].tuple.src.l3num = AF_INET; sip[i][0].tuple.dst.protonum = IPPROTO_UDP; sip[i][0].help = sip_help_udp; sip[i][1].tuple.src.l3num = AF_INET; sip[i][1].tuple.dst.protonum = IPPROTO_TCP; sip[i][1].help = sip_help_tcp; sip[i][2].tuple.src.l3num = AF_INET6; sip[i][2].tuple.dst.protonum = IPPROTO_UDP; sip[i][2].help = sip_help_udp; sip[i][3].tuple.src.l3num = AF_INET6; sip[i][3].tuple.dst.protonum = IPPROTO_TCP; sip[i][3].help = sip_help_tcp; for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { sip[i][j].tuple.src.u.udp.port = htons(ports[i]); sip[i][j].expect_policy = sip_exp_policy; sip[i][j].expect_class_max = SIP_EXPECT_MAX; 
sip[i][j].me = THIS_MODULE; tmpname = &sip_names[i][j][0]; if (ports[i] == SIP_PORT) sprintf(tmpname, "sip"); else sprintf(tmpname, "sip-%u", i); sip[i][j].name = tmpname; pr_debug("port #%u: %u\n", i, ports[i]); ret = nf_conntrack_helper_register(&sip[i][j]); if (ret) { printk(KERN_ERR "nf_ct_sip: failed to register" " helper for pf: %u port: %u\n", sip[i][j].tuple.src.l3num, ports[i]); nf_conntrack_sip_fini(); return ret; } } } return 0; } module_init(nf_conntrack_sip_init); module_exit(nf_conntrack_sip_fini);
gpl-2.0
DirtyUnicorns/android_kernel_samsung_smdk4412
fs/nilfs2/gcinode.c
6628
5783
/* * gcinode.c - dummy inodes to buffer blocks for garbage collection * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>, * and Ryusuke Konishi <ryusuke@osrg.net>. * Revised by Ryusuke Konishi <ryusuke@osrg.net>. * */ /* * This file adds the cache of on-disk blocks to be moved in garbage * collection. The disk blocks are held with dummy inodes (called * gcinodes), and this file provides lookup function of the dummy * inodes and their buffer read function. * * Buffers and pages held by the dummy inodes will be released each * time after they are copied to a new log. Dirty blocks made on the * current generation and the blocks to be moved by GC never overlap * because the dirty blocks make a new generation; they rather must be * written individually. 
*/ #include <linux/buffer_head.h> #include <linux/mpage.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/swap.h> #include "nilfs.h" #include "btree.h" #include "btnode.h" #include "page.h" #include "mdt.h" #include "dat.h" #include "ifile.h" /* * nilfs_gccache_submit_read_data() - add data buffer and submit read request * @inode - gc inode * @blkoff - dummy offset treated as the key for the page cache * @pbn - physical block number of the block * @vbn - virtual block number of the block, 0 for non-virtual block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_data() registers the data buffer * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The block specified with @pbn does not exist. 
*/ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { struct buffer_head *bh; int err; bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); if (unlikely(!bh)) return -ENOMEM; if (buffer_uptodate(bh)) goto out; if (pbn == 0) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn); if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */ brelse(bh); goto failed; } } lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); goto out; } if (!buffer_mapped(bh)) { bh->b_bdev = inode->i_sb->s_bdev; set_buffer_mapped(bh); } bh->b_blocknr = pbn; bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(READ, bh); if (vbn) bh->b_blocknr = vbn; out: err = 0; *out_bh = bh; failed: unlock_page(bh->b_page); page_cache_release(bh->b_page); return err; } /* * nilfs_gccache_submit_read_node() - add node buffer and submit read request * @inode - gc inode * @pbn - physical block number for the block * @vbn - virtual block number for the block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_node() registers the node buffer * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { int ret; ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, vbn ? 
: pbn, pbn, READ, out_bh, &pbn); if (ret == -EEXIST) /* internal code (cache hit) */ ret = 0; return ret; } int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) return -EIO; if (buffer_dirty(bh)) return -EEXIST; if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) { clear_buffer_uptodate(bh); return -EIO; } mark_buffer_dirty(bh); return 0; } int nilfs_init_gcinode(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); inode->i_mapping->a_ops = &empty_aops; inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); return 0; } /** * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes */ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) { struct list_head *head = &nilfs->ns_gc_inodes; struct nilfs_inode_info *ii; while (!list_empty(head)) { ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); nilfs_btnode_cache_clear(&ii->i_btnode_cache); iput(&ii->vfs_inode); } }
gpl-2.0
jmztaylor/android_kernel_htc_a5
fs/nilfs2/gcinode.c
6628
5783
/* * gcinode.c - dummy inodes to buffer blocks for garbage collection * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>, * and Ryusuke Konishi <ryusuke@osrg.net>. * Revised by Ryusuke Konishi <ryusuke@osrg.net>. * */ /* * This file adds the cache of on-disk blocks to be moved in garbage * collection. The disk blocks are held with dummy inodes (called * gcinodes), and this file provides lookup function of the dummy * inodes and their buffer read function. * * Buffers and pages held by the dummy inodes will be released each * time after they are copied to a new log. Dirty blocks made on the * current generation and the blocks to be moved by GC never overlap * because the dirty blocks make a new generation; they rather must be * written individually. 
*/ #include <linux/buffer_head.h> #include <linux/mpage.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/swap.h> #include "nilfs.h" #include "btree.h" #include "btnode.h" #include "page.h" #include "mdt.h" #include "dat.h" #include "ifile.h" /* * nilfs_gccache_submit_read_data() - add data buffer and submit read request * @inode - gc inode * @blkoff - dummy offset treated as the key for the page cache * @pbn - physical block number of the block * @vbn - virtual block number of the block, 0 for non-virtual block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_data() registers the data buffer * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The block specified with @pbn does not exist. 
*/ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { struct buffer_head *bh; int err; bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); if (unlikely(!bh)) return -ENOMEM; if (buffer_uptodate(bh)) goto out; if (pbn == 0) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn); if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */ brelse(bh); goto failed; } } lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); goto out; } if (!buffer_mapped(bh)) { bh->b_bdev = inode->i_sb->s_bdev; set_buffer_mapped(bh); } bh->b_blocknr = pbn; bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(READ, bh); if (vbn) bh->b_blocknr = vbn; out: err = 0; *out_bh = bh; failed: unlock_page(bh->b_page); page_cache_release(bh->b_page); return err; } /* * nilfs_gccache_submit_read_node() - add node buffer and submit read request * @inode - gc inode * @pbn - physical block number for the block * @vbn - virtual block number for the block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_node() registers the node buffer * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { int ret; ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, vbn ? 
: pbn, pbn, READ, out_bh, &pbn); if (ret == -EEXIST) /* internal code (cache hit) */ ret = 0; return ret; } int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) return -EIO; if (buffer_dirty(bh)) return -EEXIST; if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) { clear_buffer_uptodate(bh); return -EIO; } mark_buffer_dirty(bh); return 0; } int nilfs_init_gcinode(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); inode->i_mapping->a_ops = &empty_aops; inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); return 0; } /** * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes */ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) { struct list_head *head = &nilfs->ns_gc_inodes; struct nilfs_inode_info *ii; while (!list_empty(head)) { ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); nilfs_btnode_cache_clear(&ii->i_btnode_cache); iput(&ii->vfs_inode); } }
gpl-2.0
TeamExodus/kernel_samsung_exynos5410
security/selinux/ss/ebitmap.c
7652
11441
/* * Implementation of the extensible bitmap type. * * Author : Stephen Smalley, <sds@epoch.ncsc.mil> */ /* * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the NetLabel category bitmap * * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 */ /* * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com> * Applied standard bit operations to improve bitmap scanning. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <net/netlabel.h> #include "ebitmap.h" #include "policydb.h" #define BITS_PER_U64 (sizeof(u64) * 8) int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) { struct ebitmap_node *n1, *n2; if (e1->highbit != e2->highbit) return 0; n1 = e1->node; n2 = e2->node; while (n1 && n2 && (n1->startbit == n2->startbit) && !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) { n1 = n1->next; n2 = n2->next; } if (n1 || n2) return 0; return 1; } int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src) { struct ebitmap_node *n, *new, *prev; ebitmap_init(dst); n = src->node; prev = NULL; while (n) { new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) { ebitmap_destroy(dst); return -ENOMEM; } new->startbit = n->startbit; memcpy(new->maps, n->maps, EBITMAP_SIZE / 8); new->next = NULL; if (prev) prev->next = new; else dst->node = new; prev = new; n = n->next; } dst->highbit = src->highbit; return 0; } #ifdef CONFIG_NETLABEL /** * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap * @ebmap: the ebitmap to export * @catmap: the NetLabel category bitmap * * Description: * Export a SELinux extensibile bitmap into a NetLabel category bitmap. * Returns zero on success, negative values on error. 
* */ int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_secattr_catmap **catmap) { struct ebitmap_node *e_iter = ebmap->node; struct netlbl_lsm_secattr_catmap *c_iter; u32 cmap_idx, cmap_sft; int i; /* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64, * however, it is not always compatible with an array of unsigned long * in ebitmap_node. * In addition, you should pay attention the following implementation * assumes unsigned long has a width equal with or less than 64-bit. */ if (e_iter == NULL) { *catmap = NULL; return 0; } c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (c_iter == NULL) return -ENOMEM; *catmap = c_iter; c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1); while (e_iter) { for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { unsigned int delta, e_startbit, c_endbit; e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE; c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE; if (e_startbit >= c_endbit) { c_iter->next = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (c_iter->next == NULL) goto netlbl_export_failure; c_iter = c_iter->next; c_iter->startbit = e_startbit & ~(NETLBL_CATMAP_SIZE - 1); } delta = e_startbit - c_iter->startbit; cmap_idx = delta / NETLBL_CATMAP_MAPSIZE; cmap_sft = delta % NETLBL_CATMAP_MAPSIZE; c_iter->bitmap[cmap_idx] |= e_iter->maps[i] << cmap_sft; } e_iter = e_iter->next; } return 0; netlbl_export_failure: netlbl_secattr_catmap_free(*catmap); return -ENOMEM; } /** * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap * @ebmap: the ebitmap to import * @catmap: the NetLabel category bitmap * * Description: * Import a NetLabel category bitmap into a SELinux extensibile bitmap. * Returns zero on success, negative values on error. 
* */ int ebitmap_netlbl_import(struct ebitmap *ebmap, struct netlbl_lsm_secattr_catmap *catmap) { struct ebitmap_node *e_iter = NULL; struct ebitmap_node *emap_prev = NULL; struct netlbl_lsm_secattr_catmap *c_iter = catmap; u32 c_idx, c_pos, e_idx, e_sft; /* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64, * however, it is not always compatible with an array of unsigned long * in ebitmap_node. * In addition, you should pay attention the following implementation * assumes unsigned long has a width equal with or less than 64-bit. */ do { for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) { unsigned int delta; u64 map = c_iter->bitmap[c_idx]; if (!map) continue; c_pos = c_iter->startbit + c_idx * NETLBL_CATMAP_MAPSIZE; if (!e_iter || c_pos >= e_iter->startbit + EBITMAP_SIZE) { e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); if (!e_iter) goto netlbl_import_failure; e_iter->startbit = c_pos - (c_pos % EBITMAP_SIZE); if (emap_prev == NULL) ebmap->node = e_iter; else emap_prev->next = e_iter; emap_prev = e_iter; } delta = c_pos - e_iter->startbit; e_idx = delta / EBITMAP_UNIT_SIZE; e_sft = delta % EBITMAP_UNIT_SIZE; while (map) { e_iter->maps[e_idx++] |= map & (-1UL); map = EBITMAP_SHIFT_UNIT_SIZE(map); } } c_iter = c_iter->next; } while (c_iter); if (e_iter != NULL) ebmap->highbit = e_iter->startbit + EBITMAP_SIZE; else ebitmap_destroy(ebmap); return 0; netlbl_import_failure: ebitmap_destroy(ebmap); return -ENOMEM; } #endif /* CONFIG_NETLABEL */ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) { struct ebitmap_node *n1, *n2; int i; if (e1->highbit < e2->highbit) return 0; n1 = e1->node; n2 = e2->node; while (n1 && n2 && (n1->startbit <= n2->startbit)) { if (n1->startbit < n2->startbit) { n1 = n1->next; continue; } for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { if ((n1->maps[i] & n2->maps[i]) != n2->maps[i]) return 0; } n1 = n1->next; n2 = n2->next; } if (n2) return 0; return 1; } int ebitmap_get_bit(struct ebitmap *e, unsigned long bit) { struct 
ebitmap_node *n; if (e->highbit < bit) return 0; n = e->node; while (n && (n->startbit <= bit)) { if ((n->startbit + EBITMAP_SIZE) > bit) return ebitmap_node_get_bit(n, bit); n = n->next; } return 0; } int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value) { struct ebitmap_node *n, *prev, *new; prev = NULL; n = e->node; while (n && n->startbit <= bit) { if ((n->startbit + EBITMAP_SIZE) > bit) { if (value) { ebitmap_node_set_bit(n, bit); } else { unsigned int s; ebitmap_node_clr_bit(n, bit); s = find_first_bit(n->maps, EBITMAP_SIZE); if (s < EBITMAP_SIZE) return 0; /* drop this node from the bitmap */ if (!n->next) { /* * this was the highest map * within the bitmap */ if (prev) e->highbit = prev->startbit + EBITMAP_SIZE; else e->highbit = 0; } if (prev) prev->next = n->next; else e->node = n->next; kfree(n); } return 0; } prev = n; n = n->next; } if (!value) return 0; new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; new->startbit = bit - (bit % EBITMAP_SIZE); ebitmap_node_set_bit(new, bit); if (!n) /* this node will be the highest map within the bitmap */ e->highbit = new->startbit + EBITMAP_SIZE; if (prev) { new->next = prev->next; prev->next = new; } else { new->next = e->node; e->node = new; } return 0; } void ebitmap_destroy(struct ebitmap *e) { struct ebitmap_node *n, *temp; if (!e) return; n = e->node; while (n) { temp = n; n = n->next; kfree(temp); } e->highbit = 0; e->node = NULL; return; } int ebitmap_read(struct ebitmap *e, void *fp) { struct ebitmap_node *n = NULL; u32 mapunit, count, startbit, index; u64 map; __le32 buf[3]; int rc, i; ebitmap_init(e); rc = next_entry(buf, fp, sizeof buf); if (rc < 0) goto out; mapunit = le32_to_cpu(buf[0]); e->highbit = le32_to_cpu(buf[1]); count = le32_to_cpu(buf[2]); if (mapunit != BITS_PER_U64) { printk(KERN_ERR "SELinux: ebitmap: map size %u does not " "match my size %Zd (high bit was %d)\n", mapunit, BITS_PER_U64, e->highbit); goto bad; } /* round up e->highbit */ e->highbit += 
EBITMAP_SIZE - 1; e->highbit -= (e->highbit % EBITMAP_SIZE); if (!e->highbit) { e->node = NULL; goto ok; } for (i = 0; i < count; i++) { rc = next_entry(&startbit, fp, sizeof(u32)); if (rc < 0) { printk(KERN_ERR "SELinux: ebitmap: truncated map\n"); goto bad; } startbit = le32_to_cpu(startbit); if (startbit & (mapunit - 1)) { printk(KERN_ERR "SELinux: ebitmap start bit (%d) is " "not a multiple of the map unit size (%u)\n", startbit, mapunit); goto bad; } if (startbit > e->highbit - mapunit) { printk(KERN_ERR "SELinux: ebitmap start bit (%d) is " "beyond the end of the bitmap (%u)\n", startbit, (e->highbit - mapunit)); goto bad; } if (!n || startbit >= n->startbit + EBITMAP_SIZE) { struct ebitmap_node *tmp; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { printk(KERN_ERR "SELinux: ebitmap: out of memory\n"); rc = -ENOMEM; goto bad; } /* round down */ tmp->startbit = startbit - (startbit % EBITMAP_SIZE); if (n) n->next = tmp; else e->node = tmp; n = tmp; } else if (startbit <= n->startbit) { printk(KERN_ERR "SELinux: ebitmap: start bit %d" " comes after start bit %d\n", startbit, n->startbit); goto bad; } rc = next_entry(&map, fp, sizeof(u64)); if (rc < 0) { printk(KERN_ERR "SELinux: ebitmap: truncated map\n"); goto bad; } map = le64_to_cpu(map); index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE; while (map) { n->maps[index++] = map & (-1UL); map = EBITMAP_SHIFT_UNIT_SIZE(map); } } ok: rc = 0; out: return rc; bad: if (!rc) rc = -EINVAL; ebitmap_destroy(e); goto out; } int ebitmap_write(struct ebitmap *e, void *fp) { struct ebitmap_node *n; u32 count; __le32 buf[3]; u64 map; int bit, last_bit, last_startbit, rc; buf[0] = cpu_to_le32(BITS_PER_U64); count = 0; last_bit = 0; last_startbit = -1; ebitmap_for_each_positive_bit(e, n, bit) { if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { count++; last_startbit = rounddown(bit, BITS_PER_U64); } last_bit = roundup(bit + 1, BITS_PER_U64); } buf[1] = cpu_to_le32(last_bit); buf[2] = cpu_to_le32(count); rc = 
put_entry(buf, sizeof(u32), 3, fp); if (rc) return rc; map = 0; last_startbit = INT_MIN; ebitmap_for_each_positive_bit(e, n, bit) { if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { __le64 buf64[1]; /* this is the very first bit */ if (!map) { last_startbit = rounddown(bit, BITS_PER_U64); map = (u64)1 << (bit - last_startbit); continue; } /* write the last node */ buf[0] = cpu_to_le32(last_startbit); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; buf64[0] = cpu_to_le64(map); rc = put_entry(buf64, sizeof(u64), 1, fp); if (rc) return rc; /* set up for the next node */ map = 0; last_startbit = rounddown(bit, BITS_PER_U64); } map |= (u64)1 << (bit - last_startbit); } /* write the last node */ if (map) { __le64 buf64[1]; /* write the last node */ buf[0] = cpu_to_le32(last_startbit); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; buf64[0] = cpu_to_le64(map); rc = put_entry(buf64, sizeof(u64), 1, fp); if (rc) return rc; } return 0; }
gpl-2.0
BanBxda/Sense_4.3
security/selinux/ss/ebitmap.c
7652
11441
/* * Implementation of the extensible bitmap type. * * Author : Stephen Smalley, <sds@epoch.ncsc.mil> */ /* * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the NetLabel category bitmap * * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 */ /* * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com> * Applied standard bit operations to improve bitmap scanning. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <net/netlabel.h> #include "ebitmap.h" #include "policydb.h" #define BITS_PER_U64 (sizeof(u64) * 8) int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) { struct ebitmap_node *n1, *n2; if (e1->highbit != e2->highbit) return 0; n1 = e1->node; n2 = e2->node; while (n1 && n2 && (n1->startbit == n2->startbit) && !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) { n1 = n1->next; n2 = n2->next; } if (n1 || n2) return 0; return 1; } int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src) { struct ebitmap_node *n, *new, *prev; ebitmap_init(dst); n = src->node; prev = NULL; while (n) { new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) { ebitmap_destroy(dst); return -ENOMEM; } new->startbit = n->startbit; memcpy(new->maps, n->maps, EBITMAP_SIZE / 8); new->next = NULL; if (prev) prev->next = new; else dst->node = new; prev = new; n = n->next; } dst->highbit = src->highbit; return 0; } #ifdef CONFIG_NETLABEL /** * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap * @ebmap: the ebitmap to export * @catmap: the NetLabel category bitmap * * Description: * Export a SELinux extensibile bitmap into a NetLabel category bitmap. * Returns zero on success, negative values on error. 
* */ int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_secattr_catmap **catmap) { struct ebitmap_node *e_iter = ebmap->node; struct netlbl_lsm_secattr_catmap *c_iter; u32 cmap_idx, cmap_sft; int i; /* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64, * however, it is not always compatible with an array of unsigned long * in ebitmap_node. * In addition, you should pay attention the following implementation * assumes unsigned long has a width equal with or less than 64-bit. */ if (e_iter == NULL) { *catmap = NULL; return 0; } c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (c_iter == NULL) return -ENOMEM; *catmap = c_iter; c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1); while (e_iter) { for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { unsigned int delta, e_startbit, c_endbit; e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE; c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE; if (e_startbit >= c_endbit) { c_iter->next = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (c_iter->next == NULL) goto netlbl_export_failure; c_iter = c_iter->next; c_iter->startbit = e_startbit & ~(NETLBL_CATMAP_SIZE - 1); } delta = e_startbit - c_iter->startbit; cmap_idx = delta / NETLBL_CATMAP_MAPSIZE; cmap_sft = delta % NETLBL_CATMAP_MAPSIZE; c_iter->bitmap[cmap_idx] |= e_iter->maps[i] << cmap_sft; } e_iter = e_iter->next; } return 0; netlbl_export_failure: netlbl_secattr_catmap_free(*catmap); return -ENOMEM; } /** * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap * @ebmap: the ebitmap to import * @catmap: the NetLabel category bitmap * * Description: * Import a NetLabel category bitmap into a SELinux extensibile bitmap. * Returns zero on success, negative values on error. 
* */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_secattr_catmap *catmap)
{
	struct ebitmap_node *e_iter = NULL;	/* node currently being filled */
	struct ebitmap_node *emap_prev = NULL;	/* tail of the list built so far */
	struct netlbl_lsm_secattr_catmap *c_iter = catmap;
	u32 c_idx, c_pos, e_idx, e_sft;

	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
	 * however, it is not always compatible with an array of unsigned long
	 * in ebitmap_node.  Note also that this implementation assumes
	 * unsigned long has a width equal to or less than 64 bits.
	 */
	do {
		for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
			unsigned int delta;
			u64 map = c_iter->bitmap[c_idx];

			if (!map)
				continue;

			/* absolute bit position of this catmap word */
			c_pos = c_iter->startbit +
				c_idx * NETLBL_CATMAP_MAPSIZE;
			/* start a new ebitmap node once we move past the
			 * range covered by the node being filled (or when
			 * no node exists yet) */
			if (!e_iter ||
			    c_pos >= e_iter->startbit + EBITMAP_SIZE) {
				e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
				if (!e_iter)
					goto netlbl_import_failure;
				/* round down to the node boundary */
				e_iter->startbit = c_pos -
					(c_pos % EBITMAP_SIZE);
				if (emap_prev == NULL)
					ebmap->node = e_iter;
				else
					emap_prev->next = e_iter;
				emap_prev = e_iter;
			}
			delta = c_pos - e_iter->startbit;
			e_idx = delta / EBITMAP_UNIT_SIZE;
			e_sft = delta % EBITMAP_UNIT_SIZE;
			/* NOTE(review): e_sft is computed but never applied
			 * in the copy loop below; looks like bits would land
			 * un-shifted when delta is not a multiple of
			 * EBITMAP_UNIT_SIZE — upstream later rewrote the
			 * catmap import entirely.  TODO confirm against the
			 * mainline netlabel catmap fixes. */
			while (map) {
				e_iter->maps[e_idx++] |= map & (-1UL);
				map = EBITMAP_SHIFT_UNIT_SIZE(map);
			}
		}
		c_iter = c_iter->next;
	} while (c_iter);
	if (e_iter != NULL)
		ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
	else
		ebitmap_destroy(ebmap);

	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */

/*
 * ebitmap_contains - check whether e2 is a subset of e1.
 *
 * Returns 1 when every bit set in @e2 is also set in @e1, 0 otherwise.
 * Both node lists are walked in parallel; they are kept sorted by
 * startbit, so a node in e1 with no counterpart in e2 can be skipped.
 */
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
{
	struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			/* e1 covers a range e2 does not use; advance e1 */
			n1 = n1->next;
			continue;
		}
		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	/* e2 still has nodes left => bits e1 cannot contain */
	if (n2)
		return 0;

	return 1;
}

/*
 * ebitmap_get_bit - test a single bit.
 *
 * Returns the value of bit @bit in @e (0 when the bit lies past the
 * end of the bitmap or inside a gap with no node).
 */
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
{
	struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}

/*
 * ebitmap_set_bit - set or clear a single bit.
 *
 * Setting a bit outside any existing node allocates a new node
 * (GFP_ATOMIC) and splices it into the sorted list; clearing the last
 * bit of a node frees that node and, when it was the last node,
 * lowers e->highbit.  Returns 0 on success or -ENOMEM.
 */
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */

				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit
							     + EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;

				kfree(n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	/* clearing a bit that was never set: nothing to do */
	if (!value)
		return 0;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}

/*
 * ebitmap_destroy - free every node and reset the bitmap to empty.
 * Safe to call on an already-empty bitmap; a NULL @e is a no-op.
 */
void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kfree(temp);
	}

	e->highbit = 0;
	e->node = NULL;
	return;
}

/*
 * ebitmap_read - load a bitmap from a binary policy stream.
 *
 * On-disk layout: a 3-word header (map unit size, highbit, node count)
 * followed by @count (startbit:u32, map:u64) pairs with strictly
 * increasing start bits.  Rejects streams whose unit size differs from
 * BITS_PER_U64.  Returns 0 on success, -EINVAL on malformed input,
 * or the error from next_entry(); the partially built bitmap is
 * destroyed on any failure.
 */
int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	u64 map;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
		       "match my size %Zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);
	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}
	for (i = 0; i < count; i++) {
		rc = next_entry(&startbit, fp, sizeof(u32));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(startbit);

		/* start bits must be aligned to the on-disk unit size */
		if (startbit & (mapunit - 1)) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		/* allocate a new node when this entry lies past the
		 * range of the current one */
		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				printk(KERN_ERR
				       "SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			/* entries must be strictly increasing */
			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&map, fp, sizeof(u64));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(map);

		/* spread the 64-bit on-disk word across the node's
		 * unsigned-long map units */
		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}

/*
 * ebitmap_write - serialize a bitmap in the format ebitmap_read()
 * expects: header (unit size, highbit, count) followed by one
 * (startbit, u64 map) pair per 64-bit chunk that contains set bits.
 * The first pass only counts chunks; the second emits them.  Returns
 * 0 on success or the error from put_entry().
 */
int ebitmap_write(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}
gpl-2.0
cwallac/KernelMerge
arch/sparc/lib/PeeCeeI.c
12004
4024
/* * PeeCeeI.c: The emerging standard... * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/module.h> #include <asm/io.h> #include <asm/byteorder.h> void outsb(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; const u8 *p = src; while (count--) outb(*p++, addr); } EXPORT_SYMBOL(outsb); void outsw(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; while (count--) { __raw_writew(*(u16 *)src, addr); src += sizeof(u16); } } EXPORT_SYMBOL(outsw); void outsl(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; u32 l, l2; if (!count) return; switch (((unsigned long)src) & 0x3) { case 0x0: /* src is naturally aligned */ while (count--) { __raw_writel(*(u32 *)src, addr); src += sizeof(u32); } break; case 0x2: /* 2-byte alignment */ while (count--) { l = (*(u16 *)src) << 16; l |= *(u16 *)(src + sizeof(u16)); __raw_writel(l, addr); src += sizeof(u32); } break; case 0x1: /* Hold three bytes in l each time, grab a byte from l2 */ l = (*(u8 *)src) << 24; l |= (*(u16 *)(src + sizeof(u8))) << 8; src += sizeof(u8) + sizeof(u16); while (count--) { l2 = *(u32 *)src; l |= (l2 >> 24); __raw_writel(l, addr); l = l2 << 8; src += sizeof(u32); } break; case 0x3: /* Hold a byte in l each time, grab 3 bytes from l2 */ l = (*(u8 *)src) << 24; src += sizeof(u8); while (count--) { l2 = *(u32 *)src; l |= (l2 >> 8); __raw_writel(l, addr); l = l2 << 24; src += sizeof(u32); } break; } } EXPORT_SYMBOL(outsl); void insb(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { u32 *pi; u8 *pb = dst; while ((((unsigned long)pb) & 0x3) && count--) *pb++ = inb(addr); pi = (u32 *)pb; while (count >= 4) { u32 w; w = (inb(addr) << 24); w |= (inb(addr) << 16); w |= (inb(addr) << 8); w |= (inb(addr) << 0); *pi++ = w; count -= 4; } pb = (u8 *)pi; while 
(count--) *pb++ = inb(addr); } } EXPORT_SYMBOL(insb); void insw(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { u16 *ps = dst; u32 *pi; if (((unsigned long)ps) & 0x2) { *ps++ = le16_to_cpu(inw(addr)); count--; } pi = (u32 *)ps; while (count >= 2) { u32 w; w = (le16_to_cpu(inw(addr)) << 16); w |= (le16_to_cpu(inw(addr)) << 0); *pi++ = w; count -= 2; } ps = (u16 *)pi; if (count) *ps = le16_to_cpu(inw(addr)); } } EXPORT_SYMBOL(insw); void insl(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { if ((((unsigned long)dst) & 0x3) == 0) { u32 *pi = dst; while (count--) *pi++ = le32_to_cpu(inl(addr)); } else { u32 l = 0, l2, *pi; u16 *ps; u8 *pb; switch (((unsigned long)dst) & 3) { case 0x2: ps = dst; count -= 1; l = le32_to_cpu(inl(addr)); *ps++ = l; pi = (u32 *)ps; while (count--) { l2 = le32_to_cpu(inl(addr)); *pi++ = (l << 16) | (l2 >> 16); l = l2; } ps = (u16 *)pi; *ps = l; break; case 0x1: pb = dst; count -= 1; l = le32_to_cpu(inl(addr)); *pb++ = l >> 24; ps = (u16 *)pb; *ps++ = ((l >> 8) & 0xffff); pi = (u32 *)ps; while (count--) { l2 = le32_to_cpu(inl(addr)); *pi++ = (l << 24) | (l2 >> 8); l = l2; } pb = (u8 *)pi; *pb = l; break; case 0x3: pb = (u8 *)dst; count -= 1; l = le32_to_cpu(inl(addr)); *pb++ = l >> 24; pi = (u32 *)pb; while (count--) { l2 = le32_to_cpu(inl(addr)); *pi++ = (l << 8) | (l2 >> 24); l = l2; } ps = (u16 *)pi; *ps++ = ((l >> 8) & 0xffff); pb = (u8 *)ps; *pb = l; break; } } } } EXPORT_SYMBOL(insl);
gpl-2.0
bowser-boot/bowser-kernel
drivers/net/irda/sir_dongle.c
12516
3508
/********************************************************************* * * sir_dongle.c: manager for serial dongle protocol drivers * * Copyright (c) 2002 Martin Diehl * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/mutex.h> #include <net/irda/irda.h> #include "sir-dev.h" /************************************************************************** * * dongle registration and attachment * */ static LIST_HEAD(dongle_list); /* list of registered dongle drivers */ static DEFINE_MUTEX(dongle_list_lock); /* protects the list */ int irda_register_dongle(struct dongle_driver *new) { struct list_head *entry; struct dongle_driver *drv; IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", __func__, new->driver_name, new->type); mutex_lock(&dongle_list_lock); list_for_each(entry, &dongle_list) { drv = list_entry(entry, struct dongle_driver, dongle_list); if (new->type == drv->type) { mutex_unlock(&dongle_list_lock); return -EEXIST; } } list_add(&new->dongle_list, &dongle_list); mutex_unlock(&dongle_list_lock); return 0; } EXPORT_SYMBOL(irda_register_dongle); int irda_unregister_dongle(struct dongle_driver *drv) { mutex_lock(&dongle_list_lock); list_del(&drv->dongle_list); mutex_unlock(&dongle_list_lock); return 0; } EXPORT_SYMBOL(irda_unregister_dongle); int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) { struct list_head *entry; const struct dongle_driver *drv = NULL; int err = -EINVAL; request_module("irda-dongle-%d", type); if (dev->dongle_drv != NULL) return -EBUSY; /* serialize access to the list of registered dongles */ mutex_lock(&dongle_list_lock); list_for_each(entry, 
&dongle_list) { drv = list_entry(entry, struct dongle_driver, dongle_list); if (drv->type == type) break; else drv = NULL; } if (!drv) { err = -ENODEV; goto out_unlock; /* no such dongle */ } /* handling of SMP races with dongle module removal - three cases: * 1) dongle driver was already unregistered - then we haven't found the * requested dongle above and are already out here * 2) the module is already marked deleted but the driver is still * registered - then the try_module_get() below will fail * 3) the try_module_get() below succeeds before the module is marked * deleted - then sys_delete_module() fails and prevents the removal * because the module is in use. */ if (!try_module_get(drv->owner)) { err = -ESTALE; goto out_unlock; /* rmmod already pending */ } dev->dongle_drv = drv; if (!drv->open || (err=drv->open(dev))!=0) goto out_reject; /* failed to open driver */ mutex_unlock(&dongle_list_lock); return 0; out_reject: dev->dongle_drv = NULL; module_put(drv->owner); out_unlock: mutex_unlock(&dongle_list_lock); return err; } int sirdev_put_dongle(struct sir_dev *dev) { const struct dongle_driver *drv = dev->dongle_drv; if (drv) { if (drv->close) drv->close(dev); /* close this dongle instance */ dev->dongle_drv = NULL; /* unlink the dongle driver */ module_put(drv->owner);/* decrement driver's module refcount */ } return 0; }
gpl-2.0
bju2000/cm_kernel_samsung_slte
sound/drivers/opl4/yrw801.c
15588
56838
/* * Information about the Yamaha YRW801 wavetable ROM chip * * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opl4_local.h" int snd_yrw801_detect(struct snd_opl4 *opl4) { char buf[15]; snd_opl4_read_memory(opl4, buf, 0x001200, 15); if (memcmp(buf, "CopyrightYAMAHA", 15)) return -ENODEV; snd_opl4_read_memory(opl4, buf, 0x1ffffe, 2); if (buf[0] != 0x01) return -ENODEV; snd_printdd("YRW801 ROM version %02x.%02x\n", buf[0], buf[1]); return 0; } /* * The instrument definitions are stored statically because, in practice, the * OPL4 is always coupled with a YRW801. Dynamic instrument loading would be * required if downloading sample data to external SRAM was actually supported * by this driver. */ static const struct opl4_region regions_00[] = { /* Acoustic Grand Piano */ {0x14, 0x27, {0x12c,7474,100, 0,0,0x00,0xc8,0x20,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6816,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5899,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5290,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4260,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3625,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3116,100, 0,0,0x04,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2081,100, 0,0,0x03,0xc8,0x20,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1444,100, 0,0,0x07,0xc8,0x20,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1915,100, 0,0,0x00,0xc8,0x20,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_01[] = { /* Bright Acoustic Piano */ {0x14, 0x2d, {0x12c,7474,100, 0,0,0x00,0xc8,0x20,0xf2,0x13,0x08,0x0}}, {0x2e, 0x33, {0x12d,6816,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12e,5899,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x12f,5290,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x130,4260,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x131,3625,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x132,3116,100, 0,0,0x04,0xc8,0x20,0xf2,0x14,0x08,0x0}}, {0x53, 0x58, {0x133,2081,100, 
0,0,0x07,0xc8,0x20,0xf2,0x14,0x18,0x0}}, {0x59, 0x5e, {0x134,1444,100, 0,0,0x0a,0xc8,0x20,0xf3,0x14,0x18,0x0}}, {0x5f, 0x6d, {0x135,1915,100, 0,0,0x00,0xc8,0x20,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_02[] = { /* Electric Grand Piano */ {0x14, 0x2d, {0x12c,7476,100, 1,0,0x00,0xae,0x20,0xf2,0x13,0x07,0x0}}, {0x2e, 0x33, {0x12d,6818,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x34, 0x39, {0x12e,5901,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x3a, 0x3f, {0x12f,5292,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x40, 0x45, {0x130,4262,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x46, 0x4b, {0x131,3627,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x4c, 0x52, {0x132,3118,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x53, 0x58, {0x133,2083,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x17,0x0}}, {0x59, 0x5e, {0x134,1446,100, 1,0,0x00,0xae,0x20,0xf3,0x14,0x17,0x0}}, {0x5f, 0x6d, {0x135,1917,100, 1,0,0x00,0xae,0x20,0xf4,0x15,0x07,0x0}}, {0x00, 0x7f, {0x06c,6375,100,-1,0,0x00,0xc2,0x28,0xf4,0x23,0x18,0x0}} }; static const struct opl4_region regions_03[] = { /* Honky-Tonk Piano */ {0x14, 0x27, {0x12c,7474,100, 0,0,0x00,0xb4,0x20,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6816,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5899,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5290,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4260,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3625,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3116,100, 0,0,0x04,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2081,100, 0,0,0x03,0xb4,0x20,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1444,100, 0,0,0x07,0xb4,0x20,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1915,100, 0,0,0x00,0xb4,0x20,0xf4,0x15,0x08,0x0}}, {0x14, 0x27, {0x12c,7486,100, 0,0,0x00,0xb4,0x20,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6803,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5912,100, 
0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5275,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4274,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3611,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3129,100, 0,0,0x04,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2074,100, 0,0,0x07,0xb4,0x20,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1457,100, 0,0,0x01,0xb4,0x20,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1903,100, 0,0,0x00,0xb4,0x20,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_04[] = { /* Electric Piano 1 */ {0x15, 0x6c, {0x00b,6570,100, 0,0,0x00,0x28,0x38,0xf0,0x00,0x0c,0x0}}, {0x00, 0x7f, {0x06c,6375,100, 0,2,0x00,0xb0,0x22,0xf4,0x23,0x19,0x0}} }; static const struct opl4_region regions_05[] = { /* Electric Piano 2 */ {0x14, 0x27, {0x12c,7476,100, 0,3,0x00,0xa2,0x1b,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6818,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5901,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5292,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4262,100, 0,3,0x0a,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3627,100, 0,3,0x0a,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3118,100, 0,3,0x04,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2083,100, 0,3,0x03,0xa2,0x1b,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1446,100, 0,3,0x07,0xa2,0x1b,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1917,100, 0,3,0x00,0xa2,0x1b,0xf4,0x15,0x08,0x0}}, {0x14, 0x2d, {0x12c,7472,100, 0,0,0x00,0xa2,0x18,0xf2,0x13,0x08,0x0}}, {0x2e, 0x33, {0x12d,6814,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12e,5897,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x12f,5288,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x130,4258,100, 0,0,0x0a,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x131,3623,100, 0,0,0x0a,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x132,3114,100, 
0,0,0x04,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x53, 0x58, {0x133,2079,100, 0,0,0x07,0xa2,0x18,0xf2,0x14,0x18,0x0}}, {0x59, 0x5e, {0x134,1442,100, 0,0,0x0a,0xa2,0x18,0xf3,0x14,0x18,0x0}}, {0x5f, 0x6d, {0x135,1913,100, 0,0,0x00,0xa2,0x18,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_06[] = { /* Harpsichord */ {0x15, 0x39, {0x080,5158,100, 0,0,0x00,0xb2,0x20,0xf5,0x24,0x19,0x0}}, {0x3a, 0x3f, {0x081,4408,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x09,0x0}}, {0x40, 0x45, {0x082,3622,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x09,0x0}}, {0x46, 0x4d, {0x083,2843,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x19,0x0}}, {0x4e, 0x6c, {0x084,1307,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x29,0x0}} }; static const struct opl4_region regions_07[] = { /* Clavinet */ {0x15, 0x51, {0x027,5009,100, 0,0,0x00,0xd2,0x28,0xf5,0x13,0x2b,0x0}}, {0x52, 0x6c, {0x028,3495,100, 0,0,0x00,0xd2,0x28,0xf5,0x13,0x3b,0x0}} }; static const struct opl4_region regions_08[] = { /* Celesta */ {0x15, 0x6c, {0x02b,3267,100, 0,0,0x00,0xdc,0x20,0xf4,0x15,0x07,0x3}} }; static const struct opl4_region regions_09[] = { /* Glockenspiel */ {0x15, 0x78, {0x0f3, 285,100, 0,0,0x00,0xc2,0x28,0xf6,0x25,0x25,0x0}} }; static const struct opl4_region regions_0a[] = { /* Music Box */ {0x15, 0x6c, {0x0f3,3362,100, 0,0,0x00,0xb6,0x20,0xa6,0x25,0x25,0x0}}, {0x15, 0x6c, {0x101,4773,100, 0,0,0x00,0xaa,0x20,0xd4,0x14,0x16,0x0}} }; static const struct opl4_region regions_0b[] = { /* Vibraphone */ {0x15, 0x6c, {0x101,4778,100, 0,0,0x00,0xc0,0x28,0xf4,0x14,0x16,0x4}} }; static const struct opl4_region regions_0c[] = { /* Marimba */ {0x15, 0x3f, {0x0f4,4778,100, 0,0,0x00,0xc4,0x38,0xf7,0x47,0x08,0x0}}, {0x40, 0x4c, {0x0f5,3217,100, 0,0,0x00,0xc4,0x38,0xf7,0x47,0x08,0x0}}, {0x4d, 0x5a, {0x0f5,3217,100, 0,0,0x00,0xc4,0x38,0xf7,0x48,0x08,0x0}}, {0x5b, 0x7f, {0x0f5,3218,100, 0,0,0x00,0xc4,0x38,0xf7,0x48,0x18,0x0}} }; static const struct opl4_region regions_0d[] = { /* Xylophone */ {0x00, 0x7f, {0x136,1729,100, 0,0,0x00,0xd2,0x38,0xf0,0x06,0x36,0x0}} }; 
static const struct opl4_region regions_0e[] = { /* Tubular Bell */ {0x01, 0x7f, {0x0ff,3999,100, 0,1,0x00,0x90,0x21,0xf4,0xa3,0x25,0x1}} }; static const struct opl4_region regions_0f[] = { /* Dulcimer */ {0x00, 0x7f, {0x03f,4236,100, 0,1,0x00,0xbc,0x29,0xf5,0x16,0x07,0x0}}, {0x00, 0x7f, {0x040,4236,100, 0,2,0x0e,0x94,0x2a,0xf5,0x16,0x07,0x0}} }; static const struct opl4_region regions_10[] = { /* Drawbar Organ */ {0x01, 0x7f, {0x08e,4394,100, 0,2,0x14,0xc2,0x3a,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_11[] = { /* Percussive Organ */ {0x15, 0x3b, {0x08c,6062,100, 0,3,0x00,0xbe,0x3b,0xf0,0x00,0x09,0x0}}, {0x3c, 0x6c, {0x08d,2984,100, 0,3,0x00,0xbe,0x3b,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_12[] = { /* Rock Organ */ {0x15, 0x30, {0x128,6574,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x31, 0x3c, {0x129,5040,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x3d, 0x48, {0x12a,3498,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x49, 0x54, {0x12b,1957,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x55, 0x6c, {0x127, 423,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_13[] = { /* Church Organ */ {0x15, 0x29, {0x087,7466,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x2a, 0x30, {0x088,6456,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x31, 0x38, {0x089,5428,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x39, 0x41, {0x08a,4408,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x42, 0x6c, {0x08b,3406,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_14[] = { /* Reed Organ */ {0x00, 0x53, {0x0ac,5570,100, 0,0,0x06,0xc0,0x38,0xf0,0x00,0x09,0x1}}, {0x54, 0x7f, {0x0ad,2497,100, 0,0,0x00,0xc0,0x38,0xf0,0x00,0x09,0x1}} }; static const struct opl4_region regions_15[] = { /* Accordion */ {0x15, 0x4c, {0x006,4261,100, 0,2,0x00,0xa4,0x22,0x90,0x00,0x09,0x0}}, {0x4d, 0x6c, {0x007,1530,100, 0,2,0x00,0xa4,0x22,0x90,0x00,0x09,0x0}}, {0x15, 0x6c, 
{0x070,4391,100, 0,3,0x00,0x8a,0x23,0xa0,0x00,0x09,0x0}} }; static const struct opl4_region regions_16[] = { /* Harmonica */ {0x15, 0x6c, {0x070,4408,100, 0,0,0x00,0xae,0x30,0xa0,0x00,0x09,0x2}} }; static const struct opl4_region regions_17[] = { /* Tango Accordion */ {0x00, 0x53, {0x0ac,5573,100, 0,0,0x00,0xae,0x38,0xf0,0x00,0x09,0x0}}, {0x54, 0x7f, {0x0ad,2500,100, 0,0,0x00,0xae,0x38,0xf0,0x00,0x09,0x0}}, {0x15, 0x6c, {0x041,8479,100, 0,2,0x00,0x6a,0x3a,0x75,0x20,0x0a,0x0}} }; static const struct opl4_region regions_18[] = { /* Nylon Guitar */ {0x15, 0x2f, {0x0b3,6964,100, 0,0,0x05,0xca,0x28,0xf5,0x34,0x09,0x0}}, {0x30, 0x36, {0x0b7,5567,100, 0,0,0x0c,0xca,0x28,0xf5,0x34,0x09,0x0}}, {0x37, 0x3c, {0x0b5,4653,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x3d, 0x43, {0x0b4,3892,100, 0,0,0x00,0xca,0x28,0xf6,0x35,0x09,0x0}}, {0x44, 0x60, {0x0b6,2723,100, 0,0,0x00,0xca,0x28,0xf6,0x35,0x19,0x0}} }; static const struct opl4_region regions_19[] = { /* Steel Guitar */ {0x15, 0x31, {0x00c,6937,100, 0,0,0x00,0xbc,0x28,0xf0,0x04,0x19,0x0}}, {0x32, 0x38, {0x00d,5410,100, 0,0,0x00,0xbc,0x28,0xf0,0x05,0x09,0x0}}, {0x39, 0x47, {0x00e,4379,100, 0,0,0x00,0xbc,0x28,0xf5,0x94,0x09,0x0}}, {0x48, 0x6c, {0x00f,2843,100, 0,0,0x00,0xbc,0x28,0xf6,0x95,0x09,0x0}} }; static const struct opl4_region regions_1a[] = { /* Jazz Guitar */ {0x15, 0x31, {0x05a,6832,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x32, 0x3f, {0x05b,4897,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x40, 0x6c, {0x05c,3218,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}} }; static const struct opl4_region regions_1b[] = { /* Clean Guitar */ {0x15, 0x2c, {0x061,7053,100, 0,1,0x00,0xb4,0x29,0xf5,0x54,0x0a,0x0}}, {0x2d, 0x31, {0x060,6434,100, 0,1,0x00,0xb4,0x29,0xf5,0x54,0x0a,0x0}}, {0x32, 0x38, {0x063,5764,100, 0,1,0x00,0xbe,0x29,0xf5,0x55,0x0a,0x0}}, {0x39, 0x3f, {0x062,4627,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x0a,0x0}}, {0x40, 0x44, {0x065,3963,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x1a,0x0}}, {0x45, 0x4b, {0x064,3313,100, 
0,1,0x00,0xb4,0x29,0xf5,0x55,0x1a,0x0}}, {0x4c, 0x54, {0x066,2462,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x2a,0x0}}, {0x55, 0x6c, {0x067,1307,100, 0,1,0x00,0xb4,0x29,0xf6,0x56,0x0a,0x0}} }; static const struct opl4_region regions_1c[] = { /* Muted Guitar */ {0x01, 0x7f, {0x068,4408,100, 0,0,0x00,0xcc,0x28,0xf6,0x15,0x09,0x0}} }; static const struct opl4_region regions_1d[] = { /* Overdriven Guitar */ {0x00, 0x40, {0x0a5,6589,100, 0,1,0x00,0xc0,0x29,0xf2,0x11,0x09,0x0}}, {0x41, 0x7f, {0x0a6,5428,100, 0,1,0x00,0xc0,0x29,0xf2,0x11,0x09,0x0}} }; static const struct opl4_region regions_1e[] = { /* Distortion Guitar */ {0x15, 0x2a, {0x051,6928,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x2b, 0x2e, {0x052,6433,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x2f, 0x32, {0x053,5944,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x33, 0x36, {0x054,5391,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x37, 0x3a, {0x055,4897,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x3b, 0x3e, {0x056,4408,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x3f, 0x42, {0x057,3892,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x43, 0x46, {0x058,3361,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x47, 0x6c, {0x059,2784,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}} }; static const struct opl4_region regions_1f[] = { /* Guitar Harmonics */ {0x15, 0x44, {0x05e,5499,100, 0,0,0x00,0xce,0x28,0xf4,0x24,0x09,0x0}}, {0x45, 0x49, {0x05d,4850,100, 0,0,0x00,0xe2,0x28,0xf4,0x24,0x09,0x0}}, {0x4a, 0x6c, {0x05f,4259,100, 0,0,0x00,0xce,0x28,0xf4,0x24,0x09,0x0}} }; static const struct opl4_region regions_20[] = { /* Acoustic Bass */ {0x15, 0x30, {0x004,8053,100, 0,0,0x00,0xe2,0x18,0xf5,0x15,0x09,0x0}}, {0x31, 0x6c, {0x005,4754,100, 0,0,0x00,0xe2,0x18,0xf5,0x15,0x09,0x0}} }; static const struct opl4_region regions_21[] = { /* Fingered Bass */ {0x01, 0x20, {0x04a,8762,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x21, 0x25, {0x04b,8114,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x26, 0x2a, {0x04c,7475,100, 
0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x2b, 0x7f, {0x04d,6841,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}} }; static const struct opl4_region regions_22[] = { /* Picked Bass */ {0x15, 0x23, {0x04f,7954,100, 0,0,0x00,0xcc,0x18,0xf3,0x90,0x0a,0x0}}, {0x24, 0x2a, {0x050,7318,100, 0,0,0x05,0xcc,0x18,0xf3,0x90,0x1a,0x0}}, {0x2b, 0x2f, {0x06b,6654,100, 0,0,0x00,0xcc,0x18,0xf3,0x90,0x2a,0x0}}, {0x30, 0x47, {0x069,6031,100, 0,0,0x00,0xcc,0x18,0xf5,0xb0,0x0a,0x0}}, {0x48, 0x6c, {0x06a,5393,100, 0,0,0x00,0xcc,0x18,0xf5,0xb0,0x0a,0x0}} }; static const struct opl4_region regions_23[] = { /* Fretless Bass */ {0x01, 0x7f, {0x04e,5297,100, 0,0,0x00,0xd2,0x10,0xf3,0x63,0x19,0x0}} }; static const struct opl4_region regions_24[] = { /* Slap Bass 1 */ {0x15, 0x6c, {0x0a3,7606,100, 0,1,0x00,0xde,0x19,0xf5,0x32,0x1a,0x0}} }; static const struct opl4_region regions_25[] = { /* Slap Bass 2 */ {0x01, 0x7f, {0x0a2,6694,100, 0,0,0x00,0xda,0x20,0xb0,0x02,0x09,0x0}} }; static const struct opl4_region regions_26[] = { /* Synth Bass 1 */ {0x15, 0x6c, {0x0be,7466,100, 0,1,0x00,0xb8,0x39,0xf4,0x14,0x09,0x0}} }; static const struct opl4_region regions_27[] = { /* Synth Bass 2 */ {0x00, 0x7f, {0x117,8103,100, 0,1,0x00,0xca,0x39,0xf3,0x50,0x08,0x0}} }; static const struct opl4_region regions_28[] = { /* Violin */ {0x15, 0x3a, {0x105,5158,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x3b, 0x3f, {0x102,4754,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x40, 0x41, {0x106,4132,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x42, 0x44, {0x107,4033,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x45, 0x47, {0x108,3580,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x48, 0x4a, {0x10a,2957,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10b,2724,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4d, 0x4e, {0x10c,2530,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4f, 0x51, {0x10d,2166,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x52, 0x6c, {0x109,1825,100, 
0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}} }; static const struct opl4_region regions_29[] = { /* Viola */ {0x15, 0x32, {0x103,5780,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x33, 0x35, {0x104,5534,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x36, 0x38, {0x105,5158,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x39, 0x3d, {0x102,4754,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x3e, 0x3f, {0x106,4132,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x40, 0x42, {0x107,4033,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x43, 0x45, {0x108,3580,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x46, 0x48, {0x10a,2957,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x49, 0x4a, {0x10b,2724,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10c,2530,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x4d, 0x4f, {0x10d,2166,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x50, 0x6c, {0x109,1825,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}} }; static const struct opl4_region regions_2a[] = { /* Cello */ {0x15, 0x2d, {0x112,6545,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x08,0x0}}, {0x2e, 0x37, {0x113,5764,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x08,0x0}}, {0x38, 0x3e, {0x115,4378,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}}, {0x3f, 0x44, {0x116,3998,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}}, {0x45, 0x6c, {0x114,3218,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}} }; static const struct opl4_region regions_2b[] = { /* Contrabass */ {0x15, 0x29, {0x110,7713,100, 0,1,0x00,0xc2,0x19,0x90,0x00,0x09,0x0}}, {0x2a, 0x6c, {0x111,6162,100, 0,1,0x00,0xc2,0x19,0x90,0x00,0x09,0x0}} }; static const struct opl4_region regions_2c[] = { /* Tremolo Strings */ {0x15, 0x3b, {0x0b0,4810,100, 0,0,0x0a,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x3c, 0x41, {0x035,4035,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x42, 0x47, {0x033,3129,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x48, 0x52, {0x034,2625,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x53, 0x6c, {0x0af, 936,100, 
0,0,0x00,0xde,0x38,0xf0,0x00,0x07,0x6}} }; static const struct opl4_region regions_2d[] = { /* Pizzicato Strings */ {0x15, 0x32, {0x0b8,6186,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x33, 0x3b, {0x0b9,5031,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x3c, 0x42, {0x0bb,4146,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x43, 0x48, {0x0ba,3245,100, 0,0,0x00,0xc2,0x28,0xf0,0x00,0x05,0x0}}, {0x49, 0x6c, {0x0bc,2352,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}} }; static const struct opl4_region regions_2e[] = { /* Harp */ {0x15, 0x46, {0x07e,3740,100, 0,1,0x00,0xd2,0x29,0xf5,0x25,0x07,0x0}}, {0x47, 0x6c, {0x07f,2319,100, 0,1,0x00,0xd2,0x29,0xf5,0x25,0x07,0x0}} }; static const struct opl4_region regions_2f[] = { /* Timpani */ {0x15, 0x6c, {0x100,6570,100, 0,0,0x00,0xf8,0x28,0xf0,0x05,0x16,0x0}} }; static const struct opl4_region regions_30[] = { /* Strings */ {0x15, 0x3b, {0x13c,4806,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x3c, 0x41, {0x13e,4035,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x42, 0x47, {0x13d,3122,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x48, 0x52, {0x13f,2629,100, 0,0,0x00,0xbe,0x20,0x80,0x00,0x07,0x0}}, {0x53, 0x6c, {0x140, 950,100, 0,0,0x00,0xbe,0x20,0x80,0x00,0x07,0x0}} }; static const struct opl4_region regions_31[] = { /* Slow Strings */ {0x15, 0x3b, {0x0b0,4810,100, 0,1,0x0a,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x3c, 0x41, {0x035,4035,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x42, 0x47, {0x033,3129,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x48, 0x52, {0x034,2625,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x53, 0x6c, {0x0af, 936,100, 0,1,0x00,0xbe,0x19,0xf0,0x00,0x07,0x0}} }; static const struct opl4_region regions_32[] = { /* Synth Strings 1 */ {0x05, 0x71, {0x002,6045,100,-2,0,0x00,0xa6,0x20,0x93,0x22,0x06,0x0}}, {0x15, 0x6c, {0x0ae,3261,100, 2,0,0x00,0xc6,0x20,0x70,0x01,0x06,0x0}} }; static const struct opl4_region regions_33[] = { /* Synth Strings 2 */ {0x15, 0x6c, {0x002,4513,100, 
5,1,0x00,0xb4,0x19,0x70,0x00,0x06,0x0}}, {0x15, 0x6c, {0x002,4501,100,-5,1,0x00,0xb4,0x19,0x70,0x00,0x06,0x0}} }; static const struct opl4_region regions_34[] = { /* Choir Aahs */ {0x15, 0x3a, {0x018,5010,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x3b, 0x40, {0x019,4370,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x41, 0x47, {0x01a,3478,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x48, 0x6c, {0x01b,2197,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}} }; static const struct opl4_region regions_35[] = { /* Voice Oohs */ {0x15, 0x6c, {0x029,3596,100, 0,0,0x00,0xe6,0x20,0xf7,0x20,0x08,0x0}} }; static const struct opl4_region regions_36[] = { /* Synth Voice */ {0x15, 0x6c, {0x02a,3482,100, 0,1,0x00,0xc2,0x19,0x85,0x21,0x07,0x0}} }; static const struct opl4_region regions_37[] = { /* Orchestra Hit */ {0x15, 0x6c, {0x049,4394,100, 0,0,0x00,0xfe,0x30,0x80,0x05,0x05,0x0}} }; static const struct opl4_region regions_38[] = { /* Trumpet */ {0x15, 0x3c, {0x0f6,4706,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x3d, 0x43, {0x0f8,3894,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x44, 0x48, {0x0f7,3118,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x49, 0x4e, {0x0fa,2322,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x4f, 0x55, {0x0f9,1634,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x56, 0x6c, {0x0fb, 786,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}} }; static const struct opl4_region regions_39[] = { /* Trombone */ {0x15, 0x3a, {0x0f0,5053,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}}, {0x3b, 0x3f, {0x0f1,4290,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}}, {0x40, 0x6c, {0x0f2,3580,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_3a[] = { /* Tuba */ {0x15, 0x2d, {0x085,7096,100, 0,1,0x00,0xde,0x21,0xf5,0x10,0x09,0x0}}, {0x2e, 0x6c, {0x086,6014,100, 0,1,0x00,0xde,0x21,0xf5,0x10,0x09,0x0}} }; static const struct opl4_region regions_3b[] = { /* Muted Trumpet */ {0x15, 0x45, {0x0b1,4135,100, 
0,0,0x00,0xcc,0x28,0xf3,0x10,0x0a,0x1}}, {0x46, 0x6c, {0x0b2,2599,100, 0,0,0x00,0xcc,0x28,0x83,0x10,0x0a,0x1}} }; static const struct opl4_region regions_3c[] = { /* French Horns */ {0x15, 0x49, {0x07c,3624,100, 0,2,0x00,0xd0,0x1a,0xf0,0x00,0x09,0x0}}, {0x4a, 0x6c, {0x07d,2664,100, 0,2,0x00,0xd0,0x1a,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_3d[] = { /* Brass Section */ {0x15, 0x42, {0x0fc,4375,100, 0,0,0x00,0xd6,0x28,0xf0,0x00,0x0a,0x0}}, {0x43, 0x6c, {0x0fd,2854,100, 0,0,0x00,0xd6,0x28,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_3e[] = { /* Synth Brass 1 */ {0x01, 0x27, {0x0d3,9094,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x28, 0x2d, {0x0da,8335,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x2e, 0x33, {0x0d4,7558,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x34, 0x39, {0x0db,6785,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x3a, 0x3f, {0x0d5,6042,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x40, 0x45, {0x0dc,5257,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x46, 0x4b, {0x0d6,4493,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x4c, 0x51, {0x0dd,3741,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x52, 0x57, {0x0d7,3012,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x58, 0x5d, {0x0de,2167,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x5e, 0x63, {0x0d8,1421,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x64, 0x7f, {0x0d9,-115,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x01, 0x27, {0x118,9103,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x28, 0x2d, {0x119,8340,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x2e, 0x33, {0x11a,7565,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x34, 0x39, {0x11b,6804,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x3a, 0x3f, {0x11c,6042,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x40, 0x45, {0x11d,5277,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x46, 0x4b, {0x11e,4520,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x4c, 0x51, {0x11f,3741,100, 
1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x52, 0x57, {0x120,3012,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x58, 0x5d, {0x121,2166,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x5e, 0x64, {0x122,1421,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x65, 0x7f, {0x123,-115,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}} }; static const struct opl4_region regions_3f[] = { /* Synth Brass 2 */ {0x01, 0x27, {0x118,9113,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x28, 0x2d, {0x119,8350,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x2e, 0x33, {0x11a,7575,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x34, 0x39, {0x11b,6814,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x3a, 0x3f, {0x11c,6052,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x40, 0x45, {0x11d,5287,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x46, 0x4b, {0x11e,4530,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x4c, 0x51, {0x11f,3751,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x52, 0x57, {0x120,3022,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x58, 0x5d, {0x121,2176,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x5e, 0x64, {0x122,1431,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x65, 0x7f, {0x123,-105,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x00, 0x7f, {0x124,4034,100,-3,2,0x00,0xea,0x22,0x85,0x23,0x08,0x0}} }; static const struct opl4_region regions_40[] = { /* Soprano Sax */ {0x15, 0x3f, {0x0e3,4228,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}}, {0x40, 0x45, {0x0e4,3495,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}}, {0x46, 0x4b, {0x0e5,2660,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x4c, 0x51, {0x0e6,2002,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x52, 0x59, {0x0e7,1186,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x59, 0x6c, {0x0e8,1730,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}} }; static const struct opl4_region regions_41[] = { /* Alto Sax */ {0x15, 0x32, {0x092,6204,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x33, 0x35, {0x096,5812,100, 
0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x36, 0x3a, {0x099,5318,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3b, 0x3b, {0x08f,5076,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3c, 0x3e, {0x093,4706,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3f, 0x41, {0x097,4321,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x42, 0x44, {0x09a,3893,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x45, 0x47, {0x090,3497,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x48, 0x4a, {0x094,3119,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x4b, 0x4d, {0x098,2726,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x4e, 0x50, {0x09b,2393,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x51, 0x53, {0x091,2088,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x54, 0x6c, {0x095,1732,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}} }; static const struct opl4_region regions_42[] = { /* Tenor Sax */ {0x24, 0x30, {0x0e9,6301,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x31, 0x34, {0x0ea,5781,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x35, 0x3a, {0x0eb,5053,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x3b, 0x41, {0x0ed,4165,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x42, 0x47, {0x0ec,3218,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x48, 0x51, {0x0ee,2462,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x52, 0x6c, {0x0ef,1421,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}} }; static const struct opl4_region regions_43[] = { /* Baritone Sax */ {0x15, 0x2d, {0x0df,6714,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x2e, 0x34, {0x0e1,5552,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x35, 0x39, {0x0e2,5178,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x3a, 0x6c, {0x0e0,4437,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_44[] = { /* Oboe */ {0x15, 0x3c, {0x042,4493,100, 0,1,0x00,0xe6,0x39,0xf4,0x10,0x0a,0x0}}, {0x3d, 0x43, {0x044,3702,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x44, 0x49, {0x043,2956,100, 
0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x4a, 0x4f, {0x046,2166,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x50, 0x55, {0x045,1420,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x56, 0x6c, {0x047, 630,100, 0,1,0x00,0xe6,0x39,0xf4,0x10,0x0a,0x0}} }; static const struct opl4_region regions_45[] = { /* English Horn */ {0x15, 0x38, {0x03c,5098,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}}, {0x39, 0x3e, {0x03b,4291,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}}, {0x3f, 0x6c, {0x03d,3540,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_46[] = { /* Bassoon */ {0x15, 0x22, {0x038,7833,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}}, {0x23, 0x2e, {0x03a,7070,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}}, {0x2f, 0x6c, {0x039,6302,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}} }; static const struct opl4_region regions_47[] = { /* Clarinet */ {0x15, 0x3b, {0x09e,5900,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x3c, 0x41, {0x0a0,5158,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x42, 0x4a, {0x09f,4260,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x4b, 0x6c, {0x0a1,2957,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}} }; static const struct opl4_region regions_48[] = { /* Piccolo */ {0x15, 0x40, {0x071,4803,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x41, 0x4d, {0x072,3314,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x4e, 0x53, {0x073,1731,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x54, 0x5f, {0x074,2085,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x60, 0x6c, {0x075,1421,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}} }; static const struct opl4_region regions_49[] = { /* Flute */ {0x15, 0x40, {0x071,4803,100, 0,0,0x00,0xdc,0x38,0xf0,0x00,0x0a,0x2}}, {0x41, 0x4d, {0x072,3314,100, 0,0,0x00,0xdc,0x38,0xf0,0x00,0x0a,0x2}}, {0x4e, 0x6c, {0x073,1731,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}} }; static const struct opl4_region regions_4a[] = { /* Recorder */ {0x15, 0x6f, {0x0bd,4897,100, 0,0,0x00,0xec,0x30,0x70,0x00,0x09,0x1}} }; 
static const struct opl4_region regions_4b[] = { /* Pan Flute */ {0x15, 0x6c, {0x077,2359,100, 0,0,0x00,0xde,0x38,0xf0,0x00,0x09,0x3}} }; static const struct opl4_region regions_4c[] = { /* Bottle Blow */ {0x15, 0x6c, {0x077,2359,100, 0,0,0x00,0xc8,0x38,0xf0,0x00,0x09,0x1}}, {0x01, 0x7f, {0x125,7372,100, 0,0,0x1e,0x80,0x00,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_4d[] = { /* Shakuhachi */ {0x00, 0x7f, {0x0ab,4548,100, 0,0,0x00,0xd6,0x30,0xf0,0x00,0x0a,0x3}}, {0x15, 0x6c, {0x076,3716,100, 0,0,0x00,0xa2,0x28,0x70,0x00,0x09,0x2}} }; static const struct opl4_region regions_4e[] = { /* Whistle */ {0x00, 0x7f, {0x0aa,1731,100, 0,4,0x00,0xd2,0x2c,0x70,0x00,0x0a,0x0}} }; static const struct opl4_region regions_4f[] = { /* Ocarina */ {0x00, 0x7f, {0x0aa,1731,100, 0,1,0x00,0xce,0x29,0x90,0x00,0x0a,0x1}} }; static const struct opl4_region regions_50[] = { /* Square Lead */ {0x01, 0x2a, {0x0cc,9853,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x2b, 0x36, {0x0cd,6785,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x37, 0x42, {0x0ca,5248,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x43, 0x4e, {0x0cf,3713,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x4f, 0x5a, {0x0ce,2176,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x5b, 0x7f, {0x0cb, 640,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x01, 0x2a, {0x0cc,9844,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x2b, 0x36, {0x0cd,6776,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x37, 0x42, {0x0ca,5239,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x43, 0x4e, {0x0cf,3704,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x4f, 0x5a, {0x0ce,2167,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x5b, 0x7f, {0x0cb, 631,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}} }; static const struct opl4_region regions_51[] = { /* Sawtooth Lead */ {0x01, 0x27, {0x118,9108,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x28, 0x2d, {0x119,8345,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7570,100, 
0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x34, 0x39, {0x11b,6809,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6047,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x40, 0x45, {0x11d,5282,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4525,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3746,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x52, 0x57, {0x120,3017,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x58, 0x5d, {0x121,2171,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x5e, 0x66, {0x122,1426,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x67, 0x7f, {0x123,-110,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x01, 0x27, {0x118,9098,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x28, 0x2d, {0x119,8335,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7560,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x34, 0x39, {0x11b,6799,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6037,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x40, 0x45, {0x11d,5272,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4515,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3736,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x52, 0x57, {0x120,3007,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x58, 0x5d, {0x121,2161,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x5e, 0x66, {0x122,1416,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x67, 0x7f, {0x123,-120,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}} }; static const struct opl4_region regions_52[] = { /* Calliope Lead */ {0x00, 0x7f, {0x0aa,1731,100, 0,0,0x00,0xc2,0x28,0x90,0x00,0x0a,0x2}}, {0x15, 0x6c, {0x076,3716,100, 0,0,0x00,0xb6,0x28,0xb0,0x00,0x09,0x2}} }; static const struct opl4_region regions_53[] = { /* Chiffer Lead */ {0x00, 0x7f, {0x13a,3665,100, 0,2,0x00,0xcc,0x2a,0xf0,0x10,0x09,0x1}}, {0x01, 0x7f, {0x0fe,3660,100, 0,0,0x00,0xbe,0x28,0xf3,0x10,0x17,0x0}} }; static const struct opl4_region 
regions_54[] = { /* Charang Lead */ {0x00, 0x40, {0x0a5,6594,100, 0,3,0x00,0xba,0x33,0xf2,0x11,0x09,0x0}}, {0x41, 0x7f, {0x0a6,5433,100, 0,3,0x00,0xba,0x33,0xf2,0x11,0x09,0x0}}, {0x01, 0x27, {0x118,9098,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x28, 0x2d, {0x119,8335,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x2e, 0x33, {0x11a,7560,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x34, 0x39, {0x11b,6799,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x3a, 0x3f, {0x11c,6037,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x40, 0x45, {0x11d,5272,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x46, 0x4b, {0x11e,4515,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x4c, 0x51, {0x11f,3736,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x52, 0x57, {0x120,3007,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x58, 0x5d, {0x121,2161,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x5e, 0x66, {0x122,1416,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x67, 0x7f, {0x123,-120,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}} }; static const struct opl4_region regions_55[] = { /* Voice Lead */ {0x00, 0x7f, {0x0aa,1739,100, 0,6,0x00,0x8c,0x2e,0x90,0x00,0x0a,0x0}}, {0x15, 0x6c, {0x02a,3474,100, 0,1,0x00,0xd8,0x29,0xf0,0x05,0x0a,0x0}} }; static const struct opl4_region regions_56[] = { /* 5ths Lead */ {0x01, 0x27, {0x118,8468,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x28, 0x2d, {0x119,7705,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x2e, 0x33, {0x11a,6930,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x34, 0x39, {0x11b,6169,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x3a, 0x3f, {0x11c,5407,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x40, 0x45, {0x11d,4642,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x46, 0x4b, {0x11e,3885,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x4c, 0x51, {0x11f,3106,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x52, 0x57, {0x120,2377,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x58, 0x5d, {0x121,1531,100, 
0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x5e, 0x64, {0x122, 786,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x65, 0x7f, {0x123,-750,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x05, 0x71, {0x002,4503,100, 0,1,0x00,0xb8,0x31,0xb3,0x20,0x0b,0x0}} }; static const struct opl4_region regions_57[] = { /* Bass & Lead */ {0x00, 0x7f, {0x117,8109,100, 0,1,0x00,0xbc,0x29,0xf3,0x50,0x08,0x0}}, {0x01, 0x27, {0x118,9097,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x28, 0x2d, {0x119,8334,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7559,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x34, 0x39, {0x11b,6798,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6036,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x40, 0x45, {0x11d,5271,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4514,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3735,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x52, 0x57, {0x120,3006,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x58, 0x5d, {0x121,2160,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x5e, 0x66, {0x122,1415,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x67, 0x7f, {0x123,-121,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}} }; static const struct opl4_region regions_58[] = { /* New Age Pad */ {0x15, 0x6c, {0x002,4501,100, 0,4,0x00,0xa4,0x24,0x80,0x01,0x05,0x0}}, {0x15, 0x6c, {0x0f3,4253,100, 0,3,0x00,0x8c,0x23,0xa2,0x14,0x06,0x1}} }; static const struct opl4_region regions_59[] = { /* Warm Pad */ {0x15, 0x6c, {0x04e,5306,100, 2,2,0x00,0x92,0x2a,0x34,0x23,0x05,0x2}}, {0x15, 0x6c, {0x029,3575,100,-2,2,0x00,0xbe,0x22,0x31,0x23,0x06,0x0}} }; static const struct opl4_region regions_5a[] = { /* Polysynth Pad */ {0x01, 0x27, {0x118,9111,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x28, 0x2d, {0x119,8348,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x2e, 0x33, {0x11a,7573,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x34, 0x39, {0x11b,6812,100, 
0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x3a, 0x3f, {0x11c,6050,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x40, 0x45, {0x11d,5285,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x46, 0x4b, {0x11e,4528,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x4c, 0x51, {0x11f,3749,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x52, 0x57, {0x120,3020,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x58, 0x5d, {0x121,2174,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x5e, 0x66, {0x122,1429,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x67, 0x7f, {0x123,-107,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x00, 0x7f, {0x124,4024,100, 0,2,0x00,0xae,0x22,0xe5,0x20,0x08,0x0}} }; static const struct opl4_region regions_5b[] = { /* Choir Pad */ {0x15, 0x3a, {0x018,5010,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x3b, 0x40, {0x019,4370,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x41, 0x47, {0x01a,3478,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x48, 0x6c, {0x01b,2197,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x15, 0x6c, {0x02a,3482,100, 0,4,0x00,0x98,0x24,0x65,0x21,0x06,0x0}} }; static const struct opl4_region regions_5c[] = { /* Bowed Pad */ {0x15, 0x6c, {0x101,4790,100,-1,1,0x00,0xbe,0x19,0x44,0x14,0x16,0x0}}, {0x00, 0x7f, {0x0aa,1720,100, 1,1,0x00,0x94,0x19,0x40,0x00,0x06,0x0}} }; static const struct opl4_region regions_5d[] = { /* Metallic Pad */ {0x15, 0x31, {0x00c,6943,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x32, 0x38, {0x00d,5416,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x39, 0x47, {0x00e,4385,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x48, 0x6c, {0x00f,2849,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x00, 0x7f, {0x03f,4224,100, 0,1,0x00,0x9c,0x31,0x65,0x16,0x07,0x0}} }; static const struct opl4_region regions_5e[] = { /* Halo Pad */ {0x00, 0x7f, {0x124,4038,100, 0,2,0x00,0xa6,0x1a,0x85,0x23,0x08,0x0}}, {0x15, 0x6c, {0x02a,3471,100, 0,3,0x00,0xc0,0x1b,0xc0,0x05,0x06,0x0}} }; static const struct opl4_region regions_5f[] = { /* 
Sweep Pad */ {0x01, 0x27, {0x0d3,9100,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x28, 0x2d, {0x0da,8341,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x2e, 0x33, {0x0d4,7564,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x34, 0x39, {0x0db,6791,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x3a, 0x3f, {0x0d5,6048,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x40, 0x45, {0x0dc,5263,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x46, 0x4b, {0x0d6,4499,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x4c, 0x51, {0x0dd,3747,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x52, 0x57, {0x0d7,3018,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x58, 0x5d, {0x0de,2173,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x5e, 0x63, {0x0d8,1427,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x64, 0x7f, {0x0d9,-109,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x01, 0x27, {0x0d3,9088,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x28, 0x2d, {0x0da,8329,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x2e, 0x33, {0x0d4,7552,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x34, 0x39, {0x0db,6779,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x3a, 0x3f, {0x0d5,6036,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x40, 0x45, {0x0dc,5251,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x46, 0x4b, {0x0d6,4487,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x4c, 0x51, {0x0dd,3735,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x52, 0x57, {0x0d7,3006,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x58, 0x5d, {0x0de,2161,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x5e, 0x63, {0x0d8,1415,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x64, 0x7f, {0x0d9,-121,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}} }; static const struct opl4_region regions_60[] = { /* Ice Rain */ {0x01, 0x7f, {0x04e,9345,100, 0,2,0x00,0xcc,0x22,0xa3,0x63,0x17,0x0}}, {0x00, 0x7f, {0x143,5586, 20, 0,2,0x00,0x6e,0x2a,0xf0,0x05,0x05,0x0}} }; static const struct opl4_region regions_61[] = { /* Soundtrack */ {0x15, 
0x6c, {0x002,4501,100, 0,2,0x00,0xb6,0x2a,0x60,0x01,0x05,0x0}}, {0x15, 0x6c, {0x0f3,1160,100, 0,5,0x00,0xa8,0x2d,0x52,0x14,0x06,0x2}} }; static const struct opl4_region regions_62[] = { /* Crystal */ {0x15, 0x6c, {0x0f3,1826,100, 0,3,0x00,0xb8,0x33,0xf6,0x25,0x25,0x0}}, {0x15, 0x2c, {0x06d,7454,100, 0,3,0x00,0xac,0x3b,0x85,0x24,0x06,0x0}}, {0x2d, 0x36, {0x06e,5925,100, 0,3,0x00,0xac,0x3b,0x85,0x24,0x06,0x0}}, {0x37, 0x6c, {0x06f,4403,100, 0,3,0x09,0xac,0x3b,0x85,0x24,0x06,0x0}} }; static const struct opl4_region regions_63[] = { /* Atmosphere */ {0x05, 0x71, {0x002,4509,100, 0,2,0x00,0xc8,0x32,0x73,0x22,0x06,0x1}}, {0x15, 0x2f, {0x0b3,6964,100, 0,2,0x05,0xc2,0x32,0xf5,0x34,0x07,0x2}}, {0x30, 0x36, {0x0b7,5567,100, 0,2,0x0c,0xc2,0x32,0xf5,0x34,0x07,0x2}}, {0x37, 0x3c, {0x0b5,4653,100, 0,2,0x00,0xc2,0x32,0xf6,0x34,0x07,0x2}}, {0x3d, 0x43, {0x0b4,3892,100, 0,2,0x00,0xc2,0x32,0xf6,0x35,0x07,0x2}}, {0x44, 0x60, {0x0b6,2723,100, 0,2,0x00,0xc2,0x32,0xf6,0x35,0x17,0x2}} }; static const struct opl4_region regions_64[] = { /* Brightness */ {0x00, 0x7f, {0x137,5285,100, 0,2,0x00,0xbe,0x2a,0xa5,0x18,0x08,0x0}}, {0x15, 0x6c, {0x02a,3481,100, 0,1,0x00,0xc8,0x29,0x80,0x05,0x05,0x0}} }; static const struct opl4_region regions_65[] = { /* Goblins */ {0x15, 0x6c, {0x002,4501,100,-1,2,0x00,0xca,0x2a,0x40,0x01,0x05,0x0}}, {0x15, 0x6c, {0x009,9679, 20, 1,4,0x00,0x3c,0x0c,0x22,0x11,0x06,0x0}} }; static const struct opl4_region regions_66[] = { /* Echoes */ {0x15, 0x6c, {0x02a,3487,100, 0,3,0x00,0xae,0x2b,0xf5,0x21,0x06,0x0}}, {0x00, 0x7f, {0x124,4027,100, 0,3,0x00,0xae,0x2b,0x85,0x23,0x07,0x0}} }; static const struct opl4_region regions_67[] = { /* Sci-Fi */ {0x15, 0x31, {0x00c,6940,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x32, 0x38, {0x00d,5413,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x39, 0x47, {0x00e,4382,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x48, 0x6c, {0x00f,2846,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x15, 0x6c, {0x002,4498,100, 
0,2,0x00,0xd4,0x22,0x80,0x01,0x05,0x0}} }; static const struct opl4_region regions_68[] = { /* Sitar */ {0x00, 0x7f, {0x10f,4408,100, 0,2,0x00,0xc4,0x32,0xf4,0x15,0x16,0x1}} }; static const struct opl4_region regions_69[] = { /* Banjo */ {0x15, 0x34, {0x013,5685,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x35, 0x38, {0x014,5009,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x39, 0x3c, {0x012,4520,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x3d, 0x44, {0x015,3622,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x45, 0x4c, {0x017,2661,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x4d, 0x6d, {0x016,1632,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}} }; static const struct opl4_region regions_6a[] = { /* Shamisen */ {0x15, 0x6c, {0x10e,3273,100, 0,0,0x00,0xc0,0x28,0xf7,0x76,0x08,0x0}} }; static const struct opl4_region regions_6b[] = { /* Koto */ {0x00, 0x7f, {0x0a9,4033,100, 0,0,0x00,0xc6,0x20,0xf0,0x06,0x07,0x0}} }; static const struct opl4_region regions_6c[] = { /* Kalimba */ {0x00, 0x7f, {0x137,3749,100, 0,0,0x00,0xce,0x38,0xf5,0x18,0x08,0x0}} }; static const struct opl4_region regions_6d[] = { /* Bagpipe */ {0x15, 0x39, {0x0a4,7683,100, 0,4,0x00,0xc0,0x1c,0xf0,0x00,0x09,0x0}}, {0x15, 0x39, {0x0a7,7680,100, 0,1,0x00,0xaa,0x19,0xf0,0x00,0x09,0x0}}, {0x3a, 0x6c, {0x0a8,3697,100, 0,1,0x00,0xaa,0x19,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_6e[] = { /* Fiddle */ {0x15, 0x3a, {0x105,5158,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x3b, 0x3f, {0x102,4754,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x40, 0x41, {0x106,4132,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x42, 0x44, {0x107,4033,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x45, 0x47, {0x108,3580,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x48, 0x4a, {0x10a,2957,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10b,2724,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4d, 0x4e, {0x10c,2530,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4f, 0x51, 
{0x10d,2166,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x52, 0x6c, {0x109,1825,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}} }; static const struct opl4_region regions_6f[] = { /* Shanai */ {0x15, 0x6c, {0x041,6946,100, 0,1,0x00,0xc4,0x31,0x95,0x20,0x09,0x0}} }; static const struct opl4_region regions_70[] = { /* Tinkle Bell */ {0x15, 0x73, {0x0f3,1821,100, 0,3,0x00,0xc8,0x3b,0xd6,0x25,0x25,0x0}}, {0x00, 0x7f, {0x137,5669,100, 0,3,0x00,0x66,0x3b,0xf5,0x18,0x08,0x0}} }; static const struct opl4_region regions_71[] = { /* Agogo */ {0x15, 0x74, {0x00b,2474,100, 0,0,0x00,0xd2,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_72[] = { /* Steel Drums */ {0x01, 0x7f, {0x0fe,3670,100, 0,0,0x00,0xca,0x38,0xf3,0x06,0x17,0x1}}, {0x15, 0x6c, {0x100,9602,100, 0,0,0x00,0x54,0x38,0xb0,0x05,0x16,0x1}} }; static const struct opl4_region regions_73[] = { /* Woodblock */ {0x15, 0x6c, {0x02c,2963, 50, 0,0,0x07,0xd4,0x00,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_74[] = { /* Taiko Drum */ {0x13, 0x6c, {0x03e,1194, 50, 0,0,0x00,0xaa,0x38,0xf0,0x04,0x04,0x0}} }; static const struct opl4_region regions_75[] = { /* Melodic Tom */ {0x15, 0x6c, {0x0c7,6418, 50, 0,0,0x00,0xe4,0x38,0xf0,0x05,0x01,0x0}} }; static const struct opl4_region regions_76[] = { /* Synth Drum */ {0x15, 0x6c, {0x026,3898, 50, 0,0,0x00,0xd0,0x38,0xf0,0x04,0x04,0x0}} }; static const struct opl4_region regions_77[] = { /* Reverse Cymbal */ {0x15, 0x6c, {0x031,4138, 50, 0,0,0x00,0xfe,0x38,0x3a,0xf0,0x09,0x0}} }; static const struct opl4_region regions_78[] = { /* Guitar Fret Noise */ {0x15, 0x6c, {0x138,5266,100, 0,0,0x00,0xa0,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_79[] = { /* Breath Noise */ {0x01, 0x7f, {0x125,4269,100, 0,0,0x1e,0xd0,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_7a[] = { /* Seashore */ {0x15, 0x6c, {0x008,2965, 20,-2,0,0x00,0xfe,0x00,0x20,0x03,0x04,0x0}}, {0x01, 0x7f, {0x037,4394, 20, 
2,0,0x14,0xfe,0x00,0x20,0x04,0x05,0x0}} }; static const struct opl4_region regions_7b[] = { /* Bird Tweet */ {0x15, 0x6c, {0x009,8078, 5,-4,7,0x00,0xc2,0x0f,0x22,0x12,0x07,0x0}}, {0x15, 0x6c, {0x009,3583, 5, 4,5,0x00,0xae,0x15,0x72,0x12,0x07,0x0}} }; static const struct opl4_region regions_7c[] = { /* Telephone Ring */ {0x15, 0x6c, {0x003,3602, 10, 0,0,0x00,0xce,0x00,0xf0,0x00,0x0f,0x0}} }; static const struct opl4_region regions_7d[] = { /* Helicopter */ {0x0c, 0x7f, {0x001,2965, 10,-2,0,0x00,0xe0,0x08,0x30,0x01,0x07,0x0}}, {0x01, 0x7f, {0x037,4394, 10, 2,0,0x44,0x76,0x00,0x30,0x01,0x07,0x0}} }; static const struct opl4_region regions_7e[] = { /* Applause */ {0x15, 0x6c, {0x036,8273, 20,-6,7,0x00,0xc4,0x0f,0x70,0x01,0x05,0x0}}, {0x15, 0x6c, {0x036,8115, 5, 6,7,0x00,0xc6,0x07,0x70,0x01,0x05,0x0}} }; static const struct opl4_region regions_7f[] = { /* Gun Shot */ {0x15, 0x6c, {0x139,2858, 20, 0,0,0x00,0xbe,0x38,0xf0,0x03,0x00,0x0}} }; static const struct opl4_region regions_drums[] = { {0x18, 0x18, {0x0cb,6397,100, 3,0,0x00,0xf4,0x38,0xc9,0x1c,0x0c,0x0}}, {0x19, 0x19, {0x0c4,3714,100, 0,0,0x00,0xe0,0x00,0x97,0x19,0x09,0x0}}, {0x1a, 0x1a, {0x0c4,3519,100, 0,0,0x00,0xea,0x00,0x61,0x01,0x07,0x0}}, {0x1b, 0x1b, {0x0c4,3586,100, 0,0,0x00,0xea,0x00,0xf7,0x19,0x09,0x0}}, {0x1c, 0x1c, {0x0c4,3586,100, 0,0,0x00,0xea,0x00,0x81,0x01,0x07,0x0}}, {0x1e, 0x1e, {0x0c3,4783,100, 0,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x1f, 0x1f, {0x0d1,4042,100, 0,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x20, 0x20, {0x0d2,5943,100, 0,0,0x00,0xcc,0x00,0xf0,0x00,0x09,0x0}}, {0x21, 0x21, {0x011,3842,100, 0,0,0x00,0xea,0x00,0xf0,0x16,0x06,0x0}}, {0x23, 0x23, {0x011,4098,100, 0,0,0x00,0xea,0x00,0xf0,0x16,0x06,0x0}}, {0x24, 0x24, {0x011,4370,100, 0,0,0x00,0xea,0x00,0xf0,0x00,0x06,0x0}}, {0x25, 0x25, {0x0d2,4404,100, 0,0,0x00,0xd6,0x00,0xf0,0x00,0x06,0x0}}, {0x26, 0x26, {0x0d1,4298,100, 0,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x27, 0x27, {0x00a,4403,100,-1,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, 
{0x28, 0x28, {0x0d1,4554,100, 0,0,0x00,0xdc,0x00,0xf0,0x07,0x07,0x0}}, {0x29, 0x29, {0x0c8,4242,100,-4,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2a, 0x2a, {0x079,6160,100, 2,0,0x00,0xe0,0x00,0xf5,0x19,0x09,0x0}}, {0x2b, 0x2b, {0x0c8,4626,100,-3,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2c, 0x2c, {0x07b,6039,100, 2,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x2d, 0x2d, {0x0c8,5394,100,-2,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2e, 0x2e, {0x07a,5690,100, 2,0,0x00,0xd6,0x00,0xf0,0x00,0x05,0x0}}, {0x2f, 0x2f, {0x0c7,5185,100, 2,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x30, 0x30, {0x0c7,5650,100, 3,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x31, 0x31, {0x031,4395,100, 2,0,0x00,0xea,0x00,0xf0,0x05,0x05,0x0}}, {0x32, 0x32, {0x0c7,6162,100, 4,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x33, 0x33, {0x02e,4391,100,-2,0,0x00,0xea,0x00,0xf0,0x05,0x05,0x0}}, {0x34, 0x34, {0x07a,3009,100,-2,0,0x00,0xea,0x00,0xf2,0x15,0x05,0x0}}, {0x35, 0x35, {0x021,4522,100,-3,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x36, 0x36, {0x025,5163,100, 1,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x37, 0x37, {0x031,5287,100,-1,0,0x00,0xea,0x00,0xf5,0x16,0x06,0x0}}, {0x38, 0x38, {0x01d,4395,100, 2,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x39, 0x39, {0x031,4647,100,-2,0,0x00,0xea,0x00,0xf4,0x16,0x06,0x0}}, {0x3a, 0x3a, {0x09d,4426,100,-4,0,0x00,0xe0,0x00,0xf4,0x17,0x07,0x0}}, {0x3b, 0x3b, {0x02e,4659,100,-2,0,0x00,0xea,0x00,0xf0,0x06,0x06,0x0}}, {0x3c, 0x3c, {0x01c,4769,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3d, 0x3d, {0x01c,4611,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3e, 0x3e, {0x01e,4402,100,-3,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3f, 0x3f, {0x01f,4387,100,-3,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x40, 0x40, {0x01f,3983,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x41, 0x41, {0x09c,4526,100, 2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x42, 0x42, {0x09c,4016,100, 2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x43, 0x43, {0x00b,4739,100,-4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x44, 
0x44, {0x00b,4179,100,-4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x45, 0x45, {0x02f,4787,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x46, 0x46, {0x030,4665,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x47, 0x47, {0x144,4519,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x0b,0x0}}, {0x48, 0x48, {0x144,4111,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x0b,0x0}}, {0x49, 0x49, {0x024,6408,100, 3,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x4a, 0x4a, {0x024,4144,100, 3,0,0x00,0xcc,0x00,0xf0,0x00,0x09,0x0}}, {0x4b, 0x4b, {0x020,4001,100, 2,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x4c, 0x4c, {0x02c,4402,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4d, 0x4d, {0x02c,3612,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4e, 0x4e, {0x022,4129,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4f, 0x4f, {0x023,4147,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x50, 0x50, {0x032,4412,100,-4,0,0x00,0xd6,0x00,0xf0,0x08,0x09,0x0}}, {0x51, 0x51, {0x032,4385,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x52, 0x52, {0x02f,5935,100,-1,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}} }; #define REGION(num) { ARRAY_SIZE(regions ## num), regions ## num } const struct opl4_region_ptr snd_yrw801_regions[0x81] = { REGION(_00), REGION(_01), REGION(_02), REGION(_03), REGION(_04), REGION(_05), REGION(_06), REGION(_07), REGION(_08), REGION(_09), REGION(_0a), REGION(_0b), REGION(_0c), REGION(_0d), REGION(_0e), REGION(_0f), REGION(_10), REGION(_11), REGION(_12), REGION(_13), REGION(_14), REGION(_15), REGION(_16), REGION(_17), REGION(_18), REGION(_19), REGION(_1a), REGION(_1b), REGION(_1c), REGION(_1d), REGION(_1e), REGION(_1f), REGION(_20), REGION(_21), REGION(_22), REGION(_23), REGION(_24), REGION(_25), REGION(_26), REGION(_27), REGION(_28), REGION(_29), REGION(_2a), REGION(_2b), REGION(_2c), REGION(_2d), REGION(_2e), REGION(_2f), REGION(_30), REGION(_31), REGION(_32), REGION(_33), REGION(_34), REGION(_35), REGION(_36), REGION(_37), REGION(_38), REGION(_39), REGION(_3a), REGION(_3b), REGION(_3c), REGION(_3d), 
REGION(_3e), REGION(_3f), REGION(_40), REGION(_41), REGION(_42), REGION(_43), REGION(_44), REGION(_45), REGION(_46), REGION(_47), REGION(_48), REGION(_49), REGION(_4a), REGION(_4b), REGION(_4c), REGION(_4d), REGION(_4e), REGION(_4f), REGION(_50), REGION(_51), REGION(_52), REGION(_53), REGION(_54), REGION(_55), REGION(_56), REGION(_57), REGION(_58), REGION(_59), REGION(_5a), REGION(_5b), REGION(_5c), REGION(_5d), REGION(_5e), REGION(_5f), REGION(_60), REGION(_61), REGION(_62), REGION(_63), REGION(_64), REGION(_65), REGION(_66), REGION(_67), REGION(_68), REGION(_69), REGION(_6a), REGION(_6b), REGION(_6c), REGION(_6d), REGION(_6e), REGION(_6f), REGION(_70), REGION(_71), REGION(_72), REGION(_73), REGION(_74), REGION(_75), REGION(_76), REGION(_77), REGION(_78), REGION(_79), REGION(_7a), REGION(_7b), REGION(_7c), REGION(_7d), REGION(_7e), REGION(_7f), REGION(_drums) };
gpl-2.0
menghang/android_kernel_xiaomi_msm8996
drivers/gpu/drm/drm_auth.c
741
6058
/** * \file drm_auth.c * IOCTLs for authentication * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <drm/drmP.h> #include "drm_internal.h" struct drm_magic_entry { struct list_head head; struct drm_hash_item hash_item; struct drm_file *priv; }; /** * Find the file with the given magic number. * * \param dev DRM device. * \param magic magic number. * * Searches in drm_device::magiclist within all files with the same hash key * the one with matching magic number, while holding the drm_device::struct_mutex * lock. 
*/ static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic) { struct drm_file *retval = NULL; struct drm_magic_entry *pt; struct drm_hash_item *hash; struct drm_device *dev = master->minor->dev; mutex_lock(&dev->struct_mutex); if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); retval = pt->priv; } mutex_unlock(&dev->struct_mutex); return retval; } /** * Adds a magic number. * * \param dev DRM device. * \param priv file private data. * \param magic magic number. * * Creates a drm_magic_entry structure and appends to the linked list * associated the magic number hash key in drm_device::magiclist, while holding * the drm_device::struct_mutex lock. */ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, drm_magic_t magic) { struct drm_magic_entry *entry; struct drm_device *dev = master->minor->dev; DRM_DEBUG("%d\n", magic); entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->priv = priv; entry->hash_item.key = (unsigned long)magic; mutex_lock(&dev->struct_mutex); drm_ht_insert_item(&master->magiclist, &entry->hash_item); list_add_tail(&entry->head, &master->magicfree); mutex_unlock(&dev->struct_mutex); return 0; } /** * Remove a magic number. * * \param dev DRM device. * \param magic magic number. * * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. 
*/ int drm_remove_magic(struct drm_master *master, drm_magic_t magic) { struct drm_magic_entry *pt; struct drm_hash_item *hash; struct drm_device *dev = master->minor->dev; DRM_DEBUG("%d\n", magic); mutex_lock(&dev->struct_mutex); if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_ht_remove_item(&master->magiclist, hash); list_del(&pt->head); mutex_unlock(&dev->struct_mutex); kfree(pt); return 0; } /** * Get a unique magic number (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a resulting drm_auth structure. * \return zero on success, or a negative number on failure. * * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p * file_priv. * This ioctl needs protection by the drm_global_mutex, which protects * struct drm_file::magic and struct drm_magic_entry::priv. */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); struct drm_auth *auth = data; /* Find unique magic */ if (file_priv->magic) { auth->magic = file_priv->magic; } else { do { spin_lock(&lock); if (!sequence) ++sequence; /* reserve 0 */ auth->magic = sequence++; spin_unlock(&lock); } while (drm_find_file(file_priv->master, auth->magic)); file_priv->magic = auth->magic; drm_add_magic(file_priv->master, file_priv, auth->magic); } DRM_DEBUG("%u\n", auth->magic); return 0; } /** * Authenticate with a magic. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_auth structure. * \return zero if authentication successed, or a negative number otherwise. * * Checks if \p file_priv is associated with the magic number passed in \arg. 
* This ioctl needs protection by the drm_global_mutex, which protects * struct drm_file::magic and struct drm_magic_entry::priv. */ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_auth *auth = data; struct drm_file *file; DRM_DEBUG("%u\n", auth->magic); if ((file = drm_find_file(file_priv->master, auth->magic))) { file->authenticated = 1; drm_remove_magic(file_priv->master, auth->magic); return 0; } return -EINVAL; }
gpl-2.0
anoever/thunderbolt
arch/powerpc/platforms/cell/spufs/sched.c
1509
30179
/* sched.c - SPU scheduler. * * Copyright (C) IBM 2005 * Author: Mark Nutter <mnutter@us.ibm.com> * * 2006-03-31 NUMA domains added. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/vmalloc.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/numa.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include <asm/spu_priv1.h> #include "spufs.h" #define CREATE_TRACE_POINTS #include "sputrace.h" struct spu_prio_array { DECLARE_BITMAP(bitmap, MAX_PRIO); struct list_head runq[MAX_PRIO]; spinlock_t runq_lock; int nr_waiting; }; static unsigned long spu_avenrun[3]; static struct spu_prio_array *spu_prio; static struct task_struct *spusched_task; static struct timer_list spusched_timer; static struct timer_list spuloadavg_timer; /* * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). */ #define NORMAL_PRIO 120 /* * Frequency of the spu scheduler tick. 
By default we do one SPU scheduler * tick for every 10 CPU scheduler ticks. */ #define SPUSCHED_TICK (10) /* * These are the 'tuning knobs' of the scheduler: * * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs. */ #define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1) #define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK)) #define SCALE_PRIO(x, prio) \ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE) /* * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values: * [800ms ... 100ms ... 5ms] * * The higher a thread's priority, the bigger timeslices * it gets during one round of execution. But even the lowest * priority thread gets MIN_TIMESLICE worth of execution time. */ void spu_set_timeslice(struct spu_context *ctx) { if (ctx->prio < NORMAL_PRIO) ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); else ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); } /* * Update scheduling information from the owning thread. */ void __spu_update_sched_info(struct spu_context *ctx) { /* * assert that the context is not on the runqueue, so it is safe * to change its scheduling parameters. */ BUG_ON(!list_empty(&ctx->rq)); /* * 32-Bit assignments are atomic on powerpc, and we don't care about * memory ordering here because retrieving the controlling thread is * per definition racy. */ ctx->tid = current->pid; /* * We do our own priority calculations, so we normally want * ->static_prio to start with. Unfortunately this field * contains junk for threads with a realtime scheduling * policy so we have to look at ->prio in this case. */ if (rt_prio(current->prio)) ctx->prio = current->prio; else ctx->prio = current->static_prio; ctx->policy = current->policy; /* * TO DO: the context may be loaded, so we may need to activate * it again on a different node. 
But it shouldn't hurt anything * to update its parameters, because we know that the scheduler * is not actively looking at this field, since it is not on the * runqueue. The context will be rescheduled on the proper node * if it is timesliced or preempted. */ cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); } void spu_update_sched_info(struct spu_context *ctx) { int node; if (ctx->state == SPU_STATE_RUNNABLE) { node = ctx->spu->node; /* * Take list_mutex to sync with find_victim(). */ mutex_lock(&cbe_spu_info[node].list_mutex); __spu_update_sched_info(ctx); mutex_unlock(&cbe_spu_info[node].list_mutex); } else { __spu_update_sched_info(ctx); } } static int __node_allowed(struct spu_context *ctx, int node) { if (nr_cpus_node(node)) { const struct cpumask *mask = cpumask_of_node(node); if (cpumask_intersects(mask, &ctx->cpus_allowed)) return 1; } return 0; } static int node_allowed(struct spu_context *ctx, int node) { int rval; spin_lock(&spu_prio->runq_lock); rval = __node_allowed(ctx, node); spin_unlock(&spu_prio->runq_lock); return rval; } void do_notify_spus_active(void) { int node; /* * Wake up the active spu_contexts. * * When the awakened processes see their "notify_active" flag is set, * they will call spu_switch_notify(). 
*/ for_each_online_node(node) { struct spu *spu; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->alloc_state != SPU_FREE) { struct spu_context *ctx = spu->ctx; set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags); mb(); wake_up_all(&ctx->stop_wq); } } mutex_unlock(&cbe_spu_info[node].list_mutex); } } /** * spu_bind_context - bind spu context to physical spu * @spu: physical spu to bind to * @ctx: context to bind */ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) { spu_context_trace(spu_bind_context__enter, ctx, spu); spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (ctx->flags & SPU_CREATE_NOSCHED) atomic_inc(&cbe_spu_info[spu->node].reserved_spus); ctx->stats.slb_flt_base = spu->stats.slb_flt; ctx->stats.class2_intr_base = spu->stats.class2_intr; spu_associate_mm(spu, ctx->owner); spin_lock_irq(&spu->register_lock); spu->ctx = ctx; spu->flags = 0; ctx->spu = spu; ctx->ops = &spu_hw_ops; spu->pid = current->pid; spu->tgid = current->tgid; spu->ibox_callback = spufs_ibox_callback; spu->wbox_callback = spufs_wbox_callback; spu->stop_callback = spufs_stop_callback; spu->mfc_callback = spufs_mfc_callback; spin_unlock_irq(&spu->register_lock); spu_unmap_mappings(ctx); spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); spu_restore(&ctx->csa, spu); spu->timestamp = jiffies; spu_switch_notify(spu, ctx); ctx->state = SPU_STATE_RUNNABLE; spuctx_switch_state(ctx, SPU_UTIL_USER); } /* * Must be used with the list_mutex held. 
*/ static inline int sched_spu(struct spu *spu) { BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); } static void aff_merge_remaining_ctxs(struct spu_gang *gang) { struct spu_context *ctx; list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { if (list_empty(&ctx->aff_list)) list_add(&ctx->aff_list, &gang->aff_list_head); } gang->aff_flags |= AFF_MERGED; } static void aff_set_offsets(struct spu_gang *gang) { struct spu_context *ctx; int offset; offset = -1; list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; ctx->aff_offset = offset--; } offset = 0; list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; ctx->aff_offset = offset++; } gang->aff_flags |= AFF_OFFSETS_SET; } static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, int group_size, int lowest_offset) { struct spu *spu; int node, n; /* * TODO: A better algorithm could be used to find a good spu to be * used as reference location for the ctxs chain. */ node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { /* * "available_spus" counts how many spus are not potentially * going to be used by other affinity gangs whose reference * context is already in place. Although this code seeks to * avoid having affinity gangs with a summed amount of * contexts bigger than the amount of spus in the node, * this may happen sporadically. In this case, available_spus * becomes negative, which is harmless. */ int available_spus; node = (node < MAX_NUMNODES) ? 
node : 0; if (!node_allowed(ctx, node)) continue; available_spus = 0; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset && spu->ctx->gang->aff_ref_spu) available_spus -= spu->ctx->gang->contexts; available_spus++; } if (available_spus < ctx->gang->contexts) { mutex_unlock(&cbe_spu_info[node].list_mutex); continue; } list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if ((!mem_aff || spu->has_mem_affinity) && sched_spu(spu)) { mutex_unlock(&cbe_spu_info[node].list_mutex); return spu; } } mutex_unlock(&cbe_spu_info[node].list_mutex); } return NULL; } static void aff_set_ref_point_location(struct spu_gang *gang) { int mem_aff, gs, lowest_offset; struct spu_context *ctx; struct spu *tmp; mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM; lowest_offset = 0; gs = 0; list_for_each_entry(tmp, &gang->aff_list_head, aff_list) gs++; list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; lowest_offset = ctx->aff_offset; } gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs, lowest_offset); } static struct spu *ctx_location(struct spu *ref, int offset, int node) { struct spu *spu; spu = NULL; if (offset >= 0) { list_for_each_entry(spu, ref->aff_list.prev, aff_list) { BUG_ON(spu->node != node); if (offset == 0) break; if (sched_spu(spu)) offset--; } } else { list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) { BUG_ON(spu->node != node); if (offset == 0) break; if (sched_spu(spu)) offset++; } } return spu; } /* * affinity_check is called each time a context is going to be scheduled. * It returns the spu ptr on which the context must run. 
*/ static int has_affinity(struct spu_context *ctx) { struct spu_gang *gang = ctx->gang; if (list_empty(&ctx->aff_list)) return 0; if (atomic_read(&ctx->gang->aff_sched_count) == 0) ctx->gang->aff_ref_spu = NULL; if (!gang->aff_ref_spu) { if (!(gang->aff_flags & AFF_MERGED)) aff_merge_remaining_ctxs(gang); if (!(gang->aff_flags & AFF_OFFSETS_SET)) aff_set_offsets(gang); aff_set_ref_point_location(gang); } return gang->aff_ref_spu != NULL; } /** * spu_unbind_context - unbind spu context from physical spu * @spu: physical spu to unbind from * @ctx: context to unbind */ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) { u32 status; spu_context_trace(spu_unbind_context__enter, ctx, spu); spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (spu->ctx->flags & SPU_CREATE_NOSCHED) atomic_dec(&cbe_spu_info[spu->node].reserved_spus); if (ctx->gang) /* * If ctx->gang->aff_sched_count is positive, SPU affinity is * being considered in this gang. Using atomic_dec_if_positive * allow us to skip an explicit check for affinity in this gang */ atomic_dec_if_positive(&ctx->gang->aff_sched_count); spu_switch_notify(spu, NULL); spu_unmap_mappings(ctx); spu_save(&ctx->csa, spu); spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); spin_lock_irq(&spu->register_lock); spu->timestamp = jiffies; ctx->state = SPU_STATE_SAVED; spu->ibox_callback = NULL; spu->wbox_callback = NULL; spu->stop_callback = NULL; spu->mfc_callback = NULL; spu->pid = 0; spu->tgid = 0; ctx->ops = &spu_backing_ops; spu->flags = 0; spu->ctx = NULL; spin_unlock_irq(&spu->register_lock); spu_associate_mm(spu, NULL); ctx->stats.slb_flt += (spu->stats.slb_flt - ctx->stats.slb_flt_base); ctx->stats.class2_intr += (spu->stats.class2_intr - ctx->stats.class2_intr_base); /* This maps the underlying spu state to idle */ spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); ctx->spu = NULL; if (spu_stopped(ctx, &status)) wake_up_all(&ctx->stop_wq); } /** * spu_add_to_rq - add a context to the runqueue * @ctx: context 
to add */ static void __spu_add_to_rq(struct spu_context *ctx) { /* * Unfortunately this code path can be called from multiple threads * on behalf of a single context due to the way the problem state * mmap support works. * * Fortunately we need to wake up all these threads at the same time * and can simply skip the runqueue addition for every but the first * thread getting into this codepath. * * It's still quite hacky, and long-term we should proxy all other * threads through the owner thread so that spu_run is in control * of all the scheduling activity for a given context. */ if (list_empty(&ctx->rq)) { list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); set_bit(ctx->prio, spu_prio->bitmap); if (!spu_prio->nr_waiting++) mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); } } static void spu_add_to_rq(struct spu_context *ctx) { spin_lock(&spu_prio->runq_lock); __spu_add_to_rq(ctx); spin_unlock(&spu_prio->runq_lock); } static void __spu_del_from_rq(struct spu_context *ctx) { int prio = ctx->prio; if (!list_empty(&ctx->rq)) { if (!--spu_prio->nr_waiting) del_timer(&spusched_timer); list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) clear_bit(prio, spu_prio->bitmap); } } void spu_del_from_rq(struct spu_context *ctx) { spin_lock(&spu_prio->runq_lock); __spu_del_from_rq(ctx); spin_unlock(&spu_prio->runq_lock); } static void spu_prio_wait(struct spu_context *ctx) { DEFINE_WAIT(wait); /* * The caller must explicitly wait for a context to be loaded * if the nosched flag is set. If NOSCHED is not set, the caller * queues the context and waits for an spu event or error. 
*/ BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); spin_lock(&spu_prio->runq_lock); prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); if (!signal_pending(current)) { __spu_add_to_rq(ctx); spin_unlock(&spu_prio->runq_lock); mutex_unlock(&ctx->state_mutex); schedule(); mutex_lock(&ctx->state_mutex); spin_lock(&spu_prio->runq_lock); __spu_del_from_rq(ctx); } spin_unlock(&spu_prio->runq_lock); __set_current_state(TASK_RUNNING); remove_wait_queue(&ctx->stop_wq, &wait); } static struct spu *spu_get_idle(struct spu_context *ctx) { struct spu *spu, *aff_ref_spu; int node, n; spu_context_nospu_trace(spu_get_idle__enter, ctx); if (ctx->gang) { mutex_lock(&ctx->gang->aff_mutex); if (has_affinity(ctx)) { aff_ref_spu = ctx->gang->aff_ref_spu; atomic_inc(&ctx->gang->aff_sched_count); mutex_unlock(&ctx->gang->aff_mutex); node = aff_ref_spu->node; mutex_lock(&cbe_spu_info[node].list_mutex); spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); if (spu && spu->alloc_state == SPU_FREE) goto found; mutex_unlock(&cbe_spu_info[node].list_mutex); atomic_dec(&ctx->gang->aff_sched_count); goto not_found; } mutex_unlock(&ctx->gang->aff_mutex); } node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { node = (node < MAX_NUMNODES) ? node : 0; if (!node_allowed(ctx, node)) continue; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->alloc_state == SPU_FREE) goto found; } mutex_unlock(&cbe_spu_info[node].list_mutex); } not_found: spu_context_nospu_trace(spu_get_idle__not_found, ctx); return NULL; found: spu->alloc_state = SPU_USED; mutex_unlock(&cbe_spu_info[node].list_mutex); spu_context_trace(spu_get_idle__found, ctx, spu); spu_init_channels(spu); return spu; } /** * find_victim - find a lower priority context to preempt * @ctx: canidate context for running * * Returns the freed physical spu to run the new context on. 
*/ static struct spu *find_victim(struct spu_context *ctx) { struct spu_context *victim = NULL; struct spu *spu; int node, n; spu_context_nospu_trace(spu_find_victim__enter, ctx); /* * Look for a possible preemption candidate on the local node first. * If there is no candidate look at the other nodes. This isn't * exactly fair, but so far the whole spu scheduler tries to keep * a strong node affinity. We might want to fine-tune this in * the future. */ restart: node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { node = (node < MAX_NUMNODES) ? node : 0; if (!node_allowed(ctx, node)) continue; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { struct spu_context *tmp = spu->ctx; if (tmp && tmp->prio > ctx->prio && !(tmp->flags & SPU_CREATE_NOSCHED) && (!victim || tmp->prio > victim->prio)) { victim = spu->ctx; } } if (victim) get_spu_context(victim); mutex_unlock(&cbe_spu_info[node].list_mutex); if (victim) { /* * This nests ctx->state_mutex, but we always lock * higher priority contexts before lower priority * ones, so this is safe until we introduce * priority inheritance schemes. * * XXX if the highest priority context is locked, * this can loop a long time. Might be better to * look at another context or give up after X retries. */ if (!mutex_trylock(&victim->state_mutex)) { put_spu_context(victim); victim = NULL; goto restart; } spu = victim->spu; if (!spu || victim->prio <= ctx->prio) { /* * This race can happen because we've dropped * the active list mutex. Not a problem, just * restart the search. 
*/ mutex_unlock(&victim->state_mutex); put_spu_context(victim); victim = NULL; goto restart; } spu_context_trace(__spu_deactivate__unload, ctx, spu); mutex_lock(&cbe_spu_info[node].list_mutex); cbe_spu_info[node].nr_active--; spu_unbind_context(spu, victim); mutex_unlock(&cbe_spu_info[node].list_mutex); victim->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags)) spu_add_to_rq(victim); mutex_unlock(&victim->state_mutex); put_spu_context(victim); return spu; } } return NULL; } static void __spu_schedule(struct spu *spu, struct spu_context *ctx) { int node = spu->node; int success = 0; spu_set_timeslice(ctx); mutex_lock(&cbe_spu_info[node].list_mutex); if (spu->ctx == NULL) { spu_bind_context(spu, ctx); cbe_spu_info[node].nr_active++; spu->alloc_state = SPU_USED; success = 1; } mutex_unlock(&cbe_spu_info[node].list_mutex); if (success) wake_up_all(&ctx->run_wq); else spu_add_to_rq(ctx); } static void spu_schedule(struct spu *spu, struct spu_context *ctx) { /* not a candidate for interruptible because it's called either from the scheduler thread or from spu_deactivate */ mutex_lock(&ctx->state_mutex); if (ctx->state == SPU_STATE_SAVED) __spu_schedule(spu, ctx); spu_release(ctx); } /** * spu_unschedule - remove a context from a spu, and possibly release it. * @spu: The SPU to unschedule from * @ctx: The context currently scheduled on the SPU * @free_spu Whether to free the SPU for other contexts * * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the * SPU is made available for other contexts (ie, may be returned by * spu_get_idle). If this is zero, the caller is expected to schedule another * context to this spu. * * Should be called with ctx->state_mutex held. 
*/ static void spu_unschedule(struct spu *spu, struct spu_context *ctx, int free_spu) { int node = spu->node; mutex_lock(&cbe_spu_info[node].list_mutex); cbe_spu_info[node].nr_active--; if (free_spu) spu->alloc_state = SPU_FREE; spu_unbind_context(spu, ctx); ctx->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; mutex_unlock(&cbe_spu_info[node].list_mutex); } /** * spu_activate - find a free spu for a context and execute it * @ctx: spu context to schedule * @flags: flags (currently ignored) * * Tries to find a free spu to run @ctx. If no free spu is available * add the context to the runqueue so it gets woken up once an spu * is available. */ int spu_activate(struct spu_context *ctx, unsigned long flags) { struct spu *spu; /* * If there are multiple threads waiting for a single context * only one actually binds the context while the others will * only be able to acquire the state_mutex once the context * already is in runnable state. */ if (ctx->spu) return 0; spu_activate_top: if (signal_pending(current)) return -ERESTARTSYS; spu = spu_get_idle(ctx); /* * If this is a realtime thread we try to get it running by * preempting a lower priority thread. */ if (!spu && rt_prio(ctx->prio)) spu = find_victim(ctx); if (spu) { unsigned long runcntl; runcntl = ctx->ops->runcntl_read(ctx); __spu_schedule(spu, ctx); if (runcntl & SPU_RUNCNTL_RUNNABLE) spuctx_switch_state(ctx, SPU_UTIL_USER); return 0; } if (ctx->flags & SPU_CREATE_NOSCHED) { spu_prio_wait(ctx); goto spu_activate_top; } spu_add_to_rq(ctx); return 0; } /** * grab_runnable_context - try to find a runnable context * * Remove the highest priority context on the runqueue and return it * to the caller. Returns %NULL if no runnable context was found. 
*/ static struct spu_context *grab_runnable_context(int prio, int node) { struct spu_context *ctx; int best; spin_lock(&spu_prio->runq_lock); best = find_first_bit(spu_prio->bitmap, prio); while (best < prio) { struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; } } best++; } ctx = NULL; found: spin_unlock(&spu_prio->runq_lock); return ctx; } static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) { struct spu *spu = ctx->spu; struct spu_context *new = NULL; if (spu) { new = grab_runnable_context(max_prio, spu->node); if (new || force) { spu_unschedule(spu, ctx, new == NULL); if (new) { if (new->flags & SPU_CREATE_NOSCHED) wake_up(&new->stop_wq); else { spu_release(ctx); spu_schedule(spu, new); /* this one can't easily be made interruptible */ mutex_lock(&ctx->state_mutex); } } } } return new != NULL; } /** * spu_deactivate - unbind a context from it's physical spu * @ctx: spu context to unbind * * Unbind @ctx from the physical spu it is running on and schedule * the highest priority context to run on the freed physical spu. */ void spu_deactivate(struct spu_context *ctx) { spu_context_nospu_trace(spu_deactivate__enter, ctx); __spu_deactivate(ctx, 1, MAX_PRIO); } /** * spu_yield - yield a physical spu if others are waiting * @ctx: spu context to yield * * Check if there is a higher priority context waiting and if yes * unbind @ctx from the physical spu and schedule the highest * priority context to run on the freed physical spu instead. 
*/ void spu_yield(struct spu_context *ctx) { spu_context_nospu_trace(spu_yield__enter, ctx); if (!(ctx->flags & SPU_CREATE_NOSCHED)) { mutex_lock(&ctx->state_mutex); __spu_deactivate(ctx, 0, MAX_PRIO); mutex_unlock(&ctx->state_mutex); } } static noinline void spusched_tick(struct spu_context *ctx) { struct spu_context *new = NULL; struct spu *spu = NULL; if (spu_acquire(ctx)) BUG(); /* a kernel thread never has signals pending */ if (ctx->state != SPU_STATE_RUNNABLE) goto out; if (ctx->flags & SPU_CREATE_NOSCHED) goto out; if (ctx->policy == SCHED_FIFO) goto out; if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) goto out; spu = ctx->spu; spu_context_trace(spusched_tick__preempt, ctx, spu); new = grab_runnable_context(ctx->prio + 1, spu->node); if (new) { spu_unschedule(spu, ctx, 0); if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spu_add_to_rq(ctx); } else { spu_context_nospu_trace(spusched_tick__newslice, ctx); if (!ctx->time_slice) ctx->time_slice++; } out: spu_release(ctx); if (new) spu_schedule(spu, new); } /** * count_active_contexts - count nr of active tasks * * Return the number of tasks currently running or waiting to run. * * Note that we don't take runq_lock / list_mutex here. Reading * a single 32bit value is atomic on powerpc, and we don't care * about memory ordering issues here. */ static unsigned long count_active_contexts(void) { int nr_active = 0, node; for (node = 0; node < MAX_NUMNODES; node++) nr_active += cbe_spu_info[node].nr_active; nr_active += spu_prio->nr_waiting; return nr_active; } /** * spu_calc_load - update the avenrun load estimates. * * No locking against reading these values from userspace, as for * the CPU loadavg code. 
*/ static void spu_calc_load(void) { unsigned long active_tasks; /* fixed-point */ active_tasks = count_active_contexts() * FIXED_1; CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks); CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks); CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks); } static void spusched_wake(unsigned long data) { mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); wake_up_process(spusched_task); } static void spuloadavg_wake(unsigned long data) { mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ); spu_calc_load(); } static int spusched_thread(void *unused) { struct spu *spu; int node; while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); schedule(); for (node = 0; node < MAX_NUMNODES; node++) { struct mutex *mtx = &cbe_spu_info[node].list_mutex; mutex_lock(mtx); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { struct spu_context *ctx = spu->ctx; if (ctx) { get_spu_context(ctx); mutex_unlock(mtx); spusched_tick(ctx); mutex_lock(mtx); put_spu_context(ctx); } } mutex_unlock(mtx); } } return 0; } void spuctx_switch_state(struct spu_context *ctx, enum spu_utilization_state new_state) { unsigned long long curtime; signed long long delta; struct spu *spu; enum spu_utilization_state old_state; int node; curtime = ktime_get_ns(); delta = curtime - ctx->stats.tstamp; WARN_ON(!mutex_is_locked(&ctx->state_mutex)); WARN_ON(delta < 0); spu = ctx->spu; old_state = ctx->stats.util_state; ctx->stats.util_state = new_state; ctx->stats.tstamp = curtime; /* * Update the physical SPU utilization statistics. 
*/ if (spu) { ctx->stats.times[old_state] += delta; spu->stats.times[old_state] += delta; spu->stats.util_state = new_state; spu->stats.tstamp = curtime; node = spu->node; if (old_state == SPU_UTIL_USER) atomic_dec(&cbe_spu_info[node].busy_spus); if (new_state == SPU_UTIL_USER) atomic_inc(&cbe_spu_info[node].busy_spus); } } #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) static int show_spu_loadavg(struct seq_file *s, void *private) { int a, b, c; a = spu_avenrun[0] + (FIXED_1/200); b = spu_avenrun[1] + (FIXED_1/200); c = spu_avenrun[2] + (FIXED_1/200); /* * Note that last_pid doesn't really make much sense for the * SPU loadavg (it even seems very odd on the CPU side...), * but we include it here to have a 100% compatible interface. */ seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), task_active_pid_ns(current)->last_pid); return 0; } static int spu_loadavg_open(struct inode *inode, struct file *file) { return single_open(file, show_spu_loadavg, NULL); } static const struct file_operations spu_loadavg_fops = { .open = spu_loadavg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; int __init spu_sched_init(void) { struct proc_dir_entry *entry; int err = -ENOMEM, i; spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL); if (!spu_prio) goto out; for (i = 0; i < MAX_PRIO; i++) { INIT_LIST_HEAD(&spu_prio->runq[i]); __clear_bit(i, spu_prio->bitmap); } spin_lock_init(&spu_prio->runq_lock); setup_timer(&spusched_timer, spusched_wake, 0); setup_timer(&spuloadavg_timer, spuloadavg_wake, 0); spusched_task = kthread_run(spusched_thread, NULL, "spusched"); if (IS_ERR(spusched_task)) { err = PTR_ERR(spusched_task); goto out_free_spu_prio; } mod_timer(&spuloadavg_timer, 0); entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops); if (!entry) goto 
out_stop_kthread; pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n", SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE); return 0; out_stop_kthread: kthread_stop(spusched_task); out_free_spu_prio: kfree(spu_prio); out: return err; } void spu_sched_exit(void) { struct spu *spu; int node; remove_proc_entry("spu_loadavg", NULL); del_timer_sync(&spusched_timer); del_timer_sync(&spuloadavg_timer); kthread_stop(spusched_task); for (node = 0; node < MAX_NUMNODES; node++) { mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) if (spu->alloc_state != SPU_FREE) spu->alloc_state = SPU_FREE; mutex_unlock(&cbe_spu_info[node].list_mutex); } kfree(spu_prio); }
gpl-2.0
STS-Dev-Team/kernel_kexec_modules
arch/mips/math-emu/dp_flong.c
1765
1912
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_flong(s64 x) { u64 xm; int xe; int xs; CLEARCX; if (x == 0) return ieee754dp_zero(0); if (x == 1 || x == -1) return ieee754dp_one(x < 0); if (x == 10 || x == -10) return ieee754dp_ten(x < 0); xs = (x < 0); if (xs) { if (x == (1ULL << 63)) xm = (1ULL << 63); /* max neg can't be safely negated */ else xm = -x; } else { xm = x; } /* normalize */ xe = DP_MBITS + 3; if (xm >> (DP_MBITS + 1 + 3)) { /* shunt out overflow bits */ while (xm >> (DP_MBITS + 1 + 3)) { XDPSRSX1(); } } else { /* normalize in grs extended double precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET1(xs, xe, xm, "dp_flong", x); } ieee754dp ieee754dp_fulong(u64 u) { if ((s64) u < 0) return ieee754dp_add(ieee754dp_1e63(), ieee754dp_flong(u & ~(1ULL << 63))); return ieee754dp_flong(u); }
gpl-2.0
jdkernel/android-omap-tuna_3.0
drivers/mfd/ab3100-core.c
1765
22768
/* * Copyright (C) 2007-2010 ST-Ericsson * License terms: GNU General Public License (GPL) version 2 * Low-level core for exclusive access to the AB3100 IC on the I2C bus * and some basic chip-configuration. * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/mfd/core.h> #include <linux/mfd/abx500.h> /* These are the only registers inside AB3100 used in this main file */ /* Interrupt event registers */ #define AB3100_EVENTA1 0x21 #define AB3100_EVENTA2 0x22 #define AB3100_EVENTA3 0x23 /* AB3100 DAC converter registers */ #define AB3100_DIS 0x00 #define AB3100_D0C 0x01 #define AB3100_D1C 0x02 #define AB3100_D2C 0x03 #define AB3100_D3C 0x04 /* Chip ID register */ #define AB3100_CID 0x20 /* AB3100 interrupt registers */ #define AB3100_IMRA1 0x24 #define AB3100_IMRA2 0x25 #define AB3100_IMRA3 0x26 #define AB3100_IMRB1 0x2B #define AB3100_IMRB2 0x2C #define AB3100_IMRB3 0x2D /* System Power Monitoring and control registers */ #define AB3100_MCA 0x2E #define AB3100_MCB 0x2F /* SIM power up */ #define AB3100_SUP 0x50 /* * I2C communication * * The AB3100 is usually assigned address 0x48 (7-bit) * The chip is defined in the platform i2c_board_data section. 
*/ static int ab3100_get_chip_id(struct device *dev) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return (int)ab3100->chip_id; } static int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * A two-byte write message with the first byte containing the register * number and the second byte containing the value to be written * effectively sets a register in the AB3100. */ err = i2c_master_send(ab3100->i2c_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write register): %d\n", err); } else if (err != 2) { dev_err(ab3100->dev, "write error (write register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; } else { /* All is well */ err = 0; } mutex_unlock(&ab3100->access_mutex); return err; } static int set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 value) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_set_register_interruptible(ab3100, reg, value); } /* * The test registers exist at an I2C bus address up one * from the ordinary base. They are not supposed to be used * in production code, but sometimes you have to do that * anyway. It's currently only used from this file so declare * it static and do not export. 
*/ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; err = i2c_master_send(ab3100->testreg_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write test register): %d\n", err); } else if (err != 2) { dev_err(ab3100->dev, "write error (write test register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; } else { /* All is well */ err = 0; } mutex_unlock(&ab3100->access_mutex); return err; } static int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) { int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * AB3100 require an I2C "stop" command between each message, else * it will not work. The only way of achieveing this with the * message transport layer is to send the read and write messages * separately. */ err = i2c_master_send(ab3100->i2c_client, &reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (send register address): %d\n", err); goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (send register address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; } else { /* All is well */ err = 0; } err = i2c_master_recv(ab3100->i2c_client, regval, 1); if (err < 0) { dev_err(ab3100->dev, "write error (read register): %d\n", err); goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (read register) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; } else { /* All is well */ err = 0; } get_reg_out_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int get_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 *value) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_get_register_interruptible(ab3100, reg, value); } static int 
ab3100_get_register_page_interruptible(struct ab3100 *ab3100, u8 first_reg, u8 *regvals, u8 numregs) { int err; if (ab3100->chip_id == 0xa0 || ab3100->chip_id == 0xa1) /* These don't support paged reads */ return -EIO; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * Paged read also require an I2C "stop" command. */ err = i2c_master_send(ab3100->i2c_client, &first_reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (send first register address): %d\n", err); goto get_reg_page_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (send first register address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_page_out_unlock; } err = i2c_master_recv(ab3100->i2c_client, regvals, numregs); if (err < 0) { dev_err(ab3100->dev, "write error (read register page): %d\n", err); goto get_reg_page_out_unlock; } else if (err != numregs) { dev_err(ab3100->dev, "write error (read register page) " "%d bytes transferred (expected %d)\n", err, numregs); err = -EIO; goto get_reg_page_out_unlock; } /* All is well */ err = 0; get_reg_page_out_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int get_register_page_interruptible(struct device *dev, u8 bank, u8 first_reg, u8 *regvals, u8 numregs) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_get_register_page_interruptible(ab3100, first_reg, regvals, numregs); } static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 andmask, u8 ormask) { u8 regandval[2] = {reg, 0}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* First read out the target register */ err = i2c_master_send(ab3100->i2c_client, &reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (maskset send address): %d\n", err); goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (maskset send address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto 
get_maskset_unlock; } err = i2c_master_recv(ab3100->i2c_client, &regandval[1], 1); if (err < 0) { dev_err(ab3100->dev, "write error (maskset read register): %d\n", err); goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (maskset read register) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_maskset_unlock; } /* Modify the register */ regandval[1] &= andmask; regandval[1] |= ormask; /* Write the register */ err = i2c_master_send(ab3100->i2c_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write register): %d\n", err); goto get_maskset_unlock; } else if (err != 2) { dev_err(ab3100->dev, "write error (write register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; goto get_maskset_unlock; } /* All is well */ err = 0; get_maskset_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int mask_and_set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 bitmask, u8 bitvalues) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_mask_and_set_register_interruptible(ab3100, reg, bitmask, (bitmask & bitvalues)); } /* * Register a simple callback for handling any AB3100 events. */ int ab3100_event_register(struct ab3100 *ab3100, struct notifier_block *nb) { return blocking_notifier_chain_register(&ab3100->event_subscribers, nb); } EXPORT_SYMBOL(ab3100_event_register); /* * Remove a previously registered callback. 
*/ int ab3100_event_unregister(struct ab3100 *ab3100, struct notifier_block *nb) { return blocking_notifier_chain_unregister(&ab3100->event_subscribers, nb); } EXPORT_SYMBOL(ab3100_event_unregister); static int ab3100_event_registers_startup_state_get(struct device *dev, u8 *event) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); if (!ab3100->startup_events_read) return -EAGAIN; /* Try again later */ memcpy(event, ab3100->startup_events, 3); return 0; } static struct abx500_ops ab3100_ops = { .get_chip_id = ab3100_get_chip_id, .set_register = set_register_interruptible, .get_register = get_register_interruptible, .get_register_page = get_register_page_interruptible, .set_register_page = NULL, .mask_and_set_register = mask_and_set_register_interruptible, .event_registers_startup_state_get = ab3100_event_registers_startup_state_get, .startup_irq_enabled = NULL, }; /* * This is a threaded interrupt handler so we can make some * I2C calls etc. */ static irqreturn_t ab3100_irq_handler(int irq, void *data) { struct ab3100 *ab3100 = data; u8 event_regs[3]; u32 fatevent; int err; add_interrupt_randomness(irq); err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1, event_regs, 3); if (err) goto err_event; fatevent = (event_regs[0] << 16) | (event_regs[1] << 8) | event_regs[2]; if (!ab3100->startup_events_read) { ab3100->startup_events[0] = event_regs[0]; ab3100->startup_events[1] = event_regs[1]; ab3100->startup_events[2] = event_regs[2]; ab3100->startup_events_read = true; } /* * The notified parties will have to mask out the events * they're interested in and react to them. They will be * notified on all events, then they use the fatevent value * to determine if they're interested. 
*/ blocking_notifier_call_chain(&ab3100->event_subscribers, fatevent, NULL); dev_dbg(ab3100->dev, "IRQ Event: 0x%08x\n", fatevent); return IRQ_HANDLED; err_event: dev_dbg(ab3100->dev, "error reading event status\n"); return IRQ_HANDLED; } #ifdef CONFIG_DEBUG_FS /* * Some debugfs entries only exposed if we're using debug */ static int ab3100_registers_print(struct seq_file *s, void *p) { struct ab3100 *ab3100 = s->private; u8 value; u8 reg; seq_printf(s, "AB3100 registers:\n"); for (reg = 0; reg < 0xff; reg++) { ab3100_get_register_interruptible(ab3100, reg, &value); seq_printf(s, "[0x%x]: 0x%x\n", reg, value); } return 0; } static int ab3100_registers_open(struct inode *inode, struct file *file) { return single_open(file, ab3100_registers_print, inode->i_private); } static const struct file_operations ab3100_registers_fops = { .open = ab3100_registers_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; struct ab3100_get_set_reg_priv { struct ab3100 *ab3100; bool mode; }; static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t ab3100_get_set_reg(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ab3100_get_set_reg_priv *priv = file->private_data; struct ab3100 *ab3100 = priv->ab3100; char buf[32]; ssize_t buf_size; int regp; unsigned long user_reg; int err; int i = 0; /* Get userspace string and assure termination */ buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = 0; /* * The idea is here to parse a string which is either * "0xnn" for reading a register, or "0xaa 0xbb" for * writing 0xbb to the register 0xaa. First move past * whitespace and then begin to parse the register. */ while ((i < buf_size) && (buf[i] == ' ')) i++; regp = i; /* * Advance pointer to end of string then terminate * the register string. 
This is needed to satisfy * the strict_strtoul() function. */ while ((i < buf_size) && (buf[i] != ' ')) i++; buf[i] = '\0'; err = strict_strtoul(&buf[regp], 16, &user_reg); if (err) return err; if (user_reg > 0xff) return -EINVAL; /* Either we read or we write a register here */ if (!priv->mode) { /* Reading */ u8 reg = (u8) user_reg; u8 regvalue; ab3100_get_register_interruptible(ab3100, reg, &regvalue); dev_info(ab3100->dev, "debug read AB3100 reg[0x%02x]: 0x%02x\n", reg, regvalue); } else { int valp; unsigned long user_value; u8 reg = (u8) user_reg; u8 value; u8 regvalue; /* * Writing, we need some value to write to * the register so keep parsing the string * from userspace. */ i++; while ((i < buf_size) && (buf[i] == ' ')) i++; valp = i; while ((i < buf_size) && (buf[i] != ' ')) i++; buf[i] = '\0'; err = strict_strtoul(&buf[valp], 16, &user_value); if (err) return err; if (user_reg > 0xff) return -EINVAL; value = (u8) user_value; ab3100_set_register_interruptible(ab3100, reg, value); ab3100_get_register_interruptible(ab3100, reg, &regvalue); dev_info(ab3100->dev, "debug write reg[0x%02x] with 0x%02x, " "after readback: 0x%02x\n", reg, value, regvalue); } return buf_size; } static const struct file_operations ab3100_get_set_reg_fops = { .open = ab3100_get_set_reg_open_file, .write = ab3100_get_set_reg, .llseek = noop_llseek, }; static struct dentry *ab3100_dir; static struct dentry *ab3100_reg_file; static struct ab3100_get_set_reg_priv ab3100_get_priv; static struct dentry *ab3100_get_reg_file; static struct ab3100_get_set_reg_priv ab3100_set_priv; static struct dentry *ab3100_set_reg_file; static void ab3100_setup_debugfs(struct ab3100 *ab3100) { int err; ab3100_dir = debugfs_create_dir("ab3100", NULL); if (!ab3100_dir) goto exit_no_debugfs; ab3100_reg_file = debugfs_create_file("registers", S_IRUGO, ab3100_dir, ab3100, &ab3100_registers_fops); if (!ab3100_reg_file) { err = -ENOMEM; goto exit_destroy_dir; } ab3100_get_priv.ab3100 = ab3100; ab3100_get_priv.mode 
= false; ab3100_get_reg_file = debugfs_create_file("get_reg", S_IWUSR, ab3100_dir, &ab3100_get_priv, &ab3100_get_set_reg_fops); if (!ab3100_get_reg_file) { err = -ENOMEM; goto exit_destroy_reg; } ab3100_set_priv.ab3100 = ab3100; ab3100_set_priv.mode = true; ab3100_set_reg_file = debugfs_create_file("set_reg", S_IWUSR, ab3100_dir, &ab3100_set_priv, &ab3100_get_set_reg_fops); if (!ab3100_set_reg_file) { err = -ENOMEM; goto exit_destroy_get_reg; } return; exit_destroy_get_reg: debugfs_remove(ab3100_get_reg_file); exit_destroy_reg: debugfs_remove(ab3100_reg_file); exit_destroy_dir: debugfs_remove(ab3100_dir); exit_no_debugfs: return; } static inline void ab3100_remove_debugfs(void) { debugfs_remove(ab3100_set_reg_file); debugfs_remove(ab3100_get_reg_file); debugfs_remove(ab3100_reg_file); debugfs_remove(ab3100_dir); } #else static inline void ab3100_setup_debugfs(struct ab3100 *ab3100) { } static inline void ab3100_remove_debugfs(void) { } #endif /* * Basic set-up, datastructure creation/destruction and I2C interface. * This sets up a default config in the AB3100 chip so that it * will work as expected. 
*/ struct ab3100_init_setting { u8 abreg; u8 setting; }; static const struct ab3100_init_setting __devinitconst ab3100_init_settings[] = { { .abreg = AB3100_MCA, .setting = 0x01 }, { .abreg = AB3100_MCB, .setting = 0x30 }, { .abreg = AB3100_IMRA1, .setting = 0x00 }, { .abreg = AB3100_IMRA2, .setting = 0xFF }, { .abreg = AB3100_IMRA3, .setting = 0x01 }, { .abreg = AB3100_IMRB1, .setting = 0xBF }, { .abreg = AB3100_IMRB2, .setting = 0xFF }, { .abreg = AB3100_IMRB3, .setting = 0xFF }, { .abreg = AB3100_SUP, .setting = 0x00 }, { .abreg = AB3100_DIS, .setting = 0xF0 }, { .abreg = AB3100_D0C, .setting = 0x00 }, { .abreg = AB3100_D1C, .setting = 0x00 }, { .abreg = AB3100_D2C, .setting = 0x00 }, { .abreg = AB3100_D3C, .setting = 0x00 }, }; static int __devinit ab3100_setup(struct ab3100 *ab3100) { int err = 0; int i; for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) { err = ab3100_set_register_interruptible(ab3100, ab3100_init_settings[i].abreg, ab3100_init_settings[i].setting); if (err) goto exit_no_setup; } /* * Special trick to make the AB3100 use the 32kHz clock (RTC) * bit 3 in test register 0x02 is a special, undocumented test * register bit that only exist in AB3100 P1E */ if (ab3100->chip_id == 0xc4) { dev_warn(ab3100->dev, "AB3100 P1E variant detected, " "forcing chip to 32KHz\n"); err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); } exit_no_setup: return err; } /* The subdevices of the AB3100 */ static struct mfd_cell ab3100_devs[] = { { .name = "ab3100-dac", .id = -1, }, { .name = "ab3100-leds", .id = -1, }, { .name = "ab3100-power", .id = -1, }, { .name = "ab3100-regulators", .id = -1, }, { .name = "ab3100-sim", .id = -1, }, { .name = "ab3100-uart", .id = -1, }, { .name = "ab3100-rtc", .id = -1, }, { .name = "ab3100-charger", .id = -1, }, { .name = "ab3100-boost", .id = -1, }, { .name = "ab3100-adc", .id = -1, }, { .name = "ab3100-fuelgauge", .id = -1, }, { .name = "ab3100-vibrator", .id = -1, }, { .name = "ab3100-otp", .id = -1, }, { .name 
= "ab3100-codec", .id = -1, }, }; struct ab_family_id { u8 id; char *name; }; static const struct ab_family_id ids[] __devinitdata = { /* AB3100 */ { .id = 0xc0, .name = "P1A" }, { .id = 0xc1, .name = "P1B" }, { .id = 0xc2, .name = "P1C" }, { .id = 0xc3, .name = "P1D" }, { .id = 0xc4, .name = "P1E" }, { .id = 0xc5, .name = "P1F/R1A" }, { .id = 0xc6, .name = "P1G/R1A" }, { .id = 0xc7, .name = "P2A/R2A" }, { .id = 0xc8, .name = "P2B/R2B" }, /* AB3000 variants, not supported */ { .id = 0xa0 }, { .id = 0xa1 }, { .id = 0xa2 }, { .id = 0xa3 }, { .id = 0xa4 }, { .id = 0xa5 }, { .id = 0xa6 }, { .id = 0xa7 }, /* Terminator */ { .id = 0x00, }, }; static int __devinit ab3100_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ab3100 *ab3100; struct ab3100_platform_data *ab3100_plf_data = client->dev.platform_data; int err; int i; ab3100 = kzalloc(sizeof(struct ab3100), GFP_KERNEL); if (!ab3100) { dev_err(&client->dev, "could not allocate AB3100 device\n"); return -ENOMEM; } /* Initialize data structure */ mutex_init(&ab3100->access_mutex); BLOCKING_INIT_NOTIFIER_HEAD(&ab3100->event_subscribers); ab3100->i2c_client = client; ab3100->dev = &ab3100->i2c_client->dev; i2c_set_clientdata(client, ab3100); /* Read chip ID register */ err = ab3100_get_register_interruptible(ab3100, AB3100_CID, &ab3100->chip_id); if (err) { dev_err(&client->dev, "could not communicate with the AB3100 analog " "baseband chip\n"); goto exit_no_detect; } for (i = 0; ids[i].id != 0x0; i++) { if (ids[i].id == ab3100->chip_id) { if (ids[i].name != NULL) { snprintf(&ab3100->chip_name[0], sizeof(ab3100->chip_name) - 1, "AB3100 %s", ids[i].name); break; } else { dev_err(&client->dev, "AB3000 is not supported\n"); goto exit_no_detect; } } } if (ids[i].id == 0x0) { dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", ab3100->chip_id); dev_err(&client->dev, "accepting it anyway. 
Please update " "the driver.\n"); goto exit_no_detect; } dev_info(&client->dev, "Detected chip: %s\n", &ab3100->chip_name[0]); /* Attach a second dummy i2c_client to the test register address */ ab3100->testreg_client = i2c_new_dummy(client->adapter, client->addr + 1); if (!ab3100->testreg_client) { err = -ENOMEM; goto exit_no_testreg_client; } err = ab3100_setup(ab3100); if (err) goto exit_no_setup; err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler, IRQF_ONESHOT, "ab3100-core", ab3100); /* This real unpredictable IRQ is of course sampled for entropy */ rand_initialize_irq(client->irq); if (err) goto exit_no_irq; err = abx500_register_ops(&client->dev, &ab3100_ops); if (err) goto exit_no_ops; /* Set up and register the platform devices. */ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) { ab3100_devs[i].platform_data = ab3100_plf_data; ab3100_devs[i].pdata_size = sizeof(struct ab3100_platform_data); } err = mfd_add_devices(&client->dev, 0, ab3100_devs, ARRAY_SIZE(ab3100_devs), NULL, 0); ab3100_setup_debugfs(ab3100); return 0; exit_no_ops: exit_no_irq: exit_no_setup: i2c_unregister_device(ab3100->testreg_client); exit_no_testreg_client: exit_no_detect: kfree(ab3100); return err; } static int __devexit ab3100_remove(struct i2c_client *client) { struct ab3100 *ab3100 = i2c_get_clientdata(client); /* Unregister subdevices */ mfd_remove_devices(&client->dev); ab3100_remove_debugfs(); i2c_unregister_device(ab3100->testreg_client); /* * At this point, all subscribers should have unregistered * their notifiers so deactivate IRQ */ free_irq(client->irq, ab3100); kfree(ab3100); return 0; } static const struct i2c_device_id ab3100_id[] = { { "ab3100", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ab3100_id); static struct i2c_driver ab3100_driver = { .driver = { .name = "ab3100", .owner = THIS_MODULE, }, .id_table = ab3100_id, .probe = ab3100_probe, .remove = __devexit_p(ab3100_remove), }; static int __init ab3100_i2c_init(void) { return i2c_add_driver(&ab3100_driver); } 
static void __exit ab3100_i2c_exit(void) { i2c_del_driver(&ab3100_driver); } subsys_initcall(ab3100_i2c_init); module_exit(ab3100_i2c_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("AB3100 core driver"); MODULE_LICENSE("GPL");
gpl-2.0
thewisenerd/android_kernel_htc_pico
drivers/staging/et131x/et1310_rx.c
2533
33529
/* * Agere Systems Inc. * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * http://www.agere.com * *------------------------------------------------------------------------------ * * et1310_rx.c - Routines used to perform data reception * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include "et131x_version.h" #include "et131x_defs.h" #include <linux/pci.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/bitops.h> #include <asm/system.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include "et1310_phy.h" #include "et131x_adapter.h" #include "et1310_rx.h" #include "et131x.h" void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd); /** * et131x_rx_dma_memory_alloc * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h) * * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, * and the Packet Status Ring. 
*/ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) { u32 i, j; u32 bufsize; u32 pktStatRingSize, FBRChunkSize; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Alloc memory for the lookup table */ #ifdef USE_FBR0 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); #endif rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); /* The first thing we will do is configure the sizes of the buffer * rings. These will change based on jumbo packet support. Larger * jumbo packets increases the size of each entry in FBR0, and the * number of entries in FBR0, while at the same time decreasing the * number of entries in FBR1. * * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 * entries are huge in order to accommodate a "jumbo" frame, then it * will have less entries. Conversely, FBR1 will now be relied upon * to carry more "normal" frames, thus it's entry size also increases * and the number of entries goes up too (since it now carries * "small" + "regular" packets. * * In this scheme, we try to maintain 512 entries between the two * rings. Also, FBR1 remains a constant size - when it's size doubles * the number of entries halves. FBR0 increases in size, however. 
*/ if (adapter->RegistryJumboPacket < 2048) { #ifdef USE_FBR0 rx_ring->Fbr0BufferSize = 256; rx_ring->Fbr0NumEntries = 512; #endif rx_ring->Fbr1BufferSize = 2048; rx_ring->Fbr1NumEntries = 512; } else if (adapter->RegistryJumboPacket < 4096) { #ifdef USE_FBR0 rx_ring->Fbr0BufferSize = 512; rx_ring->Fbr0NumEntries = 1024; #endif rx_ring->Fbr1BufferSize = 4096; rx_ring->Fbr1NumEntries = 512; } else { #ifdef USE_FBR0 rx_ring->Fbr0BufferSize = 1024; rx_ring->Fbr0NumEntries = 768; #endif rx_ring->Fbr1BufferSize = 16384; rx_ring->Fbr1NumEntries = 128; } #ifdef USE_FBR0 adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries + adapter->rx_ring.Fbr1NumEntries; #else adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries; #endif /* Allocate an area of memory for Free Buffer Ring 1 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff; rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev, bufsize, &rx_ring->pFbr1RingPa); if (!rx_ring->pFbr1RingVa) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 1\n"); return -ENOMEM; } /* Save physical address * * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here * before storing the adjusted address. 
*/ rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa; /* Align Free Buffer Ring 1 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->Fbr1Realpa, &rx_ring->Fbr1offset, 0x0FFF); rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa + rx_ring->Fbr1offset); #ifdef USE_FBR0 /* Allocate an area of memory for Free Buffer Ring 0 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff; rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev, bufsize, &rx_ring->pFbr0RingPa); if (!rx_ring->pFbr0RingVa) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 0\n"); return -ENOMEM; } /* Save physical address * * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa; /* Align Free Buffer Ring 0 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->Fbr0Realpa, &rx_ring->Fbr0offset, 0x0FFF); rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa + rx_ring->Fbr0offset); #endif for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS); i++) { u64 Fbr1Offset; u64 Fbr1TempPa; u32 Fbr1Align; /* This code allocates an area of memory big enough for N * free buffers + (buffer_size - 1) so that the buffers can * be aligned on 4k boundaries. If each buffer were aligned * to a buffer_size boundary, the effect would be to double * the size of FBR0. By allocating N buffers at once, we * reduce this overhead. 
*/ if (rx_ring->Fbr1BufferSize > 4096) Fbr1Align = 4096; else Fbr1Align = rx_ring->Fbr1BufferSize; FBRChunkSize = (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1; rx_ring->Fbr1MemVa[i] = pci_alloc_consistent(adapter->pdev, FBRChunkSize, &rx_ring->Fbr1MemPa[i]); if (!rx_ring->Fbr1MemVa[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ Fbr1TempPa = rx_ring->Fbr1MemPa[i]; et131x_align_allocated_memory(adapter, &Fbr1TempPa, &Fbr1Offset, (Fbr1Align - 1)); for (j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; /* Save the Virtual address of this index for quick * access later */ rx_ring->fbr[1]->virt[index] = (u8 *) rx_ring->Fbr1MemVa[i] + (j * rx_ring->Fbr1BufferSize) + Fbr1Offset; /* now store the physical address in the descriptor * so the device can access it */ rx_ring->fbr[1]->bus_high[index] = (u32) (Fbr1TempPa >> 32); rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa; Fbr1TempPa += rx_ring->Fbr1BufferSize; rx_ring->fbr[1]->buffer1[index] = rx_ring->fbr[1]->virt[index]; rx_ring->fbr[1]->buffer2[index] = rx_ring->fbr[1]->virt[index] - 4; } } #ifdef USE_FBR0 /* Same for FBR0 (if in use) */ for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS); i++) { u64 Fbr0Offset; u64 Fbr0TempPa; FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1; rx_ring->Fbr0MemVa[i] = pci_alloc_consistent(adapter->pdev, FBRChunkSize, &rx_ring->Fbr0MemPa[i]); if (!rx_ring->Fbr0MemVa[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ Fbr0TempPa = rx_ring->Fbr0MemPa[i]; et131x_align_allocated_memory(adapter, &Fbr0TempPa, &Fbr0Offset, rx_ring->Fbr0BufferSize - 1); for (j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; rx_ring->fbr[0]->virt[index] = (u8 *) rx_ring->Fbr0MemVa[i] + (j * rx_ring->Fbr0BufferSize) + Fbr0Offset; rx_ring->fbr[0]->bus_high[index] = (u32) (Fbr0TempPa >> 
32); rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa; Fbr0TempPa += rx_ring->Fbr0BufferSize; rx_ring->fbr[0]->buffer1[index] = rx_ring->fbr[0]->virt[index]; rx_ring->fbr[0]->buffer2[index] = rx_ring->fbr[0]->virt[index] - 4; } } #endif /* Allocate an area of memory for FIFO of Packet Status ring entries */ pktStatRingSize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries; rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev, pktStatRingSize, &rx_ring->pPSRingPa); if (!rx_ring->pPSRingVa) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Packet Status Ring\n"); return -ENOMEM; } printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa); /* * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ /* Allocate an area of memory for writeback of status information */ rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev, sizeof(struct rx_status_block), &rx_ring->rx_status_bus); if (!rx_ring->rx_status_block) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Status Block\n"); return -ENOMEM; } rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD; printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); /* Recv * pci_pool_create initializes a lookaside list. After successful * creation, nonpaged fixed-size blocks can be allocated from and * freed to the lookaside list. * RFDs will be allocated from this pool. */ rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name, sizeof(struct rfd), 0, SLAB_CACHE_DMA | SLAB_HWCACHE_ALIGN, NULL); adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE; /* The RFDs are going to be put on lists later on, so initialize the * lists now. */ INIT_LIST_HEAD(&rx_ring->RecvList); return 0; } /** * et131x_rx_dma_memory_free - Free all memory allocated within this module. 
* @adapter: pointer to our private adapter structure */ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) { u32 index; u32 bufsize; u32 pktStatRingSize; struct rfd *rfd; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Free RFDs and associated packet descriptors */ WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd); while (!list_empty(&rx_ring->RecvList)) { rfd = (struct rfd *) list_entry(rx_ring->RecvList.next, struct rfd, list_node); list_del(&rfd->list_node); rfd->skb = NULL; kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd); } /* Free Free Buffer Ring 1 */ if (rx_ring->pFbr1RingVa) { /* First the packet memory */ for (index = 0; index < (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) { if (rx_ring->Fbr1MemVa[index]) { u32 Fbr1Align; if (rx_ring->Fbr1BufferSize > 4096) Fbr1Align = 4096; else Fbr1Align = rx_ring->Fbr1BufferSize; bufsize = (rx_ring->Fbr1BufferSize * FBR_CHUNKS) + Fbr1Align - 1; pci_free_consistent(adapter->pdev, bufsize, rx_ring->Fbr1MemVa[index], rx_ring->Fbr1MemPa[index]); rx_ring->Fbr1MemVa[index] = NULL; } } /* Now the FIFO itself */ rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa - rx_ring->Fbr1offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff; pci_free_consistent(adapter->pdev, bufsize, rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa); rx_ring->pFbr1RingVa = NULL; } #ifdef USE_FBR0 /* Now the same for Free Buffer Ring 0 */ if (rx_ring->pFbr0RingVa) { /* First the packet memory */ for (index = 0; index < (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) { if (rx_ring->Fbr0MemVa[index]) { bufsize = (rx_ring->Fbr0BufferSize * (FBR_CHUNKS + 1)) - 1; pci_free_consistent(adapter->pdev, bufsize, rx_ring->Fbr0MemVa[index], rx_ring->Fbr0MemPa[index]); rx_ring->Fbr0MemVa[index] = NULL; } } /* Now the FIFO itself */ rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa - rx_ring->Fbr0offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) 
+ 0xfff; pci_free_consistent(adapter->pdev, bufsize, rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa); rx_ring->pFbr0RingVa = NULL; } #endif /* Free Packet Status Ring */ if (rx_ring->pPSRingVa) { pktStatRingSize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries; pci_free_consistent(adapter->pdev, pktStatRingSize, rx_ring->pPSRingVa, rx_ring->pPSRingPa); rx_ring->pPSRingVa = NULL; } /* Free area of memory for the writeback of status information */ if (rx_ring->rx_status_block) { pci_free_consistent(adapter->pdev, sizeof(struct rx_status_block), rx_ring->rx_status_block, rx_ring->rx_status_bus); rx_ring->rx_status_block = NULL; } /* Free receive buffer pool */ /* Free receive packet pool */ /* Destroy the lookaside (RFD) pool */ if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) { kmem_cache_destroy(rx_ring->RecvLookaside); adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE; } /* Free the FBR Lookup Table */ #ifdef USE_FBR0 kfree(rx_ring->fbr[0]); #endif kfree(rx_ring->fbr[1]); /* Reset Counters */ rx_ring->nReadyRecv = 0; } /** * et131x_init_recv - Initialize receive data structures. * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h) */ int et131x_init_recv(struct et131x_adapter *adapter) { int status = -ENOMEM; struct rfd *rfd = NULL; u32 rfdct; u32 numrfd = 0; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Setup each RFD */ for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) { rfd = kmem_cache_alloc(rx_ring->RecvLookaside, GFP_ATOMIC | GFP_DMA); if (!rfd) { dev_err(&adapter->pdev->dev, "Couldn't alloc RFD out of kmem_cache\n"); status = -ENOMEM; continue; } rfd->skb = NULL; /* Add this RFD to the RecvList */ list_add_tail(&rfd->list_node, &rx_ring->RecvList); /* Increment both the available RFD's, and the total RFD's. 
*/ rx_ring->nReadyRecv++; numrfd++; } if (numrfd > NIC_MIN_NUM_RFD) status = 0; rx_ring->NumRfd = numrfd; if (status != 0) { kmem_cache_free(rx_ring->RecvLookaside, rfd); dev_err(&adapter->pdev->dev, "Allocation problems in et131x_init_recv\n"); } return status; } /** * ConfigRxDmaRegs - Start of Rx_DMA init sequence * @etdev: pointer to our adapter structure */ void ConfigRxDmaRegs(struct et131x_adapter *etdev) { struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma; struct rx_ring *rx_local = &etdev->rx_ring; struct fbr_desc *fbr_entry; u32 entry; u32 psr_num_des; unsigned long flags; /* Halt RXDMA to perform the reconfigure. */ et131x_rx_dma_disable(etdev); /* Load the completion writeback physical address * * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here * before storing the adjusted address. */ writel((u32) ((u64)rx_local->rx_status_bus >> 32), &rx_dma->dma_wb_base_hi); writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo); memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); /* Set the address and parameters of the packet status ring into the * 1310's registers */ writel((u32) ((u64)rx_local->pPSRingPa >> 32), &rx_dma->psr_base_hi); writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo); writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des); writel(0, &rx_dma->psr_full_offset); psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF; writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, &rx_dma->psr_min_des); spin_lock_irqsave(&etdev->rcv_lock, flags); /* These local variables track the PSR in the adapter structure */ rx_local->local_psr_full = 0; /* Now's the best time to initialize FBR1 contents */ fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa; for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) { fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; fbr_entry->addr_lo 
= rx_local->fbr[1]->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } /* Set the address and parameters of Free buffer ring 1 (and 0 if * required) into the 1310's registers */ writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi); writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo); writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); /* This variable tracks the free buffer ring 1 full position, so it * has to match the above. */ rx_local->local_Fbr1_full = ET_DMA10_WRAP; writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr1_min_des); #ifdef USE_FBR0 /* Now's the best time to initialize FBR0 contents */ fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa; for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) { fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry]; fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi); writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo); writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); /* This variable tracks the free buffer ring 0 full position, so it * has to match the above. */ rx_local->local_Fbr0_full = ET_DMA10_WRAP; writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr0_min_des); #endif /* Program the number of packets we will receive before generating an * interrupt. * For version B silicon, this value gets updated once autoneg is *complete. */ writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); /* The "time_done" is not working correctly to coalesce interrupts * after a given time period, but rather is giving us an interrupt * regardless of whether we have received packets. * This value gets updated once autoneg is complete. 
*/ writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); spin_unlock_irqrestore(&etdev->rcv_lock, flags); } /** * SetRxDmaTimer - Set the heartbeat timer according to line rate. * @etdev: pointer to our adapter structure */ void SetRxDmaTimer(struct et131x_adapter *etdev) { /* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. */ if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) || (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) { writel(0, &etdev->regs->rxdma.max_pkt_time); writel(1, &etdev->regs->rxdma.num_pkt_done); } } /** * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 * @etdev: pointer to our adapter structure */ void et131x_rx_dma_disable(struct et131x_adapter *etdev) { u32 csr; /* Setup the receive dma configuration register */ writel(0x00002001, &etdev->regs->rxdma.csr); csr = readl(&etdev->regs->rxdma.csr); if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ udelay(5); csr = readl(&etdev->regs->rxdma.csr); if ((csr & 0x00020000) == 0) dev_err(&etdev->pdev->dev, "RX Dma failed to enter halt state. CSR 0x%08x\n", csr); } } /** * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. 
* @etdev: pointer to our adapter structure */ void et131x_rx_dma_enable(struct et131x_adapter *etdev) { /* Setup the receive dma configuration register for normal operation */ u32 csr = 0x2000; /* FBR1 enable */ if (etdev->rx_ring.Fbr1BufferSize == 4096) csr |= 0x0800; else if (etdev->rx_ring.Fbr1BufferSize == 8192) csr |= 0x1000; else if (etdev->rx_ring.Fbr1BufferSize == 16384) csr |= 0x1800; #ifdef USE_FBR0 csr |= 0x0400; /* FBR0 enable */ if (etdev->rx_ring.Fbr0BufferSize == 256) csr |= 0x0100; else if (etdev->rx_ring.Fbr0BufferSize == 512) csr |= 0x0200; else if (etdev->rx_ring.Fbr0BufferSize == 1024) csr |= 0x0300; #endif writel(csr, &etdev->regs->rxdma.csr); csr = readl(&etdev->regs->rxdma.csr); if ((csr & 0x00020000) != 0) { udelay(5); csr = readl(&etdev->regs->rxdma.csr); if ((csr & 0x00020000) != 0) { dev_err(&etdev->pdev->dev, "RX Dma failed to exit halt state. CSR 0x%08x\n", csr); } } } /** * nic_rx_pkts - Checks the hardware for available packets * @etdev: pointer to our adapter * * Returns rfd, a pointer to our MPRFD. * * Checks the hardware for available packets, using completion ring * If packets are available, it gets an RFD from the RecvList, attaches * the packet to it, puts the RFD in the RecvPendList, and also returns * the pointer to the RFD. */ struct rfd * nic_rx_pkts(struct et131x_adapter *etdev) { struct rx_ring *rx_local = &etdev->rx_ring; struct rx_status_block *status; struct pkt_stat_desc *psr; struct rfd *rfd; u32 i; u8 *buf; unsigned long flags; struct list_head *element; u8 rindex; u16 bindex; u32 len; u32 word0; u32 word1; /* RX Status block is written by the DMA engine prior to every * interrupt. It contains the next to be used entry in the Packet * Status Ring, and also the two Free Buffer rings. 
*/ status = rx_local->rx_status_block; word1 = status->Word1 >> 16; /* Get the useful bits */ /* Check the PSR and wrap bits do not match */ if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) /* Looks like this ring is not updated yet */ return NULL; /* The packet status ring indicates that data is available. */ psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) + (rx_local->local_psr_full & 0xFFF); /* Grab any information that is required once the PSR is * advanced, since we can no longer rely on the memory being * accurate */ len = psr->word1 & 0xFFFF; rindex = (psr->word1 >> 26) & 0x03; bindex = (psr->word1 >> 16) & 0x3FF; word0 = psr->word0; /* Indicate that we have used this PSR entry. */ /* FIXME wrap 12 */ add_12bit(&rx_local->local_psr_full, 1); if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) { /* Clear psr full and toggle the wrap bit */ rx_local->local_psr_full &= ~0xFFF; rx_local->local_psr_full ^= 0x1000; } writel(rx_local->local_psr_full, &etdev->regs->rxdma.psr_full_offset); #ifndef USE_FBR0 if (rindex != 1) return NULL; #endif #ifdef USE_FBR0 if (rindex > 1 || (rindex == 0 && bindex > rx_local->Fbr0NumEntries - 1) || (rindex == 1 && bindex > rx_local->Fbr1NumEntries - 1)) #else if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1) #endif { /* Illegal buffer or ring index cannot be used by S/W*/ dev_err(&etdev->pdev->dev, "NICRxPkts PSR Entry %d indicates " "length of %d and/or bad bi(%d)\n", rx_local->local_psr_full & 0xFFF, len, bindex); return NULL; } /* Get and fill the RFD. 
*/ spin_lock_irqsave(&etdev->rcv_lock, flags); rfd = NULL; element = rx_local->RecvList.next; rfd = (struct rfd *) list_entry(element, struct rfd, list_node); if (rfd == NULL) { spin_unlock_irqrestore(&etdev->rcv_lock, flags); return NULL; } list_del(&rfd->list_node); rx_local->nReadyRecv--; spin_unlock_irqrestore(&etdev->rcv_lock, flags); rfd->bufferindex = bindex; rfd->ringindex = rindex; /* In V1 silicon, there is a bug which screws up filtering of * runt packets. Therefore runt packet filtering is disabled * in the MAC and the packets are dropped here. They are * also counted here. */ if (len < (NIC_MIN_PACKET_SIZE + 4)) { etdev->Stats.other_errors++; len = 0; } if (len) { if (etdev->ReplicaPhyLoopbk == 1) { buf = rx_local->fbr[rindex]->virt[bindex]; if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) { if (memcmp(&buf[42], "Replica packet", ETH_HLEN)) { etdev->ReplicaPhyLoopbkPF = 1; } } } /* Determine if this is a multicast packet coming in */ if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) { /* Promiscuous mode and Multicast mode are * not mutually exclusive as was first * thought. I guess Promiscuous is just * considered a super-set of the other * filters. Generally filter is 0x2b when in * promiscuous mode. */ if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS) && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { buf = rx_local->fbr[rindex]-> virt[bindex]; /* Loop through our list to see if the * destination address of this packet * matches one in our list. 
*/ for (i = 0; i < etdev->MCAddressCount; i++) { if (buf[0] == etdev->MCList[i][0] && buf[1] == etdev->MCList[i][1] && buf[2] == etdev->MCList[i][2] && buf[3] == etdev->MCList[i][3] && buf[4] == etdev->MCList[i][4] && buf[5] == etdev->MCList[i][5]) { break; } } /* If our index is equal to the number * of Multicast address we have, then * this means we did not find this * packet's matching address in our * list. Set the len to zero, * so we free our RFD when we return * from this function. */ if (i == etdev->MCAddressCount) len = 0; } if (len > 0) etdev->Stats.multircv++; } else if (word0 & ALCATEL_BROADCAST_PKT) etdev->Stats.brdcstrcv++; else /* Not sure what this counter measures in * promiscuous mode. Perhaps we should check * the MAC address to see if it is directed * to us in promiscuous mode. */ etdev->Stats.unircv++; } if (len > 0) { struct sk_buff *skb = NULL; /*rfd->len = len - 4; */ rfd->len = len; skb = dev_alloc_skb(rfd->len + 2); if (!skb) { dev_err(&etdev->pdev->dev, "Couldn't alloc an SKB for Rx\n"); return NULL; } etdev->net_stats.rx_bytes += rfd->len; memcpy(skb_put(skb, rfd->len), rx_local->fbr[rindex]->virt[bindex], rfd->len); skb->dev = etdev->netdev; skb->protocol = eth_type_trans(skb, etdev->netdev); skb->ip_summed = CHECKSUM_NONE; netif_rx(skb); } else { rfd->len = 0; } nic_return_rfd(etdev, rfd); return rfd; } /** * et131x_reset_recv - Reset the receive list * @etdev: pointer to our adapter * * Assumption, Rcv spinlock has been acquired. */ void et131x_reset_recv(struct et131x_adapter *etdev) { WARN_ON(list_empty(&etdev->rx_ring.RecvList)); } /** * et131x_handle_recv_interrupt - Interrupt handler for receive processing * @etdev: pointer to our adapter * * Assumption, Rcv spinlock has been acquired. 
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
{
	struct rfd *rfd = NULL;
	u32 count = 0;
	bool done = true;

	/* Process up to available RFD's */
	while (count < NUM_PACKETS_HANDLED) {
		if (list_empty(&etdev->rx_ring.RecvList)) {
			WARN_ON(etdev->rx_ring.nReadyRecv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(etdev);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!etdev->PacketFilter ||
		    !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
		    rfd->len == 0) {
			continue;
		}

		/* Increment the number of packets we received */
		etdev->Stats.ipackets++;

		/* Set the status on the packet, either resources or success */
		if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
			dev_warn(&etdev->pdev->dev,
				 "RFD's are running out\n");
		}
		count++;
	}

	/* If the handler quota was exhausted (or the RFD list drained
	 * unexpectedly), leave a flag and re-arm the watchdog so the
	 * remaining packets get picked up on a later pass.
	 */
	if (count == NUM_PACKETS_HANDLED || !done) {
		etdev->rx_ring.UnfinishedReceives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	} else
		/* Watchdog timer will disable itself if appropriate. */
		etdev->rx_ring.UnfinishedReceives = false;
}

/* Advance a free-buffer-ring offset by one entry.
 * NOTE(review): the value appears to be a 10-bit index (ET_DMA10_MASK)
 * plus a hardware wrap bit (ET_DMA10_WRAP) that must toggle each time the
 * index passes 'limit' - confirm against the ET1310 register spec.
 */
static inline u32 bump_fbr(u32 *fbr, u32 limit)
{
	u32 v = *fbr;
	v++;
	/* This works for all cases where limit < 1024. The 1023 case
	   works because 1023++ is 1024 which means the if condition is
	   not taken but the carry of the bit into the wrap bit toggles
	   the wrap value correctly */
	if ((v & ET_DMA10_MASK) > limit) {
		v &= ~ET_DMA10_MASK;
		v ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
	*fbr = v;
	return v;
}

/**
 * NICReturnRFD - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
{
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	u16 bi = rfd->bufferindex;
	u8 ri = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->pFbr1RingVa) +
			    INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[bi];
			next->addr_lo = rx_local->fbr[1]->bus_low[bi];
			next->word2 = bi;

			/* Tell the hardware about the new full offset last,
			 * after the descriptor fields are in place.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
				rx_local->Fbr1NumEntries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next =
			    (struct fbr_desc *) rx_local->pFbr0RingVa +
			    INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here.
			 * Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[bi];
			next->addr_lo = rx_local->fbr[0]->bus_low[bi];
			next->word2 = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			"NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->rcv_lock, flags);

	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
gpl-2.0
indodev/kernel-samsung-3.0
drivers/ata/pata_samsung_cf.c
2533
17677
/* * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * PATA driver for Samsung SoCs. * Supports CF Interface in True IDE mode. Currently only PIO mode has been * implemented; UDMA support has to be added. * * Based on: * PATA driver for AT91SAM9260 Static Memory Controller * PATA driver for Toshiba SCC controller * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/libata.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <plat/ata.h> #include <plat/regs-ata.h> #define DRV_NAME "pata_samsung_cf" #define DRV_VERSION "0.1" enum s3c_cpu_type { TYPE_S3C64XX, TYPE_S5PC100, TYPE_S5PV210, }; /* * struct s3c_ide_info - S3C PATA instance. * @clk: The clock resource for this controller. * @ide_addr: The area mapped for the hardware registers. * @sfr_addr: The area mapped for the special function registers. * @irq: The IRQ number we are using. * @cpu_type: The exact type of this controller. * @fifo_status_reg: The ATA_FIFO_STATUS register offset. */ struct s3c_ide_info { struct clk *clk; void __iomem *ide_addr; void __iomem *sfr_addr; unsigned int irq; enum s3c_cpu_type cpu_type; unsigned int fifo_status_reg; }; static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode) { u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG); reg = mode ? 
(reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP); writel(reg, s3c_ide_regbase + S3C_ATA_CFG); } static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase) { /* Select true-ide as the internal operating mode */ writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE, s3c_ide_sfrbase + S3C_CFATA_MUX); } static unsigned long pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata) { int t1 = ata->setup; int t2 = ata->act8b; int t2i = ata->rec8b; ulong piotime; piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf); return piotime; } static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct s3c_ide_info *info = ap->host->private_data; struct ata_timing timing; int cycle_time; ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG); ulong piotime; /* Enables IORDY if mode requires it */ if (ata_pio_need_iordy(adev)) ata_cfg |= S3C_ATA_CFG_IORDYEN; else ata_cfg &= ~S3C_ATA_CFG_IORDYEN; cycle_time = (int)(1000000000UL / clk_get_rate(info->clk)); ata_timing_compute(adev, adev->pio_mode, &timing, cycle_time * 1000, 0); piotime = pata_s3c_setup_timing(info, &timing); writel(ata_cfg, info->ide_addr + S3C_ATA_CFG); writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME); } /* * Waits until the IDE controller is able to perform next read/write * operation to the disk. Needed for 64XX series boards only. */ static int wait_for_host_ready(struct s3c_ide_info *info) { ulong timeout; void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg; /* wait for maximum of 20 msec */ timeout = jiffies + msecs_to_jiffies(20); while (time_before(jiffies, timeout)) { if ((readl(fifo_reg) >> 28) == 0) return 0; } return -EBUSY; } /* * Writes to one of the task file registers. */ static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg) { struct s3c_ide_info *info = host->private_data; wait_for_host_ready(info); writeb(addr, reg); } /* * Reads from one of the task file registers. 
 */
static u8 ata_inb(struct ata_host *host, void __iomem *reg)
{
	struct s3c_ide_info *info = host->private_data;
	u8 temp;

	wait_for_host_ready(info);
	/* NOTE(review): the read appears to be latched - the dummy readb()
	 * triggers the taskfile access and the actual value is then fetched
	 * from S3C_ATA_PIO_RDATA; confirm against the SoC datasheet.
	 */
	(void) readb(reg);
	wait_for_host_ready(info);
	temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
	return temp;
}

/*
 * pata_s3c_tf_load - send taskfile registers to host controller
 */
static void pata_s3c_tf_load(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* Update the device-control register only when it changed */
	if (tf->ctl != ap->last_ctl) {
		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* High-order bytes first for LBA48 commands */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
		ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
		ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
		ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
		ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
		ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
		ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
		ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
		ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		ata_outb(ap->host, tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}

/*
 * pata_s3c_tf_read - input device's ATA taskfile shadow registers
 */
static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->feature = ata_inb(ap->host, ioaddr->error_addr);
	tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
	tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
	tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
	tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
	tf->device = ata_inb(ap->host, ioaddr->device_addr);

	/* For LBA48, toggle the HOB bit to expose the high-order bytes,
	 * then restore the original control value.
	 */
	if (tf->flags & ATA_TFLAG_LBA48) {
		ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
		tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
		tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
		tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
		tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
}

/*
 * pata_s3c_exec_command - issue ATA command to host controller
 */
static void pata_s3c_exec_command(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}

/*
 * pata_s3c_check_status - Read device status register
 */
static u8 pata_s3c_check_status(struct ata_port *ap)
{
	return ata_inb(ap->host, ap->ioaddr.status_addr);
}

/*
 * pata_s3c_check_altstatus - Read alternate device status register
 */
static u8 pata_s3c_check_altstatus(struct ata_port *ap)
{
	return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
}

/*
 * pata_s3c_data_xfer - Transfer data by PIO
 * Transfers whole 16-bit words; an odd trailing byte is not transferred
 * (only reported via dev_err below).
 */
unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
				unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	struct s3c_ide_info *info = ap->host->private_data;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1, i;
	u16 *data_ptr = (u16 *)buf;

	/* Requires wait same as in ata_inb/ata_outb */
	if (rw == READ)
		for (i = 0; i < words; i++, data_ptr++) {
			wait_for_host_ready(info);
			(void) readw(data_addr);
			wait_for_host_ready(info);
			*data_ptr = readw(info->ide_addr
					+ S3C_ATA_PIO_RDATA);
		}
	else
		for (i = 0; i < words; i++, data_ptr++) {
			wait_for_host_ready(info);
			writew(*data_ptr, data_addr);
		}

	if (buflen & 0x01)
		dev_err(ap->dev, "unexpected trailing data\n");

	return words << 1;
}

/*
 * pata_s3c_dev_select - Select device on ATA bus
 */
static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp = ATA_DEVICE_OBS;

	if (device != 0)
		tmp |= ATA_DEV1;

	ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);
}

/*
 * pata_s3c_devchk - PATA device presence detection
 * Writes alternating patterns to nsect/lbal and reads them back; a device
 * is present if the shadow registers retain the last pattern written.
 */
static unsigned int pata_s3c_devchk(struct ata_port *ap,
				unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	pata_s3c_dev_select(ap, device);

	ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
	ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);

	ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
	ata_outb(ap->host, 0x55, ioaddr->lbal_addr);

	ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
	ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);

	nsect = ata_inb(ap->host, ioaddr->nsect_addr);
	lbal = ata_inb(ap->host, ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/*
 * pata_s3c_wait_after_reset - wait for devices to become ready after reset
 */
static int pata_s3c_wait_after_reset(struct ata_link *link,
		unsigned long deadline)
{
	int rc;

	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	return 0;
}

/*
 * pata_s3c_bus_softreset - PATA device software reset
 */
static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
		unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* software reset.  causes dev0 to be selected */
	ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
	udelay(20);
	ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);
	ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
	ap->last_ctl = ap->ctl;

	return pata_s3c_wait_after_reset(&ap->link, deadline);
}

/*
 * pata_s3c_softreset - reset host port via ATA SRST
 */
static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
			 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0 is present */
	if (pata_s3c_devchk(ap, 0))
		devmask |= (1 << 0);

	/* select device 0 again */
	pata_s3c_dev_select(ap, 0);

	/* issue bus reset */
	rc = pata_s3c_bus_softreset(ap, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && rc != -ENODEV) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n",
				rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&ap->link.device[0],
					  devmask & (1 << 0), &err);

	return 0;
}

/*
 * pata_s3c_set_devctl - Write device control register
 */
static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
{
	ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
}

static struct scsi_host_template pata_s3c_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

/* Full register-level overrides: used where every taskfile access must go
 * through the FIFO-synchronised ata_inb/ata_outb helpers (S3C64XX).
 */
static struct ata_port_operations pata_s3c_port_ops = {
	.inherits		= &ata_sff_port_ops,
	.sff_check_status	= pata_s3c_check_status,
	.sff_check_altstatus	= pata_s3c_check_altstatus,
	.sff_tf_load		= pata_s3c_tf_load,
	.sff_tf_read		= pata_s3c_tf_read,
	.sff_data_xfer		= pata_s3c_data_xfer,
	.sff_exec_command	= pata_s3c_exec_command,
	.sff_dev_select		= pata_s3c_dev_select,
	.sff_set_devctl		= pata_s3c_set_devctl,
	.softreset		= pata_s3c_softreset,
	.set_piomode		= pata_s3c_set_piomode,
};

/* S5P variants only need the custom PIO timing setup */
static struct ata_port_operations pata_s5p_port_ops = {
	.inherits		= &ata_sff_port_ops,
	.set_piomode		= pata_s3c_set_piomode,
};

static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
{
	u32 temp = readl(s3c_ide_regbase +
S3C_ATA_CTRL); temp = state ? (temp | 1) : (temp & ~1); writel(temp, s3c_ide_regbase + S3C_ATA_CTRL); } static irqreturn_t pata_s3c_irq(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct s3c_ide_info *info = host->private_data; u32 reg; reg = readl(info->ide_addr + S3C_ATA_IRQ); writel(reg, info->ide_addr + S3C_ATA_IRQ); return ata_sff_interrupt(irq, dev_instance); } static void pata_s3c_hwinit(struct s3c_ide_info *info, struct s3c_ide_platdata *pdata) { switch (info->cpu_type) { case TYPE_S3C64XX: /* Configure as big endian */ pata_s3c_cfg_mode(info->sfr_addr); pata_s3c_set_endian(info->ide_addr, 1); pata_s3c_enable(info->ide_addr, true); msleep(100); /* Remove IRQ Status */ writel(0x1f, info->ide_addr + S3C_ATA_IRQ); writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK); break; case TYPE_S5PC100: pata_s3c_cfg_mode(info->sfr_addr); /* FALLTHROUGH */ case TYPE_S5PV210: /* Configure as little endian */ pata_s3c_set_endian(info->ide_addr, 0); pata_s3c_enable(info->ide_addr, true); msleep(100); /* Remove IRQ Status */ writel(0x3f, info->ide_addr + S3C_ATA_IRQ); writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK); break; default: BUG(); } } static int __init pata_s3c_probe(struct platform_device *pdev) { struct s3c_ide_platdata *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct s3c_ide_info *info; struct resource *res; struct ata_port *ap; struct ata_host *host; enum s3c_cpu_type cpu_type; int ret; cpu_type = platform_get_device_id(pdev)->driver_data; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) { dev_err(dev, "failed to allocate memory for device data\n"); return -ENOMEM; } info->irq = platform_get_irq(pdev, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "failed to get mem resource\n"); return -EINVAL; } if (!devm_request_mem_region(dev, res->start, resource_size(res), DRV_NAME)) { dev_err(dev, "error requesting register region\n"); return -EBUSY; } info->ide_addr = 
devm_ioremap(dev, res->start, resource_size(res)); if (!info->ide_addr) { dev_err(dev, "failed to map IO base address\n"); return -ENOMEM; } info->clk = clk_get(&pdev->dev, "cfcon"); if (IS_ERR(info->clk)) { dev_err(dev, "failed to get access to cf controller clock\n"); ret = PTR_ERR(info->clk); info->clk = NULL; return ret; } clk_enable(info->clk); /* init ata host */ host = ata_host_alloc(dev, 1); if (!host) { dev_err(dev, "failed to allocate ide host\n"); ret = -ENOMEM; goto stop_clk; } ap = host->ports[0]; ap->pio_mask = ATA_PIO4; if (cpu_type == TYPE_S3C64XX) { ap->ops = &pata_s3c_port_ops; info->sfr_addr = info->ide_addr + 0x1800; info->ide_addr += 0x1900; info->fifo_status_reg = 0x94; } else if (cpu_type == TYPE_S5PC100) { ap->ops = &pata_s5p_port_ops; info->sfr_addr = info->ide_addr + 0x1800; info->ide_addr += 0x1900; info->fifo_status_reg = 0x84; } else { ap->ops = &pata_s5p_port_ops; info->fifo_status_reg = 0x84; } info->cpu_type = cpu_type; if (info->irq <= 0) { ap->flags |= ATA_FLAG_PIO_POLLING; info->irq = 0; ata_port_desc(ap, "no IRQ, using PIO polling\n"); } ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD; ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR; ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED; ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED; ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR; ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR; ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR; ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR; ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR; ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD; ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD; ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD; ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD; ata_port_desc(ap, "mmio cmd 0x%llx ", (unsigned long long)res->start); host->private_data = info; if (pdata && pdata->setup_gpio) pdata->setup_gpio(); /* Set 
endianness and enable the interface */ pata_s3c_hwinit(info, pdata); platform_set_drvdata(pdev, host); return ata_host_activate(host, info->irq, info->irq ? pata_s3c_irq : NULL, 0, &pata_s3c_sht); stop_clk: clk_disable(info->clk); clk_put(info->clk); return ret; } static int __exit pata_s3c_remove(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); struct s3c_ide_info *info = host->private_data; ata_host_detach(host); clk_disable(info->clk); clk_put(info->clk); return 0; } #ifdef CONFIG_PM static int pata_s3c_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ata_host *host = platform_get_drvdata(pdev); return ata_host_suspend(host, PMSG_SUSPEND); } static int pata_s3c_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ata_host *host = platform_get_drvdata(pdev); struct s3c_ide_platdata *pdata = pdev->dev.platform_data; struct s3c_ide_info *info = host->private_data; pata_s3c_hwinit(info, pdata); ata_host_resume(host); return 0; } static const struct dev_pm_ops pata_s3c_pm_ops = { .suspend = pata_s3c_suspend, .resume = pata_s3c_resume, }; #endif /* driver device registration */ static struct platform_device_id pata_s3c_driver_ids[] = { { .name = "s3c64xx-pata", .driver_data = TYPE_S3C64XX, }, { .name = "s5pc100-pata", .driver_data = TYPE_S5PC100, }, { .name = "s5pv210-pata", .driver_data = TYPE_S5PV210, }, { } }; MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids); static struct platform_driver pata_s3c_driver = { .remove = __exit_p(pata_s3c_remove), .id_table = pata_s3c_driver_ids, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &pata_s3c_pm_ops, #endif }, }; static int __init pata_s3c_init(void) { return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe); } static void __exit pata_s3c_exit(void) { platform_driver_unregister(&pata_s3c_driver); } module_init(pata_s3c_init); module_exit(pata_s3c_exit); 
MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
archos-sa/archos-gpl-gen9-kernel
arch/mips/sgi-ip27/ip27-smp.c
4581
6112
/* * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/nodemask.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/sn/arch.h> #include <asm/sn/gda.h> #include <asm/sn/intr.h> #include <asm/sn/klconfig.h> #include <asm/sn/launch.h> #include <asm/sn/mapped_kernel.h> #include <asm/sn/sn_private.h> #include <asm/sn/types.h> #include <asm/sn/sn0/hubpi.h> #include <asm/sn/sn0/hubio.h> #include <asm/sn/sn0/ip27.h> /* * Takes as first input the PROM assigned cpu id, and the kernel * assigned cpu id as the second. */ static void alloc_cpupda(cpuid_t cpu, int cpunum) { cnodeid_t node = get_cpu_cnode(cpu); nasid_t nasid = COMPACT_TO_NASID_NODEID(node); cputonasid(cpunum) = nasid; sn_cpu_info[cpunum].p_nodeid = node; cputoslice(cpunum) = get_cpu_slice(cpu); } static nasid_t get_actual_nasid(lboard_t *brd) { klhub_t *hub; if (!brd) return INVALID_NASID; /* find out if we are a completely disabled brd. 
*/ hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB); if (!hub) return INVALID_NASID; if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */ return hub->hub_info.physid; else return brd->brd_nasid; } static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest) { static int tot_cpus_found = 0; lboard_t *brd; klcpu_t *acpu; int cpus_found = 0; cpuid_t cpuid; brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27); do { acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU); while (acpu) { cpuid = acpu->cpu_info.virtid; /* cnode is not valid for completely disabled brds */ if (get_actual_nasid(brd) == brd->brd_nasid) cpuid_to_compact_node[cpuid] = cnode; if (cpuid > highest) highest = cpuid; /* Only let it join in if it's marked enabled */ if ((acpu->cpu_info.flags & KLINFO_ENABLE) && (tot_cpus_found != NR_CPUS)) { cpu_set(cpuid, cpu_possible_map); alloc_cpupda(cpuid, tot_cpus_found); cpus_found++; tot_cpus_found++; } acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu, KLSTRUCT_CPU); } brd = KLCF_NEXT(brd); if (!brd) break; brd = find_lboard(brd, KLTYPE_IP27); } while (brd); return highest; } void cpu_node_probe(void) { int i, highest = 0; gda_t *gdap = GDA; /* * Initialize the arrays to invalid nodeid (-1) */ for (i = 0; i < MAX_COMPACT_NODES; i++) compact_to_nasid_node[i] = INVALID_NASID; for (i = 0; i < MAX_NASIDS; i++) nasid_to_compact_node[i] = INVALID_CNODEID; for (i = 0; i < MAXCPUS; i++) cpuid_to_compact_node[i] = INVALID_CNODEID; /* * MCD - this whole "compact node" stuff can probably be dropped, * as we can handle sparse numbering now */ nodes_clear(node_online_map); for (i = 0; i < MAX_COMPACT_NODES; i++) { nasid_t nasid = gdap->g_nasidtable[i]; if (nasid == INVALID_NASID) break; compact_to_nasid_node[i] = nasid; nasid_to_compact_node[nasid] = i; node_set_online(num_online_nodes()); highest = do_cpumask(i, nasid, highest); } printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes()); } static 
__init void intr_clear_all(nasid_t nasid) { int i; REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0); REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0); REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0); REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0); for (i = 0; i < 128; i++) REMOTE_HUB_CLR_INTR(nasid, i); } static void ip27_send_ipi_single(int destid, unsigned int action) { int irq; switch (action) { case SMP_RESCHEDULE_YOURSELF: irq = CPU_RESCHED_A_IRQ; break; case SMP_CALL_FUNCTION: irq = CPU_CALL_A_IRQ; break; default: panic("sendintr"); } irq += cputoslice(destid); /* * Convert the compact hub number to the NASID to get the correct * part of the address space. Then set the interrupt bit associated * with the CPU we want to send the interrupt to. */ REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); } static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) ip27_send_ipi_single(i, action); } static void __cpuinit ip27_init_secondary(void) { per_cpu_init(); } static void __cpuinit ip27_smp_finish(void) { extern void hub_rt_clock_event_init(void); hub_rt_clock_event_init(); local_irq_enable(); } static void __init ip27_cpus_done(void) { } /* * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we * set sp to the kernel stack of the newly created idle process, gp to the proc * struct so that current_thread_info() will work. */ static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) { unsigned long gp = (unsigned long)task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 0, (void *) sp, (void *) gp); } static void __init ip27_smp_setup(void) { cnodeid_t cnode; for_each_online_node(cnode) { if (cnode == 0) continue; intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); } replicate_kernel_text(); /* * Assumption to be fixed: we're always booted on logical / physical * processor 0. 
While we're always running on logical processor 0 * this still means this is physical processor zero; it might for * example be disabled in the firmware. */ alloc_cpupda(0, 0); } static void __init ip27_prepare_cpus(unsigned int max_cpus) { /* We already did everything necessary earlier */ } struct plat_smp_ops ip27_smp_ops = { .send_ipi_single = ip27_send_ipi_single, .send_ipi_mask = ip27_send_ipi_mask, .init_secondary = ip27_init_secondary, .smp_finish = ip27_smp_finish, .cpus_done = ip27_cpus_done, .boot_secondary = ip27_boot_secondary, .smp_setup = ip27_smp_setup, .prepare_cpus = ip27_prepare_cpus, };
gpl-2.0
ktoonsez/KTSGS6
net/core/gen_estimator.c
6885
8636
/* * net/sched/gen_estimator.c Simple rate estimator. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * Jamal Hadi Salim - moved it to net/core and reshulfed * names to make it usable in general net subsystem. */ #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <net/sock.h> #include <net/gen_stats.h> /* This code is NOT intended to be used for statistics collection, its purpose is to provide a base for statistical multiplexing for controlled load service. If you need only statistics, run a user level daemon which periodically reads byte counters. Unfortunately, rate estimation is not a very easy task. F.e. I did not find a simple way to estimate the current peak rate and even failed to formulate the problem 8)8) So I preferred not to built an estimator into the scheduler, but run this task separately. Ideally, it should be kernel thread(s), but for now it runs from timers, which puts apparent top bounds on the number of rated flows, has minimal overhead on small, but is enough to handle controlled load service, sets of aggregates. We measure rate over A=(1<<interval) seconds and evaluate EWMA: avrate = avrate*(1-W) + rate*W where W is chosen as negative power of 2: W = 2^(-ewma_log) The resulting time constant is: T = A/(-ln(1-W)) NOTES. 
* avbps is scaled by 2^5, avpps is scaled by 2^10. * both values are reported as 32 bit unsigned values. bps can overflow for fast links : max speed being 34360Mbit/sec * Minimal interval is HZ/4=250msec (it is the greatest common divisor for HZ=100 and HZ=1024 8)), maximal interval is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals are too expensive, longer ones can be implemented at user level painlessly. */ #define EST_MAX_INTERVAL 5 struct gen_estimator { struct list_head list; struct gnet_stats_basic_packed *bstats; struct gnet_stats_rate_est *rate_est; spinlock_t *stats_lock; int ewma_log; u64 last_bytes; u64 avbps; u32 last_packets; u32 avpps; struct rcu_head e_rcu; struct rb_node node; }; struct gen_estimator_head { struct timer_list timer; struct list_head list; }; static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; /* Protects against NULL dereference */ static DEFINE_RWLOCK(est_lock); /* Protects against soft lockup during large deletion */ static struct rb_root est_root = RB_ROOT; static DEFINE_SPINLOCK(est_tree_lock); static void est_timer(unsigned long arg) { int idx = (int)arg; struct gen_estimator *e; rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { u64 nbytes; u64 brate; u32 npackets; u32 rate; spin_lock(e->stats_lock); read_lock(&est_lock); if (e->bstats == NULL) goto skip; nbytes = e->bstats->bytes; npackets = e->bstats->packets; brate = (nbytes - e->last_bytes)<<(7 - idx); e->last_bytes = nbytes; e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->rate_est->bps = (e->avbps+0xF)>>5; rate = (npackets - e->last_packets)<<(12 - idx); e->last_packets = npackets; e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); e->rate_est->pps = (e->avpps+0x1FF)>>10; skip: read_unlock(&est_lock); spin_unlock(e->stats_lock); } if (!list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); rcu_read_unlock(); } static void gen_add_node(struct gen_estimator *est) { struct rb_node 
**p = &est_root.rb_node, *parent = NULL; while (*p) { struct gen_estimator *e; parent = *p; e = rb_entry(parent, struct gen_estimator, node); if (est->bstats > e->bstats) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&est->node, parent, p); rb_insert_color(&est->node, &est_root); } static struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est *rate_est) { struct rb_node *p = est_root.rb_node; while (p) { struct gen_estimator *e; e = rb_entry(p, struct gen_estimator, node); if (bstats > e->bstats) p = p->rb_right; else if (bstats < e->bstats || rate_est != e->rate_est) p = p->rb_left; else return e; } return NULL; } /** * gen_new_estimator - create a new rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * @stats_lock: statistics lock * @opt: rate estimator configuration TLV * * Creates a new rate estimator with &bstats as source and &rate_est * as destination. A new timer with the interval specified in the * configuration TLV is created. Upon each interval, the latest statistics * will be read from &bstats and the estimated rate will be stored in * &rate_est with the statistics lock grabed during this period. * * Returns 0 on success or a negative error code. 
* */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { struct gen_estimator *est; struct gnet_estimator *parm = nla_data(opt); int idx; if (nla_len(opt) < sizeof(*parm)) return -EINVAL; if (parm->interval < -2 || parm->interval > 3) return -EINVAL; est = kzalloc(sizeof(*est), GFP_KERNEL); if (est == NULL) return -ENOBUFS; idx = parm->interval + 2; est->bstats = bstats; est->rate_est = rate_est; est->stats_lock = stats_lock; est->ewma_log = parm->ewma_log; est->last_bytes = bstats->bytes; est->avbps = rate_est->bps<<5; est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; spin_lock_bh(&est_tree_lock); if (!elist[idx].timer.function) { INIT_LIST_HEAD(&elist[idx].list); setup_timer(&elist[idx].timer, est_timer, idx); } if (list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); list_add_rcu(&est->list, &elist[idx].list); gen_add_node(est); spin_unlock_bh(&est_tree_lock); return 0; } EXPORT_SYMBOL(gen_new_estimator); /** * gen_kill_estimator - remove a rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * * Removes the rate estimator specified by &bstats and &rate_est. 
* * Note : Caller should respect an RCU grace period before freeing stats_lock */ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est) { struct gen_estimator *e; spin_lock_bh(&est_tree_lock); while ((e = gen_find_node(bstats, rate_est))) { rb_erase(&e->node, &est_root); write_lock(&est_lock); e->bstats = NULL; write_unlock(&est_lock); list_del_rcu(&e->list); kfree_rcu(e, e_rcu); } spin_unlock_bh(&est_tree_lock); } EXPORT_SYMBOL(gen_kill_estimator); /** * gen_replace_estimator - replace rate estimator configuration * @bstats: basic statistics * @rate_est: rate estimator statistics * @stats_lock: statistics lock * @opt: rate estimator configuration TLV * * Replaces the configuration of a rate estimator by calling * gen_kill_estimator() and gen_new_estimator(). * * Returns 0 on success or a negative error code. */ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { gen_kill_estimator(bstats, rate_est); return gen_new_estimator(bstats, rate_est, stats_lock, opt); } EXPORT_SYMBOL(gen_replace_estimator); /** * gen_estimator_active - test if estimator is currently in use * @bstats: basic statistics * @rate_est: rate estimator statistics * * Returns true if estimator is active, and false if not. */ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est *rate_est) { bool res; ASSERT_RTNL(); spin_lock_bh(&est_tree_lock); res = gen_find_node(bstats, rate_est) != NULL; spin_unlock_bh(&est_tree_lock); return res; } EXPORT_SYMBOL(gen_estimator_active);
gpl-2.0
invisiblek/caf_kernel_msm
arch/xtensa/kernel/irq.c
7653
3247
/* * linux/arch/xtensa/kernel/irq.c * * Xtensa built-in interrupt controller and some generic functions copied * from i386. * * Copyright (C) 2002 - 2006 Tensilica, Inc. * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * * Chris Zankel <chris@zankel.net> * Kevin Chea * */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <asm/uaccess.h> #include <asm/platform.h> static unsigned int cached_irq_mask; atomic_t irq_err_count; /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ asmlinkage void do_IRQ(int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); if (irq >= NR_IRQS) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", __func__, irq); } irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { unsigned long sp; __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp)); sp &= THREAD_SIZE - 1; if (unlikely(sp < (sizeof(thread_info) + 1024))) printk("Stack overflow in do_IRQ: %ld\n", sp - sizeof(struct thread_info)); } #endif generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); } int arch_show_interrupts(struct seq_file *p, int prec) { seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); return 0; } static void xtensa_irq_mask(struct irq_data *d) { cached_irq_mask &= ~(1 << d->irq); set_sr (cached_irq_mask, INTENABLE); } static void xtensa_irq_unmask(struct irq_data *d) { cached_irq_mask |= 1 << d->irq; set_sr (cached_irq_mask, INTENABLE); } static void xtensa_irq_enable(struct irq_data *d) { variant_irq_enable(d->irq); xtensa_irq_unmask(d->irq); } static void xtensa_irq_disable(struct irq_data *d) { xtensa_irq_mask(d->irq); variant_irq_disable(d->irq); } static void xtensa_irq_ack(struct irq_data *d) { set_sr(1 << d->irq, INTCLEAR); } static int xtensa_irq_retrigger(struct 
irq_data *d) { set_sr (1 << d->irq, INTSET); return 1; } static struct irq_chip xtensa_irq_chip = { .name = "xtensa", .irq_enable = xtensa_irq_enable, .irq_disable = xtensa_irq_disable, .irq_mask = xtensa_irq_mask, .irq_unmask = xtensa_irq_unmask, .irq_ack = xtensa_irq_ack, .irq_retrigger = xtensa_irq_retrigger, }; void __init init_IRQ(void) { int index; for (index = 0; index < XTENSA_NR_IRQS; index++) { int mask = 1 << index; if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_simple_irq); else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_edge_irq); else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_level_irq); else if (mask & XCHAL_INTTYPE_MASK_TIMER) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_edge_irq); else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_level_irq); } cached_irq_mask = 0; variant_init_irq(); }
gpl-2.0
Elite-Kernels/Elite_M8
arch/xtensa/kernel/irq.c
7653
3247
/* * linux/arch/xtensa/kernel/irq.c * * Xtensa built-in interrupt controller and some generic functions copied * from i386. * * Copyright (C) 2002 - 2006 Tensilica, Inc. * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * * Chris Zankel <chris@zankel.net> * Kevin Chea * */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <asm/uaccess.h> #include <asm/platform.h> static unsigned int cached_irq_mask; atomic_t irq_err_count; /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ asmlinkage void do_IRQ(int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); if (irq >= NR_IRQS) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", __func__, irq); } irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { unsigned long sp; __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp)); sp &= THREAD_SIZE - 1; if (unlikely(sp < (sizeof(thread_info) + 1024))) printk("Stack overflow in do_IRQ: %ld\n", sp - sizeof(struct thread_info)); } #endif generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); } int arch_show_interrupts(struct seq_file *p, int prec) { seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); return 0; } static void xtensa_irq_mask(struct irq_data *d) { cached_irq_mask &= ~(1 << d->irq); set_sr (cached_irq_mask, INTENABLE); } static void xtensa_irq_unmask(struct irq_data *d) { cached_irq_mask |= 1 << d->irq; set_sr (cached_irq_mask, INTENABLE); } static void xtensa_irq_enable(struct irq_data *d) { variant_irq_enable(d->irq); xtensa_irq_unmask(d->irq); } static void xtensa_irq_disable(struct irq_data *d) { xtensa_irq_mask(d->irq); variant_irq_disable(d->irq); } static void xtensa_irq_ack(struct irq_data *d) { set_sr(1 << d->irq, INTCLEAR); } static int xtensa_irq_retrigger(struct 
irq_data *d) { set_sr (1 << d->irq, INTSET); return 1; } static struct irq_chip xtensa_irq_chip = { .name = "xtensa", .irq_enable = xtensa_irq_enable, .irq_disable = xtensa_irq_disable, .irq_mask = xtensa_irq_mask, .irq_unmask = xtensa_irq_unmask, .irq_ack = xtensa_irq_ack, .irq_retrigger = xtensa_irq_retrigger, }; void __init init_IRQ(void) { int index; for (index = 0; index < XTENSA_NR_IRQS; index++) { int mask = 1 << index; if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_simple_irq); else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_edge_irq); else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_level_irq); else if (mask & XCHAL_INTTYPE_MASK_TIMER) irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_edge_irq); else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler(index, &xtensa_irq_chip, handle_level_irq); } cached_irq_mask = 0; variant_init_irq(); }
gpl-2.0
Abhinav1997/kernel-1
net/netfilter/xt_mac.c
8677
1791
/* Kernel module to match MAC address parameters. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/xt_mac.h> #include <linux/netfilter/x_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Xtables: MAC address match"); MODULE_ALIAS("ipt_mac"); MODULE_ALIAS("ip6t_mac"); static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_mac_info *info = par->matchinfo; bool ret; if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER) return false; if (skb_mac_header(skb) < skb->head) return false; if (skb_mac_header(skb) + ETH_HLEN > skb->data) return false; ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; ret ^= info->invert; return ret; } static struct xt_match mac_mt_reg __read_mostly = { .name = "mac", .revision = 0, .family = NFPROTO_UNSPEC, .match = mac_mt, .matchsize = sizeof(struct xt_mac_info), .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .me = THIS_MODULE, }; static int __init mac_mt_init(void) { return xt_register_match(&mac_mt_reg); } static void __exit mac_mt_exit(void) { xt_unregister_match(&mac_mt_reg); } module_init(mac_mt_init); module_exit(mac_mt_exit);
gpl-2.0
LiquidSmokeX64/URKernel
net/sched/sch_ingress.c
10981
3274
/* net/sched/sch_ingress.c - Ingress qdisc * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Jamal Hadi Salim 1999 */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> struct ingress_qdisc_data { struct tcf_proto *filter_list; }; /* ------------------------- Class/flow operations ------------------------- */ static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long ingress_get(struct Qdisc *sch, u32 classid) { return TC_H_MIN(classid) + 1; } static unsigned long ingress_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return ingress_get(sch, classid); } static void ingress_put(struct Qdisc *sch, unsigned long cl) { } static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) { struct ingress_qdisc_data *p = qdisc_priv(sch); return &p->filter_list; } /* --------------------------- Qdisc operations ---------------------------- */ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct ingress_qdisc_data *p = qdisc_priv(sch); struct tcf_result res; int result; result = tc_classify(skb, p->filter_list, &res); qdisc_bstats_update(sch, skb); switch (result) { case TC_ACT_SHOT: result = TC_ACT_SHOT; sch->qstats.drops++; break; case TC_ACT_STOLEN: case TC_ACT_QUEUED: result = TC_ACT_STOLEN; break; case TC_ACT_RECLASSIFY: case TC_ACT_OK: skb->tc_index = TC_H_MIN(res.classid); default: result = TC_ACT_OK; break; } return result; } /* ------------------------------------------------------------- */ static void ingress_destroy(struct Qdisc *sch) { struct 
ingress_qdisc_data *p = qdisc_priv(sch); tcf_destroy_chain(&p->filter_list); } static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static const struct Qdisc_class_ops ingress_class_ops = { .leaf = ingress_leaf, .get = ingress_get, .put = ingress_put, .walk = ingress_walk, .tcf_chain = ingress_find_tcf, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_put, }; static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_qdisc_data), .enqueue = ingress_enqueue, .destroy = ingress_destroy, .dump = ingress_dump, .owner = THIS_MODULE, }; static int __init ingress_module_init(void) { return register_qdisc(&ingress_qdisc_ops); } static void __exit ingress_module_exit(void) { unregister_qdisc(&ingress_qdisc_ops); } module_init(ingress_module_init) module_exit(ingress_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
hephaex/a10c
arch/x86/kernel/hw_breakpoint.c
230
12545
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2007 Alan Stern * Copyright (C) 2009 IBM Corporation * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com> * * Authors: Alan Stern <stern@rowland.harvard.edu> * K.Prasad <prasad@linux.vnet.ibm.com> * Frederic Weisbecker <fweisbec@gmail.com> */ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. 
*/ #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/irqflags.h> #include <linux/notifier.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/hw_breakpoint.h> #include <asm/processor.h> #include <asm/debugreg.h> /* Per cpu debug control register value */ DEFINE_PER_CPU(unsigned long, cpu_dr7); EXPORT_PER_CPU_SYMBOL(cpu_dr7); /* Per cpu debug address registers values */ static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); /* * Stores the breakpoints currently in use on each breakpoint address * register for each cpus */ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); static inline unsigned long __encode_dr7(int drnum, unsigned int len, unsigned int type) { unsigned long bp_info; bp_info = (len | type) & 0xf; bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)); return bp_info; } /* * Encode the length, type, Exact, and Enable bits for a particular breakpoint * as stored in debug register 7. */ unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) { return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN; } /* * Decode the length and type bits for a particular breakpoint as * stored in debug register 7. Return the "enabled" status. */ int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type) { int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); *len = (bp_info & 0xc) | 0x40; *type = (bp_info & 0x3) | 0x80; return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; } /* * Install a perf counter breakpoint. * * We seek a free debug address register and use it for this * breakpoint. Eventually we enable it in the debug control register. 
* * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned long *dr7; int i; for (i = 0; i < HBP_NUM; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return -EBUSY; set_debugreg(info->address, i); __this_cpu_write(cpu_debugreg[i], info->address); dr7 = &__get_cpu_var(cpu_dr7); *dr7 |= encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); return 0; } /* * Uninstall the breakpoint contained in the given counter. * * First we search the debug address register it uses and then we disable * it. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned long *dr7; int i; for (i = 0; i < HBP_NUM; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (*slot == bp) { *slot = NULL; break; } } if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return; dr7 = &__get_cpu_var(cpu_dr7); *dr7 &= ~__encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); } static int get_hbp_len(u8 hbp_len) { unsigned int len_in_bytes = 0; switch (hbp_len) { case X86_BREAKPOINT_LEN_1: len_in_bytes = 1; break; case X86_BREAKPOINT_LEN_2: len_in_bytes = 2; break; case X86_BREAKPOINT_LEN_4: len_in_bytes = 4; break; #ifdef CONFIG_X86_64 case X86_BREAKPOINT_LEN_8: len_in_bytes = 8; break; #endif } return len_in_bytes; } /* * Check for virtual address in kernel space. 
*/ int arch_check_bp_in_kernelspace(struct perf_event *bp) { unsigned int len; unsigned long va; struct arch_hw_breakpoint *info = counter_arch_bp(bp); va = info->address; len = get_hbp_len(info->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } int arch_bp_generic_fields(int x86_len, int x86_type, int *gen_len, int *gen_type) { /* Type */ switch (x86_type) { case X86_BREAKPOINT_EXECUTE: if (x86_len != X86_BREAKPOINT_LEN_X) return -EINVAL; *gen_type = HW_BREAKPOINT_X; *gen_len = sizeof(long); return 0; case X86_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; case X86_BREAKPOINT_RW: *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; break; default: return -EINVAL; } /* Len */ switch (x86_len) { case X86_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; case X86_BREAKPOINT_LEN_2: *gen_len = HW_BREAKPOINT_LEN_2; break; case X86_BREAKPOINT_LEN_4: *gen_len = HW_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case X86_BREAKPOINT_LEN_8: *gen_len = HW_BREAKPOINT_LEN_8; break; #endif default: return -EINVAL; } return 0; } static int arch_build_bp_info(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); info->address = bp->attr.bp_addr; /* Type */ switch (bp->attr.bp_type) { case HW_BREAKPOINT_W: info->type = X86_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: info->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: info->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. * But we still need to check userspace is not trying to setup * an unsupported length, to get a range breakpoint for example. 
*/ if (bp->attr.bp_len == sizeof(long)) { info->len = X86_BREAKPOINT_LEN_X; return 0; } default: return -EINVAL; } /* Len */ switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = X86_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: info->len = X86_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: info->len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case HW_BREAKPOINT_LEN_8: info->len = X86_BREAKPOINT_LEN_8; break; #endif default: return -EINVAL; } return 0; } /* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; ret = arch_build_bp_info(bp); if (ret) return ret; ret = -EINVAL; switch (info->len) { case X86_BREAKPOINT_LEN_1: align = 0; break; case X86_BREAKPOINT_LEN_2: align = 1; break; case X86_BREAKPOINT_LEN_4: align = 3; break; #ifdef CONFIG_X86_64 case X86_BREAKPOINT_LEN_8: align = 7; break; #endif default: return ret; } /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ if (info->address & align) return -EINVAL; return 0; } /* * Dump the debug register contents to the user. * We can't dump our per cpu values because it * may contain cpu wide breakpoint, something that * doesn't belong to the current task. 
* * TODO: include non-ptrace user breakpoints (perf) */ void aout_dump_debugregs(struct user *dump) { int i; int dr7 = 0; struct perf_event *bp; struct arch_hw_breakpoint *info; struct thread_struct *thread = &current->thread; for (i = 0; i < HBP_NUM; i++) { bp = thread->ptrace_bps[i]; if (bp && !bp->attr.disabled) { dump->u_debugreg[i] = bp->attr.bp_addr; info = counter_arch_bp(bp); dr7 |= encode_dr7(i, info->len, info->type); } else { dump->u_debugreg[i] = 0; } } dump->u_debugreg[4] = 0; dump->u_debugreg[5] = 0; dump->u_debugreg[6] = current->thread.debugreg6; dump->u_debugreg[7] = dr7; } EXPORT_SYMBOL_GPL(aout_dump_debugregs); /* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < HBP_NUM; i++) { unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } t->debugreg6 = 0; t->ptrace_dr7 = 0; } void hw_breakpoint_restore(void) { set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); set_debugreg(current->thread.debugreg6, 6); set_debugreg(__this_cpu_read(cpu_dr7), 7); } EXPORT_SYMBOL_GPL(hw_breakpoint_restore); /* * Handle debug exception notifications. * * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below. * * NOTIFY_DONE returned if one of the following conditions is true. * i) When the causative address is from user-space and the exception * is a valid one, i.e. not triggered as a result of lazy debug register * switching * ii) When there are more bits than trap<n> set in DR6 register (such * as BD, BS or BT) indicating that more than one debug condition is * met and requires some more action in do_debug(). 
* * NOTIFY_STOP returned for all other cases * */ static int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct perf_event *bp; unsigned long dr7, dr6; unsigned long *dr6_p; /* The DR6 value is pointed by args->err */ dr6_p = (unsigned long *)ERR_PTR(args->err); dr6 = *dr6_p; /* If it's a single step, TRAP bits are random */ if (dr6 & DR_STEP) return NOTIFY_DONE; /* Do an early return if no trap bits are set in DR6 */ if ((dr6 & DR_TRAP_BITS) == 0) return NOTIFY_DONE; get_debugreg(dr7, 7); /* Disable breakpoints during exception handling */ set_debugreg(0UL, 7); /* * Assert that local interrupts are disabled * Reset the DRn bits in the virtualized register value. * The ptrace trigger routine will add in whatever is needed. */ current->thread.debugreg6 &= ~DR_TRAP_BITS; cpu = get_cpu(); /* Handle all the breakpoints that were triggered */ for (i = 0; i < HBP_NUM; ++i) { if (likely(!(dr6 & (DR_TRAP0 << i)))) continue; /* * The counter may be concurrently released but that can only * occur from a call_rcu() path. We can then safely fetch * the breakpoint, use its callback, touch its counter * while we are in an rcu_read_lock() path. */ rcu_read_lock(); bp = per_cpu(bp_per_reg[i], cpu); /* * Reset the 'i'th TRAP bit in dr6 to denote completion of * exception handling */ (*dr6_p) &= ~(DR_TRAP0 << i); /* * bp can be NULL due to lazy debug register switching * or due to concurrent perf counter removing. */ if (!bp) { rcu_read_unlock(); break; } perf_bp_event(bp, args->regs); /* * Set up resume flag to avoid breakpoint recursion when * returning back to origin. 
*/ if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE) args->regs->flags |= X86_EFLAGS_RF; rcu_read_unlock(); } /* * Further processing in do_debug() is needed for a) user-space * breakpoints (to generate signals) and b) when the system has * taken exception due to multiple causes */ if ((current->thread.debugreg6 & DR_TRAP_BITS) || (dr6 & (~DR_TRAP_BITS))) rc = NOTIFY_DONE; set_debugreg(dr7, 7); put_cpu(); return rc; } /* * Handle debug exception notifications. */ int __kprobes hw_breakpoint_exceptions_notify( struct notifier_block *unused, unsigned long val, void *data) { if (val != DIE_DEBUG) return NOTIFY_DONE; return hw_breakpoint_handler(data); } void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ }
gpl-2.0
dwindsor/linux-next
drivers/watchdog/stmp3xxx_rtc_wdt.c
486
4121
/* * Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28 * * Author: Wolfram Sang <kernel@pengutronix.de> * * Copyright (C) 2011-12 Wolfram Sang, Pengutronix * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/watchdog.h> #include <linux/platform_device.h> #include <linux/stmp3xxx_rtc_wdt.h> #include <linux/notifier.h> #include <linux/reboot.h> #define WDOG_TICK_RATE 1000 /* 1 kHz clock */ #define STMP3XXX_DEFAULT_TIMEOUT 19 #define STMP3XXX_MAX_TIMEOUT (UINT_MAX / WDOG_TICK_RATE) static int heartbeat = STMP3XXX_DEFAULT_TIMEOUT; module_param(heartbeat, uint, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to " __MODULE_STRING(STMP3XXX_MAX_TIMEOUT) ", default " __MODULE_STRING(STMP3XXX_DEFAULT_TIMEOUT)); static int wdt_start(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE); return 0; } static int wdt_stop(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, 0); return 0; } static int wdt_set_timeout(struct watchdog_device *wdd, unsigned new_timeout) { wdd->timeout = new_timeout; return wdt_start(wdd); } static const struct watchdog_info stmp3xxx_wdt_ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "STMP3XXX RTC Watchdog", }; static const struct watchdog_ops stmp3xxx_wdt_ops = { .owner = THIS_MODULE, .start = wdt_start, .stop = wdt_stop, .set_timeout = wdt_set_timeout, }; static struct watchdog_device stmp3xxx_wdd = { .info = &stmp3xxx_wdt_ident, .ops = &stmp3xxx_wdt_ops, .min_timeout = 1, .max_timeout = 
STMP3XXX_MAX_TIMEOUT, .status = WATCHDOG_NOWAYOUT_INIT_STATUS, }; static int wdt_notify_sys(struct notifier_block *nb, unsigned long code, void *unused) { switch (code) { case SYS_DOWN: /* keep enabled, system might crash while going down */ break; case SYS_HALT: /* allow the system to actually halt */ case SYS_POWER_OFF: wdt_stop(&stmp3xxx_wdd); break; } return NOTIFY_DONE; } static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; static int stmp3xxx_wdt_probe(struct platform_device *pdev) { int ret; watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev); stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT); stmp3xxx_wdd.parent = &pdev->dev; ret = watchdog_register_device(&stmp3xxx_wdd); if (ret < 0) { dev_err(&pdev->dev, "cannot register watchdog device\n"); return ret; } if (register_reboot_notifier(&wdt_notifier)) dev_warn(&pdev->dev, "cannot register reboot notifier\n"); dev_info(&pdev->dev, "initialized watchdog with heartbeat %ds\n", stmp3xxx_wdd.timeout); return 0; } static int stmp3xxx_wdt_remove(struct platform_device *pdev) { unregister_reboot_notifier(&wdt_notifier); watchdog_unregister_device(&stmp3xxx_wdd); return 0; } static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev) { struct watchdog_device *wdd = &stmp3xxx_wdd; if (watchdog_active(wdd)) return wdt_stop(wdd); return 0; } static int __maybe_unused stmp3xxx_wdt_resume(struct device *dev) { struct watchdog_device *wdd = &stmp3xxx_wdd; if (watchdog_active(wdd)) return wdt_start(wdd); return 0; } static SIMPLE_DEV_PM_OPS(stmp3xxx_wdt_pm_ops, stmp3xxx_wdt_suspend, stmp3xxx_wdt_resume); static struct platform_driver stmp3xxx_wdt_driver = { .driver = { .name = "stmp3xxx_rtc_wdt", .pm = &stmp3xxx_wdt_pm_ops, }, .probe = stmp3xxx_wdt_probe, .remove = stmp3xxx_wdt_remove, }; module_platform_driver(stmp3xxx_wdt_driver); MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
gpl-2.0
delanoister-Andro-ID/GT-I9300-ICS-3.0.y
arch/arm/plat-samsung/dev-hsmmc.c
486
2090
/* linux/arch/arm/plat-s3c/dev-hsmmc.c * * Copyright (c) 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C series device definition for hsmmc devices * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <mach/map.h> #include <plat/sdhci.h> #include <plat/devs.h> #include <plat/cpu.h> #define S3C_SZ_HSMMC (0x1000) static struct resource s3c_hsmmc_resource[] = { [0] = { .start = S3C_PA_HSMMC0, .end = S3C_PA_HSMMC0 + S3C_SZ_HSMMC - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_HSMMC0, .end = IRQ_HSMMC0, .flags = IORESOURCE_IRQ, } }; static u64 s3c_device_hsmmc_dmamask = 0xffffffffUL; struct s3c_sdhci_platdata s3c_hsmmc0_def_platdata = { .max_width = 4, .host_caps = (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), .clk_type = S3C_SDHCI_CLK_DIV_INTERNAL, }; struct platform_device s3c_device_hsmmc0 = { .name = "s3c-sdhci", .id = 0, .num_resources = ARRAY_SIZE(s3c_hsmmc_resource), .resource = s3c_hsmmc_resource, .dev = { .dma_mask = &s3c_device_hsmmc_dmamask, .coherent_dma_mask = 0xffffffffUL, .platform_data = &s3c_hsmmc0_def_platdata, }, }; void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd) { struct s3c_sdhci_platdata *set = &s3c_hsmmc0_def_platdata; set->cd_type = pd->cd_type; set->ext_cd_init = pd->ext_cd_init; set->ext_cd_cleanup = pd->ext_cd_cleanup; set->ext_cd_gpio = pd->ext_cd_gpio; set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert; set->pm_flags = pd->pm_flags; if (pd->vmmc_name) set->vmmc_name = pd->vmmc_name; if (pd->max_width) set->max_width = pd->max_width; if (pd->cfg_gpio) set->cfg_gpio = pd->cfg_gpio; if (pd->cfg_card) set->cfg_card = pd->cfg_card; if (pd->host_caps) set->host_caps |= pd->host_caps; if (pd->clk_type) set->clk_type = pd->clk_type; }
gpl-2.0
SlimRoms/kernel_lge_v500
arch/arm/mach-msm/qdsp5v2/audio_fm.c
742
9465
/* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. * * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5v2/audio_mp3.c * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/android_pmem.h> #include <linux/msm_audio.h> #include <asm/atomic.h> #include <asm/ioctls.h> #include <mach/msm_adsp.h> #include <mach/debug_mm.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/qdsp5v2/afe.h> #include <mach/qdsp5v2/acdb_commands.h> #include <mach/qdsp5v2/audio_acdbi.h> #include <mach/qdsp5v2/audio_acdb_def.h> #define SESSION_ID_FM 6 #define FM_ENABLE 0xFFFF #define FM_DISABLE 0x0 #define FM_COPP 0x2 /* Macro specifies maximum FM routing possible */ #define FM_MAX_RX_ROUTE 0x2 struct fm_rx_calib_gain { uint16_t device_id; struct auddev_evt_devinfo dev_details; struct acdb_calib_gain_rx calib_rx; }; struct audio { struct mutex lock; int opened; int enabled; int running; uint16_t dec_id; uint16_t source; uint16_t fm_source; uint16_t fm_mask; uint32_t device_events; uint16_t volume; struct fm_rx_calib_gain fm_calibration_rx[FM_MAX_RX_ROUTE]; }; 
static struct audio fm_audio; /* must be called with audio->lock held */ static int audio_enable(struct audio *audio) { int rc = 0; if (audio->enabled) return 0; MM_DBG("fm mask= %08x fm_source = %08x\n", audio->fm_mask, audio->fm_source); if (audio->fm_mask && audio->fm_source) { rc = afe_config_fm_codec(FM_ENABLE, audio->fm_mask); if (!rc) audio->running = 1; /* Routed to icodec rx path */ if ((audio->fm_mask & AFE_HW_PATH_CODEC_RX) == AFE_HW_PATH_CODEC_RX) { afe_config_fm_calibration_gain( audio->fm_calibration_rx[0].device_id, audio->fm_calibration_rx[0].calib_rx.audppcalgain); } /* Routed to aux codec rx path */ if ((audio->fm_mask & AFE_HW_PATH_AUXPCM_RX) == AFE_HW_PATH_AUXPCM_RX){ afe_config_fm_calibration_gain( audio->fm_calibration_rx[1].device_id, audio->fm_calibration_rx[1].calib_rx.audppcalgain); } } audio->enabled = 1; return rc; } static void fm_listner(u32 evt_id, union auddev_evt_data *evt_payload, void *private_data) { struct audio *audio = (struct audio *) private_data; struct auddev_evt_devinfo *devinfo = (struct auddev_evt_devinfo *)evt_payload; switch (evt_id) { case AUDDEV_EVT_DEV_RDY: MM_DBG(":AUDDEV_EVT_DEV_RDY\n"); if (evt_payload->routing_id == FM_COPP) audio->fm_source = 1; else audio->source = (0x1 << evt_payload->routing_id); if (audio->source & 0x1) audio->fm_mask = 0x1; else if (audio->source & 0x2) audio->fm_mask = 0x3; else audio->fm_mask = 0x0; if (!audio->enabled || !audio->fm_mask || !audio->fm_source) break; else { afe_config_fm_codec(FM_ENABLE, audio->fm_mask); audio->running = 1; } break; case AUDDEV_EVT_DEV_RLS: MM_DBG(":AUDDEV_EVT_DEV_RLS\n"); if (evt_payload->routing_id == FM_COPP) audio->fm_source = 0; else audio->source &= ~(0x1 << evt_payload->routing_id); if (audio->source & 0x1) audio->fm_mask = 0x1; else if (audio->source & 0x2) audio->fm_mask = 0x3; else audio->fm_mask = 0x0; if (audio->running && (!audio->fm_mask || !audio->fm_source)) { afe_config_fm_codec(FM_DISABLE, audio->fm_mask); audio->running = 0; } break; 
case AUDDEV_EVT_STREAM_VOL_CHG: MM_DBG(":AUDDEV_EVT_STREAM_VOL_CHG, stream vol \n"); audio->volume = evt_payload->session_vol; afe_config_fm_volume(audio->volume); break; case AUDDEV_EVT_DEVICE_INFO:{ struct acdb_get_block get_block; int rc = 0; MM_DBG(":AUDDEV_EVT_DEVICE_INFO\n"); MM_DBG("sample_rate = %d\n", devinfo->sample_rate); MM_DBG("acdb_id = %d\n", devinfo->acdb_id); /* Applucable only for icodec rx and aux codec rx path and fm stream routed to it */ if (((devinfo->dev_id == 0x00) || (devinfo->dev_id == 0x01)) && (devinfo->sessions && (1 << audio->dec_id))) { /* Query ACDB driver for calib gain, only if difference in device */ if ((audio->fm_calibration_rx[devinfo->dev_id]. dev_details.acdb_id != devinfo->acdb_id) || (audio->fm_calibration_rx[devinfo->dev_id]. dev_details.sample_rate != devinfo->sample_rate)) { audio->fm_calibration_rx[devinfo->dev_id]. dev_details.dev_id = devinfo->dev_id; audio->fm_calibration_rx[devinfo->dev_id]. dev_details.sample_rate = devinfo->sample_rate; audio->fm_calibration_rx[devinfo->dev_id]. dev_details.dev_type = devinfo->dev_type; audio->fm_calibration_rx[devinfo->dev_id]. dev_details.sessions = devinfo->sessions; /* Query ACDB driver for calibration gain */ get_block.acdb_id = devinfo->acdb_id; get_block.sample_rate_id = devinfo->sample_rate; get_block.interface_id = IID_AUDIO_CALIBRATION_GAIN_RX; get_block.algorithm_block_id = ABID_AUDIO_CALIBRATION_GAIN_RX; get_block.total_bytes = sizeof(struct acdb_calib_gain_rx); get_block.buf_ptr = (u32 *) &audio->fm_calibration_rx[devinfo->dev_id]. calib_rx; rc = acdb_get_calibration_data(&get_block); if (rc < 0) { MM_ERR("Unable to get calibration"\ "gain\n"); /* Set to unity incase of error */ audio->\ fm_calibration_rx[devinfo->dev_id]. calib_rx.audppcalgain = 0x2000; } else MM_DBG("calibration gain = 0x%8x\n", *(get_block.buf_ptr)); } if (audio->running) { afe_config_fm_calibration_gain( audio->fm_calibration_rx[devinfo->dev_id]. 
device_id, audio->fm_calibration_rx[devinfo->dev_id]. calib_rx.audppcalgain); } } break; } default: MM_DBG(":ERROR:wrong event\n"); break; } } /* must be called with audio->lock held */ static int audio_disable(struct audio *audio) { MM_DBG("\n"); /* Macro prints the file name and function */ return afe_config_fm_codec(FM_DISABLE, audio->source); } static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio *audio = file->private_data; int rc = -EINVAL; MM_DBG("cmd = %d\n", cmd); mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: MM_DBG("AUDIO_START\n"); rc = audio_enable(audio); break; case AUDIO_STOP: MM_DBG("AUDIO_STOP\n"); rc = audio_disable(audio); audio->running = 0; audio->enabled = 0; break; case AUDIO_GET_SESSION_ID: if (copy_to_user((void *) arg, &audio->dec_id, sizeof(unsigned short))) rc = -EFAULT; else rc = 0; break; default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static int audio_release(struct inode *inode, struct file *file) { struct audio *audio = file->private_data; MM_DBG("audio instance 0x%08x freeing\n", (int)audio); mutex_lock(&audio->lock); auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id); audio_disable(audio); audio->running = 0; audio->enabled = 0; audio->opened = 0; mutex_unlock(&audio->lock); return 0; } static int audio_open(struct inode *inode, struct file *file) { struct audio *audio = &fm_audio; int rc = 0; if (audio->opened) return -EPERM; /* Allocate the decoder */ audio->dec_id = SESSION_ID_FM; audio->running = 0; audio->fm_source = 0; audio->fm_mask = 0; /* Initialize the calibration gain structure */ audio->fm_calibration_rx[0].device_id = AFE_HW_PATH_CODEC_RX; audio->fm_calibration_rx[1].device_id = AFE_HW_PATH_AUXPCM_RX; audio->fm_calibration_rx[0].calib_rx.audppcalgain = 0x2000; audio->fm_calibration_rx[1].calib_rx.audppcalgain = 0x2000; audio->fm_calibration_rx[0].dev_details.acdb_id = PSEUDO_ACDB_ID; audio->fm_calibration_rx[1].dev_details.acdb_id = 
PSEUDO_ACDB_ID; audio->device_events = AUDDEV_EVT_DEV_RDY |AUDDEV_EVT_DEV_RLS| AUDDEV_EVT_STREAM_VOL_CHG| AUDDEV_EVT_DEVICE_INFO; rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_DEC, audio->dec_id, fm_listner, (void *)audio); if (rc) { MM_ERR("%s: failed to register listnet\n", __func__); goto event_err; } audio->opened = 1; file->private_data = audio; event_err: return rc; } static const struct file_operations audio_fm_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, .unlocked_ioctl = audio_ioctl, }; struct miscdevice audio_fm_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_fm", .fops = &audio_fm_fops, }; static int __init audio_init(void) { struct audio *audio = &fm_audio; mutex_init(&audio->lock); return misc_register(&audio_fm_misc); } device_initcall(audio_init); MODULE_DESCRIPTION("MSM FM driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
DSMKexec/kexec-kernel-g720n0
drivers/gpu/drm/radeon/radeon_ttm.c
1510
24036
/* * Copyright 2009 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. 
* */ /* * Authors: * Jerome Glisse <glisse@freedesktop.org> * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> * Dave Airlie */ #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/swiotlb.h> #include "radeon_reg.h" #include "radeon.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) static int radeon_ttm_debugfs_init(struct radeon_device *rdev); static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; struct radeon_device *rdev; mman = container_of(bdev, struct radeon_mman, bdev); rdev = container_of(mman, struct radeon_device, mman); return rdev; } /* * Global memory. */ static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } static int radeon_ttm_global_init(struct radeon_device *rdev) { struct drm_global_reference *global_ref; int r; rdev->mman.mem_global_referenced = false; global_ref = &rdev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &radeon_ttm_mem_global_init; global_ref->release = &radeon_ttm_mem_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); return r; } rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object; global_ref = &rdev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM BO 
subsystem.\n"); drm_global_item_unref(&rdev->mman.mem_global_ref); return r; } rdev->mman.mem_global_referenced = true; return 0; } static void radeon_ttm_global_fini(struct radeon_device *rdev) { if (rdev->mman.mem_global_referenced) { drm_global_item_unref(&rdev->mman.bo_global_ref.ref); drm_global_item_unref(&rdev->mman.mem_global_ref); rdev->mman.mem_global_referenced = false; } } static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; } static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { struct radeon_device *rdev; rdev = radeon_get_rdev(bdev); switch (type) { case TTM_PL_SYSTEM: /* System memory */ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { DRM_ERROR("AGP is not enabled for memory type %u\n", (unsigned)type); return -EINVAL; } if (!rdev->ddev->agp->cant_use_aperture) man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; } #endif break; case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; } static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct radeon_bo *rbo; static 
u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!radeon_ttm_bo_is_radeon_bo(bo)) { placement->fpfn = 0; placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; placement->num_busy_placement = 1; return; } rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); else radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); break; case TTM_PL_TT: default: radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); } *placement = rbo->placement; } static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { return 0; } static void radeon_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; new_mem->mm_node = NULL; } static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, struct ttm_mem_reg *new_mem, struct ttm_mem_reg *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; int r, ridx; rdev = radeon_get_rdev(bo->bdev); ridx = radeon_copy_ring_index(rdev); old_start = old_mem->start << PAGE_SHIFT; new_start = new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: old_start += rdev->mc.vram_start; break; case TTM_PL_TT: old_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } switch (new_mem->mem_type) { case TTM_PL_VRAM: new_start += rdev->mc.vram_start; break; case TTM_PL_TT: new_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } if (!rdev->ring[ridx].ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } 
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); /* sync other rings */ fence = bo->sync_obj; r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ &fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, evict, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; u32 placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { return r; } r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); if (unlikely(r)) { goto out_cleanup; } r = ttm_tt_bind(bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; u32 placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; 
placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { return r; } r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; int r; rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_move_null(bo, new_mem); return 0; } if ((old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) || (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { /* bind is enough */ radeon_move_null(bo, new_mem); return 0; } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || rdev->asic->copy.copy == NULL) { /* use memcpy */ goto memcpy; } if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { r = radeon_move_vram_ram(bo, evict, interruptible, no_wait_gpu, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { r = radeon_move_ram_vram(bo, evict, interruptible, no_wait_gpu, new_mem); } else { r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); } if (r) { memcpy: r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); } return r; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct radeon_device *rdev = radeon_get_rdev(bdev); mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) 
return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; } #endif break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) return -EINVAL; mem->bus.base = rdev->mc.aper_base; mem->bus.is_iomem = true; #ifdef __alpha__ /* * Alpha: use bus.addr to hold the ioremap() return, * so we can modify bus.base below. */ if (mem->placement & TTM_PL_FLAG_WC) mem->bus.addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); else mem->bus.addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); /* * Alpha: Use just the bus offset plus * the hose/domain memory base for bus.base. * It then can be used to build PTEs for VRAM * access, as done in ttm_bo_vm_fault(). */ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + rdev->ddev->hose->dense_mem_base; #endif break; default: return -EINVAL; } return 0; } static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { } static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) { return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); } static int radeon_sync_obj_flush(void *sync_obj) { return 0; } static void radeon_sync_obj_unref(void **sync_obj) { radeon_fence_unref((struct radeon_fence **)sync_obj); } static void *radeon_sync_obj_ref(void *sync_obj) { return radeon_fence_ref((struct radeon_fence *)sync_obj); } static bool radeon_sync_obj_signaled(void *sync_obj) { return radeon_fence_signaled((struct radeon_fence *)sync_obj); } /* * TTM backend functions. 
*/ struct radeon_ttm_tt { struct ttm_dma_tt ttm; struct radeon_device *rdev; u64 offset; }; static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; int r; gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address); if (r) { DRM_ERROR("failed to bind %lu pages at 0x%08X\n", ttm->num_pages, (unsigned)gtt->offset); return r; } return 0; } static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); return 0; } static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; ttm_dma_tt_fini(&gtt->ttm); kfree(gtt); } static struct ttm_backend_func radeon_backend_func = { .bind = &radeon_ttm_backend_bind, .unbind = &radeon_ttm_backend_unbind, .destroy = &radeon_ttm_backend_destroy, }; static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read_page) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt; rdev = radeon_get_rdev(bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, size, page_flags, dummy_read_page); } #endif gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } gtt->ttm.ttm.func = &radeon_backend_func; gtt->rdev = rdev; if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { kfree(gtt); return NULL; } return &gtt->ttm.ttm; } static int radeon_ttm_tt_populate(struct ttm_tt *ttm) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (ttm->state != tt_unpopulated) 
return 0; if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); ttm->state = tt_unbound; return 0; } rdev = radeon_get_rdev(ttm->bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { return ttm_agp_tt_populate(ttm); } #endif #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { return ttm_dma_populate(&gtt->ttm, rdev->dev); } #endif r = ttm_pool_populate(ttm); if (r) { return r; } for (i = 0; i < ttm->num_pages; i++) { gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { while (--i) { pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); gtt->ttm.dma_address[i] = 0; } ttm_pool_unpopulate(ttm); return -EFAULT; } } return 0; } static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (slave) return; rdev = radeon_get_rdev(ttm->bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { ttm_agp_tt_unpopulate(ttm); return; } #endif #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { ttm_dma_unpopulate(&gtt->ttm, rdev->dev); return; } #endif for (i = 0; i < ttm->num_pages; i++) { if (gtt->ttm.dma_address[i]) { pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); } } ttm_pool_unpopulate(ttm); } static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, .invalidate_caches = &radeon_invalidate_caches, .init_mem_type = &radeon_init_mem_type, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, .sync_obj_signaled = &radeon_sync_obj_signaled, .sync_obj_wait = &radeon_sync_obj_wait, .sync_obj_flush = &radeon_sync_obj_flush, .sync_obj_unref = 
&radeon_sync_obj_unref, .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &radeon_bo_move_notify, .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, .io_mem_free = &radeon_ttm_io_mem_free, }; int radeon_ttm_init(struct radeon_device *rdev) { int r; r = radeon_ttm_global_init(rdev); if (r) { return r; } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.bo_global_ref.ref.object, &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } rdev->mman.initialized = true; r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, rdev->mc.real_vram_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); if (r) { return r; } r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r) return r; r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); radeon_bo_unreserve(rdev->stollen_vga_memory); if (r) { radeon_bo_unref(&rdev->stollen_vga_memory); return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("radeon: %uM of GTT memory ready.\n", (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; r = radeon_ttm_debugfs_init(rdev); if (r) { DRM_ERROR("Failed to init debugfs\n"); return r; } return 0; } void radeon_ttm_fini(struct radeon_device *rdev) { int r; if (!rdev->mman.initialized) return; if (rdev->stollen_vga_memory) { r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r == 0) { radeon_bo_unpin(rdev->stollen_vga_memory); 
radeon_bo_unreserve(rdev->stollen_vga_memory); } radeon_bo_unref(&rdev->stollen_vga_memory); } ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); ttm_bo_device_release(&rdev->mman.bdev); radeon_gart_fini(rdev); radeon_ttm_global_fini(rdev); rdev->mman.initialized = false; DRM_INFO("radeon: ttm finalized\n"); } /* this should only be called at bootup or when userspace * isn't running */ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) { struct ttm_mem_type_manager *man; if (!rdev->mman.initialized) return; man = &rdev->mman.bdev.man[TTM_PL_VRAM]; /* this just adjusts TTM size idea, which sets lpfn to the correct value */ man->size = size >> PAGE_SHIFT; } static struct vm_operations_struct radeon_ttm_vm_ops; static const struct vm_operations_struct *ttm_vm_ops = NULL; static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo; struct radeon_device *rdev; int r; bo = (struct ttm_buffer_object *)vma->vm_private_data; if (bo == NULL) { return VM_FAULT_NOPAGE; } rdev = radeon_get_rdev(bo->bdev); down_read(&rdev->pm.mclk_lock); r = ttm_vm_ops->fault(vma, vmf); up_read(&rdev->pm.mclk_lock); return r; } int radeon_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; struct radeon_device *rdev; int r; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { return drm_mmap(filp, vma); } file_priv = filp->private_data; rdev = file_priv->minor->dev->dev_private; if (rdev == NULL) { return -EINVAL; } r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); if (unlikely(r != 0)) { return r; } if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; radeon_ttm_vm_ops = *ttm_vm_ops; radeon_ttm_vm_ops.fault = &radeon_ttm_fault; } vma->vm_ops = &radeon_ttm_vm_ops; return 0; } #define RADEON_DEBUGFS_MEM_TYPES 2 #if defined(CONFIG_DEBUG_FS) static int radeon_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct 
drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; int ret; struct ttm_bo_global *glob = rdev->mman.bdev.glob; spin_lock(&glob->lru_lock); ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); return ret; } #endif static int radeon_ttm_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; unsigned i; for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { if (i == 0) sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); else sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &radeon_mm_dump_table; radeon_mem_types_list[i].driver_features = 0; if (i == 0) radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; else radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ sprintf(radeon_mem_types_names[i], "ttm_page_pool"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; radeon_mem_types_list[i].driver_features = 0; radeon_mem_types_list[i++].data = NULL; #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; radeon_mem_types_list[i].driver_features = 0; radeon_mem_types_list[i++].data = NULL; } #endif return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); #endif return 0; }
gpl-2.0
focuschou/android_kernel_samsung_piranha
drivers/staging/msm/lcdc.c
2022
6086
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/pm_qos_params.h> #include "msm_fb.h" static int lcdc_probe(struct platform_device *pdev); static int lcdc_remove(struct platform_device *pdev); static int lcdc_off(struct platform_device *pdev); static int lcdc_on(struct platform_device *pdev); static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST]; static int pdev_list_cnt; static struct clk *mdp_lcdc_pclk_clk; static struct clk *mdp_lcdc_pad_pclk_clk; int mdp_lcdc_pclk_clk_rate; int mdp_lcdc_pad_pclk_clk_rate; static struct platform_driver lcdc_driver = { .probe = lcdc_probe, .remove = lcdc_remove, .suspend = NULL, .resume = NULL, .shutdown = NULL, .driver = { .name = "lcdc", }, }; static struct lcdc_platform_data *lcdc_pdata; static int lcdc_off(struct platform_device *pdev) { int ret = 0; ret = panel_next_off(pdev); clk_disable(mdp_lcdc_pclk_clk); 
clk_disable(mdp_lcdc_pad_pclk_clk); if (lcdc_pdata && lcdc_pdata->lcdc_power_save) lcdc_pdata->lcdc_power_save(0); if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config) ret = lcdc_pdata->lcdc_gpio_config(0); // pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc", // PM_QOS_DEFAULT_VALUE); return ret; } static int lcdc_on(struct platform_device *pdev) { int ret = 0; struct msm_fb_data_type *mfd; unsigned long panel_pixclock_freq , pm_qos_freq; mfd = platform_get_drvdata(pdev); panel_pixclock_freq = mfd->fbi->var.pixclock; if (panel_pixclock_freq > 58000000) /* pm_qos_freq should be in Khz */ pm_qos_freq = panel_pixclock_freq / 1000 ; else pm_qos_freq = 58000; // pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc", // pm_qos_freq); mfd = platform_get_drvdata(pdev); clk_enable(mdp_lcdc_pclk_clk); clk_enable(mdp_lcdc_pad_pclk_clk); if (lcdc_pdata && lcdc_pdata->lcdc_power_save) lcdc_pdata->lcdc_power_save(1); if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config) ret = lcdc_pdata->lcdc_gpio_config(1); clk_set_rate(mdp_lcdc_pclk_clk, mfd->fbi->var.pixclock); clk_set_rate(mdp_lcdc_pad_pclk_clk, mfd->fbi->var.pixclock); mdp_lcdc_pclk_clk_rate = clk_get_rate(mdp_lcdc_pclk_clk); mdp_lcdc_pad_pclk_clk_rate = clk_get_rate(mdp_lcdc_pad_pclk_clk); ret = panel_next_on(pdev); return ret; } static int lcdc_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct fb_info *fbi; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc; if (pdev->id == 0) { lcdc_pdata = pdev->dev.platform_data; return 0; } mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST) return -ENOMEM; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_LCDC; /* * alloc panel device data */ if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct 
msm_fb_panel_data))) { printk(KERN_ERR "lcdc_probe: platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } /* * data chain */ pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data; pdata->on = lcdc_on; pdata->off = lcdc_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; mfd->fb_imgType = MDP_RGB_565; fbi = mfd->fbi; fbi->var.pixclock = mfd->panel_info.clk_rate; fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch; fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch; fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch; fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch; fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width; fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width; /* * set driver data */ platform_set_drvdata(mdp_dev, mfd); /* * register in mdp driver */ rc = platform_device_add(mdp_dev); if (rc) goto lcdc_probe_err; pdev_list[pdev_list_cnt++] = pdev; return 0; lcdc_probe_err: platform_device_put(mdp_dev); return rc; } static int lcdc_remove(struct platform_device *pdev) { // pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc"); return 0; } static int lcdc_register_driver(void) { return platform_driver_register(&lcdc_driver); } static int __init lcdc_driver_init(void) { mdp_lcdc_pclk_clk = clk_get(NULL, "mdp_lcdc_pclk_clk"); if (IS_ERR(mdp_lcdc_pclk_clk)) { printk(KERN_ERR "error: can't get mdp_lcdc_pclk_clk!\n"); return PTR_ERR(mdp_lcdc_pclk_clk); } mdp_lcdc_pad_pclk_clk = clk_get(NULL, "mdp_lcdc_pad_pclk_clk"); if (IS_ERR(mdp_lcdc_pad_pclk_clk)) { printk(KERN_ERR "error: can't get mdp_lcdc_pad_pclk_clk!\n"); return PTR_ERR(mdp_lcdc_pad_pclk_clk); } // pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc", // PM_QOS_DEFAULT_VALUE); return lcdc_register_driver(); } module_init(lcdc_driver_init);
gpl-2.0
linyvxiang/linux-zswap
arch/h8300/platform/h8300h/irq.c
2278
1635
/*
 * Interrupt handling H8/300H depend.
 * Yoshinori Sato <ysato@users.sourceforge.jp>
 *
 */

#include <linux/init.h>
#include <linux/errno.h>

#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/gpio-internal.h>
#include <asm/regs306x.h>

/*
 * Vectors the generic H8/300 IRQ setup must leave alone.
 * The list is terminated by -1.
 */
const int __initconst h8300_saved_vectors[] = {
#if defined(CONFIG_GDB_DEBUG)
	TRAP3_VEC,	/* TRAPA #3 is GDB breakpoint */
#endif
	-1,
};

/*
 * TRAPA dispatch table: slot 8 routes to the system-call entry,
 * slot 11 to the trace-break handler; the rest are unused (0).
 */
const h8300_vector __initconst h8300_trap_table[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	system_call,
	0,
	0,
	trace_break,
};

/*
 * Reserve the GPIO pin carrying external interrupt @irq and switch it to
 * input mode.  IRQ0-IRQ3 sit on port 8, IRQ4/IRQ5 on port 9 (per the
 * H8/300H pin mapping used by the H8300_GPIO_* macros).
 *
 * Returns 0 on success, and also 0 for non-external IRQs, which need no
 * pin setup; -EBUSY if the pin is already reserved by someone else.
 */
int h8300_enable_irq_pin(unsigned int irq)
{
	int bitmask;

	/* internal interrupts have no associated pin */
	if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
		return 0;

	/* initialize IRQ pin */
	bitmask = 1 << (irq - EXT_IRQ0);
	switch(irq) {
	case EXT_IRQ0:
	case EXT_IRQ1:
	case EXT_IRQ2:
	case EXT_IRQ3:
		if (H8300_GPIO_RESERVE(H8300_GPIO_P8, bitmask) == 0)
			return -EBUSY;
		H8300_GPIO_DDR(H8300_GPIO_P8, bitmask, H8300_GPIO_INPUT);
		break;
	case EXT_IRQ4:
	case EXT_IRQ5:
		if (H8300_GPIO_RESERVE(H8300_GPIO_P9, bitmask) == 0)
			return -EBUSY;
		H8300_GPIO_DDR(H8300_GPIO_P9, bitmask, H8300_GPIO_INPUT);
		break;
	}

	return 0;
}

/*
 * Undo h8300_enable_irq_pin(): mask the interrupt in the IER register
 * and release the GPIO pin reservation.  Non-external IRQs are ignored.
 */
void h8300_disable_irq_pin(unsigned int irq)
{
	int bitmask;

	if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
		return;

	/* disable interrupt & release IRQ pin */
	bitmask = 1 << (irq - EXT_IRQ0);
	switch(irq) {
	case EXT_IRQ0:
	case EXT_IRQ1:
	case EXT_IRQ2:
	case EXT_IRQ3:
		*(volatile unsigned char *)IER &= ~bitmask;
		H8300_GPIO_FREE(H8300_GPIO_P8, bitmask);
		break ;
	case EXT_IRQ4:
	case EXT_IRQ5:
		*(volatile unsigned char *)IER &= ~bitmask;
		H8300_GPIO_FREE(H8300_GPIO_P9, bitmask);
		break;
	}
}
gpl-2.0
RadiumBot/Radium_tomato
arch/sh/kernel/cpu/sh4a/clock-shx3.c
2790
4718
/*
 * arch/sh/kernel/cpu/sh4/clock-shx3.c
 *
 * SH-X3 support for the clock framework
 *
 * Copyright (C) 2006-2007 Renesas Technology Corp.
 * Copyright (C) 2006-2007 Renesas Solutions Corp.
 * Copyright (C) 2006-2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
static struct clk extal_clk = {
	.rate		= 16666666,
};

static unsigned long pll_recalc(struct clk *clk)
{
	/* PLL1 has a fixed x72 multiplier. */
	return clk->parent->rate * 72;
}

static struct sh_clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

/* PLL1, fed from EXTAL; must stay running (CLK_ENABLE_ON_INIT). */
static struct clk pll_clk = {
	.ops		= &pll_clk_ops,
	.parent		= &extal_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* Root clocks registered individually in arch_clk_init(). */
static struct clk *clks[] = {
	&extal_clk, &pll_clk,
};

/* Divisor values selectable by the DIV4 clocks below. */
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = div2,
	.nr_divisors = ARRAY_SIZE(div2),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
};

enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };

/* Each DIV4 clock divides PLL1 via its own bitfield in FRQMR1. */
#define DIV4(_bit, _mask, _flags) \
	SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	[DIV4_P] = DIV4(0, 0x0f80, 0),
	[DIV4_SHA] = DIV4(4, 0x0ff0, 0),
	[DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
	[DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};

/* Module stop control register addresses. */
#define MSTPCR0		0xffc00030
#define MSTPCR1		0xffc00034

enum { MSTP027, MSTP026, MSTP025, MSTP024,
       MSTP009, MSTP008, MSTP003, MSTP002,
       MSTP001, MSTP000, MSTP119, MSTP105,
       MSTP104, MSTP_NR };

/* Gate clocks: MSTPCR0 entries are children of the peripheral clock. */
static struct clk mstp_clks[MSTP_NR] = {
	/* MSTPCR0 */
	[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
	[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
	[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
	[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
	[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
	[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
	[MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
	[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
	[MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
	[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),

	/* MSTPCR1 */
	[MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
	[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
	[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};

static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("extal", &extal_clk),
	CLKDEV_CON_ID("pll_clk", &pll_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
	/* NOTE(review): "shywaya_clk" looks like a misspelling of "shyway",
	 * but this string is a lookup key matched by consumers elsewhere,
	 * so it must not be changed here without auditing all clk_get()
	 * callers. */
	CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
	CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
	CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),

	/* MSTP32 clocks */
	CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),

	CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
	CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
	CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
	CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),

	/* TMU channels 0-2 share MSTP008, channels 3-5 share MSTP009. */
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
	CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
	CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
	CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
};

/*
 * Register all clocks and lookups with the SH clock framework.
 * Root clocks are registered first (errors OR-ed into ret), then the
 * divider and gate clock groups, each skipped once an error occurred.
 */
int __init arch_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(clks); i++)
		ret |= clk_register(clks[i]);
	for (i = 0; i < ARRAY_SIZE(lookups); i++)
		clkdev_add(&lookups[i]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
					   &div4_table);
	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return ret;
}
gpl-2.0
peat-psuwit/android_kernel_lge_w7ds
kernel/mutex.c
4070
13544
/* * kernel/mutex.c * * Mutexes: blocking mutual exclusion locks * * Started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and * David Howells for suggestions and improvements. * * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline * from the -rt tree, where it was originally implemented for rtmutexes * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale * and Sven Dietrich. * * Also see Documentation/mutex-design.txt. */ #include <linux/mutex.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/debug_locks.h> /* * In the DEBUG case we are using the "NULL fastpath" for mutexes, * which forces all calls into the slowpath: */ #ifdef CONFIG_DEBUG_MUTEXES # include "mutex-debug.h" # include <asm-generic/mutex-null.h> #else # include "mutex.h" # include <asm/mutex.h> #endif void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) { atomic_set(&lock->count, 1); spin_lock_init(&lock->wait_lock); INIT_LIST_HEAD(&lock->wait_list); mutex_clear_owner(lock); debug_mutex_init(lock, name, key); } EXPORT_SYMBOL(__mutex_init); #ifndef CONFIG_DEBUG_LOCK_ALLOC /* * We split the mutex lock/unlock logic into separate fastpath and * slowpath functions, to reduce the register pressure on the fastpath. * We also put the fastpath first in the kernel image, to make sure the * branch is predicted by the CPU as default-untaken. */ static __used noinline void __sched __mutex_lock_slowpath(atomic_t *lock_count); /** * mutex_lock - acquire the mutex * @lock: the mutex to be acquired * * Lock the mutex exclusively for this task. If the mutex is not * available right now, it will sleep until it can get it. * * The mutex must later on be released by the same task that * acquired it. Recursive locking is not allowed. 
The task * may not exit without first unlocking the mutex. Also, kernel * memory where the mutex resides mutex must not be freed with * the mutex still locked. The mutex must first be initialized * (or statically defined) before it can be locked. memset()-ing * the mutex to 0 is not allowed. * * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging * checks that will enforce the restrictions and will also do * deadlock debugging. ) * * This function is similar to (but not equivalent to) down(). */ void __sched mutex_lock(struct mutex *lock) { might_sleep(); /* * The locking fastpath is the 1->0 transition from * 'unlocked' into 'locked' state. */ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); mutex_set_owner(lock); } EXPORT_SYMBOL(mutex_lock); #endif static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); /** * mutex_unlock - release the mutex * @lock: the mutex to be released * * Unlock a mutex that has been locked by this task previously. * * This function must not be used in interrupt context. Unlocking * of a not locked mutex is not allowed. * * This function is similar to (but not equivalent to) up(). */ void __sched mutex_unlock(struct mutex *lock) { /* * The unlocking fastpath is the 0->1 transition from 'locked' * into 'unlocked' state: */ #ifndef CONFIG_DEBUG_MUTEXES /* * When debugging is enabled we must not clear the owner before time, * the slow path will always be taken, and that clears the owner field * after verifying that it was indeed current. 
*/ mutex_clear_owner(lock); #endif __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); } EXPORT_SYMBOL(mutex_unlock); /* * Lock a mutex (possibly interruptible), slowpath: */ static inline int __sched __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip) { struct task_struct *task = current; struct mutex_waiter waiter; unsigned long flags; preempt_disable(); mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); #ifdef CONFIG_MUTEX_SPIN_ON_OWNER /* * Optimistic spinning. * * We try to spin for acquisition when we find that there are no * pending waiters and the lock owner is currently running on a * (different) CPU. * * The rationale is that if the lock owner is running, it is likely to * release the lock soon. * * Since this needs the lock owner, and this mutex implementation * doesn't track the owner atomically in the lock field, we need to * track it non-atomically. * * We can't do this for DEBUG_MUTEXES because that relies on wait_lock * to serialize everything. */ for (;;) { struct task_struct *owner; /* * If there's an owner, wait for it to either * release the lock or go to sleep. */ owner = ACCESS_ONCE(lock->owner); if (owner && !mutex_spin_on_owner(lock, owner)) break; if (atomic_cmpxchg(&lock->count, 1, 0) == 1) { lock_acquired(&lock->dep_map, ip); mutex_set_owner(lock); preempt_enable(); return 0; } /* * When there's no owner, we might have preempted between the * owner acquiring the lock and setting the owner field. If * we're an RT task that will live-lock because we won't let * the owner complete. */ if (!owner && (need_resched() || rt_task(task))) break; /* * The cpu_relax() call is a compiler barrier which forces * everything in this loop to be re-loaded. We don't need * memory barriers as we'll eventually observe the right * values at the cost of a few extra spins. 
*/ arch_mutex_cpu_relax(); } #endif spin_lock_mutex(&lock->wait_lock, flags); debug_mutex_lock_common(lock, &waiter); debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); /* add waiting tasks to the end of the waitqueue (FIFO): */ list_add_tail(&waiter.list, &lock->wait_list); waiter.task = task; if (atomic_xchg(&lock->count, -1) == 1) goto done; lock_contended(&lock->dep_map, ip); for (;;) { /* * Lets try to take the lock again - this is needed even if * we get here for the first time (shortly after failing to * acquire the lock), to make sure that we get a wakeup once * it's unlocked. Later on, if we sleep, this is the * operation that gives us the lock. We xchg it to -1, so * that when we release the lock, we properly wake up the * other waiters: */ if (atomic_xchg(&lock->count, -1) == 1) break; /* * got a signal? (This code gets eliminated in the * TASK_UNINTERRUPTIBLE case.) */ if (unlikely(signal_pending_state(state, task))) { mutex_remove_waiter(lock, &waiter, task_thread_info(task)); mutex_release(&lock->dep_map, 1, ip); spin_unlock_mutex(&lock->wait_lock, flags); debug_mutex_free_waiter(&waiter); preempt_enable(); return -EINTR; } __set_task_state(task, state); /* didn't get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); schedule_preempt_disabled(); spin_lock_mutex(&lock->wait_lock, flags); } done: lock_acquired(&lock->dep_map, ip); /* got the lock - rejoice! 
*/ mutex_remove_waiter(lock, &waiter, current_thread_info()); mutex_set_owner(lock); /* set it to 0 if there are no waiters left: */ if (likely(list_empty(&lock->wait_list))) atomic_set(&lock->count, 0); spin_unlock_mutex(&lock->wait_lock, flags); debug_mutex_free_waiter(&waiter); preempt_enable(); return 0; } #ifdef CONFIG_DEBUG_LOCK_ALLOC void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); } EXPORT_SYMBOL_GPL(mutex_lock_nested); void __sched _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) { might_sleep(); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); } EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); int __sched mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); } EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); int __sched mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); } EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); #endif /* * Release the lock, slowpath: */ static inline void __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) { struct mutex *lock = container_of(lock_count, struct mutex, count); unsigned long flags; spin_lock_mutex(&lock->wait_lock, flags); mutex_release(&lock->dep_map, nested, _RET_IP_); debug_mutex_unlock(lock); /* * some architectures leave the lock unlocked in the fastpath failure * case, others need to leave it locked. 
In the later case we have to * unlock it here */ if (__mutex_slowpath_needs_to_unlock()) atomic_set(&lock->count, 1); if (!list_empty(&lock->wait_list)) { /* get the first entry from the wait-list: */ struct mutex_waiter *waiter = list_entry(lock->wait_list.next, struct mutex_waiter, list); debug_mutex_wake_waiter(lock, waiter); wake_up_process(waiter->task); } spin_unlock_mutex(&lock->wait_lock, flags); } /* * Release the lock, slowpath: */ static __used noinline void __mutex_unlock_slowpath(atomic_t *lock_count) { __mutex_unlock_common_slowpath(lock_count, 1); } #ifndef CONFIG_DEBUG_LOCK_ALLOC /* * Here come the less common (and hence less performance-critical) APIs: * mutex_lock_interruptible() and mutex_trylock(). */ static noinline int __sched __mutex_lock_killable_slowpath(atomic_t *lock_count); static noinline int __sched __mutex_lock_interruptible_slowpath(atomic_t *lock_count); /** * mutex_lock_interruptible - acquire the mutex, interruptible * @lock: the mutex to be acquired * * Lock the mutex like mutex_lock(), and return 0 if the mutex has * been acquired or sleep until the mutex becomes available. If a * signal arrives while waiting for the lock then this function * returns -EINTR. * * This function is similar to (but not equivalent to) down_interruptible(). 
*/ int __sched mutex_lock_interruptible(struct mutex *lock) { int ret; might_sleep(); ret = __mutex_fastpath_lock_retval (&lock->count, __mutex_lock_interruptible_slowpath); if (!ret) mutex_set_owner(lock); return ret; } EXPORT_SYMBOL(mutex_lock_interruptible); int __sched mutex_lock_killable(struct mutex *lock) { int ret; might_sleep(); ret = __mutex_fastpath_lock_retval (&lock->count, __mutex_lock_killable_slowpath); if (!ret) mutex_set_owner(lock); return ret; } EXPORT_SYMBOL(mutex_lock_killable); static __used noinline void __sched __mutex_lock_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); } static noinline int __sched __mutex_lock_killable_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); } static noinline int __sched __mutex_lock_interruptible_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); } #endif /* * Spinlock based trylock, we take the spinlock and check whether we * can get the lock: */ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); unsigned long flags; int prev; spin_lock_mutex(&lock->wait_lock, flags); prev = atomic_xchg(&lock->count, -1); if (likely(prev == 1)) { mutex_set_owner(lock); mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); } /* Set it back to 0 if there are no waiters: */ if (likely(list_empty(&lock->wait_list))) atomic_set(&lock->count, 0); spin_unlock_mutex(&lock->wait_lock, flags); return prev == 1; } /** * mutex_trylock - try to acquire the mutex, without waiting * @lock: the mutex to be acquired * * Try to acquire the mutex atomically. 
Returns 1 if the mutex * has been acquired successfully, and 0 on contention. * * NOTE: this function follows the spin_trylock() convention, so * it is negated from the down_trylock() return values! Be careful * about this when converting semaphore users to mutexes. * * This function must not be used in interrupt context. The * mutex must be released by the same task that acquired it. */ int __sched mutex_trylock(struct mutex *lock) { int ret; ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); if (ret) mutex_set_owner(lock); return ret; } EXPORT_SYMBOL(mutex_trylock); /** * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 * @cnt: the atomic which we are to dec * @lock: the mutex to return holding if we dec to 0 * * return true and hold lock if we dec to 0, return false otherwise */ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) { /* dec if we can't possibly hit 0 */ if (atomic_add_unless(cnt, -1, 1)) return 0; /* we might hit 0, so take the lock */ mutex_lock(lock); if (!atomic_dec_and_test(cnt)) { /* when we actually did the dec, we didn't hit 0 */ mutex_unlock(lock); return 0; } /* we hit 0, and we hold the lock */ return 1; } EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
gpl-2.0
gotoco/linux
drivers/net/ethernet/seeq/ether3.c
4326
23574
/* * linux/drivers/acorn/net/ether3.c * * Copyright (C) 1995-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card * for Acorn machines * * By Russell King, with some suggestions from borris@ant.co.uk * * Changelog: * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet * address up to the higher levels - they're * silently ignored. I/F can now be put into * multicast mode. Receiver routine optimised. * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of * the kernel rather than when a module. * 1.06 RMK 02/03/1996 Various code cleanups * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit * routines. * 1.08 RMK 14/10/1996 Fixed problem with too many packets, * prevented the kernel message about dropped * packets appearing too many times a second. * Now does not disable all IRQs, only the IRQ * used by this card. * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low, * but we still service the TX queue if we get a * RX interrupt. * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004. * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A. * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1. * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h. * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets. * Chip seems to have a bug in, whereby if the * packet starts two bytes from the end of the * buffer, it corrupts the receiver chain, and * never updates the transmit status correctly. * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing. * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy * hardware. 
* 1.16 RMK 10/02/2000 Updated for 2.3.43 * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/ecard.h> #include <asm/io.h> static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; #include "ether3.h" static unsigned int net_debug = NET_DEBUG; static void ether3_setmulticastlist(struct net_device *dev); static int ether3_rx(struct net_device *dev, unsigned int maxcnt); static void ether3_tx(struct net_device *dev); static int ether3_open (struct net_device *dev); static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); static irqreturn_t ether3_interrupt (int irq, void *dev_id); static int ether3_close (struct net_device *dev); static void ether3_setmulticastlist (struct net_device *dev); static void ether3_timeout(struct net_device *dev); #define BUS_16 2 #define BUS_8 1 #define BUS_UNKNOWN 0 /* --------------------------------------------------------------------------- */ typedef enum { buffer_write, buffer_read } buffer_rw_t; /* * ether3 read/write. Slow things down a bit... * The SEEQ8005 doesn't like us writing to its registers * too quickly. 
*/ static inline void ether3_outb(int v, void __iomem *r) { writeb(v, r); udelay(1); } static inline void ether3_outw(int v, void __iomem *r) { writew(v, r); udelay(1); } #define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; }) #define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; }) static int ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) { int timeout = 1000; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) { if (!timeout--) { printk("%s: setbuffer broken\n", dev->name); priv(dev)->broken = 1; return 1; } udelay(1); } if (read == buffer_read) { ether3_outw(start, REG_DMAADDR); ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND); } else { ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); ether3_outw(start, REG_DMAADDR); } return 0; } /* * write data to the buffer memory */ #define ether3_writebuffer(dev,data,length) \ writesw(REG_BUFWIN, (data), (length) >> 1) #define ether3_writeword(dev,data) \ writew((data), REG_BUFWIN) #define ether3_writelong(dev,data) { \ void __iomem *reg_bufwin = REG_BUFWIN; \ writew((data), reg_bufwin); \ writew((data) >> 16, reg_bufwin); \ } /* * read data from the buffer memory */ #define ether3_readbuffer(dev,data,length) \ readsw(REG_BUFWIN, (data), (length) >> 1) #define ether3_readword(dev) \ readw(REG_BUFWIN) #define ether3_readlong(dev) \ readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16) /* * Switch LED off... */ static void ether3_ledoff(unsigned long data) { struct net_device *dev = (struct net_device *)data; ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); } /* * switch LED on... 
*/ static inline void ether3_ledon(struct net_device *dev) { del_timer(&priv(dev)->timer); priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ priv(dev)->timer.data = (unsigned long)dev; priv(dev)->timer.function = ether3_ledoff; add_timer(&priv(dev)->timer); if (priv(dev)->regs.config2 & CFG2_CTRLO) ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2); } /* * Read the ethernet address string from the on board rom. * This is an ascii string!!! */ static int ether3_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) { int i; for (i = 0; i<6; i++) { addr[i] = simple_strtoul(s + 1, &s, 0x10); if (*s != (i==5?')' : ':' )) break; } if (i == 6) return 0; } /* I wonder if we should even let the user continue in this case * - no, it would be better to disable the device */ printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n"); return -ENODEV; } /* --------------------------------------------------------------------------- */ static int ether3_ramtest(struct net_device *dev, unsigned char byte) { unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL); int i,ret = 0; int max_errors = 4; int bad = -1; if (!buffer) return 1; memset(buffer, byte, RX_END); ether3_setbuffer(dev, buffer_write, 0); ether3_writebuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_write, RX_START); ether3_writebuffer(dev, buffer + RX_START, RX_LEN); memset(buffer, byte ^ 0xff, RX_END); ether3_setbuffer(dev, buffer_read, 0); ether3_readbuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_read, RX_START); ether3_readbuffer(dev, buffer + RX_START, RX_LEN); for (i = 0; i < RX_END; i++) { if (buffer[i] != byte) { if (max_errors > 0 && bad != buffer[i]) { printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X", dev->name, buffer[i], byte, i); ret = 2; max_errors--; bad = i; } } else { if (bad != -1) { if (bad != i - 1) printk(" - 
0x%04X\n", i - 1); printk("\n"); bad = -1; } } } if (bad != -1) printk(" - 0xffff\n"); kfree(buffer); return ret; } /* ------------------------------------------------------------------------------- */ static int ether3_init_2(struct net_device *dev) { int i; priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8; priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC; priv(dev)->regs.command = 0; /* * Set up our hardware address */ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); if (dev->flags & IFF_PROMISC) priv(dev)->regs.config1 |= CFG1_RECVPROMISC; else if (dev->flags & IFF_MULTICAST) priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; /* * There is a problem with the NQ8005 in that it occasionally loses the * last two bytes. To get round this problem, we receive the CRC as * well. That way, if we do lose the last two, then it doesn't matter. 
*/ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command, REG_COMMAND); i = ether3_ramtest(dev, 0x5A); if(i) return i; i = ether3_ramtest(dev, 0x1E); if(i) return i; ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); return 0; } static void ether3_init_for_open(struct net_device *dev) { int i; /* Reset the chip */ ether3_outw(CFG2_RESET, REG_CONFIG2); udelay(4); priv(dev)->regs.command = 0; ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); priv(dev)->tx_head = 0; priv(dev)->tx_tail = 0; priv(dev)->regs.config2 |= CFG2_CTRLO; priv(dev)->rx_head = RX_START; ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX; ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } static inline int ether3_probe_bus_8(struct net_device *dev, int val) { int write_low, write_high, read_low, read_high; write_low = val & 255; write_high = val >> 8; printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low); ether3_outb(write_low, REG_RECVPTR); ether3_outb(write_high, REG_RECVPTR + 4); read_low = 
ether3_inb(REG_RECVPTR); read_high = ether3_inb(REG_RECVPTR + 4); printk(", read8 [%02X:%02X]\n", read_high, read_low); return read_low == write_low && read_high == write_high; } static inline int ether3_probe_bus_16(struct net_device *dev, int val) { int read_val; ether3_outw(val, REG_RECVPTR); read_val = ether3_inw(REG_RECVPTR); printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val); return read_val == val; } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ static int ether3_open(struct net_device *dev) { if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev)) return -EAGAIN; ether3_init_for_open(dev); netif_start_queue(dev); return 0; } /* * The inverse routine to ether3_open(). */ static int ether3_close(struct net_device *dev) { netif_stop_queue(dev); disable_irq(dev->irq); ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); priv(dev)->regs.command = 0; while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outb(0x80, REG_CONFIG2 + 4); ether3_outw(0, REG_COMMAND); free_irq(dev->irq, dev); return 0; } /* * Set or clear promiscuous/multicast mode filter for this adaptor. * * We don't attempt any packet filtering. The card may have a SEEQ 8004 * in which does not have the other ethernet address registers present... 
*/ static void ether3_setmulticastlist(struct net_device *dev) { priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC; if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ priv(dev)->regs.config1 |= CFG1_RECVPROMISC; } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; } else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); } static void ether3_timeout(struct net_device *dev) { unsigned long flags; del_timer(&priv(dev)->timer); local_irq_save(flags); printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name, ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2)); printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name, ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR)); printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name, priv(dev)->tx_head, priv(dev)->tx_tail); ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail); printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev)); local_irq_restore(flags); priv(dev)->regs.config2 |= CFG2_CTRLO; dev->stats.tx_errors += 1; ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); priv(dev)->tx_head = priv(dev)->tx_tail = 0; netif_wake_queue(dev); } /* * Transmit a packet */ static int ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; unsigned int length = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; unsigned int ptr, next_ptr; if (priv(dev)->broken) { dev_kfree_skb(skb); dev->stats.tx_dropped++; netif_start_queue(dev); return NETDEV_TX_OK; } length = (length + 1) & ~1; if (length != skb->len) { if (skb_padto(skb, length)) goto out; } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_save(flags); if (priv(dev)->tx_tail == next_ptr) { local_irq_restore(flags); return NETDEV_TX_BUSY; /* unable to queue */ } ptr = 0x600 * priv(dev)->tx_head; priv(dev)->tx_head = next_ptr; next_ptr *= 0x600; #define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS) ether3_setbuffer(dev, buffer_write, next_ptr); ether3_writelong(dev, 0); ether3_setbuffer(dev, buffer_write, ptr); ether3_writelong(dev, 0); ether3_writebuffer(dev, skb->data, length); ether3_writeword(dev, htons(next_ptr)); ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16); ether3_setbuffer(dev, buffer_write, ptr); ether3_writeword(dev, htons((ptr + length + 4))); ether3_writeword(dev, TXHDR_FLAGS >> 16); ether3_ledon(dev); if (!(ether3_inw(REG_STATUS) & STAT_TXON)) { ether3_outw(ptr, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND); } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_restore(flags); dev_kfree_skb(skb); if (priv(dev)->tx_tail == next_ptr) netif_stop_queue(dev); out: return NETDEV_TX_OK; } static irqreturn_t ether3_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; unsigned int status, handled = IRQ_NONE; #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("eth3irq: %d ", irq); #endif status = ether3_inw(REG_STATUS); if (status & STAT_INTRX) { ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND); ether3_rx(dev, 12); handled = IRQ_HANDLED; } if (status & STAT_INTTX) { ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND); ether3_tx(dev); handled = IRQ_HANDLED; } #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("done\n"); #endif return handled; } /* * If we 
have a good packet(s), get it/them out of the buffers. */ static int ether3_rx(struct net_device *dev, unsigned int maxcnt) { unsigned int next_ptr = priv(dev)->rx_head, received = 0; ether3_ledon(dev); do { unsigned int this_ptr, status; unsigned char addrs[16]; /* * read the first 16 bytes from the buffer. * This contains the status bytes etc and ethernet addresses, * and we also check the source ethernet address to see if * it originated from us. */ { unsigned int temp_ptr; ether3_setbuffer(dev, buffer_read, next_ptr); temp_ptr = ether3_readword(dev); status = ether3_readword(dev); if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) != (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr) break; this_ptr = next_ptr + 4; next_ptr = ntohs(temp_ptr); } ether3_setbuffer(dev, buffer_read, this_ptr); ether3_readbuffer(dev, addrs+2, 12); if (next_ptr < RX_START || next_ptr >= RX_END) { int i; printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head); printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8); for (i = 2; i < 14; i++) printk("%02X ", addrs[i]); printk("\n"); next_ptr = priv(dev)->rx_head; break; } /* * ignore our own packets... 
*/ if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) && !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) { maxcnt ++; /* compensate for loopedback packet */ ether3_outw(next_ptr >> 8, REG_RECVEND); } else if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) { unsigned int length = next_ptr - this_ptr; struct sk_buff *skb; if (next_ptr <= this_ptr) length += RX_END - RX_START; skb = netdev_alloc_skb(dev, length + 2); if (skb) { unsigned char *buf; skb_reserve(skb, 2); buf = skb_put(skb, length); ether3_readbuffer(dev, buf + 12, length - 12); ether3_outw(next_ptr >> 8, REG_RECVEND); *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2); *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4); *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8); *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); received ++; } else { ether3_outw(next_ptr >> 8, REG_RECVEND); dev->stats.rx_dropped++; goto done; } } else { struct net_device_stats *stats = &dev->stats; ether3_outw(next_ptr >> 8, REG_RECVEND); if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++; if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++; if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++; if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++; stats->rx_errors++; } } while (-- maxcnt); done: dev->stats.rx_packets += received; priv(dev)->rx_head = next_ptr; /* * If rx went off line, then that means that the buffer may be full. We * have dropped at least one packet. 
*/ if (!(ether3_inw(REG_STATUS) & STAT_RXON)) { dev->stats.rx_dropped++; ether3_outw(next_ptr, REG_RECVPTR); ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } return maxcnt; } /* * Update stats for the transmitted packet(s) */ static void ether3_tx(struct net_device *dev) { unsigned int tx_tail = priv(dev)->tx_tail; int max_work = 14; do { unsigned long status; /* * Read the packet header */ ether3_setbuffer(dev, buffer_read, tx_tail * 0x600); status = ether3_readlong(dev); /* * Check to see if this packet has been transmitted */ if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) != (TXSTAT_DONE | TXHDR_TRANSMIT)) break; /* * Update errors */ if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS))) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (status & TXSTAT_16COLLISIONS) dev->stats.collisions += 16; if (status & TXSTAT_BABBLED) dev->stats.tx_fifo_errors++; } tx_tail = (tx_tail + 1) & 15; } while (--max_work); if (priv(dev)->tx_tail != tx_tail) { priv(dev)->tx_tail = tx_tail; netif_wake_queue(dev); } } static void ether3_banner(void) { static unsigned version_printed = 0; if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", version); } static const struct net_device_ops ether3_netdev_ops = { .ndo_open = ether3_open, .ndo_stop = ether3_close, .ndo_start_xmit = ether3_sendpacket, .ndo_set_rx_mode = ether3_setmulticastlist, .ndo_tx_timeout = ether3_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static int ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct ether3_data *data = id->data; struct net_device *dev; int bus_type, ret; ether3_banner(); ret = ecard_request_resources(ec); if (ret) goto out; dev = alloc_etherdev(sizeof(struct dev_priv)); if (!dev) { ret = -ENOMEM; goto release; } SET_NETDEV_DEV(dev, &ec->dev); priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!priv(dev)->base) { ret = -ENOMEM; goto free; } 
ec->irqaddr = priv(dev)->base + data->base_offset; ec->irqmask = 0xf0; priv(dev)->seeq = priv(dev)->base + data->base_offset; dev->irq = ec->irq; ether3_addr(dev->dev_addr, ec); init_timer(&priv(dev)->timer); /* Reset card... */ ether3_outb(0x80, REG_CONFIG2 + 4); bus_type = BUS_UNKNOWN; udelay(4); /* Test using Receive Pointer (16-bit register) to find out * how the ether3 is connected to the bus... */ if (ether3_probe_bus_8(dev, 0x100) && ether3_probe_bus_8(dev, 0x201)) bus_type = BUS_8; if (bus_type == BUS_UNKNOWN && ether3_probe_bus_16(dev, 0x101) && ether3_probe_bus_16(dev, 0x201)) bus_type = BUS_16; switch (bus_type) { case BUS_UNKNOWN: printk(KERN_ERR "%s: unable to identify bus width\n", dev->name); ret = -ENODEV; goto free; case BUS_8: printk(KERN_ERR "%s: %s found, but is an unsupported " "8-bit card\n", dev->name, data->name); ret = -ENODEV; goto free; default: break; } if (ether3_init_2(dev)) { ret = -ENODEV; goto free; } dev->netdev_ops = &ether3_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); if (ret) goto free; printk("%s: %s in slot %d, %pM\n", dev->name, data->name, ec->slot_no, dev->dev_addr); ecard_set_drvdata(ec, dev); return 0; free: free_netdev(dev); release: ecard_release_resources(ec); out: return ret; } static void ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec); ecard_set_drvdata(ec, NULL); unregister_netdev(dev); free_netdev(dev); ecard_release_resources(ec); } static struct ether3_data ether3 = { .name = "ether3", .base_offset = 0, }; static struct ether3_data etherb = { .name = "etherb", .base_offset = 0x800, }; static const struct ecard_id ether3_ids[] = { { MANU_ANT2, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHERB, &etherb }, { 0xffff, 0xffff } }; static struct ecard_driver ether3_driver = { .probe = ether3_probe, .remove = ether3_remove, .id_table = ether3_ids, .drv = { .name = "ether3", }, }; static int __init 
ether3_init(void) { return ecard_register_driver(&ether3_driver); } static void __exit ether3_exit(void) { ecard_remove_driver(&ether3_driver); } module_init(ether3_init); module_exit(ether3_exit); MODULE_LICENSE("GPL");
gpl-2.0
Hybrid-Rom/kernel_lge_ls970
arch/um/drivers/net_kern.c
4582
20583
/* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and * James Leu (jleu@mindspring.net). * Copyright (C) 2001 by various other people who didn't put their name here. * Licensed under the GPL. */ #include <linux/bootmem.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "init.h" #include "irq_kern.h" #include "irq_user.h" #include "mconsole_kern.h" #include "net_kern.h" #include "net_user.h" #define DRIVER_NAME "uml-netdev" static DEFINE_SPINLOCK(opened_lock); static LIST_HEAD(opened); /* * The drop_skb is used when we can't allocate an skb. The * packet is read into drop_skb in order to get the data off the * connection to the host. * It is reallocated whenever a maximum packet size is seen which is * larger than any seen before. update_drop_skb is called from * eth_configure when a new interface is added. */ static DEFINE_SPINLOCK(drop_lock); static struct sk_buff *drop_skb; static int drop_max; static int update_drop_skb(int max) { struct sk_buff *new; unsigned long flags; int err = 0; spin_lock_irqsave(&drop_lock, flags); if (max <= drop_max) goto out; err = -ENOMEM; new = dev_alloc_skb(max); if (new == NULL) goto out; skb_put(new, max); kfree_skb(drop_skb); drop_skb = new; drop_max = max; err = 0; out: spin_unlock_irqrestore(&drop_lock, flags); return err; } static int uml_net_rx(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; /* If we can't allocate memory, try again next round. */ skb = dev_alloc_skb(lp->max_packet); if (skb == NULL) { drop_skb->dev = dev; /* Read a packet into drop_skb and don't do anything with it. 
*/ (*lp->read)(lp->fd, drop_skb, lp); dev->stats.rx_dropped++; return 0; } skb->dev = dev; skb_put(skb, lp->max_packet); skb_reset_mac_header(skb); pkt_len = (*lp->read)(lp->fd, skb, lp); if (pkt_len > 0) { skb_trim(skb, pkt_len); skb->protocol = (*lp->protocol)(skb); dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; netif_rx(skb); return pkt_len; } kfree_skb(skb); return pkt_len; } static void uml_dev_close(struct work_struct *work) { struct uml_net_private *lp = container_of(work, struct uml_net_private, work); dev_close(lp->dev); } static irqreturn_t uml_net_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct uml_net_private *lp = netdev_priv(dev); int err; if (!netif_running(dev)) return IRQ_NONE; spin_lock(&lp->lock); while ((err = uml_net_rx(dev)) > 0) ; if (err < 0) { printk(KERN_ERR "Device '%s' read returned %d, shutting it down\n", dev->name, err); /* dev_close can't be called in interrupt context, and takes * again lp->lock. * And dev_close() can be safely called multiple times on the * same device, since it tests for (dev->flags & IFF_UP). So * there's no harm in delaying the device shutdown. * Furthermore, the workqueue will not re-enqueue an already * enqueued work item. */ schedule_work(&lp->work); goto out; } reactivate_fd(lp->fd, UM_ETH_IRQ); out: spin_unlock(&lp->lock); return IRQ_HANDLED; } static int uml_net_open(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); int err; if (lp->fd >= 0) { err = -ENXIO; goto out; } lp->fd = (*lp->open)(&lp->user); if (lp->fd < 0) { err = lp->fd; goto out; } err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt, IRQF_SHARED, dev->name, dev); if (err != 0) { printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err); err = -ENETUNREACH; goto out_close; } lp->tl.data = (unsigned long) &lp->user; netif_start_queue(dev); /* clear buffer - it can happen that the host side of the interface * is full when we get here. 
In this case, new data is never queued, * SIGIOs never arrive, and the net never works. */ while ((err = uml_net_rx(dev)) > 0) ; spin_lock(&opened_lock); list_add(&lp->list, &opened); spin_unlock(&opened_lock); return 0; out_close: if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); lp->fd = -1; out: return err; } static int uml_net_close(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); netif_stop_queue(dev); free_irq(dev->irq, dev); if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); lp->fd = -1; spin_lock(&opened_lock); list_del(&lp->list); spin_unlock(&opened_lock); return 0; } static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); unsigned long flags; int len; netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); len = (*lp->write)(lp->fd, skb, lp); if (len == skb->len) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ netif_wake_queue(dev); } else if (len == 0) { netif_start_queue(dev); dev->stats.tx_dropped++; } else { netif_start_queue(dev); printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len); } spin_unlock_irqrestore(&lp->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void uml_net_set_multicast_list(struct net_device *dev) { return; } static void uml_net_tx_timeout(struct net_device *dev) { dev->trans_start = jiffies; netif_wake_queue(dev); } static int uml_net_change_mtu(struct net_device *dev, int new_mtu) { dev->mtu = new_mtu; return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void uml_net_poll_controller(struct net_device *dev) { disable_irq(dev->irq); uml_net_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static void uml_net_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRIVER_NAME); strcpy(info->version, "42"); } static const struct ethtool_ops 
uml_net_ethtool_ops = { .get_drvinfo = uml_net_get_drvinfo, .get_link = ethtool_op_get_link, }; static void uml_net_user_timer_expire(unsigned long _conn) { #ifdef undef struct connection *conn = (struct connection *)_conn; dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn); do_connect(conn); #endif } static int setup_etheraddr(char *str, unsigned char *addr, char *name) { char *end; int i; if (str == NULL) goto random; for (i = 0; i < 6; i++) { addr[i] = simple_strtoul(str, &end, 16); if ((end == str) || ((*end != ':') && (*end != ',') && (*end != '\0'))) { printk(KERN_ERR "setup_etheraddr: failed to parse '%s' " "as an ethernet address\n", str); goto random; } str = end + 1; } if (is_multicast_ether_addr(addr)) { printk(KERN_ERR "Attempt to assign a multicast ethernet address to a " "device disallowed\n"); goto random; } if (!is_valid_ether_addr(addr)) { printk(KERN_ERR "Attempt to assign an invalid ethernet address to a " "device disallowed\n"); goto random; } if (!is_local_ether_addr(addr)) { printk(KERN_WARNING "Warning: Assigning a globally valid ethernet " "address to a device\n"); printk(KERN_WARNING "You should set the 2nd rightmost bit in " "the first byte of the MAC,\n"); printk(KERN_WARNING "i.e. 
%02x:%02x:%02x:%02x:%02x:%02x\n", addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], addr[5]); } return 0; random: printk(KERN_INFO "Choosing a random ethernet address for device %s\n", name); random_ether_addr(addr); return 1; } static DEFINE_SPINLOCK(devices_lock); static LIST_HEAD(devices); static struct platform_driver uml_net_driver = { .driver = { .name = DRIVER_NAME, }, }; static void net_device_release(struct device *dev) { struct uml_net *device = dev_get_drvdata(dev); struct net_device *netdev = device->dev; struct uml_net_private *lp = netdev_priv(netdev); if (lp->remove != NULL) (*lp->remove)(&lp->user); list_del(&device->list); kfree(device); free_netdev(netdev); } static const struct net_device_ops uml_netdev_ops = { .ndo_open = uml_net_open, .ndo_stop = uml_net_close, .ndo_start_xmit = uml_net_start_xmit, .ndo_set_rx_mode = uml_net_set_multicast_list, .ndo_tx_timeout = uml_net_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = uml_net_change_mtu, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = uml_net_poll_controller, #endif }; /* * Ensures that platform_driver_register is called only once by * eth_configure. Will be set in an initcall. 
*/ static int driver_registered; static void eth_configure(int n, void *init, char *mac, struct transport *transport) { struct uml_net *device; struct net_device *dev; struct uml_net_private *lp; int err, size; int random_mac; size = transport->private_size + sizeof(struct uml_net_private); device = kzalloc(sizeof(*device), GFP_KERNEL); if (device == NULL) { printk(KERN_ERR "eth_configure failed to allocate struct " "uml_net\n"); return; } dev = alloc_etherdev(size); if (dev == NULL) { printk(KERN_ERR "eth_configure: failed to allocate struct " "net_device for eth%d\n", n); goto out_free_device; } INIT_LIST_HEAD(&device->list); device->index = n; /* If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this * and fail. */ snprintf(dev->name, sizeof(dev->name), "eth%d", n); random_mac = setup_etheraddr(mac, device->mac, dev->name); printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); lp = netdev_priv(dev); /* This points to the transport private data. It's still clear, but we * must memset it to 0 *now*. Let's help the drivers. */ memset(lp, 0, size); INIT_WORK(&lp->work, uml_dev_close); /* sysfs register */ if (!driver_registered) { platform_driver_register(&uml_net_driver); driver_registered = 1; } device->pdev.id = n; device->pdev.name = DRIVER_NAME; device->pdev.dev.release = net_device_release; dev_set_drvdata(&device->pdev.dev, device); if (platform_device_register(&device->pdev)) goto out_free_netdev; SET_NETDEV_DEV(dev,&device->pdev.dev); device->dev = dev; /* * These just fill in a data structure, so there's no failure * to be worried about. 
*/ (*transport->kern->init)(dev, init); *lp = ((struct uml_net_private) { .list = LIST_HEAD_INIT(lp->list), .dev = dev, .fd = -1, .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0}, .max_packet = transport->user->max_packet, .protocol = transport->kern->protocol, .open = transport->user->open, .close = transport->user->close, .remove = transport->user->remove, .read = transport->kern->read, .write = transport->kern->write, .add_address = transport->user->add_address, .delete_address = transport->user->delete_address }); init_timer(&lp->tl); spin_lock_init(&lp->lock); lp->tl.function = uml_net_user_timer_expire; memcpy(lp->mac, device->mac, sizeof(lp->mac)); if ((transport->user->init != NULL) && ((*transport->user->init)(&lp->user, dev) != 0)) goto out_unregister; /* don't use eth_mac_addr, it will not work here */ memcpy(dev->dev_addr, device->mac, ETH_ALEN); if (random_mac) dev->addr_assign_type |= NET_ADDR_RANDOM; dev->mtu = transport->user->mtu; dev->netdev_ops = &uml_netdev_ops; dev->ethtool_ops = &uml_net_ethtool_ops; dev->watchdog_timeo = (HZ >> 1); dev->irq = UM_ETH_IRQ; err = update_drop_skb(lp->max_packet); if (err) goto out_undo_user_init; rtnl_lock(); err = register_netdevice(dev); rtnl_unlock(); if (err) goto out_undo_user_init; spin_lock(&devices_lock); list_add(&device->list, &devices); spin_unlock(&devices_lock); return; out_undo_user_init: if (transport->user->remove != NULL) (*transport->user->remove)(&lp->user); out_unregister: platform_device_unregister(&device->pdev); return; /* platform_device_unregister frees dev and device */ out_free_netdev: free_netdev(dev); out_free_device: kfree(device); } static struct uml_net *find_device(int n) { struct uml_net *device; struct list_head *ele; spin_lock(&devices_lock); list_for_each(ele, &devices) { device = list_entry(ele, struct uml_net, list); if (device->index == n) goto out; } device = NULL; out: spin_unlock(&devices_lock); return device; } static int eth_parse(char *str, int *index_out, char **str_out, 
char **error_out) { char *end; int n, err = -EINVAL; n = simple_strtoul(str, &end, 0); if (end == str) { *error_out = "Bad device number"; return err; } str = end; if (*str != '=') { *error_out = "Expected '=' after device number"; return err; } str++; if (find_device(n)) { *error_out = "Device already configured"; return err; } *index_out = n; *str_out = str; return 0; } struct eth_init { struct list_head list; char *init; int index; }; static DEFINE_SPINLOCK(transports_lock); static LIST_HEAD(transports); /* Filled in during early boot */ static LIST_HEAD(eth_cmd_line); static int check_transport(struct transport *transport, char *eth, int n, void **init_out, char **mac_out) { int len; len = strlen(transport->name); if (strncmp(eth, transport->name, len)) return 0; eth += len; if (*eth == ',') eth++; else if (*eth != '\0') return 0; *init_out = kmalloc(transport->setup_size, GFP_KERNEL); if (*init_out == NULL) return 1; if (!transport->setup(eth, mac_out, *init_out)) { kfree(*init_out); *init_out = NULL; } return 1; } void register_transport(struct transport *new) { struct list_head *ele, *next; struct eth_init *eth; void *init; char *mac = NULL; int match; spin_lock(&transports_lock); BUG_ON(!list_empty(&new->list)); list_add(&new->list, &transports); spin_unlock(&transports_lock); list_for_each_safe(ele, next, &eth_cmd_line) { eth = list_entry(ele, struct eth_init, list); match = check_transport(new, eth->init, eth->index, &init, &mac); if (!match) continue; else if (init != NULL) { eth_configure(eth->index, init, mac, new); kfree(init); } list_del(&eth->list); } } static int eth_setup_common(char *str, int index) { struct list_head *ele; struct transport *transport; void *init; char *mac = NULL; int found = 0; spin_lock(&transports_lock); list_for_each(ele, &transports) { transport = list_entry(ele, struct transport, list); if (!check_transport(transport, str, index, &init, &mac)) continue; if (init != NULL) { eth_configure(index, init, mac, transport); 
kfree(init); } found = 1; break; } spin_unlock(&transports_lock); return found; } static int __init eth_setup(char *str) { struct eth_init *new; char *error; int n, err; err = eth_parse(str, &n, &str, &error); if (err) { printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n", str, error); return 1; } new = alloc_bootmem(sizeof(*new)); if (new == NULL) { printk(KERN_ERR "eth_init : alloc_bootmem failed\n"); return 1; } INIT_LIST_HEAD(&new->list); new->index = n; new->init = str; list_add_tail(&new->list, &eth_cmd_line); return 1; } __setup("eth", eth_setup); __uml_help(eth_setup, "eth[0-9]+=<transport>,<options>\n" " Configure a network device.\n\n" ); static int net_config(char *str, char **error_out) { int n, err; err = eth_parse(str, &n, &str, error_out); if (err) return err; /* This string is broken up and the pieces used by the underlying * driver. So, it is freed only if eth_setup_common fails. */ str = kstrdup(str, GFP_KERNEL); if (str == NULL) { *error_out = "net_config failed to strdup string"; return -ENOMEM; } err = !eth_setup_common(str, n); if (err) kfree(str); return err; } static int net_id(char **str, int *start_out, int *end_out) { char *end; int n; n = simple_strtoul(*str, &end, 0); if ((*end != '\0') || (end == *str)) return -1; *start_out = n; *end_out = n; *str = end; return n; } static int net_remove(int n, char **error_out) { struct uml_net *device; struct net_device *dev; struct uml_net_private *lp; device = find_device(n); if (device == NULL) return -ENODEV; dev = device->dev; lp = netdev_priv(dev); if (lp->fd > 0) return -EBUSY; unregister_netdev(dev); platform_device_unregister(&device->pdev); return 0; } static struct mc_device net_mc = { .list = LIST_HEAD_INIT(net_mc.list), .name = "eth", .config = net_config, .get_config = NULL, .id = net_id, .remove = net_remove, }; #ifdef CONFIG_INET static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *dev = 
ifa->ifa_dev->dev; struct uml_net_private *lp; void (*proc)(unsigned char *, unsigned char *, void *); unsigned char addr_buf[4], netmask_buf[4]; if (dev->netdev_ops->ndo_open != uml_net_open) return NOTIFY_DONE; lp = netdev_priv(dev); proc = NULL; switch (event) { case NETDEV_UP: proc = lp->add_address; break; case NETDEV_DOWN: proc = lp->delete_address; break; } if (proc != NULL) { memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf)); memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf)); (*proc)(addr_buf, netmask_buf, &lp->user); } return NOTIFY_DONE; } /* uml_net_init shouldn't be called twice on two CPUs at the same time */ static struct notifier_block uml_inetaddr_notifier = { .notifier_call = uml_inetaddr_event, }; static void inet_register(void) { struct list_head *ele; struct uml_net_private *lp; struct in_device *ip; struct in_ifaddr *in; register_inetaddr_notifier(&uml_inetaddr_notifier); /* Devices may have been opened already, so the uml_inetaddr_notifier * didn't get a chance to run for them. This fakes it so that * addresses which have already been set up get handled properly. 
*/ spin_lock(&opened_lock); list_for_each(ele, &opened) { lp = list_entry(ele, struct uml_net_private, list); ip = lp->dev->ip_ptr; if (ip == NULL) continue; in = ip->ifa_list; while (in != NULL) { uml_inetaddr_event(NULL, NETDEV_UP, in); in = in->ifa_next; } } spin_unlock(&opened_lock); } #else static inline void inet_register(void) { } #endif static int uml_net_init(void) { mconsole_register_dev(&net_mc); inet_register(); return 0; } __initcall(uml_net_init); static void close_devices(void) { struct list_head *ele; struct uml_net_private *lp; spin_lock(&opened_lock); list_for_each(ele, &opened) { lp = list_entry(ele, struct uml_net_private, list); free_irq(lp->dev->irq, lp->dev); if ((lp->close != NULL) && (lp->fd >= 0)) (*lp->close)(lp->fd, &lp->user); if (lp->remove != NULL) (*lp->remove)(&lp->user); } spin_unlock(&opened_lock); } __uml_exitcall(close_devices); void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, void *), void *arg) { struct net_device *dev = d; struct in_device *ip = dev->ip_ptr; struct in_ifaddr *in; unsigned char address[4], netmask[4]; if (ip == NULL) return; in = ip->ifa_list; while (in != NULL) { memcpy(address, &in->ifa_address, sizeof(address)); memcpy(netmask, &in->ifa_mask, sizeof(netmask)); (*cb)(address, netmask, arg); in = in->ifa_next; } } int dev_netmask(void *d, void *m) { struct net_device *dev = d; struct in_device *ip = dev->ip_ptr; struct in_ifaddr *in; __be32 *mask_out = m; if (ip == NULL) return 1; in = ip->ifa_list; if (in == NULL) return 1; *mask_out = in->ifa_mask; return 0; } void *get_output_buffer(int *len_out) { void *ret; ret = (void *) __get_free_pages(GFP_KERNEL, 0); if (ret) *len_out = PAGE_SIZE; else *len_out = 0; return ret; } void free_output_buffer(void *buffer) { free_pages((unsigned long) buffer, 0); } int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out, char **gate_addr) { char *remain; remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL); if (remain 
!= NULL) { printk(KERN_ERR "tap_setup_common - Extra garbage on " "specification : '%s'\n", remain); return 1; } return 0; } unsigned short eth_protocol(struct sk_buff *skb) { return eth_type_trans(skb, skb->dev); }
gpl-2.0
SlimRoms/kernel_sony_msm8974
drivers/usb/musb/da8xx.c
4838
16397
/* * Texas Instruments DA8xx/OMAP-L1x "glue layer" * * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * Based on the DaVinci "glue layer" code. * Copyright (C) 2005-2006 by Texas Instruments * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <mach/da8xx.h> #include <mach/usb.h> #include "musb_core.h" /* * DA8XX specific definitions */ /* USB 2.0 OTG module registers */ #define DA8XX_USB_REVISION_REG 0x00 #define DA8XX_USB_CTRL_REG 0x04 #define DA8XX_USB_STAT_REG 0x08 #define DA8XX_USB_EMULATION_REG 0x0c #define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */ #define DA8XX_USB_AUTOREQ_REG 0x14 #define DA8XX_USB_SRP_FIX_TIME_REG 0x18 #define DA8XX_USB_TEARDOWN_REG 0x1c #define DA8XX_USB_INTR_SRC_REG 0x20 #define DA8XX_USB_INTR_SRC_SET_REG 0x24 #define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28 #define DA8XX_USB_INTR_MASK_REG 0x2c #define DA8XX_USB_INTR_MASK_SET_REG 0x30 #define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34 #define DA8XX_USB_INTR_SRC_MASKED_REG 0x38 #define DA8XX_USB_END_OF_INTR_REG 0x3c #define 
DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2)) /* Control register bits */ #define DA8XX_SOFT_RESET_MASK 1 #define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */ #define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */ /* USB interrupt register bits */ #define DA8XX_INTR_USB_SHIFT 16 #define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */ /* interrupts and DRVVBUS interrupt */ #define DA8XX_INTR_DRVVBUS 0x100 #define DA8XX_INTR_RX_SHIFT 8 #define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT) #define DA8XX_INTR_TX_SHIFT 0 #define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT) #define DA8XX_MENTOR_CORE_OFFSET 0x400 #define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG) struct da8xx_glue { struct device *dev; struct platform_device *musb; struct clk *clk; }; /* * REVISIT (PM): we should be able to keep the PHY in low power mode most * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 * and, when in host mode, autosuspending idle root ports... PHY_PLLON * (overriding SUSPENDM?) then likely needs to stay off. */ static inline void phy_on(void) { u32 cfgchip2 = __raw_readl(CFGCHIP2); /* * Start the on-chip PHY and its PLL. */ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN); cfgchip2 |= CFGCHIP2_PHY_PLLON; __raw_writel(cfgchip2, CFGCHIP2); pr_info("Waiting for USB PHY clock good...\n"); while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) cpu_relax(); } static inline void phy_off(void) { u32 cfgchip2 = __raw_readl(CFGCHIP2); /* * Ensure that USB 1.1 reference clock is not being sourced from * USB 2.0 PHY. Otherwise do not power down the PHY. */ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) && (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) { pr_warning("USB 1.1 clocked from USB 2.0 PHY -- " "can't power it down\n"); return; } /* * Power down the on-chip PHY. 
*/ cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN; __raw_writel(cfgchip2, CFGCHIP2); } /* * Because we don't set CTRL.UINT, it's "important" to: * - not read/write INTRUSB/INTRUSBE (except during * initial setup, as a workaround); * - use INTSET/INTCLR instead. */ /** * da8xx_musb_enable - enable interrupts */ static void da8xx_musb_enable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 mask; /* Workaround: setup IRQs through both register sets. */ mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) | ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) | DA8XX_INTR_USB_MASK; musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask); /* Force the DRVVBUS IRQ so we can start polling for ID change. */ if (is_otg_enabled(musb)) musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT); } /** * da8xx_musb_disable - disable HDRC and flush interrupts */ static void da8xx_musb_disable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG, DA8XX_INTR_USB_MASK | DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); } #define portstate(stmt) stmt static void da8xx_musb_set_vbus(struct musb *musb, int is_on) { WARN_ON(is_on && is_peripheral_active(musb)); } #define POLL_SECONDS 2 static struct timer_list otg_workaround; static void otg_timer(unsigned long _musb) { struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; u8 devctl; unsigned long flags; /* * We poll because DaVinci's won't expose several OTG-critical * status change events (from the transceiver) otherwise. 
*/ devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, otg_state_string(musb->xceiv->state)); spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_A_WAIT_BCON: devctl &= ~MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_IDLE; MUSB_DEV_MODE(musb); } else { musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } break; case OTG_STATE_A_WAIT_VFALL: /* * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3 * RTL seems to mis-handle session "start" otherwise (or in * our case "recover"), in routine "VBUS was valid by the time * VBUSERR got reported during enumeration" cases. */ if (devctl & MUSB_DEVCTL_VBUS) { mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); break; } musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG, MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT); break; case OTG_STATE_B_IDLE: if (!is_peripheral_enabled(musb)) break; /* * There's no ID-changed IRQ, so we have no good way to tell * when to switch to the A-Default state machine (by setting * the DEVCTL.Session bit). * * Workaround: whenever we're in B_IDLE, try setting the * session flag every few seconds. If it works, ID was * grounded and we're now in the A-Default state machine. * * NOTE: setting the session flag is _supposed_ to trigger * SRP but clearly it doesn't. 
*/ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); devctl = musb_readb(mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); else musb->xceiv->state = OTG_STATE_A_IDLE; break; default: break; } spin_unlock_irqrestore(&musb->lock, flags); } static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout) { static unsigned long last_timer; if (!is_otg_enabled(musb)) return; if (timeout == 0) timeout = jiffies + msecs_to_jiffies(3); /* Never idle if active, or when VBUS timeout is not set as host */ if (musb->is_active || (musb->a_wait_bcon == 0 && musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { dev_dbg(musb->controller, "%s active, deleting timer\n", otg_state_string(musb->xceiv->state)); del_timer(&otg_workaround); last_timer = jiffies; return; } if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); return; } last_timer = timeout; dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", otg_state_string(musb->xceiv->state), jiffies_to_msecs(timeout - jiffies)); mod_timer(&otg_workaround, timeout); } static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) { struct musb *musb = hci; void __iomem *reg_base = musb->ctrl_base; struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; irqreturn_t ret = IRQ_NONE; u32 status; spin_lock_irqsave(&musb->lock, flags); /* * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through * the Mentor registers (except for setup), use the TI ones and EOI. 
*/ /* Acknowledge and handle non-CPPI interrupts */ status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG); if (!status) goto eoi; musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status); dev_dbg(musb->controller, "USB IRQ %08x\n", status); musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT; musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT; musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT; /* * DRVVBUS IRQs are the only proxy we have (a very poor one!) for * DA8xx's missing ID change IRQ. We need an ID change IRQ to * switch appropriately between halves of the OTG state machine. * Managing DEVCTL.Session per Mentor docs requires that we know its * value but DEVCTL.BDevice is invalid without DEVCTL.Session set. * Also, DRVVBUS pulses for SRP (but not at 5 V)... */ if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) { int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG); void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; err = is_host_enabled(musb) && (musb->int_usb & MUSB_INTR_VBUSERROR); if (err) { /* * The Mentor core doesn't debounce VBUS as needed * to cope with device connect current spikes. This * means it's not uncommon for bus-powered devices * to get VBUS errors during enumeration. * * This is a workaround, but newer RTL from Mentor * seems to allow a better one: "re"-starting sessions * without waiting for VBUS to stop registering in * devctl. 
*/ musb->int_usb &= ~MUSB_INTR_VBUSERROR; musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); WARNING("VBUS error workaround (delay coming)\n"); } else if (is_host_enabled(musb) && drvvbus) { MUSB_HST_MODE(musb); otg->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); } else { musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); } dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", drvvbus ? "on" : "off", otg_state_string(musb->xceiv->state), err ? " ERROR" : "", devctl); ret = IRQ_HANDLED; } if (musb->int_tx || musb->int_rx || musb->int_usb) ret |= musb_interrupt(musb); eoi: /* EOI needs to be written for the IRQ to be re-asserted. */ if (ret == IRQ_HANDLED || status) musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); /* Poll for ID change */ if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); spin_unlock_irqrestore(&musb->lock, flags); return ret; } static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) { u32 cfgchip2 = __raw_readl(CFGCHIP2); cfgchip2 &= ~CFGCHIP2_OTGMODE; switch (musb_mode) { case MUSB_HOST: /* Force VBUS valid, ID = 0 */ cfgchip2 |= CFGCHIP2_FORCE_HOST; break; case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ cfgchip2 |= CFGCHIP2_FORCE_DEVICE; break; case MUSB_OTG: /* Don't override the VBUS/ID comparators */ cfgchip2 |= CFGCHIP2_NO_OVERRIDE; break; default: dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode); } __raw_writel(cfgchip2, CFGCHIP2); return 0; } static int da8xx_musb_init(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 rev; musb->mregs += DA8XX_MENTOR_CORE_OFFSET; /* Returns zero if e.g. 
not clocked */ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); if (!rev) goto fail; usb_nop_xceiv_register(); musb->xceiv = usb_get_transceiver(); if (!musb->xceiv) goto fail; if (is_host_enabled(musb)) setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); /* Reset the controller */ musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); /* Start the on-chip PHY and its PLL. */ phy_on(); msleep(5); /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */ pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n", rev, __raw_readl(CFGCHIP2), musb_readb(reg_base, DA8XX_USB_CTRL_REG)); musb->isr = da8xx_musb_interrupt; return 0; fail: return -ENODEV; } static int da8xx_musb_exit(struct musb *musb) { if (is_host_enabled(musb)) del_timer_sync(&otg_workaround); phy_off(); usb_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); return 0; } static const struct musb_platform_ops da8xx_ops = { .init = da8xx_musb_init, .exit = da8xx_musb_exit, .enable = da8xx_musb_enable, .disable = da8xx_musb_disable, .set_mode = da8xx_musb_set_mode, .try_idle = da8xx_musb_try_idle, .set_vbus = da8xx_musb_set_vbus, }; static u64 da8xx_dmamask = DMA_BIT_MASK(32); static int __devinit da8xx_probe(struct platform_device *pdev) { struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; struct platform_device *musb; struct da8xx_glue *glue; struct clk *clk; int ret = -ENOMEM; glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to allocate glue context\n"); goto err0; } musb = platform_device_alloc("musb-hdrc", -1); if (!musb) { dev_err(&pdev->dev, "failed to allocate musb device\n"); goto err1; } clk = clk_get(&pdev->dev, "usb20"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get clock\n"); ret = PTR_ERR(clk); goto err2; } ret = clk_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock\n"); goto err3; } musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &da8xx_dmamask; musb->dev.coherent_dma_mask = 
da8xx_dmamask; glue->dev = &pdev->dev; glue->musb = musb; glue->clk = clk; pdata->platform_ops = &da8xx_ops; platform_set_drvdata(pdev, glue); ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); goto err4; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); goto err4; } ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err4; } return 0; err4: clk_disable(clk); err3: clk_put(clk); err2: platform_device_put(musb); err1: kfree(glue); err0: return ret; } static int __devexit da8xx_remove(struct platform_device *pdev) { struct da8xx_glue *glue = platform_get_drvdata(pdev); platform_device_del(glue->musb); platform_device_put(glue->musb); clk_disable(glue->clk); clk_put(glue->clk); kfree(glue); return 0; } static struct platform_driver da8xx_driver = { .probe = da8xx_probe, .remove = __devexit_p(da8xx_remove), .driver = { .name = "musb-da8xx", }, }; MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer"); MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>"); MODULE_LICENSE("GPL v2"); static int __init da8xx_init(void) { return platform_driver_register(&da8xx_driver); } module_init(da8xx_init); static void __exit da8xx_exit(void) { platform_driver_unregister(&da8xx_driver); } module_exit(da8xx_exit);
gpl-2.0
1N4148/SAMSUNG_OSRC_DUMPS
drivers/rtc/rtc-proc.c
8166
3272
/*
 * RTC subsystem, proc interface
 *
 * Copyright (C) 2005-06 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include "rtc-core.h"

/*
 * seq_file "show" callback for /proc/driver/rtc: prints the current
 * time/date, the alarm settings and IRQ state for one RTC device, then
 * lets the underlying driver append its own lines via ops->proc.
 * The device was stashed in seq->private by rtc_proc_open().
 */
static int rtc_proc_show(struct seq_file *seq, void *offset)
{
	int err;
	struct rtc_device *rtc = seq->private;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_wkalrm alrm;
	struct rtc_time tm;

	/* Time/date lines are only emitted when the hardware read works. */
	err = rtc_read_time(rtc, &tm);
	if (err == 0) {
		seq_printf(seq,
			"rtc_time\t: %02d:%02d:%02d\n"
			"rtc_date\t: %04d-%02d-%02d\n",
			tm.tm_hour, tm.tm_min, tm.tm_sec,
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	}

	err = rtc_read_alarm(rtc, &alrm);
	if (err == 0) {
		/*
		 * Alarm fields the hardware does not implement come back
		 * out of range; print "**"/"****" placeholders for those.
		 * The (unsigned int) casts fold negative values into the
		 * out-of-range case.
		 * NOTE(review): the hour test uses <= 24 although valid
		 * hours are 0..23 — looks like an off-by-one; confirm
		 * against mainline before changing.
		 */
		seq_printf(seq, "alrm_time\t: ");
		if ((unsigned int)alrm.time.tm_hour <= 24)
			seq_printf(seq, "%02d:", alrm.time.tm_hour);
		else
			seq_printf(seq, "**:");
		if ((unsigned int)alrm.time.tm_min <= 59)
			seq_printf(seq, "%02d:", alrm.time.tm_min);
		else
			seq_printf(seq, "**:");
		if ((unsigned int)alrm.time.tm_sec <= 59)
			seq_printf(seq, "%02d\n", alrm.time.tm_sec);
		else
			seq_printf(seq, "**\n");

		seq_printf(seq, "alrm_date\t: ");
		/* tm_year is years since 1900, so <= 200 means up to 2100 */
		if ((unsigned int)alrm.time.tm_year <= 200)
			seq_printf(seq, "%04d-", alrm.time.tm_year + 1900);
		else
			seq_printf(seq, "****-");
		if ((unsigned int)alrm.time.tm_mon <= 11)
			seq_printf(seq, "%02d-", alrm.time.tm_mon + 1);
		else
			seq_printf(seq, "**-");
		/* tm_mday is 1-based, so 0 also counts as "not set" */
		if (alrm.time.tm_mday && (unsigned int)alrm.time.tm_mday <= 31)
			seq_printf(seq, "%02d\n", alrm.time.tm_mday);
		else
			seq_printf(seq, "**\n");

		seq_printf(seq, "alarm_IRQ\t: %s\n",
				alrm.enabled ? "yes" : "no");
		seq_printf(seq, "alrm_pending\t: %s\n",
				alrm.pending ? "yes" : "no");
		seq_printf(seq, "update IRQ enabled\t: %s\n",
				(rtc->uie_rtctimer.enabled) ? "yes" : "no");
		seq_printf(seq, "periodic IRQ enabled\t: %s\n",
				(rtc->pie_enabled) ? "yes" : "no");
		seq_printf(seq, "periodic IRQ frequency\t: %d\n",
				rtc->irq_freq);
		seq_printf(seq, "max user IRQ frequency\t: %d\n",
				rtc->max_user_freq);
	}

	seq_printf(seq, "24hr\t\t: yes\n");

	/* Give the driver a chance to add device-specific lines. */
	if (ops->proc)
		ops->proc(rtc->dev.parent, seq);

	return 0;
}

/*
 * Open handler: pin this module while the file is open, then hand the
 * rtc_device (stored as PDE data) to single_open() as seq->private.
 */
static int rtc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct rtc_device *rtc = PDE(inode)->data;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	ret = single_open(file, rtc_proc_show, rtc);
	if (ret)
		module_put(THIS_MODULE);	/* undo the pin on failure */
	return ret;
}

/* Release handler: drop the seq_file state and the module reference. */
static int rtc_proc_release(struct inode *inode, struct file *file)
{
	int res = single_release(inode, file);
	module_put(THIS_MODULE);
	return res;
}

static const struct file_operations rtc_proc_fops = {
	.open		= rtc_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rtc_proc_release,
};

/* Only the first registered RTC (id 0) gets the legacy /proc entry. */
void rtc_proc_add_device(struct rtc_device *rtc)
{
	if (rtc->id == 0)
		proc_create_data("driver/rtc", 0, NULL, &rtc_proc_fops, rtc);
}

void rtc_proc_del_device(struct rtc_device *rtc)
{
	if (rtc->id == 0)
		remove_proc_entry("driver/rtc", NULL);
}
gpl-2.0
bigsupersquid/android_kernel_lge_msm7x27-3.0.x
arch/microblaze/kernel/kgdb.c
8934
3989
/*
 * Microblaze KGDB support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/pvr.h>

/*
 * Indices into the GDB register buffer.  0..31 are the general-purpose
 * registers r0-r31; the special registers follow, and the read-only
 * PVR/TLB registers sit at the top of the layout.
 */
#define GDB_REG		0
#define GDB_PC		32
#define GDB_MSR		33
#define GDB_EAR		34
#define GDB_ESR		35
#define GDB_FSR		36
#define GDB_BTR		37
#define GDB_PVR		38
#define GDB_REDR	50
#define GDB_RPID	51
#define GDB_RZPR	52
#define GDB_RTLBX	53
#define GDB_RTLBSX	54	/* mfs can't read it */
#define GDB_RTLBLO	55
#define GDB_RTLBHI	56

/* keep pvr separately because it is unchangeable */
struct pvr_s pvr;

/*
 * Copy a trapped task's pt_regs (plus read-only special registers read
 * live via mfs) into the flat register buffer GDB expects.
 */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;
	unsigned long *pt_regb = (unsigned long *)regs;
	int temp;

	/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
	for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
		gdb_regs[i] = pt_regb[i];

	/* Branch target register can't be changed */
	__asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : );
	gdb_regs[GDB_BTR] = temp;

	/* pvr part - we have 11 pvr regs */
	for (i = 0; i < sizeof(struct pvr_s)/4; i++)
		gdb_regs[GDB_PVR + i] = pvr.pvr[i];

	/* read special registers - can't be changed */
	__asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : );
	gdb_regs[GDB_REDR] = temp;
	__asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : );
	gdb_regs[GDB_RPID] = temp;
	__asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : );
	gdb_regs[GDB_RZPR] = temp;
	__asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : );
	gdb_regs[GDB_RTLBX] = temp;
	__asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : );
	gdb_regs[GDB_RTLBLO] = temp;
	__asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : );
	gdb_regs[GDB_RTLBHI] = temp;
}

/*
 * Write GDB's register buffer back into pt_regs.  Only the pt_regs
 * portion is writable; the special/PVR registers above it are ignored.
 */
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;
	unsigned long *pt_regb = (unsigned long *)regs;

	/* pt_regs and gdb_regs have the same 37 values.
	 * The rest of gdb_regs are unused and can't be changed.
	 * r0 register value can't be changed too. */
	for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++)
		pt_regb[i] = gdb_regs[i];
}

/*
 * Entry point from the breakpoint trap handler.  After the debugger
 * returns, step the PC past the breakpoint instruction if we stopped
 * exactly on one.
 */
void microblaze_kgdb_break(struct pt_regs *regs)
{
	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
		return;

	/* Jump over the first arch_kgdb_breakpoint which is barrier to
	 * get kgdb work. The same solution is used for powerpc */
	if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
		regs->pc += BREAK_INSTR_SIZE;
}

/* untested */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
				 struct task_struct *p)
{
	int i;
	unsigned long *pt_regb = (unsigned long *)(p->thread.regs);

	/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
	for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
		gdb_regs[i] = pt_regb[i];

	/* pvr part - we have 11 pvr regs */
	for (i = 0; i < sizeof(struct pvr_s)/4; i++)
		gdb_regs[GDB_PVR + i] = pvr.pvr[i];
}

/* Retarget execution: used by the stub to resume at a new address. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}

/*
 * Handle architecture-specific remote protocol packets.  Only 'c'
 * (continue, with optional resume address) is handled here; returning
 * -1 for anything else keeps the generic stub in its command loop.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	switch (remcom_in_buffer[0]) {
	case 'c':
		/* handle the optional parameter */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->pc = address;

		return 0;
	}
	return -1; /* this means that we do not want to exit from the handler */
}

/* One-time setup: snapshot the processor version registers. */
int kgdb_arch_init(void)
{
	get_pvr(&pvr); /* Fill PVR structure */
	return 0;
}

void kgdb_arch_exit(void)
{
	/* Nothing to do */
}

/*
 * Global data
 */
struct kgdb_arch arch_kgdb_ops = {
#ifdef __MICROBLAZEEL__
	.gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
#else
	.gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
#endif
};
gpl-2.0
SlimRoms/kernel_samsung_hlte
arch/arm/mach-gemini/time.c
12006
2470
/*
 * Copyright (C) 2001-2006 Storlink, Corp.
 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/global_reg.h>
#include <asm/mach/time.h>

/*
 * Register definitions for the timers
 */
#define TIMER_COUNT(BASE_ADDR)		(BASE_ADDR  + 0x00)
#define TIMER_LOAD(BASE_ADDR)		(BASE_ADDR  + 0x04)
#define TIMER_MATCH1(BASE_ADDR)		(BASE_ADDR  + 0x08)
#define TIMER_MATCH2(BASE_ADDR)		(BASE_ADDR  + 0x0C)
#define TIMER_CR(BASE_ADDR)		(BASE_ADDR  + 0x30)

/* Shared control register: enable / clock-select / interrupt bits,
 * three bits per timer. */
#define TIMER_1_CR_ENABLE		(1 << 0)
#define TIMER_1_CR_CLOCK		(1 << 1)
#define TIMER_1_CR_INT			(1 << 2)
#define TIMER_2_CR_ENABLE		(1 << 3)
#define TIMER_2_CR_CLOCK		(1 << 4)
#define TIMER_2_CR_INT			(1 << 5)
#define TIMER_3_CR_ENABLE		(1 << 6)
#define TIMER_3_CR_CLOCK		(1 << 7)
#define TIMER_3_CR_INT			(1 << 8)

/*
 * IRQ handler for the timer: just drive the generic periodic tick.
 */
static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id)
{
	timer_tick();

	return IRQ_HANDLED;
}

static struct irqaction gemini_timer_irq = {
	.name		= "Gemini Timer Tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.handler	= gemini_timer_interrupt,
};

/*
 * Set up timer interrupt, and return the current time in seconds.
 *
 * Derives the APB tick rate from the AHB speed encoded in the global
 * status register, logs the CPU:AHB ratio, then programs TIMER2 to fire
 * IRQ_TIMER2 at HZ.
 */
void __init gemini_timer_init(void)
{
	unsigned int tick_rate, reg_v;

	reg_v = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE + GLOBAL_STATUS));
	tick_rate = REG_TO_AHB_SPEED(reg_v) * 1000000;

	printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000);

	tick_rate /= 6;		/* APB bus run AHB*(1/6) */

	/* Log the CPU/AHB clock ratio next to the bus speed. */
	switch (reg_v & CPU_AHB_RATIO_MASK) {
	case CPU_AHB_1_1:
		printk(KERN_CONT "(1/1)\n");
		break;
	case CPU_AHB_3_2:
		printk(KERN_CONT "(3/2)\n");
		break;
	case CPU_AHB_24_13:
		printk(KERN_CONT "(24/13)\n");
		break;
	case CPU_AHB_2_1:
		printk(KERN_CONT "(2/1)\n");
		break;
	}

	/*
	 * Make irqs happen for the system timer
	 */
	setup_irq(IRQ_TIMER2, &gemini_timer_irq);

	/* Start the timer: count and reload value give HZ ticks/second,
	 * then enable TIMER2 with its interrupt in the shared CR. */
	__raw_writel(tick_rate / HZ,
		     TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
	__raw_writel(tick_rate / HZ,
		     TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
	__raw_writel(TIMER_2_CR_ENABLE | TIMER_2_CR_INT,
		     TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
}
gpl-2.0
bhanvadia/lge-mako-kernel
sound/pci/echoaudio/mona_dsp.c
12518
11013
/****************************************************************************

   Copyright Echo Digital Audio Corporation (c) 1998 - 2004
   All rights reserved
   www.echoaudio.com

   This file is part of Echo Digital Audio's generic driver library.

   Echo Digital Audio's generic driver library is free software;
   you can redistribute it and/or modify it under the terms of
   the GNU General Public License as published by the Free Software
   Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston,
   MA  02111-1307, USA.

   *************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

****************************************************************************/

/* Forward declarations for helpers shared with the echoaudio core. */
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);


/*
 * One-time hardware init for the Mona card: set up the DSP comm page,
 * record the supported clock sources and digital modes, and load the
 * DSP firmware matching the '301 or '361 variant.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Mona\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
		return -ENODEV;

	if ((err = init_dsp_comm_page(chip))) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;

	/* Mona comes in both '301 and '361 flavors */
	if (chip->device_id == DEVICE_ID_56361)
		chip->dsp_code_to_load = FW_MONA_361_DSP;
	else
		chip->dsp_code_to_load = FW_MONA_301_DSP;

	if ((err = load_firmware(chip)) < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}


/* Default mixer state: S/PDIF RCA, consumer S/PDIF, auto-mute on. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
	chip->professional_spdif = FALSE;
	chip->digital_in_automute = TRUE;
	return init_line_levels(chip);
}


/*
 * Report which external clocks the DSP currently detects, translated
 * from the DSP's status bits to the generic driver's clock bits.
 */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 clocks_from_dsp, clock_bits;

	/* Map the DSP clock detect bits to the generic driver
	   clock detect bits */
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	clock_bits = ECHO_CLOCK_BIT_INTERNAL;

	if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF)
		clock_bits |= ECHO_CLOCK_BIT_SPDIF;

	if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_ADAT)
		clock_bits |= ECHO_CLOCK_BIT_ADAT;

	if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD)
		clock_bits |= ECHO_CLOCK_BIT_WORD;

	return clock_bits;
}


/* Mona has an ASIC on the PCI card and another ASIC in the external box;
   both need to be loaded. */
static int load_asic(struct echoaudio *chip)
{
	u32 control_reg;
	int err;
	short asic;

	if (chip->asic_loaded)
		return 0;

	mdelay(10);

	/* Pick the 48 kHz PCI-card ASIC for this chip variant. */
	if (chip->device_id == DEVICE_ID_56361)
		asic = FW_MONA_361_1_ASIC48;
	else
		asic = FW_MONA_301_1_ASIC48;

	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic);
	if (err < 0)
		return err;
	chip->asic_code = asic;
	mdelay(10);

	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_EXTERNAL_ASIC,
				FW_MONA_2_ASIC);
	if (err < 0)
		return err;

	mdelay(10);
	err = check_asic_status(chip);

	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err) {
		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
		err = write_control_reg(chip, control_reg, TRUE);
	}

	return err;
}


/* Depending on what digital mode you want, Mona needs different ASICs
   loaded.  This function checks the ASIC needed for the new mode and
   sees if it matches the one already loaded. */
static int switch_asic(struct echoaudio *chip, char double_speed)
{
	int err;
	short asic;

	/* Check the clock detect bits to see if this is
	   a single-speed clock or a double-speed clock; load
	   a new ASIC if necessary. */
	if (chip->device_id == DEVICE_ID_56361) {
		if (double_speed)
			asic = FW_MONA_361_1_ASIC96;
		else
			asic = FW_MONA_361_1_ASIC48;
	} else {
		if (double_speed)
			asic = FW_MONA_301_1_ASIC96;
		else
			asic = FW_MONA_301_1_ASIC48;
	}

	if (asic != chip->asic_code) {
		/* Load the desired ASIC */
		err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					asic);
		if (err < 0)
			return err;
		chip->asic_code = asic;
	}

	return 0;
}


/*
 * Program the sample rate.  Rates >= 88200 need the 96 kHz ASIC (and
 * are rejected in ADAT mode); the ASIC swap temporarily drops
 * chip->lock because load_asic_generic() can sleep — callers must hold
 * that lock on entry.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock;
	short asic;
	char force_write;

	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}

	/* Now, check to see if the required ASIC is loaded */
	if (rate >= 88200) {
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EINVAL;
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC96;
		else
			asic = FW_MONA_301_1_ASIC96;
	} else {
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC48;
		else
			asic = FW_MONA_301_1_ASIC48;
	}

	force_write = 0;
	if (asic != chip->asic_code) {
		int err;

		/* Load the desired ASIC (load_asic_generic() can sleep) */
		spin_unlock_irq(&chip->lock);
		err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					asic);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		chip->asic_code = asic;
		force_write = 1;
	}

	/* Compute the new control register value */
	clock = 0;
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK;
	control_reg &= GML_SPDIF_RATE_CLEAR_MASK;

	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}

	control_reg |= clock;

	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));

	return write_control_reg(chip, control_reg, force_write);
}


/*
 * Select the input clock source.  May need to swap the single/double
 * speed ASIC for S/PDIF or word clock; the lock is dropped around
 * switch_asic() because it can sleep.  Refuses clock/mode combinations
 * that don't match the currently selected digital mode.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;
	int err;

	DE_ACT(("set_input_clock:\n"));

	/* Prevent two simultaneous calls to switch_asic() */
	if (atomic_read(&chip->opencount))
		return -EAGAIN;

	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Mona clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_SPDIF96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		DE_ACT(("Set Mona clock to SPDIF\n"));
		control_reg |= GML_SPDIF_CLOCK;
		/* Track single/double speed from the DSP's detect bit. */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_WORD:
		DE_ACT(("Set Mona clock to WORD\n"));
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_WORD96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		control_reg |= GML_WORD_CLOCK;
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_ADAT:
		DE_ACT(("Set Mona clock to ADAT\n"));
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Mona\n", clock));
		return -EINVAL;
	}

	chip->input_clock = clock;
	return write_control_reg(chip, control_reg, TRUE);
}


/*
 * Change the digital I/O mode (S/PDIF optical / S/PDIF RCA / ADAT).
 * Falls back to 48 kHz internal clock when the current clock source is
 * incompatible with the new mode; returns 1 in that case (0 otherwise)
 * so the caller knows the clock was changed, or a negative errno.
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;

	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}

	spin_lock_irq(&chip->lock);

	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
	}

	/* Clear the current digital mode */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;

	/* Tweak the control reg */
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		/* If the current ASIC is the 96KHz ASIC, switch
		   the ASIC and set to 48 KHz */
		if (chip->asic_code == FW_MONA_361_1_ASIC96 ||
		    chip->asic_code == FW_MONA_301_1_ASIC96) {
			set_sample_rate(chip, 48000);
		}
		control_reg |= GML_ADAT_MODE;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}

	err = write_control_reg(chip, control_reg, FALSE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;

	DE_ACT(("set_digital_mode to %d\n", mode));
	return incompatible_clock;
}
gpl-2.0
farindk/linux-sunxi
arch/powerpc/boot/virtex.c
14054
2919
/*
 * The platform specific code for virtex devices since a boot loader is not
 * always used.
 *
 * (C) Copyright 2008 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "io.h"
#include "stdio.h"

/* 16550 register offsets/bits needed to program the console UART. */
#define UART_DLL		0	/* Out: Divisor Latch Low */
#define UART_DLM		1	/* Out: Divisor Latch High */
#define UART_FCR		2	/* Out: FIFO Control Register */
#define UART_FCR_CLEAR_RCVR	0x02	/* Clear the RCVR FIFO */
#define UART_FCR_CLEAR_XMIT	0x04	/* Clear the XMIT FIFO */
#define UART_LCR		3	/* Out: Line Control Register */
#define UART_MCR		4	/* Out: Modem Control Register */
#define UART_MCR_RTS		0x02	/* RTS complement */
#define UART_MCR_DTR		0x01	/* DTR complement */
#define UART_LCR_DLAB		0x80	/* Divisor latch access bit */
#define UART_LCR_WLEN8		0x03	/* Wordlength: 8 bits */

/*
 * Program a ns16550-compatible UART from its device-tree node: map the
 * registers (honouring optional reg-offset/reg-shift), compute the baud
 * divisor from clock-frequency and current-speed (default 9600), then
 * set 8N1 and clear the FIFOs.  Returns 0 on success, -1 when a
 * required property is missing.
 */
static int virtex_ns16550_console_init(void *devp)
{
	unsigned char *reg_base;
	u32 reg_shift, reg_offset, clk, spd;
	u16 divisor;
	int n;

	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
		return -1;

	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
	if (n == sizeof(reg_offset))
		reg_base += reg_offset;

	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
	if (n != sizeof(reg_shift))
		reg_shift = 0;

	n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
	if (n != sizeof(spd))
		spd = 9600;

	/* should there be a default clock rate?*/
	n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
	if (n != sizeof(clk))
		return -1;

	divisor = clk / (16 * spd);

	/* Access baud rate */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB);

	/* Baud rate based on input clock */
	out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF);
	out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8);

	/* 8 data, 1 stop, no parity */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8);

	/* RTS/DTR */
	out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR);

	/* Clear transmitter and receiver */
	out_8(reg_base + (UART_FCR << reg_shift),
	      UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	return 0;
}

/* For virtex, the kernel may be loaded without using a bootloader and if so
   some UARTs need more setup than is provided in the normal console init
*/
int platform_specific_init(void)
{
	void *devp;
	char devtype[MAX_PROP_LEN];
	char path[MAX_PATH_LEN];

	devp = finddevice("/chosen");
	if (devp == NULL)
		return -1;

	/* Follow linux,stdout-path and, if it points at a ns16550-
	 * compatible serial node, run the extra UART setup above. */
	if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
		devp = finddevice(path);
		if (devp == NULL)
			return -1;

		if ((getprop(devp, "device_type", devtype,
			     sizeof(devtype)) > 0) &&
		    !strcmp(devtype, "serial") &&
		    (dt_is_compatible(devp, "ns16550")))
			virtex_ns16550_console_init(devp);
	}
	return 0;
}
gpl-2.0
AICP/kernel_lge_mako
drivers/media/video/stk-sensor.c
14566
19466
/* stk-sensor.c: Driver for ov96xx sensor (used in some Syntek webcams) * * Copyright 2007-2008 Jaime Velasco Juan <jsagarribay@gmail.com> * * Some parts derived from ov7670.c: * Copyright 2006 One Laptop Per Child Association, Inc. Written * by Jonathan Corbet with substantial inspiration from Mark * McClelland's ovcamchip code. * * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net> * * This file may be distributed under the terms of the GNU General * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Controlling the sensor via the STK1125 vendor specific control interface: * The camera uses an OmniVision sensor and the stk1125 provides an * SCCB(i2c)-USB bridge which let us program the sensor. * In my case the sensor id is 0x9652, it can be read from sensor's register * 0x0A and 0x0B as follows: * - read register #R: * output #R to index 0x0208 * output 0x0070 to index 0x0200 * input 1 byte from index 0x0201 (some kind of status register) * until its value is 0x01 * input 1 byte from index 0x0209. 
This is the value of #R * - write value V to register #R * output #R to index 0x0204 * output V to index 0x0205 * output 0x0005 to index 0x0200 * input 1 byte from index 0x0201 until its value becomes 0x04 */ /* It seems the i2c bus is controlled with these registers */ #include "stk-webcam.h" #define STK_IIC_BASE (0x0200) # define STK_IIC_OP (STK_IIC_BASE) # define STK_IIC_OP_TX (0x05) # define STK_IIC_OP_RX (0x70) # define STK_IIC_STAT (STK_IIC_BASE+1) # define STK_IIC_STAT_TX_OK (0x04) # define STK_IIC_STAT_RX_OK (0x01) /* I don't know what does this register. * when it is 0x00 or 0x01, we cannot talk to the sensor, * other values work */ # define STK_IIC_ENABLE (STK_IIC_BASE+2) # define STK_IIC_ENABLE_NO (0x00) /* This is what the driver writes in windows */ # define STK_IIC_ENABLE_YES (0x1e) /* * Address of the slave. Seems like the binary driver look for the * sensor in multiple places, attempting a reset sequence. * We only know about the ov9650 */ # define STK_IIC_ADDR (STK_IIC_BASE+3) # define STK_IIC_TX_INDEX (STK_IIC_BASE+4) # define STK_IIC_TX_VALUE (STK_IIC_BASE+5) # define STK_IIC_RX_INDEX (STK_IIC_BASE+8) # define STK_IIC_RX_VALUE (STK_IIC_BASE+9) #define MAX_RETRIES (50) #define SENSOR_ADDRESS (0x60) /* From ov7670.c (These registers aren't fully accurate) */ /* Registers */ #define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */ #define REG_BLUE 0x01 /* blue gain */ #define REG_RED 0x02 /* red gain */ #define REG_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */ #define REG_COM1 0x04 /* Control 1 */ #define COM1_CCIR656 0x40 /* CCIR656 enable */ #define COM1_QFMT 0x20 /* QVGA/QCIF format */ #define COM1_SKIP_0 0x00 /* Do not skip any row */ #define COM1_SKIP_2 0x04 /* Skip 2 rows of 4 */ #define COM1_SKIP_3 0x08 /* Skip 3 rows of 4 */ #define REG_BAVE 0x05 /* U/B Average level */ #define REG_GbAVE 0x06 /* Y/Gb Average level */ #define REG_AECHH 0x07 /* AEC MS 5 bits */ #define REG_RAVE 0x08 /* V/R Average level */ #define REG_COM2 0x09 /* Control 
2 */ #define COM2_SSLEEP 0x10 /* Soft sleep mode */ #define REG_PID 0x0a /* Product ID MSB */ #define REG_VER 0x0b /* Product ID LSB */ #define REG_COM3 0x0c /* Control 3 */ #define COM3_SWAP 0x40 /* Byte swap */ #define COM3_SCALEEN 0x08 /* Enable scaling */ #define COM3_DCWEN 0x04 /* Enable downsamp/crop/window */ #define REG_COM4 0x0d /* Control 4 */ #define REG_COM5 0x0e /* All "reserved" */ #define REG_COM6 0x0f /* Control 6 */ #define REG_AECH 0x10 /* More bits of AEC value */ #define REG_CLKRC 0x11 /* Clock control */ #define CLK_PLL 0x80 /* Enable internal PLL */ #define CLK_EXT 0x40 /* Use external clock directly */ #define CLK_SCALE 0x3f /* Mask for internal clock scale */ #define REG_COM7 0x12 /* Control 7 */ #define COM7_RESET 0x80 /* Register reset */ #define COM7_FMT_MASK 0x38 #define COM7_FMT_SXGA 0x00 #define COM7_FMT_VGA 0x40 #define COM7_FMT_CIF 0x20 /* CIF format */ #define COM7_FMT_QVGA 0x10 /* QVGA format */ #define COM7_FMT_QCIF 0x08 /* QCIF format */ #define COM7_RGB 0x04 /* bits 0 and 2 - RGB format */ #define COM7_YUV 0x00 /* YUV */ #define COM7_BAYER 0x01 /* Bayer format */ #define COM7_PBAYER 0x05 /* "Processed bayer" */ #define REG_COM8 0x13 /* Control 8 */ #define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */ #define COM8_AECSTEP 0x40 /* Unlimited AEC step size */ #define COM8_BFILT 0x20 /* Band filter enable */ #define COM8_AGC 0x04 /* Auto gain enable */ #define COM8_AWB 0x02 /* White balance enable */ #define COM8_AEC 0x01 /* Auto exposure enable */ #define REG_COM9 0x14 /* Control 9 - gain ceiling */ #define REG_COM10 0x15 /* Control 10 */ #define COM10_HSYNC 0x40 /* HSYNC instead of HREF */ #define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */ #define COM10_HREF_REV 0x08 /* Reverse HREF */ #define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */ #define COM10_VS_NEG 0x02 /* VSYNC negative */ #define COM10_HS_NEG 0x01 /* HSYNC negative */ #define REG_HSTART 0x17 /* Horiz start high bits */ #define REG_HSTOP 0x18 /* Horiz 
stop high bits */ #define REG_VSTART 0x19 /* Vert start high bits */ #define REG_VSTOP 0x1a /* Vert stop high bits */ #define REG_PSHFT 0x1b /* Pixel delay after HREF */ #define REG_MIDH 0x1c /* Manuf. ID high */ #define REG_MIDL 0x1d /* Manuf. ID low */ #define REG_MVFP 0x1e /* Mirror / vflip */ #define MVFP_MIRROR 0x20 /* Mirror image */ #define MVFP_FLIP 0x10 /* Vertical flip */ #define REG_AEW 0x24 /* AGC upper limit */ #define REG_AEB 0x25 /* AGC lower limit */ #define REG_VPT 0x26 /* AGC/AEC fast mode op region */ #define REG_ADVFL 0x2d /* Insert dummy lines (LSB) */ #define REG_ADVFH 0x2e /* Insert dummy lines (MSB) */ #define REG_HSYST 0x30 /* HSYNC rising edge delay */ #define REG_HSYEN 0x31 /* HSYNC falling edge delay */ #define REG_HREF 0x32 /* HREF pieces */ #define REG_TSLB 0x3a /* lots of stuff */ #define TSLB_YLAST 0x04 /* UYVY or VYUY - see com13 */ #define TSLB_BYTEORD 0x08 /* swap bytes in 16bit mode? */ #define REG_COM11 0x3b /* Control 11 */ #define COM11_NIGHT 0x80 /* NIght mode enable */ #define COM11_NMFR 0x60 /* Two bit NM frame rate */ #define COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */ #define COM11_50HZ 0x08 /* Manual 50Hz select */ #define COM11_EXP 0x02 #define REG_COM12 0x3c /* Control 12 */ #define COM12_HREF 0x80 /* HREF always */ #define REG_COM13 0x3d /* Control 13 */ #define COM13_GAMMA 0x80 /* Gamma enable */ #define COM13_UVSAT 0x40 /* UV saturation auto adjustment */ #define COM13_CMATRIX 0x10 /* Enable color matrix for RGB or YUV */ #define COM13_UVSWAP 0x01 /* V before U - w/TSLB */ #define REG_COM14 0x3e /* Control 14 */ #define COM14_DCWEN 0x10 /* DCW/PCLK-scale enable */ #define REG_EDGE 0x3f /* Edge enhancement factor */ #define REG_COM15 0x40 /* Control 15 */ #define COM15_R10F0 0x00 /* Data range 10 to F0 */ #define COM15_R01FE 0x80 /* 01 to FE */ #define COM15_R00FF 0xc0 /* 00 to FF */ #define COM15_RGB565 0x10 /* RGB565 output */ #define COM15_RGBFIXME 0x20 /* FIXME */ #define COM15_RGB555 0x30 /* RGB555 output */ 
#define REG_COM16 0x41 /* Control 16 */ #define COM16_AWBGAIN 0x08 /* AWB gain enable */ #define REG_COM17 0x42 /* Control 17 */ #define COM17_AECWIN 0xc0 /* AEC window - must match COM4 */ #define COM17_CBAR 0x08 /* DSP Color bar */ /* * This matrix defines how the colors are generated, must be * tweaked to adjust hue and saturation. * * Order: v-red, v-green, v-blue, u-red, u-green, u-blue * * They are nine-bit signed quantities, with the sign bit * stored in 0x58. Sign for v-red is bit 0, and up from there. */ #define REG_CMATRIX_BASE 0x4f #define CMATRIX_LEN 6 #define REG_CMATRIX_SIGN 0x58 #define REG_BRIGHT 0x55 /* Brightness */ #define REG_CONTRAS 0x56 /* Contrast control */ #define REG_GFIX 0x69 /* Fix gain control */ #define REG_RGB444 0x8c /* RGB 444 control */ #define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */ #define R444_RGBX 0x01 /* Empty nibble at end */ #define REG_HAECC1 0x9f /* Hist AEC/AGC control 1 */ #define REG_HAECC2 0xa0 /* Hist AEC/AGC control 2 */ #define REG_BD50MAX 0xa5 /* 50hz banding step limit */ #define REG_HAECC3 0xa6 /* Hist AEC/AGC control 3 */ #define REG_HAECC4 0xa7 /* Hist AEC/AGC control 4 */ #define REG_HAECC5 0xa8 /* Hist AEC/AGC control 5 */ #define REG_HAECC6 0xa9 /* Hist AEC/AGC control 6 */ #define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */ #define REG_BD60MAX 0xab /* 60hz banding step limit */ /* Returns 0 if OK */ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val) { int i = 0; int tmpval = 0; if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg)) return 1; if (stk_camera_write_reg(dev, STK_IIC_TX_VALUE, val)) return 1; if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_TX)) return 1; do { if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval)) return 1; i++; } while (tmpval == 0 && i < MAX_RETRIES); if (tmpval != STK_IIC_STAT_TX_OK) { if (tmpval) STK_ERROR("stk_sensor_outb failed, status=0x%02x\n", tmpval); return 1; } else return 0; } static int stk_sensor_inb(struct stk_camera *dev, u8 reg, 
u8 *val) { int i = 0; int tmpval = 0; if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg)) return 1; if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_RX)) return 1; do { if (stk_camera_read_reg(dev, STK_IIC_STAT, &tmpval)) return 1; i++; } while (tmpval == 0 && i < MAX_RETRIES); if (tmpval != STK_IIC_STAT_RX_OK) { if (tmpval) STK_ERROR("stk_sensor_inb failed, status=0x%02x\n", tmpval); return 1; } if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval)) return 1; *val = (u8) tmpval; return 0; } static int stk_sensor_write_regvals(struct stk_camera *dev, struct regval *rv) { int ret; if (rv == NULL) return 0; while (rv->reg != 0xff || rv->val != 0xff) { ret = stk_sensor_outb(dev, rv->reg, rv->val); if (ret != 0) return ret; rv++; } return 0; } int stk_sensor_sleep(struct stk_camera *dev) { u8 tmp; return stk_sensor_inb(dev, REG_COM2, &tmp) || stk_sensor_outb(dev, REG_COM2, tmp|COM2_SSLEEP); } int stk_sensor_wakeup(struct stk_camera *dev) { u8 tmp; return stk_sensor_inb(dev, REG_COM2, &tmp) || stk_sensor_outb(dev, REG_COM2, tmp&~COM2_SSLEEP); } static struct regval ov_initvals[] = { {REG_CLKRC, CLK_PLL}, {REG_COM11, 0x01}, {0x6a, 0x7d}, {REG_AECH, 0x40}, {REG_GAIN, 0x00}, {REG_BLUE, 0x80}, {REG_RED, 0x80}, /* Do not enable fast AEC for now */ /*{REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC},*/ {REG_COM8, COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC}, {0x39, 0x50}, {0x38, 0x93}, {0x37, 0x00}, {0x35, 0x81}, {REG_COM5, 0x20}, {REG_COM1, 0x00}, {REG_COM3, 0x00}, {REG_COM4, 0x00}, {REG_PSHFT, 0x00}, {0x16, 0x07}, {0x33, 0xe2}, {0x34, 0xbf}, {REG_COM16, 0x00}, {0x96, 0x04}, /* Gamma curve values */ /* { 0x7a, 0x20 }, { 0x7b, 0x10 }, { 0x7c, 0x1e }, { 0x7d, 0x35 }, { 0x7e, 0x5a }, { 0x7f, 0x69 }, { 0x80, 0x76 }, { 0x81, 0x80 }, { 0x82, 0x88 }, { 0x83, 0x8f }, { 0x84, 0x96 }, { 0x85, 0xa3 }, { 0x86, 0xaf }, { 0x87, 0xc4 }, { 0x88, 0xd7 }, { 0x89, 0xe8 }, */ {REG_GFIX, 0x40}, {0x8e, 0x00}, {REG_COM12, 0x73}, {0x8f, 0xdf}, {0x8b, 0x06}, {0x8c, 0x20}, 
{0x94, 0x88}, {0x95, 0x88}, /* {REG_COM15, 0xc1}, TODO */ {0x29, 0x3f}, {REG_COM6, 0x42}, {REG_BD50MAX, 0x80}, {REG_HAECC6, 0xb8}, {REG_HAECC7, 0x92}, {REG_BD60MAX, 0x0a}, {0x90, 0x00}, {0x91, 0x00}, {REG_HAECC1, 0x00}, {REG_HAECC2, 0x00}, {REG_AEW, 0x68}, {REG_AEB, 0x5c}, {REG_VPT, 0xc3}, {REG_COM9, 0x2e}, {0x2a, 0x00}, {0x2b, 0x00}, {0xff, 0xff}, /* END MARKER */ }; /* Probe the I2C bus and initialise the sensor chip */ int stk_sensor_init(struct stk_camera *dev) { u8 idl = 0; u8 idh = 0; if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { STK_ERROR("Sensor resetting failed\n"); return -ENODEV; } msleep(10); /* Read the manufacturer ID: ov = 0x7FA2 */ if (stk_sensor_inb(dev, REG_MIDH, &idh) || stk_sensor_inb(dev, REG_MIDL, &idl)) { STK_ERROR("Strange error reading sensor ID\n"); return -ENODEV; } if (idh != 0x7f || idl != 0xa2) { STK_ERROR("Huh? you don't have a sensor from ovt\n"); return -ENODEV; } if (stk_sensor_inb(dev, REG_PID, &idh) || stk_sensor_inb(dev, REG_VER, &idl)) { STK_ERROR("Could not read sensor model\n"); return -ENODEV; } stk_sensor_write_regvals(dev, ov_initvals); msleep(10); STK_INFO("OmniVision sensor detected, id %02X%02X" " at address %x\n", idh, idl, SENSOR_ADDRESS); return 0; } /* V4L2_PIX_FMT_UYVY */ static struct regval ov_fmt_uyvy[] = { {REG_TSLB, TSLB_YLAST|0x08 }, { 0x4f, 0x80 }, /* "matrix coefficient 1" */ { 0x50, 0x80 }, /* "matrix coefficient 2" */ { 0x51, 0 }, /* vb */ { 0x52, 0x22 }, /* "matrix coefficient 4" */ { 0x53, 0x5e }, /* "matrix coefficient 5" */ { 0x54, 0x80 }, /* "matrix coefficient 6" */ {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, {REG_COM15, COM15_R00FF }, {0xff, 0xff}, /* END MARKER */ }; /* V4L2_PIX_FMT_YUYV */ static struct regval ov_fmt_yuyv[] = { {REG_TSLB, 0 }, { 0x4f, 0x80 }, /* "matrix coefficient 1" */ { 0x50, 0x80 }, /* "matrix coefficient 2" */ { 0x51, 0 }, /* vb */ { 0x52, 0x22 }, /* 
"matrix coefficient 4" */ { 0x53, 0x5e }, /* "matrix coefficient 5" */ { 0x54, 0x80 }, /* "matrix coefficient 6" */ {REG_COM13, COM13_UVSAT|COM13_CMATRIX}, {REG_COM15, COM15_R00FF }, {0xff, 0xff}, /* END MARKER */ }; /* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb */ static struct regval ov_fmt_rgbr[] = { { REG_RGB444, 0 }, /* No RGB444 please */ {REG_TSLB, 0x00}, { REG_COM1, 0x0 }, { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ { 0x50, 0xb3 }, /* "matrix coefficient 2" */ { 0x51, 0 }, /* vb */ { 0x52, 0x3d }, /* "matrix coefficient 4" */ { 0x53, 0xa7 }, /* "matrix coefficient 5" */ { 0x54, 0xe4 }, /* "matrix coefficient 6" */ { REG_COM13, COM13_GAMMA }, { REG_COM15, COM15_RGB565|COM15_R00FF }, { 0xff, 0xff }, }; /* V4L2_PIX_FMT_RGB565 gggbbbbb rrrrrggg */ static struct regval ov_fmt_rgbp[] = { { REG_RGB444, 0 }, /* No RGB444 please */ {REG_TSLB, TSLB_BYTEORD }, { REG_COM1, 0x0 }, { REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */ { 0x4f, 0xb3 }, /* "matrix coefficient 1" */ { 0x50, 0xb3 }, /* "matrix coefficient 2" */ { 0x51, 0 }, /* vb */ { 0x52, 0x3d }, /* "matrix coefficient 4" */ { 0x53, 0xa7 }, /* "matrix coefficient 5" */ { 0x54, 0xe4 }, /* "matrix coefficient 6" */ { REG_COM13, COM13_GAMMA }, { REG_COM15, COM15_RGB565|COM15_R00FF }, { 0xff, 0xff }, }; /* V4L2_PIX_FMT_SRGGB8 */ static struct regval ov_fmt_bayer[] = { /* This changes color order */ {REG_TSLB, 0x40}, /* BGGR */ /* {REG_TSLB, 0x08}, */ /* BGGR with vertical image flipping */ {REG_COM15, COM15_R00FF }, {0xff, 0xff}, /* END MARKER */ }; /* * Store a set of start/stop values into the camera. */ static int stk_sensor_set_hw(struct stk_camera *dev, int hstart, int hstop, int vstart, int vstop) { int ret; unsigned char v; /* * Horizontal: 11 bits, top 8 live in hstart and hstop. Bottom 3 of * hstart are in href[2:0], bottom 3 of hstop in href[5:3]. There is * a mystery "edge offset" value in the top two bits of href. 
*/ ret = stk_sensor_outb(dev, REG_HSTART, (hstart >> 3) & 0xff); ret += stk_sensor_outb(dev, REG_HSTOP, (hstop >> 3) & 0xff); ret += stk_sensor_inb(dev, REG_HREF, &v); v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x7); msleep(10); ret += stk_sensor_outb(dev, REG_HREF, v); /* * Vertical: similar arrangement (note: this is different from ov7670.c) */ ret += stk_sensor_outb(dev, REG_VSTART, (vstart >> 3) & 0xff); ret += stk_sensor_outb(dev, REG_VSTOP, (vstop >> 3) & 0xff); ret += stk_sensor_inb(dev, REG_VREF, &v); v = (v & 0xc0) | ((vstop & 0x7) << 3) | (vstart & 0x7); msleep(10); ret += stk_sensor_outb(dev, REG_VREF, v); return ret; } int stk_sensor_configure(struct stk_camera *dev) { int com7; /* * We setup the sensor to output dummy lines in low-res modes, * so we don't get absurdly hight framerates. */ unsigned dummylines; int flip; struct regval *rv; switch (dev->vsettings.mode) { case MODE_QCIF: com7 = COM7_FMT_QCIF; dummylines = 604; break; case MODE_QVGA: com7 = COM7_FMT_QVGA; dummylines = 267; break; case MODE_CIF: com7 = COM7_FMT_CIF; dummylines = 412; break; case MODE_VGA: com7 = COM7_FMT_VGA; dummylines = 11; break; case MODE_SXGA: com7 = COM7_FMT_SXGA; dummylines = 0; break; default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode); return -EFAULT; } switch (dev->vsettings.palette) { case V4L2_PIX_FMT_UYVY: com7 |= COM7_YUV; rv = ov_fmt_uyvy; break; case V4L2_PIX_FMT_YUYV: com7 |= COM7_YUV; rv = ov_fmt_yuyv; break; case V4L2_PIX_FMT_RGB565: com7 |= COM7_RGB; rv = ov_fmt_rgbp; break; case V4L2_PIX_FMT_RGB565X: com7 |= COM7_RGB; rv = ov_fmt_rgbr; break; case V4L2_PIX_FMT_SBGGR8: com7 |= COM7_PBAYER; rv = ov_fmt_bayer; break; default: STK_ERROR("Unsupported colorspace\n"); return -EFAULT; } /*FIXME sometimes the sensor go to a bad state stk_sensor_write_regvals(dev, ov_initvals); */ stk_sensor_outb(dev, REG_COM7, com7); msleep(50); stk_sensor_write_regvals(dev, rv); flip = (dev->vsettings.vflip?MVFP_FLIP:0) | (dev->vsettings.hflip?MVFP_MIRROR:0); 
stk_sensor_outb(dev, REG_MVFP, flip); if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8 && !dev->vsettings.vflip) stk_sensor_outb(dev, REG_TSLB, 0x08); stk_sensor_outb(dev, REG_ADVFH, dummylines >> 8); stk_sensor_outb(dev, REG_ADVFL, dummylines & 0xff); msleep(50); switch (dev->vsettings.mode) { case MODE_VGA: if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) STK_ERROR("stk_sensor_set_hw failed (VGA)\n"); break; case MODE_SXGA: case MODE_CIF: case MODE_QVGA: case MODE_QCIF: /*FIXME These settings seem ignored by the sensor if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) STK_ERROR("stk_sensor_set_hw failed (SXGA)\n"); */ break; } msleep(10); return 0; } int stk_sensor_set_brightness(struct stk_camera *dev, int br) { if (br < 0 || br > 0xff) return -EINVAL; stk_sensor_outb(dev, REG_AEB, max(0x00, br - 6)); stk_sensor_outb(dev, REG_AEW, min(0xff, br + 6)); return 0; }
gpl-2.0
chenyu105/linux
drivers/parisc/lasi.c
14566
6308
/* * LASI Device Driver * * (c) Copyright 1999 Red Hat Software * Portions (c) Copyright 1999 The Puffin Group Inc. * Portions (c) Copyright 1999 Hewlett-Packard * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Alan Cox <alan@redhat.com> and * Alex deVries <alex@onefishtwo.ca> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/types.h> #include <asm/io.h> #include <asm/hardware.h> #include <asm/led.h> #include "gsc.h" #define LASI_VER 0xC008 /* LASI Version */ #define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */ #define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */ static void lasi_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x74: irq = 7; break; /* Centronics */ case 0x7B: irq = 13; break; /* Audio */ case 0x81: irq = 14; break; /* Lasi itself */ case 0x82: irq = 9; break; /* SCSI */ case 0x83: irq = 20; break; /* Floppy */ case 0x84: irq = 26; break; /* PS/2 Keyboard */ case 0x87: irq = 18; break; /* ISDN */ case 0x8A: irq = 8; break; /* LAN */ case 0x8C: irq = 5; break; /* RS232 */ case 0x8D: irq = (dev->hw_path == 13) ? 16 : 17; break; /* Telephone */ default: return; /* unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); } static void __init lasi_init_irq(struct gsc_asic *this_lasi) { unsigned long lasi_base = this_lasi->hpa; /* Stop LASI barking for a bit */ gsc_writel(0x00000000, lasi_base+OFFSET_IMR); /* clear pending interrupts */ gsc_readl(lasi_base+OFFSET_IRR); /* We're not really convinced we want to reset the onboard * devices. Firmware does it for us... 
*/ /* Resets */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/ /* Parallel */ if(pdc_add_valid(lasi_base+0x4004) == PDC_OK) gsc_writel(0xFFFFFFFF, lasi_base+0x4004); /* Audio */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/ /* Serial */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/ /* SCSI */ gsc_writel(0xFFFFFFFF, lasi_base+0x7000); /* LAN */ gsc_writel(0xFFFFFFFF, lasi_base+0x8000); /* Keyboard */ gsc_writel(0xFFFFFFFF, lasi_base+0xA000); /* FDC */ /* Ok we hit it on the head with a hammer, our Dog is now ** comatose and muzzled. Devices will now unmask LASI ** interrupts as they are registered as irq's in the LASI range. */ /* XXX: I thought it was `awks that got `it on the `ead with an * `ammer. -- willy */ } /* ** lasi_led_init() ** ** lasi_led_init() initializes the LED controller on the LASI. ** ** Since Mirage and Electra machines use a different LED ** address register, we need to check for these machines ** explicitly. */ #ifndef CONFIG_CHASSIS_LCD_LED #define lasi_led_init(x) /* nothing */ #else static void __init lasi_led_init(unsigned long lasi_hpa) { unsigned long datareg; switch (CPU_HVERSION) { /* Gecko machines have only one single LED, which can be permanently turned on by writing a zero into the power control register. 
*/ case 0x600: /* Gecko (712/60) */ case 0x601: /* Gecko (712/80) */ case 0x602: /* Gecko (712/100) */ case 0x603: /* Anole 64 (743/64) */ case 0x604: /* Anole 100 (743/100) */ case 0x605: /* Gecko (712/120) */ datareg = lasi_hpa + 0x0000C000; gsc_writeb(0, datareg); return; /* no need to register the LED interrupt-function */ /* Mirage and Electra machines need special offsets */ case 0x60A: /* Mirage Jr (715/64) */ case 0x60B: /* Mirage 100 */ case 0x60C: /* Mirage 100+ */ case 0x60D: /* Electra 100 */ case 0x60E: /* Electra 120 */ datareg = lasi_hpa - 0x00020000; break; default: datareg = lasi_hpa + 0x0000C000; break; } register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, datareg); } #endif /* * lasi_power_off * * Function for lasi to turn off the power. This is accomplished by setting a * 1 to PWR_ON_L in the Power Control Register * */ static unsigned long lasi_power_off_hpa __read_mostly; static void lasi_power_off(void) { unsigned long datareg; /* calculate addr of the Power Control Register */ datareg = lasi_power_off_hpa + 0x0000C000; /* Power down the machine */ gsc_writel(0x02, datareg); } static int __init lasi_init_chip(struct parisc_device *dev) { extern void (*chassis_power_off)(void); struct gsc_asic *lasi; struct gsc_irq gsc_irq; int ret; lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); if (!lasi) return -ENOMEM; lasi->name = "Lasi"; lasi->hpa = dev->hpa.start; /* Check the 4-bit (yes, only 4) version register */ lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf; printk(KERN_INFO "%s version %d at 0x%lx found.\n", lasi->name, lasi->version, lasi->hpa); /* initialize the chassis LEDs really early */ lasi_led_init(lasi->hpa); /* Stop LASI barking for a bit */ lasi_init_irq(lasi); /* the IRQ lasi should use */ dev->irq = gsc_alloc_irq(&gsc_irq); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); kfree(lasi); return -EBUSY; } lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, 
gsc_asic_intr, 0, "lasi", lasi); if (ret < 0) { kfree(lasi); return ret; } /* enable IRQ's for devices below LASI */ gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, lasi); if (ret) { kfree(lasi); return ret; } gsc_fixup_irqs(dev, lasi, lasi_choose_irq); /* initialize the power off function */ /* FIXME: Record the LASI HPA for the power off function. This should * ensure that only the first LASI (the one controlling the power off) * should set the HPA here */ lasi_power_off_hpa = lasi->hpa; chassis_power_off = lasi_power_off; return ret; } static struct parisc_device_id lasi_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 }, { 0, } }; struct parisc_driver lasi_driver = { .name = "lasi", .id_table = lasi_tbl, .probe = lasi_init_chip, };
gpl-2.0
aimaletdinow/LABS
drivers/parisc/lasi.c
14566
6308
/* * LASI Device Driver * * (c) Copyright 1999 Red Hat Software * Portions (c) Copyright 1999 The Puffin Group Inc. * Portions (c) Copyright 1999 Hewlett-Packard * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Alan Cox <alan@redhat.com> and * Alex deVries <alex@onefishtwo.ca> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/types.h> #include <asm/io.h> #include <asm/hardware.h> #include <asm/led.h> #include "gsc.h" #define LASI_VER 0xC008 /* LASI Version */ #define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */ #define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */ static void lasi_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x74: irq = 7; break; /* Centronics */ case 0x7B: irq = 13; break; /* Audio */ case 0x81: irq = 14; break; /* Lasi itself */ case 0x82: irq = 9; break; /* SCSI */ case 0x83: irq = 20; break; /* Floppy */ case 0x84: irq = 26; break; /* PS/2 Keyboard */ case 0x87: irq = 18; break; /* ISDN */ case 0x8A: irq = 8; break; /* LAN */ case 0x8C: irq = 5; break; /* RS232 */ case 0x8D: irq = (dev->hw_path == 13) ? 16 : 17; break; /* Telephone */ default: return; /* unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); } static void __init lasi_init_irq(struct gsc_asic *this_lasi) { unsigned long lasi_base = this_lasi->hpa; /* Stop LASI barking for a bit */ gsc_writel(0x00000000, lasi_base+OFFSET_IMR); /* clear pending interrupts */ gsc_readl(lasi_base+OFFSET_IRR); /* We're not really convinced we want to reset the onboard * devices. Firmware does it for us... 
*/ /* Resets */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/ /* Parallel */ if(pdc_add_valid(lasi_base+0x4004) == PDC_OK) gsc_writel(0xFFFFFFFF, lasi_base+0x4004); /* Audio */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/ /* Serial */ /* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/ /* SCSI */ gsc_writel(0xFFFFFFFF, lasi_base+0x7000); /* LAN */ gsc_writel(0xFFFFFFFF, lasi_base+0x8000); /* Keyboard */ gsc_writel(0xFFFFFFFF, lasi_base+0xA000); /* FDC */ /* Ok we hit it on the head with a hammer, our Dog is now ** comatose and muzzled. Devices will now unmask LASI ** interrupts as they are registered as irq's in the LASI range. */ /* XXX: I thought it was `awks that got `it on the `ead with an * `ammer. -- willy */ } /* ** lasi_led_init() ** ** lasi_led_init() initializes the LED controller on the LASI. ** ** Since Mirage and Electra machines use a different LED ** address register, we need to check for these machines ** explicitly. */ #ifndef CONFIG_CHASSIS_LCD_LED #define lasi_led_init(x) /* nothing */ #else static void __init lasi_led_init(unsigned long lasi_hpa) { unsigned long datareg; switch (CPU_HVERSION) { /* Gecko machines have only one single LED, which can be permanently turned on by writing a zero into the power control register. 
*/ case 0x600: /* Gecko (712/60) */ case 0x601: /* Gecko (712/80) */ case 0x602: /* Gecko (712/100) */ case 0x603: /* Anole 64 (743/64) */ case 0x604: /* Anole 100 (743/100) */ case 0x605: /* Gecko (712/120) */ datareg = lasi_hpa + 0x0000C000; gsc_writeb(0, datareg); return; /* no need to register the LED interrupt-function */ /* Mirage and Electra machines need special offsets */ case 0x60A: /* Mirage Jr (715/64) */ case 0x60B: /* Mirage 100 */ case 0x60C: /* Mirage 100+ */ case 0x60D: /* Electra 100 */ case 0x60E: /* Electra 120 */ datareg = lasi_hpa - 0x00020000; break; default: datareg = lasi_hpa + 0x0000C000; break; } register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, datareg); } #endif /* * lasi_power_off * * Function for lasi to turn off the power. This is accomplished by setting a * 1 to PWR_ON_L in the Power Control Register * */ static unsigned long lasi_power_off_hpa __read_mostly; static void lasi_power_off(void) { unsigned long datareg; /* calculate addr of the Power Control Register */ datareg = lasi_power_off_hpa + 0x0000C000; /* Power down the machine */ gsc_writel(0x02, datareg); } static int __init lasi_init_chip(struct parisc_device *dev) { extern void (*chassis_power_off)(void); struct gsc_asic *lasi; struct gsc_irq gsc_irq; int ret; lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); if (!lasi) return -ENOMEM; lasi->name = "Lasi"; lasi->hpa = dev->hpa.start; /* Check the 4-bit (yes, only 4) version register */ lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf; printk(KERN_INFO "%s version %d at 0x%lx found.\n", lasi->name, lasi->version, lasi->hpa); /* initialize the chassis LEDs really early */ lasi_led_init(lasi->hpa); /* Stop LASI barking for a bit */ lasi_init_irq(lasi); /* the IRQ lasi should use */ dev->irq = gsc_alloc_irq(&gsc_irq); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); kfree(lasi); return -EBUSY; } lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, 
gsc_asic_intr, 0, "lasi", lasi); if (ret < 0) { kfree(lasi); return ret; } /* enable IRQ's for devices below LASI */ gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, lasi); if (ret) { kfree(lasi); return ret; } gsc_fixup_irqs(dev, lasi, lasi_choose_irq); /* initialize the power off function */ /* FIXME: Record the LASI HPA for the power off function. This should * ensure that only the first LASI (the one controlling the power off) * should set the HPA here */ lasi_power_off_hpa = lasi->hpa; chassis_power_off = lasi_power_off; return ret; } static struct parisc_device_id lasi_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 }, { 0, } }; struct parisc_driver lasi_driver = { .name = "lasi", .id_table = lasi_tbl, .probe = lasi_init_chip, };
gpl-2.0
CyanogenMod/android_kernel_acer_t20-common
arch/sparc/kernel/pci_common.c
231
12942
/* pci_common.c: PCI controller common support. * * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/oplib.h> #include "pci_impl.h" #include "pci_sun4v.h" static int config_out_of_range(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { if (bus < pbm->pci_first_busno || bus > pbm->pci_last_busno) return 1; return 0; } static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { unsigned long rbits = pbm->config_space_reg_bits; if (config_out_of_range(pbm, bus, devfn, reg)) return NULL; reg = (reg & ((1 << rbits) - 1)); devfn <<= rbits; bus <<= rbits + 8; return (void *) (pbm->config_space | bus | devfn | reg); } /* At least on Sabre, it is necessary to access all PCI host controller * registers at their natural size, otherwise zeros are returned. * Strange but true, and I see no language in the UltraSPARC-IIi * programmer's manual that mentions this even indirectly. 
*/ static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 *value) { u32 tmp32, *addr; u16 tmp16; u8 tmp8; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = (unsigned long) addr; align &= ~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) *value = tmp16 >> 8; else *value = tmp16 & 0xff; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; } break; case 2: if (where < 8) { pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; pci_config_read8(((u8 *)addr) + 1, &tmp8); *value |= ((u32) tmp8) << 8; } break; case 4: tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where, 2, &tmp32); *value = tmp32; tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where + 2, 2, &tmp32); *value |= tmp32 << 16; break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; u16 tmp16; u8 tmp8; switch (size) { case 1: *value = 0xff; break; case 2: *value = 0xffff; break; case 4: *value = 0xffffffff; break; } if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_read_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; break; case 2: if (where & 0x01) { printk("pci_read_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; break; case 4: if (where & 0x03) { printk("pci_read_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read32(addr, value); 
break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 value) { u32 *addr; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = (unsigned long) addr; u16 tmp16; align &= ~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) { tmp16 &= 0x00ff; tmp16 |= value << 8; } else { tmp16 &= 0xff00; tmp16 |= value; } pci_config_write16((u16 *)align, tmp16); } else pci_config_write8((u8 *)addr, value); break; case 2: if (where < 8) { pci_config_write16((u16 *)addr, value); } else { pci_config_write8((u8 *)addr, value & 0xff); pci_config_write8(((u8 *)addr) + 1, value >> 8); } break; case 4: sun4u_write_pci_cfg_host(pbm, bus, devfn, where, 2, value & 0xffff); sun4u_write_pci_cfg_host(pbm, bus, devfn, where + 2, 2, value >> 16); break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_write8((u8 *)addr, value); break; case 2: if (where & 0x01) { printk("pci_write_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write16((u16 *)addr, value); break; case 4: if (where & 0x03) { printk("pci_write_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write32(addr, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4u_pci_ops = { .read = sun4u_read_pci_cfg, .write = sun4u_write_pci_cfg, }; static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 
*value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; if (config_out_of_range(pbm, bus, devfn, where)) { ret = ~0UL; } else { ret = pci_sun4v_config_get(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size); } switch (size) { case 1: *value = ret & 0xff; break; case 2: *value = ret & 0xffff; break; case 4: *value = ret & 0xffffffff; break; }; return PCIBIOS_SUCCESSFUL; } static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); if (config_out_of_range(pbm, bus, devfn, where)) { /* Do nothing. */ } else { /* We don't check for hypervisor errors here, but perhaps * we should and influence our return value depending upon * what kind of error is thrown. */ pci_sun4v_config_put(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4v_pci_ops = { .read = sun4v_read_pci_cfg, .write = sun4v_write_pci_cfg, }; void pci_get_pbm_props(struct pci_pbm_info *pbm) { const u32 *val = of_get_property(pbm->op->dev.of_node, "bus-range", NULL); pbm->pci_first_busno = val[0]; pbm->pci_last_busno = val[1]; val = of_get_property(pbm->op->dev.of_node, "ino-bitmap", NULL); if (val) { pbm->ino_bitmap = (((u64)val[1] << 32UL) | ((u64)val[0] << 0UL)); } } static void pci_register_legacy_regions(struct resource *io_res, struct resource *mem_res) { struct resource *p; /* VGA Video RAM. 
*/ p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "Video RAM area"; p->start = mem_res->start + 0xa0000UL; p->end = p->start + 0x1ffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "System ROM"; p->start = mem_res->start + 0xf0000UL; p->end = p->start + 0xffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "Video ROM"; p->start = mem_res->start + 0xc0000UL; p->end = p->start + 0x7fffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); } static void pci_register_iommu_region(struct pci_pbm_info *pbm) { const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); if (vdma) { struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { pr_info("%s: Cannot allocate IOMMU resource.\n", pbm->name); return; } rp->name = "IOMMU"; rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; rp->end = rp->start + (unsigned long) vdma[1] - 1UL; rp->flags = IORESOURCE_BUSY; if (request_resource(&pbm->mem_space, rp)) { pr_info("%s: Unable to request IOMMU resource.\n", pbm->name); kfree(rp); } } } void pci_determine_mem_io_space(struct pci_pbm_info *pbm) { const struct linux_prom_pci_ranges *pbm_ranges; int i, saw_mem, saw_io; int num_pbm_ranges; saw_mem = saw_io = 0; pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i); if (!pbm_ranges) { prom_printf("PCI: Fatal error, missing PBM ranges property " " for %s\n", pbm->name); prom_halt(); } num_pbm_ranges = i / sizeof(*pbm_ranges); for (i = 0; i < num_pbm_ranges; i++) { const struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; unsigned long a, size; u32 parent_phys_hi, parent_phys_lo; u32 size_hi, size_lo; int type; parent_phys_hi = pr->parent_phys_hi; parent_phys_lo = pr->parent_phys_lo; if (tlb_type == hypervisor) parent_phys_hi &= 0x0fffffff; size_hi = pr->size_hi; size_lo = pr->size_lo; type = (pr->child_phys_hi >> 
24) & 0x3; a = (((unsigned long)parent_phys_hi << 32UL) | ((unsigned long)parent_phys_lo << 0UL)); size = (((unsigned long)size_hi << 32UL) | ((unsigned long)size_lo << 0UL)); switch (type) { case 0: /* PCI config space, 16MB */ pbm->config_space = a; break; case 1: /* 16-bit IO space, 16MB */ pbm->io_space.start = a; pbm->io_space.end = a + size - 1UL; pbm->io_space.flags = IORESOURCE_IO; saw_io = 1; break; case 2: /* 32-bit MEM space, 2GB */ pbm->mem_space.start = a; pbm->mem_space.end = a + size - 1UL; pbm->mem_space.flags = IORESOURCE_MEM; saw_mem = 1; break; case 3: /* XXX 64-bit MEM handling XXX */ default: break; }; } if (!saw_io || !saw_mem) { prom_printf("%s: Fatal error, missing %s PBM range.\n", pbm->name, (!saw_io ? "IO" : "MEM")); prom_halt(); } printk("%s: PCI IO[%llx] MEM[%llx]\n", pbm->name, pbm->io_space.start, pbm->mem_space.start); pbm->io_space.name = pbm->mem_space.name = pbm->name; request_resource(&ioport_resource, &pbm->io_space); request_resource(&iomem_resource, &pbm->mem_space); pci_register_legacy_regions(&pbm->io_space, &pbm->mem_space); pci_register_iommu_region(pbm); } /* Generic helper routines for PCI error reporting. 
*/ void pci_scan_for_target_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s saw Target Abort [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_target_abort(pbm, bus); } void pci_scan_for_master_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_REC_MASTER_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s received Master Abort [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_master_abort(pbm, bus); } void pci_scan_for_parity_error(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s saw Parity Error [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_parity_error(pbm, bus); }
gpl-2.0
thederekjay/SGH-T989_GB_Kernel
tools/perf/builtin-report.c
743
14427
/* * builtin-report.c * * Builtin report command: Analyze the perf.data input file, * look up and read DSOs and symbol information and display * a histogram of results, along various sorting keys. */ #include "builtin.h" #include "util/util.h" #include "util/color.h" #include <linux/list.h> #include "util/cache.h" #include <linux/rbtree.h> #include "util/symbol.h" #include "util/callchain.h" #include "util/strlist.h" #include "util/values.h" #include "perf.h" #include "util/debug.h" #include "util/header.h" #include "util/session.h" #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" static char const *input_name = "perf.data"; static bool force; static bool hide_unresolved; static bool dont_use_callchains; static bool show_threads; static struct perf_read_values show_threads_values; static const char default_pretty_printing_style[] = "normal"; static const char *pretty_printing_style = default_pretty_printing_style; static char callchain_default_opt[] = "fractal,0.5"; static struct hists *perf_session__hists_findnew(struct perf_session *self, u64 event_stream, u32 type, u64 config) { struct rb_node **p = &self->hists_tree.rb_node; struct rb_node *parent = NULL; struct hists *iter, *new; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hists, rb_node); if (iter->config == config) return iter; if (config > iter->config) p = &(*p)->rb_right; else p = &(*p)->rb_left; } new = malloc(sizeof(struct hists)); if (new == NULL) return NULL; memset(new, 0, sizeof(struct hists)); new->event_stream = event_stream; new->config = config; new->type = type; rb_link_node(&new->rb_node, parent, p); rb_insert_color(&new->rb_node, &self->hists_tree); return new; } static int perf_session__add_hist_entry(struct perf_session *self, struct addr_location *al, struct sample_data *data) { struct map_symbol *syms = NULL; struct symbol *parent = NULL; int err = -ENOMEM; struct hist_entry *he; 
struct hists *hists; struct perf_event_attr *attr; if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) { syms = perf_session__resolve_callchain(self, al->thread, data->callchain, &parent); if (syms == NULL) return -ENOMEM; } attr = perf_header__find_attr(data->id, &self->header); if (attr) hists = perf_session__hists_findnew(self, data->id, attr->type, attr->config); else hists = perf_session__hists_findnew(self, data->id, 0, 0); if (hists == NULL) goto out_free_syms; he = __hists__add_entry(hists, al, parent, data->period); if (he == NULL) goto out_free_syms; err = 0; if (symbol_conf.use_callchain) { err = append_chain(he->callchain, data->callchain, syms, data->period); if (err) goto out_free_syms; } /* * Only in the newt browser we are doing integrated annotation, * so we don't allocated the extra space needed because the stdio * code will not use it. */ if (use_browser > 0) err = hist_entry__inc_addr_samples(he, al->addr); out_free_syms: free(syms); return err; } static int add_event_total(struct perf_session *session, struct sample_data *data, struct perf_event_attr *attr) { struct hists *hists; if (attr) hists = perf_session__hists_findnew(session, data->id, attr->type, attr->config); else hists = perf_session__hists_findnew(session, data->id, 0, 0); if (!hists) return -ENOMEM; hists->stats.total_period += data->period; /* * FIXME: add_event_total should be moved from here to * perf_session__process_event so that the proper hist is passed to * the event_op methods. 
*/ hists__inc_nr_events(hists, PERF_RECORD_SAMPLE); session->hists.stats.total_period += data->period; return 0; } static int process_sample_event(event_t *event, struct perf_session *session) { struct sample_data data = { .period = 1, }; struct addr_location al; struct perf_event_attr *attr; event__parse_sample(event, session->sample_type, &data); dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, data.pid, data.tid, data.ip, data.period); if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { unsigned int i; dump_printf("... chain: nr:%Lu\n", data.callchain->nr); if (!ip_callchain__valid(data.callchain, event)) { pr_debug("call-chain problem with event, " "skipping it.\n"); return 0; } if (dump_trace) { for (i = 0; i < data.callchain->nr; i++) dump_printf("..... %2d: %016Lx\n", i, data.callchain->ips[i]); } } if (event__preprocess_sample(event, session, &al, NULL) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } if (al.filtered || (hide_unresolved && al.sym == NULL)) return 0; if (perf_session__add_hist_entry(session, &al, &data)) { pr_debug("problem incrementing symbol period, skipping event\n"); return -1; } attr = perf_header__find_attr(data.id, &session->header); if (add_event_total(session, &data, attr)) { pr_debug("problem adding event period\n"); return -1; } return 0; } static int process_read_event(event_t *event, struct perf_session *session __used) { struct perf_event_attr *attr; attr = perf_header__find_attr(event->read.id, &session->header); if (show_threads) { const char *name = attr ? __event_name(attr->type, attr->config) : "unknown"; perf_read_values_add_value(&show_threads_values, event->read.pid, event->read.tid, event->read.id, name, event->read.value); } dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid, attr ? 
__event_name(attr->type, attr->config) : "FAIL", event->read.value); return 0; } static int perf_session__setup_sample_type(struct perf_session *self) { if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { fprintf(stderr, "selected --sort parent, but no" " callchain data. Did you call" " perf record without -g?\n"); return -EINVAL; } if (symbol_conf.use_callchain) { fprintf(stderr, "selected -g but no callchain data." " Did you call perf record without" " -g?\n"); return -1; } } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain" " params\n"); return -EINVAL; } } return 0; } static struct perf_event_ops event_ops = { .sample = process_sample_event, .mmap = event__process_mmap, .comm = event__process_comm, .exit = event__process_task, .fork = event__process_task, .lost = event__process_lost, .read = process_read_event, .attr = event__process_attr, .event_type = event__process_event_type, .tracing_data = event__process_tracing_data, .build_id = event__process_build_id, }; extern volatile int session_done; static void sig_handler(int sig __used) { session_done = 1; } static size_t hists__fprintf_nr_sample_events(struct hists *self, const char *evname, FILE *fp) { size_t ret; char unit; unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; nr_events = convert_unit(nr_events, &unit); ret = fprintf(fp, "# Events: %lu%c", nr_events, unit); if (evname != NULL) ret += fprintf(fp, " %s", evname); return ret + fprintf(fp, "\n#\n"); } static int hists__tty_browse_tree(struct rb_root *tree, const char *help) { struct rb_node *next = rb_first(tree); while (next) { struct hists *hists = rb_entry(next, struct hists, rb_node); const char *evname = NULL; if (rb_first(&hists->entries) != rb_last(&hists->entries)) evname = __event_name(hists->type, hists->config); 
hists__fprintf_nr_sample_events(hists, evname, stdout); hists__fprintf(hists, NULL, false, stdout); fprintf(stdout, "\n\n"); next = rb_next(&hists->rb_node); } if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) { fprintf(stdout, "#\n# (%s)\n#\n", help); if (show_threads) { bool style = !strcmp(pretty_printing_style, "raw"); perf_read_values_display(stdout, &show_threads_values, style); perf_read_values_destroy(&show_threads_values); } } return 0; } static int __cmd_report(void) { int ret = -EINVAL; struct perf_session *session; struct rb_node *next; const char *help = "For a higher level overview, try: perf report --sort comm,dso"; signal(SIGINT, sig_handler); session = perf_session__new(input_name, O_RDONLY, force, false); if (session == NULL) return -ENOMEM; if (show_threads) perf_read_values_init(&show_threads_values); ret = perf_session__setup_sample_type(session); if (ret) goto out_delete; ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; if (dump_trace) { perf_session__fprintf_nr_events(session, stdout); goto out_delete; } if (verbose > 3) perf_session__fprintf(session, stdout); if (verbose > 2) perf_session__fprintf_dsos(session, stdout); next = rb_first(&session->hists_tree); while (next) { struct hists *hists; hists = rb_entry(next, struct hists, rb_node); hists__collapse_resort(hists); hists__output_resort(hists); next = rb_next(&hists->rb_node); } if (use_browser > 0) hists__tui_browse_tree(&session->hists_tree, help); else hists__tty_browse_tree(&session->hists_tree, help); out_delete: perf_session__delete(session); return ret; } static int parse_callchain_opt(const struct option *opt __used, const char *arg, int unset) { char *tok, *tok2; char *endptr; /* * --no-call-graph */ if (unset) { dont_use_callchains = true; return 0; } symbol_conf.use_callchain = true; if (!arg) return 0; tok = strtok((char *)arg, ","); if (!tok) return -1; /* get the output mode */ if (!strncmp(tok, "graph", 
strlen(arg))) callchain_param.mode = CHAIN_GRAPH_ABS; else if (!strncmp(tok, "flat", strlen(arg))) callchain_param.mode = CHAIN_FLAT; else if (!strncmp(tok, "fractal", strlen(arg))) callchain_param.mode = CHAIN_GRAPH_REL; else if (!strncmp(tok, "none", strlen(arg))) { callchain_param.mode = CHAIN_NONE; symbol_conf.use_callchain = false; return 0; } else return -1; /* get the min percentage */ tok = strtok(NULL, ","); if (!tok) goto setup; tok2 = strtok(NULL, ","); callchain_param.min_percent = strtod(tok, &endptr); if (tok == endptr) return -1; if (tok2) callchain_param.print_limit = strtod(tok2, &endptr); setup: if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain params\n"); return -1; } return 0; } static const char * const report_usage[] = { "perf report [<options>] <command>", NULL }; static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), OPT_BOOLEAN('T', "threads", &show_threads, "Show per-thread event counters"), OPT_STRING(0, "pretty", &pretty_printing_style, "key", "pretty printing style key: normal raw"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths, "Don't shorten the pathnames taking into account the cwd"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu 
modes"), OPT_STRING('p', "parent", &parent_pattern, "regex", "regex filter to identify parent, see: '--sort parent'"), OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, "Only display entries with parent-match"), OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent", "Display callchains using output_type (graph, flat, fractal, or none) and min percent threshold. " "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, "width[,width...]", "don't try to adjust column width, use these fixed values"), OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' is reserved."), OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved, "Only display entries resolved to a symbol"), OPT_END() }; int cmd_report(int argc, const char **argv, const char *prefix __used) { argc = parse_options(argc, argv, options, report_usage, 0); if (strcmp(input_name, "-") != 0) setup_browser(); /* * Only in the newt browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio * implementation. */ if (use_browser > 0) symbol_conf.priv_size = sizeof(struct sym_priv); if (symbol__init() < 0) return -1; setup_sorting(report_usage, options); if (parent_pattern != default_parent_pattern) { if (sort_dimension__add("parent") < 0) return -1; sort_parent.elide = 1; } else symbol_conf.exclude_other = false; /* * Any (unrecognized) arguments left? 
*/ if (argc) usage_with_options(report_usage, options); sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); return __cmd_report(); }
gpl-2.0
mephistophilis/samsung_nowplus_kernel
drivers/infiniband/hw/qib/qib_init.c
743
42607
/* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/idr.h> #include "qib.h" #include "qib_common.h" /* * min buffers we want to have per context, after driver */ #define QIB_MIN_USER_CTXT_BUFCNT 7 #define QLOGIC_IB_R_SOFTWARE_MASK 0xFF #define QLOGIC_IB_R_SOFTWARE_SHIFT 24 #define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62) /* * Number of ctxts we are configured to use (to allow for more pio * buffers per ctxt, etc.) Zero means use chip value. 
*/ ushort qib_cfgctxts; module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); /* * If set, do not write to any regs if avoidable, hack to allow * check for deranged default register values. */ ushort qib_mini_init; module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO); MODULE_PARM_DESC(mini_init, "If set, do minimal diag init"); unsigned qib_n_krcv_queues; module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); /* * qib_wc_pat parameter: * 0 is WC via MTRR * 1 is WC via PAT * If PAT initialization fails, code reverts back to MTRR */ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); struct workqueue_struct *qib_wq; struct workqueue_struct *qib_cq_wq; static void verify_interrupt(unsigned long); static struct idr qib_unit_table; u32 qib_cpulist_count; unsigned long *qib_cpulist; /* set number of contexts we'll actually use */ void qib_set_ctxtcnt(struct qib_devdata *dd) { if (!qib_cfgctxts) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts < dd->num_pports) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts <= dd->ctxtcnt) dd->cfgctxts = qib_cfgctxts; else dd->cfgctxts = dd->ctxtcnt; } /* * Common code for creating the receive context array. */ int qib_create_ctxts(struct qib_devdata *dd) { unsigned i; int ret; /* * Allocate full ctxtcnt array, rather than just cfgctxts, because * cleanup iterates across all possible ctxts. 
*/ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); if (!dd->rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata array, " "failing\n"); ret = -ENOMEM; goto done; } /* create (one or more) kctxt */ for (i = 0; i < dd->first_user_ctxt; ++i) { struct qib_pportdata *ppd; struct qib_ctxtdata *rcd; if (dd->skip_kctxt_mask & (1 << i)) continue; ppd = dd->pport + (i % dd->num_pports); rcd = qib_create_ctxtdata(ppd, i); if (!rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata" " for Kernel ctxt, failing\n"); ret = -ENOMEM; goto done; } rcd->pkeys[0] = QIB_DEFAULT_P_KEY; rcd->seq_cnt = 1; } ret = 0; done: return ret; } /* * Common code for user and kernel context setup. */ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) { struct qib_devdata *dd = ppd->dd; struct qib_ctxtdata *rcd; rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); if (rcd) { INIT_LIST_HEAD(&rcd->qp_wait_list); rcd->ppd = ppd; rcd->dd = dd; rcd->cnt = 1; rcd->ctxt = ctxt; dd->rcd[ctxt] = rcd; dd->f_init_ctxt(rcd); /* * To avoid wasting a lot of memory, we allocate 32KB chunks * of physically contiguous memory, advance through it until * used up and then allocate more. Of course, we need * memory to store those extra pointers, now. 32KB seems to * be the most that is "safe" under memory pressure * (creating large files and then copying them over * NFS while doing lots of MPI jobs). The OOM killer can * get invoked, even though we say we can sleep and this can * cause significant system problems.... */ rcd->rcvegrbuf_size = 0x8000; rcd->rcvegrbufs_perchunk = rcd->rcvegrbuf_size / dd->rcvegrbufsize; rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + rcd->rcvegrbufs_perchunk - 1) / rcd->rcvegrbufs_perchunk; } return rcd; } /* * Common code for initializing the physical port structure. 
*/ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, u8 hw_pidx, u8 port) { ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ spin_lock_init(&ppd->sdma_lock); spin_lock_init(&ppd->lflags_lock); init_waitqueue_head(&ppd->state_wait); init_timer(&ppd->symerr_clear_timer); ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; ppd->symerr_clear_timer.data = (unsigned long)ppd; } static int init_pioavailregs(struct qib_devdata *dd) { int ret, pidx; u64 *status_page; dd->pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, GFP_KERNEL); if (!dd->pioavailregs_dma) { qib_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * We really want L2 cache aligned, but for current CPUs of * interest, they are the same. */ status_page = (u64 *) ((char *) dd->pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* device status comes first, for backwards compatibility */ dd->devstatusp = status_page; *status_page++ = 0; for (pidx = 0; pidx < dd->num_pports; ++pidx) { dd->pport[pidx].statusp = status_page; *status_page++ = 0; } /* * Setup buffer to hold freeze and other messages, accessible to * apps, following statusp. This is per-unit, not per port. */ dd->freezemsg = (char *) status_page; *dd->freezemsg = 0; /* length of msg buffer is "whatever is left" */ ret = (char *) status_page - (char *) dd->pioavailregs_dma; dd->freezelen = PAGE_SIZE - ret; ret = 0; done: return ret; } /** * init_shadow_tids - allocate the shadow TID array * @dd: the qlogic_ib device * * allocate the shadow TID array, so we can qib_munlock previous * entries. It may make more sense to move the pageshadow to the * ctxt data structure, so we only allocate memory for ctxts actually * in use, since we at 8k per ctxt, now. * We don't want failures here to prevent use of the driver/chip, * so no return value. 
*/ static void init_shadow_tids(struct qib_devdata *dd) { struct page **pages; dma_addr_t *addrs; pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); if (!pages) { qib_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); goto bail; } addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { qib_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); goto bail_free; } memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); dd->pageshadow = pages; dd->physshadow = addrs; return; bail_free: vfree(pages); bail: dd->pageshadow = NULL; } /* * Do initialization for device that is only needed on * first detect, not on resets. */ static int loadtime_init(struct qib_devdata *dd) { int ret = 0; if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { qib_dev_err(dd, "Driver only handles version %d, " "chip swversion is %d (%llx), failng\n", QIB_CHIP_SWVERSION, (int)(dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & QLOGIC_IB_R_SOFTWARE_MASK, (unsigned long long) dd->revision); ret = -ENOSYS; goto done; } if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK) qib_devinfo(dd->pcidev, "%s", dd->boardversion); spin_lock_init(&dd->pioavail_lock); spin_lock_init(&dd->sendctrl_lock); spin_lock_init(&dd->uctxt_lock); spin_lock_init(&dd->qib_diag_trans_lock); spin_lock_init(&dd->eep_st_lock); mutex_init(&dd->eep_lock); if (qib_mini_init) goto done; ret = init_pioavailregs(dd); init_shadow_tids(dd); qib_get_eeprom_info(dd); /* setup time (don't start yet) to verify we got interrupt */ init_timer(&dd->intrchk_timer); dd->intrchk_timer.function = verify_interrupt; dd->intrchk_timer.data = (unsigned long) dd; done: return ret; } /** * init_after_reset - re-initialize after a reset * @dd: the qlogic_ib device * * sanity check at least some of the values after reset, and * 
ensure no receive or transmit (explictly, in case reset * failed */ static int init_after_reset(struct qib_devdata *dd) { int i; /* * Ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize. This is mostly * for the driver data structures, not chip registers. */ for (i = 0; i < dd->num_pports; ++i) { /* * ctxt == -1 means "all contexts". Only really safe for * _dis_abling things, as here. */ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | QIB_RCVCTRL_INTRAVAIL_DIS | QIB_RCVCTRL_TAILUPD_DIS, -1); /* Redundant across ports for some, but no big deal. */ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | QIB_SENDCTRL_AVAIL_DIS); } return 0; } static void enable_chip(struct qib_devdata *dd) { u64 rcvmask; int i; /* * Enable PIO send, and update of PIOavail regs to memory. */ for (i = 0; i < dd->num_pports; ++i) dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | QIB_SENDCTRL_AVAIL_ENB); /* * Enable kernel ctxts' receive and receive interrupt. * Other ctxts done as user opens and inits them. */ rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB; for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { struct qib_ctxtdata *rcd = dd->rcd[i]; if (rcd) dd->f_rcvctrl(rcd->ppd, rcvmask, i); } } static void verify_interrupt(unsigned long opaque) { struct qib_devdata *dd = (struct qib_devdata *) opaque; if (!dd) return; /* being torn down */ /* * If we don't have a lid or any interrupts, let the user know and * don't bother checking again. */ if (dd->int_counter == 0) { if (!dd->f_intr_fallback(dd)) dev_err(&dd->pcidev->dev, "No interrupts detected, " "not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->intrchk_timer, jiffies + HZ/2); } } static void init_piobuf_state(struct qib_devdata *dd) { int i, pidx; u32 uctxts; /* * Ensure all buffers are free, and fifos empty. 
Buffers * are common, so only do once for port 0. * * After enable and qib_chg_pioavailkernel so we can safely * enable pioavail updates and PIOENABLE. After this, packets * are ready and able to go out. */ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); /* * If not all sendbufs are used, add the one to each of the lower * numbered contexts. pbufsctxt and lastctxt_piobuf are * calculated in chip-specific code because it may cause some * chip-specific adjustments to be made. */ uctxts = dd->cfgctxts - dd->first_user_ctxt; dd->ctxts_extrabuf = dd->pbufsctxt ? dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; /* * Set up the shadow copies of the piobufavail registers, * which we compare against the chip registers for now, and * the in memory DMA'ed copies of the registers. * By now pioavail updates to memory should have occurred, so * copy them into our working/shadow registers; this is in * case something went wrong with abort, but mostly to get the * initial values of the generation bit correct. */ for (i = 0; i < dd->pioavregs; i++) { __le64 tmp; tmp = dd->pioavailregs_dma[i]; /* * Don't need to worry about pioavailkernel here * because we will call qib_chg_pioavailkernel() later * in initialization, to busy out buffers as needed. */ dd->pioavailshadow[i] = le64_to_cpu(tmp); } while (i < ARRAY_SIZE(dd->pioavailshadow)) dd->pioavailshadow[i++] = 0; /* for debugging sanity */ /* after pioavailshadow is setup */ qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, TXCHK_CHG_TYPE_KERN, NULL); dd->f_initvl15_bufs(dd); } /** * qib_init - do the actual initialization sequence on the chip * @dd: the qlogic_ib device * @reinit: reinitializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. 
This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. */ int qib_init(struct qib_devdata *dd, int reinit) { int ret = 0, pidx, lastfail = 0; u32 portok = 0; unsigned i; struct qib_ctxtdata *rcd; struct qib_pportdata *ppd; unsigned long flags; /* Set linkstate to unknown, so we can watch for a transition. */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED | QIBL_LINKDOWN | QIBL_LINKINIT | QIBL_LINKV); spin_unlock_irqrestore(&ppd->lflags_lock, flags); } if (reinit) ret = init_after_reset(dd); else ret = loadtime_init(dd); if (ret) goto done; /* Bypass most chip-init, to get to device creation */ if (qib_mini_init) return 0; ret = dd->f_late_initreg(dd); if (ret) goto done; /* dd->rcd can be NULL if early init failed */ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { /* * Set up the (kernel) rcvhdr queue and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of ctxt 0 ctxtdata as well. 
*/ rcd = dd->rcd[i]; if (!rcd) continue; lastfail = qib_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = qib_setup_eagerbufs(rcd); if (lastfail) { qib_dev_err(dd, "failed to allocate kernel ctxt's " "rcvhdrq and/or egr bufs\n"); continue; } } for (pidx = 0; pidx < dd->num_pports; ++pidx) { int mtu; if (lastfail) ret = lastfail; ppd = dd->pport + pidx; mtu = ib_mtu_enum_to_int(qib_ibmtu); if (mtu == -1) { mtu = QIB_DEFAULT_MTU; qib_ibmtu = 0; /* don't leave invalid value */ } /* set max we can ever have for this driver load */ ppd->init_ibmaxlen = min(mtu > 2048 ? dd->piosize4k : dd->piosize2k, dd->rcvegrbufsize + (dd->rcvhdrentsize << 2)); /* * Have to initialize ibmaxlen, but this will normally * change immediately in qib_set_mtu(). */ ppd->ibmaxlen = ppd->init_ibmaxlen; qib_set_mtu(ppd, mtu); spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags |= QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); lastfail = dd->f_bringup_serdes(ppd); if (lastfail) { qib_devinfo(dd->pcidev, "Failed to bringup IB port %u\n", ppd->port); lastfail = -ENETDOWN; continue; } /* let link come up, and enable IBC */ spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); portok++; } if (!portok) { /* none of the ports initialized */ if (!ret && lastfail) ret = lastfail; else if (!ret) ret = -ENETDOWN; /* but continue on, so we can debug cause */ } enable_chip(dd); init_piobuf_state(dd); done: if (!ret) { /* chip is OK for user apps; mark it as initialized */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; /* * Set status even if port serdes is not initialized * so that diags will work. 
*/ *ppd->statusp |= QIB_STATUS_CHIP_PRESENT | QIB_STATUS_INITTED; if (!ppd->link_speed_enabled) continue; if (dd->flags & QIB_HAS_SEND_DMA) ret = qib_setup_sdma(ppd); init_timer(&ppd->hol_timer); ppd->hol_timer.function = qib_hol_event; ppd->hol_timer.data = (unsigned long)ppd; ppd->hol_state = QIB_HOL_UP; } /* now we can enable all interrupts from the chip */ dd->f_set_intr_state(dd, 1); /* * Setup to verify we get an interrupt, and fallback * to an alternate if necessary and possible. */ mod_timer(&dd->intrchk_timer, jiffies + HZ/2); /* start stats retrieval timer */ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); } /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } /* * These next two routines are placeholders in case we don't have per-arch * code for controlling write combining. If explicit control of write * combining is not available, performance will probably be awful. */ int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) { return -EOPNOTSUPP; } void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) { } static inline struct qib_devdata *__qib_lookup(int unit) { return idr_find(&qib_unit_table, unit); } struct qib_devdata *qib_lookup(int unit) { struct qib_devdata *dd; unsigned long flags; spin_lock_irqsave(&qib_devs_lock, flags); dd = __qib_lookup(unit); spin_unlock_irqrestore(&qib_devs_lock, flags); return dd; } /* * Stop the timers during unit shutdown, or after an error late * in initialization. 
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	/*
	 * .data is used as an "armed" flag here: it is cleared after the
	 * sync delete so a second call is a no-op for the device timers.
	 */
	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	/* per-port timers; their .data fields are left set by design */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	/* first pass: mark every port's link state down / unconfigured */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	/* second pass: stop receive and quiesce sends on each port */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	/* third pass: LEDs off, DMA torn down, serdes quiet */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);
	}

	qib_update_eeprom_log(dd);
}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		/* eager buffers were allocated chunk-by-chunk; free likewise */
		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).   On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			 "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		qib_devinfo(dd->pcidev,
			 "Couldn't get memory for checking PIO perf,"
			 " skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.   Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		/* +64: skip the length/header area written above */
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			    "Performance problem: bandwidth to PIO buffers is "
			    "only %u MiB/sec\n",
			    lcnt / (u32) emsecs);

	preempt_enable();
	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

/*
 * Release a unit's devdata: drop it from the unit table and device
 * list under the lock, then hand the embedded ib_device back to the
 * verbs allocator (which frees the whole structure).
 */
void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret;

	/* pre-load idr memory outside the spinlock */
	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&qib_devs_lock, flags);
	ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
	if (ret >= 0)
		list_add(&dd->list, &qib_dev_list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		ib_dealloc_device(&dd->verbs_dev.ibdev);
		dd = ERR_PTR(ret);
		goto bail;
	}

	/* one-time, shared across units: cpu affinity bookkeeping */
	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();
		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
				      sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
		else
			qib_early_err(&pdev->dev, "Could not alloc cpulist "
				      "info, cpu affinity might be wrong\n");
	}

bail:
	return dd;
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void __devexit qib_remove_one(struct pci_dev *);
static int __devinit qib_init_one(struct pci_dev *,
				  const struct pci_device_id *);

#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = __devexit_p(qib_remove_one),
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qlogic_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	qib_wq = create_workqueue("qib");
	if (!qib_wq) {
		ret = -ENOMEM;
		goto bail_dev;
	}

	qib_cq_wq = create_singlethread_workqueue("qib_cq");
	if (!qib_cq_wq) {
		ret = -ENOMEM;
		goto bail_wq;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);
	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail_cq_wq;
	}

	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_unit:
	idr_destroy(&qib_unit_table);
bail_cq_wq:
	destroy_workqueue(qib_cq_wq);
bail_wq:
	destroy_workqueue(qib_wq);
bail_dev:
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qlogic_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qlogic_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		printk(KERN_ERR QIB_DRV_NAME ": "
			"Unable to cleanup counter filesystem: "
			"error %d\n", -ret);

	pci_unregister_driver(&qib_driver);

	destroy_workqueue(qib_wq);
	destroy_workqueue(qib_cq_wq);

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qlogic_ib_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

	if (!qib_wc_pat)
		qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i, cnt = 0;

		/* unmap and release any user pages still pinned per-TID */
		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}

		tmpp = dd->pageshadow;
		dd->pageshadow = NULL;
		vfree(tmpp);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
	kfree(dd->boardname);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

/* PCI probe entry point: bind, init, and register one unit */
static int __devinit qib_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialiation, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
		      "work if CONFIG_PCI_MSI is not enabled\n",
		      ent->device);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev, "Failing on unknown QLogic "
			      "deviceid 0x%x\n", ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		/* unwind partial setup, but keep enough around for diags */
		qib_stop_timers(dd);
		flush_scheduled_work();
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	if (!qib_wc_pat) {
		ret = qib_enable_wc(dd);
		if (ret) {
			qib_dev_err(dd, "Write combining not enabled "
				    "(err %d): performance may be poor\n",
				    -ret);
			ret = 0;
		}
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

/* PCI remove entry point: quiesce hardware and free all unit state */
static void __devexit qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) schedule_work() calls complete */
	flush_scheduled_work();

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		/* user contexts get user-chargeable pages */
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd, "attempt to allocate %d bytes "
				    "for ctxt %u rcvhdrq failed\n",
				    amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
		    "rcvhdrqtailaddr failed\n", rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into hip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	/* program each eager TID entry into the chip */
	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e],
				  rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	/* PIO buffers get the write-combining mapping */
	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}
gpl-2.0
GranPC/linux-asus-flo
arch/arm/mach-msm/ipc_logging.c
1255
15116
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <mach/msm_ipc_logging.h>

#include "ipc_logging.h"

/* all log contexts ever created; protected by ipc_log_context_list_lock */
static LIST_HEAD(ipc_log_context_list);
DEFINE_SPINLOCK(ipc_log_context_list_lock);
static atomic_t next_log_id = ATOMIC_INIT(0);

/* Return the first page of a context's circular page list (NULL-safe). */
static struct ipc_log_page *get_first_page(struct ipc_log_context *ilctxt)
{
	struct ipc_log_page_header *p_pghdr;
	struct ipc_log_page *pg = NULL;

	if (!ilctxt)
		return NULL;
	p_pghdr = list_first_entry(&ilctxt->page_list,
				   struct ipc_log_page_header, list);
	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
	return pg;
}

/*
 * Return the page following cur_pg, wrapping from the last page back
 * to the first so the page list behaves as a ring.
 */
static struct ipc_log_page *get_next_page(struct ipc_log_context *ilctxt,
					  struct ipc_log_page *cur_pg)
{
	struct ipc_log_page_header *p_pghdr;
	struct ipc_log_page *pg = NULL;

	if (!ilctxt || !cur_pg)
		return NULL;
	if (ilctxt->last_page == cur_pg)
		return ilctxt->first_page;
	p_pghdr = list_first_entry(&cur_pg->hdr.list,
			struct ipc_log_page_header, list);
	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
	return pg;
}

/* If data == NULL, drop the log of size data_size*/
static void ipc_log_read(struct ipc_log_context *ilctxt,
			 void *data, int data_size)
{
	int bytes_to_read;

	/* first copy: whatever remains on the current read page */
	bytes_to_read = MIN(((PAGE_SIZE - sizeof(struct ipc_log_page_header))
			      - ilctxt->read_page->hdr.read_offset),
			      data_size);
	if (data)
		memcpy(data, (ilctxt->read_page->data +
			ilctxt->read_page->hdr.read_offset), bytes_to_read);

	if (bytes_to_read != data_size) {
		/* record wraps: 0xFFFF marks the old page fully consumed */
		ilctxt->read_page->hdr.read_offset = 0xFFFF;
		ilctxt->read_page = get_next_page(ilctxt, ilctxt->read_page);
		ilctxt->read_page->hdr.read_offset = 0;
		if (data)
			memcpy((data + bytes_to_read),
			       (ilctxt->read_page->data +
				ilctxt->read_page->hdr.read_offset),
			       (data_size - bytes_to_read));
		bytes_to_read = (data_size - bytes_to_read);
	}
	ilctxt->read_page->hdr.read_offset += bytes_to_read;
	/* the consumed bytes become available for writers again */
	ilctxt->write_avail += data_size;
}

/*
 * Reads a message.
 *
 * If a message is read successfully, then the the message context
 * will be set to:
 *       .hdr    message header .size and .type values
 *       .offset beginning of message data
 *
 * @ectxt   Message context and if NULL, drops the message.
 *
 * @returns 0 - no message available
 *          1 - message read
 */
int msg_read(struct ipc_log_context *ilctxt, struct encode_context *ectxt)
{
	struct tsv_header hdr;

	/* header first so we know how many payload bytes follow */
	ipc_log_read(ilctxt, &hdr, sizeof(hdr));
	if (ectxt) {
		ectxt->hdr.type = hdr.type;
		ectxt->hdr.size = hdr.size;
		ectxt->offset = sizeof(hdr);
		ipc_log_read(ilctxt, (ectxt->buff + ectxt->offset),
			     (int)hdr.size);
	} else {
		ipc_log_read(ilctxt, NULL, (int)hdr.size);
	}
	/* NOTE(review): actually returns total bytes consumed, not 0/1
	 * as the comment above states -- callers rely on the byte count. */
	return sizeof(hdr) + (int)hdr.size;
}

/*
 * Commits messages to the FIFO.  If the FIFO is full, then enough
 * messages are dropped to create space for the new message.
 */
void ipc_log_write(void *ctxt, struct encode_context *ectxt)
{
	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
	int bytes_to_write;
	unsigned long flags;

	if (!ilctxt || !ectxt) {
		pr_err("%s: Invalid ipc_log or encode context\n", __func__);
		return;
	}

	/* lock order: global list lock, then the per-context lock */
	spin_lock_irqsave(&ipc_log_context_list_lock, flags);
	spin_lock(&ilctxt->ipc_log_context_lock);
	/* drop oldest messages until the new one fits */
	while (ilctxt->write_avail < ectxt->offset)
		msg_read(ilctxt, NULL);

	/* first copy: whatever fits on the current write page */
	bytes_to_write = MIN(((PAGE_SIZE - sizeof(struct ipc_log_page_header))
				- ilctxt->write_page->hdr.write_offset),
				ectxt->offset);

	memcpy((ilctxt->write_page->data +
		ilctxt->write_page->hdr.write_offset),
	       ectxt->buff, bytes_to_write);

	if (bytes_to_write != ectxt->offset) {
		/* record wraps: 0xFFFF marks the old page as full */
		ilctxt->write_page->hdr.write_offset = 0xFFFF;
		ilctxt->write_page = get_next_page(ilctxt,
						   ilctxt->write_page);
		ilctxt->write_page->hdr.write_offset = 0;
		memcpy((ilctxt->write_page->data +
			ilctxt->write_page->hdr.write_offset),
		       (ectxt->buff + bytes_to_write),
		       (ectxt->offset - bytes_to_write));
		bytes_to_write = (ectxt->offset - bytes_to_write);
	}
	ilctxt->write_page->hdr.write_offset += bytes_to_write;
	ilctxt->write_avail -= ectxt->offset;
	/* wake any reader blocked waiting for data */
	complete(&ilctxt->read_avail);
	spin_unlock(&ilctxt->ipc_log_context_lock);
	spin_unlock_irqrestore(&ipc_log_context_list_lock, flags);
}
EXPORT_SYMBOL(ipc_log_write);

/*
 * Starts a new message after which you can add serialized data and
 * then complete the message by calling msg_encode_end().
*/ void msg_encode_start(struct encode_context *ectxt, uint32_t type) { if (!ectxt) { pr_err("%s: Invalid encode context\n", __func__); return; } ectxt->hdr.type = type; ectxt->hdr.size = 0; ectxt->offset = sizeof(ectxt->hdr); } EXPORT_SYMBOL(msg_encode_start); /* * Completes the message */ void msg_encode_end(struct encode_context *ectxt) { if (!ectxt) { pr_err("%s: Invalid encode context\n", __func__); return; } /* finalize data size */ ectxt->hdr.size = ectxt->offset - sizeof(ectxt->hdr); BUG_ON(ectxt->hdr.size > MAX_MSG_SIZE); memcpy(ectxt->buff, &ectxt->hdr, sizeof(ectxt->hdr)); } EXPORT_SYMBOL(msg_encode_end); /* * Helper funtion used to write data to a message context. * * @ectxt context initialized by calling msg_encode_start() * @data data to write * @size number of bytes of data to write */ static inline int tsv_write_data(struct encode_context *ectxt, void *data, uint32_t size) { if (!ectxt) { pr_err("%s: Invalid encode context\n", __func__); return -EINVAL; } if ((ectxt->offset + size) > MAX_MSG_SIZE) { pr_err("%s: No space to encode further\n", __func__); return -EINVAL; } memcpy((void *)(ectxt->buff + ectxt->offset), data, size); ectxt->offset += size; return 0; } /* * Helper function that writes a type to the context. * * @ectxt context initialized by calling msg_encode_start() * @type primitive type * @size size of primitive in bytes */ static inline int tsv_write_header(struct encode_context *ectxt, uint32_t type, uint32_t size) { struct tsv_header hdr; hdr.type = (unsigned char)type; hdr.size = (unsigned char)size; return tsv_write_data(ectxt, &hdr, sizeof(hdr)); } /* * Writes the current timestamp count. 
* * @ectxt context initialized by calling msg_encode_start() */ int tsv_timestamp_write(struct encode_context *ectxt) { int ret; unsigned long long t_now = sched_clock(); ret = tsv_write_header(ectxt, TSV_TYPE_TIMESTAMP, sizeof(t_now)); if (ret) return ret; return tsv_write_data(ectxt, &t_now, sizeof(t_now)); } EXPORT_SYMBOL(tsv_timestamp_write); /* * Writes a data pointer. * * @ectxt context initialized by calling msg_encode_start() * @pointer pointer value to write */ int tsv_pointer_write(struct encode_context *ectxt, void *pointer) { int ret; ret = tsv_write_header(ectxt, TSV_TYPE_POINTER, sizeof(pointer)); if (ret) return ret; return tsv_write_data(ectxt, &pointer, sizeof(pointer)); } EXPORT_SYMBOL(tsv_pointer_write); /* * Writes a 32-bit integer value. * * @ectxt context initialized by calling msg_encode_start() * @n integer to write */ int tsv_int32_write(struct encode_context *ectxt, int32_t n) { int ret; ret = tsv_write_header(ectxt, TSV_TYPE_INT32, sizeof(n)); if (ret) return ret; return tsv_write_data(ectxt, &n, sizeof(n)); } EXPORT_SYMBOL(tsv_int32_write); /* * Writes a byte array. * * @ectxt context initialized by calling msg_write_start() * @data Beginning address of data * @data_size Size of data to be written */ int tsv_byte_array_write(struct encode_context *ectxt, void *data, int data_size) { int ret; ret = tsv_write_header(ectxt, TSV_TYPE_BYTE_ARRAY, data_size); if (ret) return ret; return tsv_write_data(ectxt, data, data_size); } EXPORT_SYMBOL(tsv_byte_array_write); /* * Helper function to log a string * * @ilctxt ipc_log_context created using ipc_log_context_create() * @fmt Data specified using format specifiers */ int ipc_log_string(void *ilctxt, const char *fmt, ...) 
{ struct encode_context ectxt; int avail_size, data_size, hdr_size = sizeof(struct tsv_header); va_list arg_list; if (!ilctxt) return -EINVAL; msg_encode_start(&ectxt, TSV_TYPE_STRING); tsv_timestamp_write(&ectxt); avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size)); va_start(arg_list, fmt); data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size), avail_size, fmt, arg_list); va_end(arg_list); tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size); ectxt.offset += data_size; msg_encode_end(&ectxt); ipc_log_write(ilctxt, &ectxt); return 0; } /* * Helper funtion used to read data from a message context. * * @ectxt context initialized by calling msg_read() * @data data to read * @size number of bytes of data to read */ static void tsv_read_data(struct encode_context *ectxt, void *data, uint32_t size) { BUG_ON((ectxt->offset + size) > MAX_MSG_SIZE); memcpy(data, (ectxt->buff + ectxt->offset), size); ectxt->offset += size; } /* * Helper function that reads a type from the context and updates the * context pointers. * * @ectxt context initialized by calling msg_read() * @hdr type header */ static void tsv_read_header(struct encode_context *ectxt, struct tsv_header *hdr) { BUG_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE); memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr)); ectxt->offset += sizeof(*hdr); } /* * Reads a timestamp. * * @ectxt context initialized by calling msg_read() * @dctxt deserialization context * @format output format (appended to %6u.%09u timestamp format) */ void tsv_timestamp_read(struct encode_context *ectxt, struct decode_context *dctxt, const char *format) { struct tsv_header hdr; unsigned long long val; unsigned long nanosec_rem; tsv_read_header(ectxt, &hdr); BUG_ON(hdr.type != TSV_TYPE_TIMESTAMP); tsv_read_data(ectxt, &val, sizeof(val)); nanosec_rem = do_div(val, 1000000000U); IPC_SPRINTF_DECODE(dctxt, "[%6u.%09lu]%s", (unsigned)val, nanosec_rem, format); } EXPORT_SYMBOL(tsv_timestamp_read); /* * Reads a data pointer. 
* * @ectxt context initialized by calling msg_read() * @dctxt deserialization context * @format output format */ void tsv_pointer_read(struct encode_context *ectxt, struct decode_context *dctxt, const char *format) { struct tsv_header hdr; void *val; tsv_read_header(ectxt, &hdr); BUG_ON(hdr.type != TSV_TYPE_POINTER); tsv_read_data(ectxt, &val, sizeof(val)); IPC_SPRINTF_DECODE(dctxt, format, val); } EXPORT_SYMBOL(tsv_pointer_read); /* * Reads a 32-bit integer value. * * @ectxt context initialized by calling msg_read() * @dctxt deserialization context * @format output format */ int32_t tsv_int32_read(struct encode_context *ectxt, struct decode_context *dctxt, const char *format) { struct tsv_header hdr; int32_t val; tsv_read_header(ectxt, &hdr); BUG_ON(hdr.type != TSV_TYPE_INT32); tsv_read_data(ectxt, &val, sizeof(val)); IPC_SPRINTF_DECODE(dctxt, format, val); return val; } EXPORT_SYMBOL(tsv_int32_read); /* * Reads a byte array/string. * * @ectxt context initialized by calling msg_read() * @dctxt deserialization context * @format output format */ void tsv_byte_array_read(struct encode_context *ectxt, struct decode_context *dctxt, const char *format) { struct tsv_header hdr; tsv_read_header(ectxt, &hdr); BUG_ON(hdr.type != TSV_TYPE_BYTE_ARRAY); tsv_read_data(ectxt, dctxt->buff, hdr.size); dctxt->buff += hdr.size; dctxt->size -= hdr.size; } EXPORT_SYMBOL(tsv_byte_array_read); int add_deserialization_func(void *ctxt, int type, void (*dfunc)(struct encode_context *, struct decode_context *)) { struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt; struct dfunc_info *df_info; unsigned long flags; if (!ilctxt || !dfunc) return -EINVAL; df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL); if (!df_info) return -ENOSPC; spin_lock_irqsave(&ipc_log_context_list_lock, flags); spin_lock(&ilctxt->ipc_log_context_lock); df_info->type = type; df_info->dfunc = dfunc; list_add_tail(&df_info->list, &ilctxt->dfunc_info_list); spin_unlock(&ilctxt->ipc_log_context_lock); 
spin_unlock_irqrestore(&ipc_log_context_list_lock, flags); return 0; } EXPORT_SYMBOL(add_deserialization_func); void *ipc_log_context_create(int max_num_pages, const char *mod_name) { struct ipc_log_context *ctxt; struct ipc_log_page *pg = NULL; int page_cnt, local_log_id; unsigned long flags; ctxt = kzalloc(sizeof(struct ipc_log_context), GFP_KERNEL); if (!ctxt) { pr_err("%s: cannot create ipc_log_context\n", __func__); return 0; } local_log_id = atomic_add_return(1, &next_log_id); init_completion(&ctxt->read_avail); INIT_LIST_HEAD(&ctxt->page_list); INIT_LIST_HEAD(&ctxt->dfunc_info_list); spin_lock_init(&ctxt->ipc_log_context_lock); for (page_cnt = 0; page_cnt < max_num_pages; page_cnt++) { pg = kzalloc(sizeof(struct ipc_log_page), GFP_KERNEL); if (!pg) { pr_err("%s: cannot create ipc_log_page\n", __func__); goto release_ipc_log_context; } pg->hdr.magic = IPC_LOGGING_MAGIC_NUM; pg->hdr.nmagic = ~(IPC_LOGGING_MAGIC_NUM); pg->hdr.log_id = (uint32_t)local_log_id; pg->hdr.page_num = page_cnt; pg->hdr.read_offset = 0xFFFF; pg->hdr.write_offset = 0xFFFF; spin_lock_irqsave(&ctxt->ipc_log_context_lock, flags); list_add_tail(&pg->hdr.list, &ctxt->page_list); spin_unlock_irqrestore(&ctxt->ipc_log_context_lock, flags); } ctxt->first_page = get_first_page(ctxt); ctxt->last_page = pg; ctxt->write_page = ctxt->first_page; ctxt->read_page = ctxt->first_page; ctxt->write_page->hdr.write_offset = 0; ctxt->read_page->hdr.read_offset = 0; ctxt->write_avail = max_num_pages * (PAGE_SIZE - sizeof(struct ipc_log_page_header)); create_ctx_debugfs(ctxt, mod_name); spin_lock_irqsave(&ipc_log_context_list_lock, flags); list_add_tail(&ctxt->list, &ipc_log_context_list); spin_unlock_irqrestore(&ipc_log_context_list_lock, flags); return (void *)ctxt; release_ipc_log_context: while (page_cnt-- > 0) { pg = get_first_page(ctxt); list_del(&pg->hdr.list); kfree(pg); } kfree(ctxt); return 0; } EXPORT_SYMBOL(ipc_log_context_create); static int __init ipc_logging_init(void) { 
check_and_create_debugfs(); return 0; } module_init(ipc_logging_init); MODULE_DESCRIPTION("ipc logging"); MODULE_LICENSE("GPL v2");
gpl-2.0
leopesto/kernel_mtk6577
drivers/usb/gadget/f_serial.c
2279
8258
/* * f_serial.c - generic USB serial function driver * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include "u_serial.h" #include "gadget_chips.h" /* * This function packages a simple "generic serial" port with no real * control mechanisms, just raw data transfer over two bulk endpoints. * * Because it's not standardized, this isn't as interoperable as the * CDC ACM driver. However, for many purposes it's just as functional * if you can arrange appropriate host side drivers. */ struct gser_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; }; struct f_gser { struct gserial port; u8 data_id; u8 port_num; struct gser_descs fs; struct gser_descs hs; }; static inline struct f_gser *func_to_gser(struct usb_function *f) { return container_of(f, struct f_gser, port.func); } /*-------------------------------------------------------------------------*/ /* interface descriptor: */ static struct usb_interface_descriptor gser_interface_desc __initdata = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor gser_fs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor gser_fs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = 
USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *gser_fs_function[] __initdata = { (struct usb_descriptor_header *) &gser_interface_desc, (struct usb_descriptor_header *) &gser_fs_in_desc, (struct usb_descriptor_header *) &gser_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor gser_hs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor gser_hs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *gser_hs_function[] __initdata = { (struct usb_descriptor_header *) &gser_interface_desc, (struct usb_descriptor_header *) &gser_hs_in_desc, (struct usb_descriptor_header *) &gser_hs_out_desc, NULL, }; /* string descriptors: */ static struct usb_string gser_string_defs[] = { [0].s = "Generic Serial", { } /* end of list */ }; static struct usb_gadget_strings gser_string_table = { .language = 0x0409, /* en-us */ .strings = gser_string_defs, }; static struct usb_gadget_strings *gser_strings[] = { &gser_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_gser *gser = func_to_gser(f); struct usb_composite_dev *cdev = f->config->cdev; /* we know alt == 0, so this is an activation or a reset */ if (gser->port.in->driver_data) { DBG(cdev, "reset generic ttyGS%d\n", gser->port_num); gserial_disconnect(&gser->port); } else { DBG(cdev, "activate generic ttyGS%d\n", gser->port_num); gser->port.in_desc = ep_choose(cdev->gadget, gser->hs.in, gser->fs.in); gser->port.out_desc = ep_choose(cdev->gadget, gser->hs.out, gser->fs.out); } 
gserial_connect(&gser->port, gser->port_num); return 0; } static void gser_disable(struct usb_function *f) { struct f_gser *gser = func_to_gser(f); struct usb_composite_dev *cdev = f->config->cdev; DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num); gserial_disconnect(&gser->port); } /*-------------------------------------------------------------------------*/ /* serial function driver setup/binding */ static int __init gser_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_gser *gser = func_to_gser(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; gser->data_id = status; gser_interface_desc.bInterfaceNumber = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc); if (!ep) goto fail; gser->port.in = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc); if (!ep) goto fail; gser->port.out = ep; ep->driver_data = cdev; /* claim */ /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(gser_fs_function); gser->fs.in = usb_find_endpoint(gser_fs_function, f->descriptors, &gser_fs_in_desc); gser->fs.out = usb_find_endpoint(gser_fs_function, f->descriptors, &gser_fs_out_desc); /* support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { gser_hs_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress; gser_hs_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(gser_hs_function); gser->hs.in = usb_find_endpoint(gser_hs_function, f->hs_descriptors, &gser_hs_in_desc); gser->hs.out = usb_find_endpoint(gser_hs_function, f->hs_descriptors, &gser_hs_out_desc); } DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", gser->port_num, gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", gser->port.in->name, gser->port.out->name); return 0; fail: /* we might as well release our claims on endpoints */ if (gser->port.out) gser->port.out->driver_data = NULL; if (gser->port.in) gser->port.in->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void gser_unbind(struct usb_configuration *c, struct usb_function *f) { if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(func_to_gser(f)); } /** * gser_bind_config - add a generic serial function to a configuration * @c: the configuration to support the serial instance * @port_num: /dev/ttyGS* port this interface will use * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gserial_setup() with enough ports to * handle all the ones it binds. Caller is also responsible * for calling @gserial_cleanup() before module unload. */ int __init gser_bind_config(struct usb_configuration *c, u8 port_num) { struct f_gser *gser; int status; /* REVISIT might want instance-specific strings to help * distinguish instances ... 
*/ /* maybe allocate device-global string ID */ if (gser_string_defs[0].id == 0) { status = usb_string_id(c->cdev); if (status < 0) return status; gser_string_defs[0].id = status; } /* allocate and initialize one new instance */ gser = kzalloc(sizeof *gser, GFP_KERNEL); if (!gser) return -ENOMEM; gser->port_num = port_num; gser->port.func.name = "gser"; gser->port.func.strings = gser_strings; gser->port.func.bind = gser_bind; gser->port.func.unbind = gser_unbind; gser->port.func.set_alt = gser_set_alt; gser->port.func.disable = gser_disable; status = usb_add_function(c, &gser->port.func); if (status) kfree(gser); return status; }
gpl-2.0
cuteprince/jb_kernel_3.0.16_htc_golfu
drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c
2535
9367
/* Cypress West Bridge API source file (cyaslep2pep.c) ## =========================== ## Copyright (C) 2010 Cypress Semiconductor ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation; either version 2 ## of the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor ## Boston, MA 02110-1301, USA. ## =========================== */ #include "../../include/linux/westbridge/cyashal.h" #include "../../include/linux/westbridge/cyasusb.h" #include "../../include/linux/westbridge/cyaserr.h" #include "../../include/linux/westbridge/cyaslowlevel.h" #include "../../include/linux/westbridge/cyasdma.h" typedef enum cy_as_physical_endpoint_state { cy_as_e_p_free, cy_as_e_p_in, cy_as_e_p_out, cy_as_e_p_iso_in, cy_as_e_p_iso_out } cy_as_physical_endpoint_state; /* * This map is used to map an index between 1 and 10 * to a logical endpoint number. This is used to map * LEP register indexes into actual EP numbers. */ static cy_as_end_point_number_t end_point_map[] = { 3, 5, 7, 9, 10, 11, 12, 13, 14, 15 }; #define CY_AS_EPCFG_1024 (1 << 3) #define CY_AS_EPCFG_DBL (0x02) #define CY_AS_EPCFG_TRIPLE (0x03) #define CY_AS_EPCFG_QUAD (0x00) /* * NB: This table contains the register values for PEP1 * and PEP3. PEP2 and PEP4 only have a bit to change the * direction of the PEP and therefre are not represented * in this table. 
*/ static uint8_t pep_register_values[12][4] = { /* Bit 1:0 buffering, 0 = quad, 2 = double, 3 = triple */ /* Bit 3 size, 0 = 512, 1 = 1024 */ { CY_AS_EPCFG_DBL, CY_AS_EPCFG_DBL, },/* Config 1 - PEP1 (2 * 512), PEP2 (2 * 512), * PEP3 (2 * 512), PEP4 (2 * 512) */ { CY_AS_EPCFG_DBL, CY_AS_EPCFG_QUAD, }, /* Config 2 - PEP1 (2 * 512), PEP2 (2 * 512), * PEP3 (4 * 512), PEP4 (N/A) */ { CY_AS_EPCFG_DBL, CY_AS_EPCFG_DBL | CY_AS_EPCFG_1024, },/* Config 3 - PEP1 (2 * 512), PEP2 (2 * 512), * PEP3 (2 * 1024), PEP4(N/A) */ { CY_AS_EPCFG_QUAD, CY_AS_EPCFG_DBL, },/* Config 4 - PEP1 (4 * 512), PEP2 (N/A), * PEP3 (2 * 512), PEP4 (2 * 512) */ { CY_AS_EPCFG_QUAD, CY_AS_EPCFG_QUAD, },/* Config 5 - PEP1 (4 * 512), PEP2 (N/A), * PEP3 (4 * 512), PEP4 (N/A) */ { CY_AS_EPCFG_QUAD, CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL, },/* Config 6 - PEP1 (4 * 512), PEP2 (N/A), * PEP3 (2 * 1024), PEP4 (N/A) */ { CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL, CY_AS_EPCFG_DBL, },/* Config 7 - PEP1 (2 * 1024), PEP2 (N/A), * PEP3 (2 * 512), PEP4 (2 * 512) */ { CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL, CY_AS_EPCFG_QUAD, },/* Config 8 - PEP1 (2 * 1024), PEP2 (N/A), * PEP3 (4 * 512), PEP4 (N/A) */ { CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL, CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL, },/* Config 9 - PEP1 (2 * 1024), PEP2 (N/A), * PEP3 (2 * 1024), PEP4 (N/A)*/ { CY_AS_EPCFG_TRIPLE, CY_AS_EPCFG_TRIPLE, },/* Config 10 - PEP1 (3 * 512), PEP2 (N/A), * PEP3 (3 * 512), PEP4 (2 * 512)*/ { CY_AS_EPCFG_TRIPLE | CY_AS_EPCFG_1024, CY_AS_EPCFG_DBL, },/* Config 11 - PEP1 (3 * 1024), PEP2 (N/A), * PEP3 (N/A), PEP4 (2 * 512) */ { CY_AS_EPCFG_QUAD | CY_AS_EPCFG_1024, CY_AS_EPCFG_DBL, },/* Config 12 - PEP1 (4 * 1024), PEP2 (N/A), * PEP3 (N/A), PEP4 (N/A) */ }; static cy_as_return_status_t find_endpoint_directions(cy_as_device *dev_p, cy_as_physical_endpoint_state epstate[4]) { int i; cy_as_physical_endpoint_state desired; /* * note, there is no error checking here because * ISO error checking happens when the API is called. 
*/ for (i = 0; i < 10; i++) { int epno = end_point_map[i]; if (dev_p->usb_config[epno].enabled) { int pep = dev_p->usb_config[epno].physical; if (dev_p->usb_config[epno].type == cy_as_usb_iso) { /* * marking this as an ISO endpoint, removes the * physical EP from consideration when * mapping the remaining E_ps. */ if (dev_p->usb_config[epno].dir == cy_as_usb_in) desired = cy_as_e_p_iso_in; else desired = cy_as_e_p_iso_out; } else { if (dev_p->usb_config[epno].dir == cy_as_usb_in) desired = cy_as_e_p_in; else desired = cy_as_e_p_out; } /* * NB: Note the API calls insure that an ISO endpoint * has a physical and logical EP number that are the * same, therefore this condition is not enforced here. */ if (epstate[pep - 1] != cy_as_e_p_free && epstate[pep - 1] != desired) return CY_AS_ERROR_INVALID_CONFIGURATION; epstate[pep - 1] = desired; } } /* * create the EP1 config values directly. * both EP1OUT and EP1IN are invalid by default. */ dev_p->usb_ep1cfg[0] = 0; dev_p->usb_ep1cfg[1] = 0; if (dev_p->usb_config[1].enabled) { if ((dev_p->usb_config[1].dir == cy_as_usb_out) || (dev_p->usb_config[1].dir == cy_as_usb_in_out)) { /* Set the valid bit and type field. */ dev_p->usb_ep1cfg[0] = (1 << 7); if (dev_p->usb_config[1].type == cy_as_usb_bulk) dev_p->usb_ep1cfg[0] |= (2 << 4); else dev_p->usb_ep1cfg[0] |= (3 << 4); } if ((dev_p->usb_config[1].dir == cy_as_usb_in) || (dev_p->usb_config[1].dir == cy_as_usb_in_out)) { /* Set the valid bit and type field. 
*/ dev_p->usb_ep1cfg[1] = (1 << 7); if (dev_p->usb_config[1].type == cy_as_usb_bulk) dev_p->usb_ep1cfg[1] |= (2 << 4); else dev_p->usb_ep1cfg[1] |= (3 << 4); } } return CY_AS_ERROR_SUCCESS; } static void create_register_settings(cy_as_device *dev_p, cy_as_physical_endpoint_state epstate[4]) { int i; uint8_t v; for (i = 0; i < 4; i++) { if (i == 0) { /* Start with the values that specify size */ dev_p->usb_pepcfg[i] = pep_register_values [dev_p->usb_phy_config - 1][0]; } else if (i == 2) { /* Start with the values that specify size */ dev_p->usb_pepcfg[i] = pep_register_values [dev_p->usb_phy_config - 1][1]; } else dev_p->usb_pepcfg[i] = 0; /* Adjust direction if it is in */ if (epstate[i] == cy_as_e_p_iso_in || epstate[i] == cy_as_e_p_in) dev_p->usb_pepcfg[i] |= (1 << 6); } /* Configure the logical EP registers */ for (i = 0; i < 10; i++) { int val; int epnum = end_point_map[i]; v = 0x10; /* PEP 1, Bulk Endpoint, EP not valid */ if (dev_p->usb_config[epnum].enabled) { v |= (1 << 7); /* Enabled */ val = dev_p->usb_config[epnum].physical - 1; cy_as_hal_assert(val >= 0 && val <= 3); v |= (val << 5); switch (dev_p->usb_config[epnum].type) { case cy_as_usb_bulk: val = 2; break; case cy_as_usb_int: val = 3; break; case cy_as_usb_iso: val = 1; break; default: cy_as_hal_assert(cy_false); break; } v |= (val << 3); } dev_p->usb_lepcfg[i] = v; } } cy_as_return_status_t cy_as_usb_map_logical2_physical(cy_as_device *dev_p) { cy_as_return_status_t ret; /* Physical EPs 3 5 7 9 respectively in the array */ cy_as_physical_endpoint_state epstate[4] = { cy_as_e_p_free, cy_as_e_p_free, cy_as_e_p_free, cy_as_e_p_free }; /* Find the direction for the endpoints */ ret = find_endpoint_directions(dev_p, epstate); if (ret != CY_AS_ERROR_SUCCESS) return ret; /* * now create the register settings based on the given * assigned of logical E_ps to physical endpoints. 
*/ create_register_settings(dev_p, epstate); return ret; } static uint16_t get_max_dma_size(cy_as_device *dev_p, cy_as_end_point_number_t ep) { uint16_t size = dev_p->usb_config[ep].size; if (size == 0) { switch (dev_p->usb_config[ep].type) { case cy_as_usb_control: size = 64; break; case cy_as_usb_bulk: size = cy_as_device_is_usb_high_speed(dev_p) ? 512 : 64; break; case cy_as_usb_int: size = cy_as_device_is_usb_high_speed(dev_p) ? 1024 : 64; break; case cy_as_usb_iso: size = cy_as_device_is_usb_high_speed(dev_p) ? 1024 : 1023; break; } } return size; } cy_as_return_status_t cy_as_usb_set_dma_sizes(cy_as_device *dev_p) { cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; uint32_t i; for (i = 0; i < 10; i++) { cy_as_usb_end_point_config *config_p = &dev_p->usb_config[end_point_map[i]]; if (config_p->enabled) { ret = cy_as_dma_set_max_dma_size(dev_p, end_point_map[i], get_max_dma_size(dev_p, end_point_map[i])); if (ret != CY_AS_ERROR_SUCCESS) break; } } return ret; } cy_as_return_status_t cy_as_usb_setup_dma(cy_as_device *dev_p) { cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; uint32_t i; for (i = 0; i < 10; i++) { cy_as_usb_end_point_config *config_p = &dev_p->usb_config[end_point_map[i]]; if (config_p->enabled) { /* Map the endpoint direction to the DMA direction */ cy_as_dma_direction dir = cy_as_direction_out; if (config_p->dir == cy_as_usb_in) dir = cy_as_direction_in; ret = cy_as_dma_enable_end_point(dev_p, end_point_map[i], cy_true, dir); if (ret != CY_AS_ERROR_SUCCESS) break; } } return ret; }
gpl-2.0
Frontier314/kernel_mixtilehf_3015
drivers/mtd/nand/rtc_from4.c
2791
18044
/* * drivers/mtd/nand/rtc_from4.c * * Copyright (C) 2004 Red Hat, Inc. * * Derived from drivers/mtd/nand/spia.c * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Overview: * This is a device driver for the AG-AND flash device found on the * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4), * which utilizes the Renesas HN29V1G91T-30 part. * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device. */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/rslib.h> #include <linux/bitrev.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <asm/io.h> /* * MTD structure for Renesas board */ static struct mtd_info *rtc_from4_mtd = NULL; #define RTC_FROM4_MAX_CHIPS 2 /* HS77x9 processor register defines */ #define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60)) #define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62)) #define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64)) #define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66)) #define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68)) #define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C)) #define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80)) /* * Values specific to the Renesas Technology Corp. 
FROM_BOARD4 (used with HS77x9 processor) */ /* Address where flash is mapped */ #define RTC_FROM4_FIO_BASE 0x14000000 /* CLE and ALE are tied to address lines 5 & 4, respectively */ #define RTC_FROM4_CLE (1 << 5) #define RTC_FROM4_ALE (1 << 4) /* address lines A24-A22 used for chip selection */ #define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000) #define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000) #define RTC_FROM4_NAND_ADDR_FPGA (0x01000000) /* mask address lines A24-A22 used for chip selection */ #define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA) /* FPGA status register for checking device ready (bit zero) */ #define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002) #define RTC_FROM4_DEVICE_READY 0x0001 /* FPGA Reed-Solomon ECC Control register */ #define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050) #define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7) #define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6) #define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5) /* FPGA Reed-Solomon ECC code base */ #define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060) #define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080) /* FPGA Reed-Solomon ECC check register */ #define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070) #define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7) #define ERR_STAT_ECC_AVAILABLE 0x20 /* Undefine for software ECC */ #define RTC_FROM4_HWECC 1 /* Define as 1 for no virtual erase blocks (in JFFS2) */ #define RTC_FROM4_NO_VIRTBLOCKS 0 /* * Module stuff */ static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE); static const struct mtd_partition partition_info[] = { { .name = "Renesas flash partition 1", .offset = 0, .size = MTDPART_SIZ_FULL}, }; #define NUM_PARTITIONS 1 /* * hardware specific flash bbt decriptors * Note: this is to allow debugging by disabling * NAND_BBT_CREATE and/or NAND_BBT_WRITE * */ static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' }; static uint8_t 
mirror_pattern[] = { '1', 't', 'b', 'B' }; static struct nand_bbt_descr rtc_from4_bbt_main_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, .offs = 40, .len = 4, .veroffs = 44, .maxblocks = 4, .pattern = bbt_pattern }; static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, .offs = 40, .len = 4, .veroffs = 44, .maxblocks = 4, .pattern = mirror_pattern }; #ifdef RTC_FROM4_HWECC /* the Reed Solomon control structure */ static struct rs_control *rs_decoder; /* * hardware specific Out Of Band information */ static struct nand_ecclayout rtc_from4_nand_oobinfo = { .eccbytes = 32, .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, .oobfree = {{32, 32}} }; #endif /* * rtc_from4_hwcontrol - hardware specific access to control-lines * @mtd: MTD device structure * @cmd: hardware control command * * Address lines (A5 and A4) are used to control Command and Address Latch * Enable on this board, so set the read/write address appropriately. * * Chip Enable is also controlled by the Chip Select (CS5) and * Address lines (A24-A22), so no action is required here. * */ static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *chip = (mtd->priv); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE); else writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE); } /* * rtc_from4_nand_select_chip - hardware specific chip select * @mtd: MTD device structure * @chip: Chip to select (0 == slot 3, 1 == slot 4) * * The chip select is based on address lines A24-A22. * This driver uses flash slots 3 and 4 (A23-A22). 
* */ static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip) { struct nand_chip *this = mtd->priv; this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK); this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK); switch (chip) { case 0: /* select slot 3 chip */ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3); this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3); break; case 1: /* select slot 4 chip */ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4); this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4); break; } } /* * rtc_from4_nand_device_ready - hardware specific ready/busy check * @mtd: MTD device structure * * This board provides the Ready/Busy state in the status register * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal. * */ static int rtc_from4_nand_device_ready(struct mtd_info *mtd) { unsigned short status; status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR)); return (status & RTC_FROM4_DEVICE_READY); } /* * deplete - code to perform device recovery in case there was a power loss * @mtd: MTD device structure * @chip: Chip to select (0 == slot 3, 1 == slot 4) * * If there was a sudden loss of power during an erase operation, a * "device recovery" operation must be performed when power is restored * to ensure correct operation. This routine performs the required steps * for the requested chip. * * See page 86 of the data sheet for details. 
* */ static void deplete(struct mtd_info *mtd, int chip) { struct nand_chip *this = mtd->priv; /* wait until device is ready */ while (!this->dev_ready(mtd)) ; this->select_chip(mtd, chip); /* Send the commands for device recovery, phase 1 */ this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000); this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); /* Send the commands for device recovery, phase 2 */ this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004); this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); } #ifdef RTC_FROM4_HWECC /* * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function * @mtd: MTD device structure * @mode: I/O mode; read or write * * enable hardware ECC for data read or write * */ static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode) { volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL); unsigned short status; switch (mode) { case NAND_ECC_READ: status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E; *rs_ecc_ctl = status; break; case NAND_ECC_READSYN: status = 0x00; *rs_ecc_ctl = status; break; case NAND_ECC_WRITE: status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E; *rs_ecc_ctl = status; break; default: BUG(); break; } } /* * rtc_from4_calculate_ecc - hardware specific code to read ECC code * @mtd: MTD device structure * @dat: buffer containing the data to generate ECC codes * @ecc_code ECC codes calculated * * The ECC code is calculated by the FPGA. All we have to do is read the values * from the FPGA registers. * * Note: We read from the inverted registers, since data is inverted before * the code is calculated. 
So all 0xff data (blank page) results in all 0xff rs code * */ static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN); unsigned short value; int i; for (i = 0; i < 8; i++) { value = *rs_eccn; ecc_code[i] = (unsigned char)value; rs_eccn++; } ecc_code[7] |= 0x0f; /* set the last four bits (not used) */ } /* * rtc_from4_correct_data - hardware specific code to correct data using ECC code * @mtd: MTD device structure * @buf: buffer containing the data to generate ECC codes * @ecc1 ECC codes read * @ecc2 ECC codes calculated * * The FPGA tells us fast, if there's an error or not. If no, we go back happy * else we read the ecc results from the fpga and call the rs library to decode * and hopefully correct the error. * */ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2) { int i, j, res; unsigned short status; uint16_t par[6], syn[6]; uint8_t ecc[8]; volatile unsigned short *rs_ecc; status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK)); if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) { return 0; } /* Read the syndrom pattern from the FPGA and correct the bitorder */ rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); for (i = 0; i < 8; i++) { ecc[i] = bitrev8(*rs_ecc); rs_ecc++; } /* convert into 6 10bit syndrome fields */ par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)]; par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)]; par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)]; par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)]; par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 
0x300)]; par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0); /* Convert to computable syndrome */ for (i = 0; i < 6; i++) { syn[i] = par[0]; for (j = 1; j < 6; j++) if (par[j] != rs_decoder->nn) syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)]; /* Convert to index form */ syn[i] = rs_decoder->index_of[syn[i]]; } /* Let the library code do its magic. */ res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); if (res > 0) { DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); } return res; } /** * rtc_from4_errstat - perform additional error status checks * @mtd: MTD device structure * @this: NAND chip structure * @state: state or the operation * @status: status code returned from read status * @page: startpage inside the chip, must be called with (page & this->pagemask) * * Perform additional error status checks on erase and write failures * to determine if errors are correctable. For this device, correctable * 1-bit errors on erase and write are considered acceptable. * * note: see pages 34..37 of data sheet for details. 
* */ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page) { int er_stat = 0; int rtn, retlen; size_t len; uint8_t *buf; int i; this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1); if (state == FL_ERASING) { for (i = 0; i < 4; i++) { if (!(status & 1 << (i + 1))) continue; this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1), -1, -1); rtn = this->read_byte(mtd); this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); /* err_ecc_not_avail */ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) er_stat |= 1 << (i + 1); } } else if (state == FL_WRITING) { unsigned long corrected = mtd->ecc_stats.corrected; /* single bank write logic */ this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1); rtn = this->read_byte(mtd); this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); if (!(rtn & ERR_STAT_ECC_AVAILABLE)) { /* err_ecc_not_avail */ er_stat |= 1 << 1; goto out; } len = mtd->writesize; buf = kmalloc(len, GFP_KERNEL); if (!buf) { printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n"); er_stat = 1; goto out; } /* recovery read */ rtn = nand_do_read(mtd, page, len, &retlen, buf); /* if read failed or > 1-bit error corrected */ if (rtn || (mtd->ecc_stats.corrected - corrected) > 1) er_stat |= 1 << 1; kfree(buf); } out: rtn = status; if (er_stat == 0) { /* if ECC is available */ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */ } return rtn; } #endif /* * Main initialization routine */ static int __init rtc_from4_init(void) { struct nand_chip *this; unsigned short bcr1, bcr2, wcr2; int i; int ret; /* Allocate memory for MTD device structure and private data */ rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!rtc_from4_mtd) { printk("Unable to allocate Renesas NAND MTD device structure.\n"); return -ENOMEM; } /* Get pointer to private data */ this = (struct nand_chip *)(&rtc_from4_mtd[1]); /* Initialize structures */ memset(rtc_from4_mtd, 0, sizeof(struct mtd_info)); memset(this, 0, sizeof(struct 
nand_chip)); /* Link the private data with the MTD structure */ rtc_from4_mtd->priv = this; rtc_from4_mtd->owner = THIS_MODULE; /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */ bcr1 = *SH77X9_BCR1 & ~0x0002; bcr1 |= 0x0002; *SH77X9_BCR1 = bcr1; /* set */ bcr2 = *SH77X9_BCR2 & ~0x0c00; bcr2 |= 0x0800; *SH77X9_BCR2 = bcr2; /* set area 5 wait states */ wcr2 = *SH77X9_WCR2 & ~0x1c00; wcr2 |= 0x1c00; *SH77X9_WCR2 = wcr2; /* Set address of NAND IO lines */ this->IO_ADDR_R = rtc_from4_fio_base; this->IO_ADDR_W = rtc_from4_fio_base; /* Set address of hardware control function */ this->cmd_ctrl = rtc_from4_hwcontrol; /* Set address of chip select function */ this->select_chip = rtc_from4_nand_select_chip; /* command delay time (in us) */ this->chip_delay = 100; /* return the status of the Ready/Busy line */ this->dev_ready = rtc_from4_nand_device_ready; #ifdef RTC_FROM4_HWECC printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n"); this->ecc.mode = NAND_ECC_HW_SYNDROME; this->ecc.size = 512; this->ecc.bytes = 8; /* return the status of extra status and ECC checks */ this->errstat = rtc_from4_errstat; /* set the nand_oobinfo to support FPGA H/W error detection */ this->ecc.layout = &rtc_from4_nand_oobinfo; this->ecc.hwctl = rtc_from4_enable_hwecc; this->ecc.calculate = rtc_from4_calculate_ecc; this->ecc.correct = rtc_from4_correct_data; /* We could create the decoder on demand, if memory is a concern. 
* This way we have it handy, if an error happens * * Symbolsize is 10 (bits) * Primitve polynomial is x^10+x^3+1 * first consecutive root is 0 * primitve element to generate roots = 1 * generator polinomial degree = 6 */ rs_decoder = init_rs(10, 0x409, 0, 1, 6); if (!rs_decoder) { printk(KERN_ERR "Could not create a RS decoder\n"); ret = -ENOMEM; goto err_1; } #else printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n"); this->ecc.mode = NAND_ECC_SOFT; #endif /* set the bad block tables to support debugging */ this->bbt_td = &rtc_from4_bbt_main_descr; this->bbt_md = &rtc_from4_bbt_mirror_descr; /* Scan to find existence of the device */ if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) { ret = -ENXIO; goto err_2; } /* Perform 'device recovery' for each chip in case there was a power loss. */ for (i = 0; i < this->numchips; i++) { deplete(rtc_from4_mtd, i); } #if RTC_FROM4_NO_VIRTBLOCKS /* use a smaller erase block to minimize wasted space when a block is bad */ /* note: this uses eight times as much RAM as using the default and makes */ /* mounts take four times as long. */ rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS; #endif /* Register the partitions */ ret = mtd_device_register(rtc_from4_mtd, partition_info, NUM_PARTITIONS); if (ret) goto err_3; /* Return happy */ return 0; err_3: nand_release(rtc_from4_mtd); err_2: free_rs(rs_decoder); err_1: kfree(rtc_from4_mtd); return ret; } module_init(rtc_from4_init); /* * Clean up routine */ static void __exit rtc_from4_cleanup(void) { /* Release resource, unregister partitions */ nand_release(rtc_from4_mtd); /* Free the MTD device structure */ kfree(rtc_from4_mtd); #ifdef RTC_FROM4_HWECC /* Free the reed solomon resources */ if (rs_decoder) { free_rs(rs_decoder); } #endif } module_exit(rtc_from4_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("d.marlin <dmarlin@redhat.com"); MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
gpl-2.0
friedrich420/N3-AEL-Kernel-NF1-v5-
sound/soc/tegra/tegra_wm8903.c
4839
14756
/* * tegra_wm8903.c - Tegra machine ASoC driver for boards using WM8903 codec. * * Author: Stephen Warren <swarren@nvidia.com> * Copyright (C) 2010-2011 - NVIDIA, Inc. * * Based on code copyright/by: * * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd. * * Copyright 2007 Wolfson Microelectronics PLC. * Author: Graeme Gregory * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <asm/mach-types.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <mach/tegra_wm8903_pdata.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "../codecs/wm8903.h" #include "tegra_das.h" #include "tegra_i2s.h" #include "tegra_pcm.h" #include "tegra_asoc_utils.h" #define DRV_NAME "tegra-snd-wm8903" #define GPIO_SPKR_EN BIT(0) #define GPIO_HP_MUTE BIT(1) #define GPIO_INT_MIC_EN BIT(2) #define GPIO_EXT_MIC_EN BIT(3) #define GPIO_HP_DET BIT(4) struct tegra_wm8903 { struct tegra_wm8903_platform_data pdata; struct platform_device *pcm_dev; struct tegra_asoc_utils_data util_data; int gpio_requested; }; static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct 
snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_codec *codec = rtd->codec; struct snd_soc_card *card = codec->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); int srate, mclk; int err; srate = params_rate(params); switch (srate) { case 64000: case 88200: case 96000: mclk = 128 * srate; break; default: mclk = 256 * srate; break; } /* FIXME: Codec only requires >= 3MHz if OSR==0 */ while (mclk < 6000000) mclk *= 2; err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk); if (err < 0) { dev_err(card->dev, "Can't configure clocks\n"); return err; } err = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (err < 0) { dev_err(card->dev, "codec_dai fmt not set\n"); return err; } err = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (err < 0) { dev_err(card->dev, "cpu_dai fmt not set\n"); return err; } err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN); if (err < 0) { dev_err(card->dev, "codec_dai clock not set\n"); return err; } return 0; } static struct snd_soc_ops tegra_wm8903_ops = { .hw_params = tegra_wm8903_hw_params, }; static struct snd_soc_jack tegra_wm8903_hp_jack; static struct snd_soc_jack_pin tegra_wm8903_hp_jack_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, }; static struct snd_soc_jack_gpio tegra_wm8903_hp_jack_gpio = { .name = "headphone detect", .report = SND_JACK_HEADPHONE, .debounce_time = 150, .invert = 1, }; static struct snd_soc_jack tegra_wm8903_mic_jack; static struct snd_soc_jack_pin tegra_wm8903_mic_jack_pins[] = { { .pin = "Mic Jack", .mask = SND_JACK_MICROPHONE, }, }; static int tegra_wm8903_event_int_spk(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_card *card = dapm->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); struct 
tegra_wm8903_platform_data *pdata = &machine->pdata; if (!(machine->gpio_requested & GPIO_SPKR_EN)) return 0; gpio_set_value_cansleep(pdata->gpio_spkr_en, SND_SOC_DAPM_EVENT_ON(event)); return 0; } static int tegra_wm8903_event_hp(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_card *card = dapm->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); struct tegra_wm8903_platform_data *pdata = &machine->pdata; if (!(machine->gpio_requested & GPIO_HP_MUTE)) return 0; gpio_set_value_cansleep(pdata->gpio_hp_mute, !SND_SOC_DAPM_EVENT_ON(event)); return 0; } static const struct snd_soc_dapm_widget tegra_wm8903_dapm_widgets[] = { SND_SOC_DAPM_SPK("Int Spk", tegra_wm8903_event_int_spk), SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8903_event_hp), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route harmony_audio_map[] = { {"Headphone Jack", NULL, "HPOUTR"}, {"Headphone Jack", NULL, "HPOUTL"}, {"Int Spk", NULL, "ROP"}, {"Int Spk", NULL, "RON"}, {"Int Spk", NULL, "LOP"}, {"Int Spk", NULL, "LON"}, {"Mic Jack", NULL, "MICBIAS"}, {"IN1L", NULL, "Mic Jack"}, }; static const struct snd_soc_dapm_route seaboard_audio_map[] = { {"Headphone Jack", NULL, "HPOUTR"}, {"Headphone Jack", NULL, "HPOUTL"}, {"Int Spk", NULL, "ROP"}, {"Int Spk", NULL, "RON"}, {"Int Spk", NULL, "LOP"}, {"Int Spk", NULL, "LON"}, {"Mic Jack", NULL, "MICBIAS"}, {"IN1R", NULL, "Mic Jack"}, }; static const struct snd_soc_dapm_route kaen_audio_map[] = { {"Headphone Jack", NULL, "HPOUTR"}, {"Headphone Jack", NULL, "HPOUTL"}, {"Int Spk", NULL, "ROP"}, {"Int Spk", NULL, "RON"}, {"Int Spk", NULL, "LOP"}, {"Int Spk", NULL, "LON"}, {"Mic Jack", NULL, "MICBIAS"}, {"IN2R", NULL, "Mic Jack"}, }; static const struct snd_soc_dapm_route aebl_audio_map[] = { {"Headphone Jack", NULL, "HPOUTR"}, {"Headphone Jack", NULL, "HPOUTL"}, {"Int Spk", NULL, "LINEOUTR"}, {"Int Spk", NULL, "LINEOUTL"}, {"Mic Jack", NULL, 
"MICBIAS"}, {"IN1R", NULL, "Mic Jack"}, }; static const struct snd_kcontrol_new tegra_wm8903_controls[] = { SOC_DAPM_PIN_SWITCH("Int Spk"), }; static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; struct snd_soc_card *card = codec->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); struct tegra_wm8903_platform_data *pdata = &machine->pdata; struct device_node *np = card->dev->of_node; int ret; if (card->dev->platform_data) { memcpy(pdata, card->dev->platform_data, sizeof(*pdata)); } else if (np) { /* * This part must be in init() rather than probe() in order to * guarantee that the WM8903 has been probed, and hence its * GPIO controller registered, which is a pre-condition for * of_get_named_gpio() to be able to map the phandles in the * properties to the controller node. Given this, all * pdata handling is in init() for consistency. */ pdata->gpio_spkr_en = of_get_named_gpio(np, "nvidia,spkr-en-gpios", 0); pdata->gpio_hp_mute = of_get_named_gpio(np, "nvidia,hp-mute-gpios", 0); pdata->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); pdata->gpio_int_mic_en = of_get_named_gpio(np, "nvidia,int-mic-en-gpios", 0); pdata->gpio_ext_mic_en = of_get_named_gpio(np, "nvidia,ext-mic-en-gpios", 0); } else { dev_err(card->dev, "No platform data supplied\n"); return -EINVAL; } if (gpio_is_valid(pdata->gpio_spkr_en)) { ret = gpio_request(pdata->gpio_spkr_en, "spkr_en"); if (ret) { dev_err(card->dev, "cannot get spkr_en gpio\n"); return ret; } machine->gpio_requested |= GPIO_SPKR_EN; gpio_direction_output(pdata->gpio_spkr_en, 0); } if (gpio_is_valid(pdata->gpio_hp_mute)) { ret = gpio_request(pdata->gpio_hp_mute, "hp_mute"); if (ret) { dev_err(card->dev, "cannot get hp_mute gpio\n"); return ret; } machine->gpio_requested |= GPIO_HP_MUTE; gpio_direction_output(pdata->gpio_hp_mute, 1); } if (gpio_is_valid(pdata->gpio_int_mic_en)) { ret = 
gpio_request(pdata->gpio_int_mic_en, "int_mic_en"); if (ret) { dev_err(card->dev, "cannot get int_mic_en gpio\n"); return ret; } machine->gpio_requested |= GPIO_INT_MIC_EN; /* Disable int mic; enable signal is active-high */ gpio_direction_output(pdata->gpio_int_mic_en, 0); } if (gpio_is_valid(pdata->gpio_ext_mic_en)) { ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en"); if (ret) { dev_err(card->dev, "cannot get ext_mic_en gpio\n"); return ret; } machine->gpio_requested |= GPIO_EXT_MIC_EN; /* Enable ext mic; enable signal is active-low */ gpio_direction_output(pdata->gpio_ext_mic_en, 0); } if (gpio_is_valid(pdata->gpio_hp_det)) { tegra_wm8903_hp_jack_gpio.gpio = pdata->gpio_hp_det; snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, &tegra_wm8903_hp_jack); snd_soc_jack_add_pins(&tegra_wm8903_hp_jack, ARRAY_SIZE(tegra_wm8903_hp_jack_pins), tegra_wm8903_hp_jack_pins); snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack, 1, &tegra_wm8903_hp_jack_gpio); machine->gpio_requested |= GPIO_HP_DET; } snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE, &tegra_wm8903_mic_jack); snd_soc_jack_add_pins(&tegra_wm8903_mic_jack, ARRAY_SIZE(tegra_wm8903_mic_jack_pins), tegra_wm8903_mic_jack_pins); wm8903_mic_detect(codec, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE, 0); snd_soc_dapm_force_enable_pin(dapm, "MICBIAS"); return 0; } static struct snd_soc_dai_link tegra_wm8903_dai = { .name = "WM8903", .stream_name = "WM8903 PCM", .codec_name = "wm8903.0-001a", .platform_name = "tegra-pcm-audio", .cpu_dai_name = "tegra-i2s.0", .codec_dai_name = "wm8903-hifi", .init = tegra_wm8903_init, .ops = &tegra_wm8903_ops, }; static struct snd_soc_card snd_soc_tegra_wm8903 = { .name = "tegra-wm8903", .owner = THIS_MODULE, .dai_link = &tegra_wm8903_dai, .num_links = 1, .controls = tegra_wm8903_controls, .num_controls = ARRAY_SIZE(tegra_wm8903_controls), .dapm_widgets = tegra_wm8903_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tegra_wm8903_dapm_widgets), .fully_routed = true, }; static 
__devinit int tegra_wm8903_driver_probe(struct platform_device *pdev) { struct snd_soc_card *card = &snd_soc_tegra_wm8903; struct tegra_wm8903 *machine; int ret; if (!pdev->dev.platform_data && !pdev->dev.of_node) { dev_err(&pdev->dev, "No platform data supplied\n"); return -EINVAL; } machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8903), GFP_KERNEL); if (!machine) { dev_err(&pdev->dev, "Can't allocate tegra_wm8903 struct\n"); ret = -ENOMEM; goto err; } machine->pcm_dev = ERR_PTR(-EINVAL); card->dev = &pdev->dev; platform_set_drvdata(pdev, card); snd_soc_card_set_drvdata(card, machine); if (pdev->dev.of_node) { ret = snd_soc_of_parse_card_name(card, "nvidia,model"); if (ret) goto err; ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing"); if (ret) goto err; tegra_wm8903_dai.codec_name = NULL; tegra_wm8903_dai.codec_of_node = of_parse_phandle( pdev->dev.of_node, "nvidia,audio-codec", 0); if (!tegra_wm8903_dai.codec_of_node) { dev_err(&pdev->dev, "Property 'nvidia,audio-codec' missing or invalid\n"); ret = -EINVAL; goto err; } tegra_wm8903_dai.cpu_dai_name = NULL; tegra_wm8903_dai.cpu_dai_of_node = of_parse_phandle( pdev->dev.of_node, "nvidia,i2s-controller", 0); if (!tegra_wm8903_dai.cpu_dai_of_node) { dev_err(&pdev->dev, "Property 'nvidia,i2s-controller' missing or invalid\n"); ret = -EINVAL; goto err; } machine->pcm_dev = platform_device_register_simple( "tegra-pcm-audio", -1, NULL, 0); if (IS_ERR(machine->pcm_dev)) { dev_err(&pdev->dev, "Can't instantiate tegra-pcm-audio\n"); ret = PTR_ERR(machine->pcm_dev); goto err; } } else { if (machine_is_harmony()) { card->dapm_routes = harmony_audio_map; card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map); } else if (machine_is_seaboard()) { card->dapm_routes = seaboard_audio_map; card->num_dapm_routes = ARRAY_SIZE(seaboard_audio_map); } else if (machine_is_kaen()) { card->dapm_routes = kaen_audio_map; card->num_dapm_routes = ARRAY_SIZE(kaen_audio_map); } else { card->dapm_routes = aebl_audio_map; 
card->num_dapm_routes = ARRAY_SIZE(aebl_audio_map); } } ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev); if (ret) goto err_unregister; ret = snd_soc_register_card(card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); goto err_fini_utils; } return 0; err_fini_utils: tegra_asoc_utils_fini(&machine->util_data); err_unregister: if (!IS_ERR(machine->pcm_dev)) platform_device_unregister(machine->pcm_dev); err: return ret; } static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); struct tegra_wm8903_platform_data *pdata = &machine->pdata; if (machine->gpio_requested & GPIO_HP_DET) snd_soc_jack_free_gpios(&tegra_wm8903_hp_jack, 1, &tegra_wm8903_hp_jack_gpio); if (machine->gpio_requested & GPIO_EXT_MIC_EN) gpio_free(pdata->gpio_ext_mic_en); if (machine->gpio_requested & GPIO_INT_MIC_EN) gpio_free(pdata->gpio_int_mic_en); if (machine->gpio_requested & GPIO_HP_MUTE) gpio_free(pdata->gpio_hp_mute); if (machine->gpio_requested & GPIO_SPKR_EN) gpio_free(pdata->gpio_spkr_en); machine->gpio_requested = 0; snd_soc_unregister_card(card); tegra_asoc_utils_fini(&machine->util_data); if (!IS_ERR(machine->pcm_dev)) platform_device_unregister(machine->pcm_dev); return 0; } static const struct of_device_id tegra_wm8903_of_match[] __devinitconst = { { .compatible = "nvidia,tegra-audio-wm8903", }, {}, }; static struct platform_driver tegra_wm8903_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, .of_match_table = tegra_wm8903_of_match, }, .probe = tegra_wm8903_driver_probe, .remove = __devexit_p(tegra_wm8903_driver_remove), }; module_platform_driver(tegra_wm8903_driver); MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); MODULE_DESCRIPTION("Tegra+WM8903 machine ASoC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); MODULE_DEVICE_TABLE(of, 
tegra_wm8903_of_match);
gpl-2.0
andrepuschmann/linux-omap
drivers/net/wireless/hostap/hostap_pci.c
8935
11237
#define PRISM2_PCI /* Host AP driver's support for Intersil Prism2.5 PCI cards is based on * driver patches from Reyk Floeter <reyk@vantronix.net> and * Andy Warner <andyw@pobox.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #include "hostap_wlan.h" static char *dev_info = "hostap_pci"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN " "PCI cards."); MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards"); MODULE_LICENSE("GPL"); /* struct local_info::hw_priv */ struct hostap_pci_priv { void __iomem *mem_start; }; /* FIX: do we need mb/wmb/rmb with memory operations? */ static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = { /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ { 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID }, /* Samsung MagicLAN SWL-2210P */ { 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; #ifdef PRISM2_IO_DEBUG static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); writeb(v, hw_priv->mem_start + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, 
flags); v = readb(hw_priv->mem_start + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); writew(v, hw_priv->mem_start + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); v = readw(hw_priv->mem_start + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } #define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb_debug(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw_debug(dev, (a)) #define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v))) #define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a))) #else /* PRISM2_IO_DEBUG */ static inline void hfa384x_outb(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; writeb(v, hw_priv->mem_start + a); } static inline u8 hfa384x_inb(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; return readb(hw_priv->mem_start + a); } static inline void hfa384x_outw(struct net_device *dev, int a, u16 v) { struct hostap_interface 
*iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; writew(v, hw_priv->mem_start + a); } static inline u16 hfa384x_inw(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; return readw(hw_priv->mem_start + a); } #define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw(dev, (a)) #define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v))) #define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a))) #endif /* PRISM2_IO_DEBUG */ static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; __le16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (__le16 *) buf; for ( ; len > 1; len -= 2) *pos++ = HFA384X_INW_DATA(d_off); if (len & 1) *((char *) pos) = HFA384X_INB(d_off); return 0; } static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; __le16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (__le16 *) buf; for ( ; len > 1; len -= 2) HFA384X_OUTW_DATA(*pos++, d_off); if (len & 1) HFA384X_OUTB(*((char *) pos), d_off); return 0; } /* FIX: This might change at some point.. */ #include "hostap_hw.c" static void prism2_pci_cor_sreset(local_info_t *local) { struct net_device *dev = local->dev; u16 reg; reg = HFA384X_INB(HFA384X_PCICOR_OFF); printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg); /* linux-wlan-ng uses extremely long hold and settle times for * COR sreset. A comment in the driver code mentions that the long * delays appear to be necessary. However, at least IBM 22P6901 seems * to work fine with shorter delays. 
* * Longer delays can be configured by uncommenting following line: */ /* #define PRISM2_PCI_USE_LONG_DELAYS */ #ifdef PRISM2_PCI_USE_LONG_DELAYS int i; HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF); mdelay(250); HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF); mdelay(500); /* Wait for f/w to complete initialization (CMD:BUSY == 0) */ i = 2000000 / 10; while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i) udelay(10); #else /* PRISM2_PCI_USE_LONG_DELAYS */ HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF); mdelay(2); HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF); mdelay(2); #endif /* PRISM2_PCI_USE_LONG_DELAYS */ if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) { printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name); } } static void prism2_pci_genesis_reset(local_info_t *local, int hcr) { struct net_device *dev = local->dev; HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF); mdelay(10); HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF); mdelay(10); HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF); mdelay(10); } static struct prism2_helper_functions prism2_pci_funcs = { .card_present = NULL, .cor_sreset = prism2_pci_cor_sreset, .genesis_reset = prism2_pci_genesis_reset, .hw_type = HOSTAP_HW_PCI, }; static int prism2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned long phymem; void __iomem *mem = NULL; local_info_t *local = NULL; struct net_device *dev = NULL; static int cards_found /* = 0 */; int irq_registered = 0; struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL); if (hw_priv == NULL) return -ENOMEM; if (pci_enable_device(pdev)) goto err_out_free; phymem = pci_resource_start(pdev, 0); if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) { printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n"); goto err_out_disable; } mem = pci_ioremap_bar(pdev, 0); if (mem == NULL) { printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ; goto fail; } dev = 
prism2_init_local_data(&prism2_pci_funcs, cards_found, &pdev->dev); if (dev == NULL) goto fail; iface = netdev_priv(dev); local = iface->local; local->hw_priv = hw_priv; cards_found++; dev->irq = pdev->irq; hw_priv->mem_start = mem; dev->base_addr = (unsigned long) mem; prism2_pci_cor_sreset(local); pci_set_drvdata(pdev, dev); if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: request_irq failed\n", dev->name); goto fail; } else irq_registered = 1; if (!local->pri_only && prism2_hw_config(dev, 1)) { printk(KERN_DEBUG "%s: hardware initialization failed\n", dev_info); goto fail; } printk(KERN_INFO "%s: Intersil Prism2.5 PCI: " "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq); return hostap_hw_ready(dev); fail: if (irq_registered && dev) free_irq(dev->irq, dev); if (mem) iounmap(mem); release_mem_region(phymem, pci_resource_len(pdev, 0)); err_out_disable: pci_disable_device(pdev); prism2_free_local_data(dev); err_out_free: kfree(hw_priv); return -ENODEV; } static void prism2_pci_remove(struct pci_dev *pdev) { struct net_device *dev; struct hostap_interface *iface; void __iomem *mem_start; struct hostap_pci_priv *hw_priv; dev = pci_get_drvdata(pdev); iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; /* Reset the hardware, and ensure interrupts are disabled. 
*/ prism2_pci_cor_sreset(iface->local); hfa384x_disable_interrupts(dev); if (dev->irq) free_irq(dev->irq, dev); mem_start = hw_priv->mem_start; prism2_free_local_data(dev); kfree(hw_priv); iounmap(mem_start); release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); pci_disable_device(pdev); } #ifdef CONFIG_PM static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); if (netif_running(dev)) { netif_stop_queue(dev); netif_device_detach(dev); } prism2_suspend(dev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int prism2_pci_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); int err; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "%s: pci_enable_device failed on resume\n", dev->name); return err; } pci_restore_state(pdev); prism2_hw_config(dev, 0); if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); } return 0; } #endif /* CONFIG_PM */ MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); static struct pci_driver prism2_pci_driver = { .name = "hostap_pci", .id_table = prism2_pci_id_table, .probe = prism2_pci_probe, .remove = prism2_pci_remove, #ifdef CONFIG_PM .suspend = prism2_pci_suspend, .resume = prism2_pci_resume, #endif /* CONFIG_PM */ }; static int __init init_prism2_pci(void) { return pci_register_driver(&prism2_pci_driver); } static void __exit exit_prism2_pci(void) { pci_unregister_driver(&prism2_pci_driver); } module_init(init_prism2_pci); module_exit(exit_prism2_pci);
gpl-2.0
boa19861105/android_LP5.0.2_kernel_htc_dlxub1
drivers/net/wireless/hostap/hostap_pci.c
8935
11237
#define PRISM2_PCI

/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
 * driver patches from Reyk Floeter <reyk@vantronix.net> and
 * Andy Warner <andyw@pobox.com> */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>

#include "hostap_wlan.h"

static char *dev_info = "hostap_pci";

MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
		   "PCI cards.");
MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
MODULE_LICENSE("GPL");

/*
 * Per-device private data, reachable via struct local_info::hw_priv.
 * Holds only the ioremap()ed base of PCI BAR 0; all register accessors
 * below add a register offset to this base.
 */
struct hostap_pci_priv {
	void __iomem *mem_start;
};

/* FIX: do we need mb/wmb/rmb with memory operations? */

static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
	/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
	{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
	/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
	{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
	/* Samsung MagicLAN SWL-2210P */
	{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};

#ifdef PRISM2_IO_DEBUG

/*
 * Debug variants of the register accessors: identical MMIO access to the
 * non-debug versions below, but each access is also recorded via
 * prism2_io_debug_add() under local->lock so the I/O trace stays ordered.
 */

/* Write byte @v to register offset @a, logging the access. */
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
	writeb(v, hw_priv->mem_start + a);
	spin_unlock_irqrestore(&local->lock, flags);
}

/* Read a byte from register offset @a, logging the value read. */
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;
	u8 v;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	v = readb(hw_priv->mem_start + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}

/* Write 16-bit word @v to register offset @a, logging the access. */
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
	writew(v, hw_priv->mem_start + a);
	spin_unlock_irqrestore(&local->lock, flags);
}

/* Read a 16-bit word from register offset @a, logging the value read. */
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;
	u16 v;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	v = readw(hw_priv->mem_start + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}

/* Register access macros used by hostap_hw.c; these expect a local `dev`.
 * The *_DATA variants byte-swap so BAP data stays little-endian on the
 * wire regardless of host endianness. */
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))

#else /* PRISM2_IO_DEBUG */

/* Plain MMIO register accessors (no tracing, no locking). */

static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;
	writeb(v, hw_priv->mem_start + a);
}

static inline u8 hfa384x_inb(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;
	return readb(hw_priv->mem_start + a);
}

static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;
	writew(v, hw_priv->mem_start + a);
}

static inline u16 hfa384x_inw(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;
	return readw(hw_priv->mem_start + a);
}

#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))

#endif /* PRISM2_IO_DEBUG */

/*
 * Copy @len bytes from the selected Buffer Access Path data register into
 * @buf. Reads 16-bit words while at least 2 bytes remain, then one final
 * byte if @len is odd. @bap selects DATA1 vs DATA0 register. Always
 * returns 0.
 */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
			    int len)
{
	u16 d_off;
	__le16 *pos;

	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	pos = (__le16 *) buf;

	for ( ; len > 1; len -= 2)
		*pos++ = HFA384X_INW_DATA(d_off);
	if (len & 1)
		*((char *) pos) = HFA384X_INB(d_off);

	return 0;
}

/*
 * Mirror of hfa384x_from_bap(): write @len bytes from @buf to the selected
 * BAP data register, word-at-a-time with an odd trailing byte. Always
 * returns 0.
 */
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
	u16 d_off;
	__le16 *pos;

	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	pos = (__le16 *) buf;

	for ( ; len > 1; len -= 2)
		HFA384X_OUTW_DATA(*pos++, d_off);
	if (len & 1)
		HFA384X_OUTB(*((char *) pos), d_off);

	return 0;
}

/* FIX: This might change at some point.. */
#include "hostap_hw.c"

/*
 * Perform a COR (Configuration Option Register) soft reset: pulse the
 * sreset bit (0x0080), then poll CMD:BUSY to detect whether firmware
 * initialization completed. Delay lengths are hardware-sensitive; see the
 * comment below before changing them.
 */
static void prism2_pci_cor_sreset(local_info_t *local)
{
	struct net_device *dev = local->dev;
	u16 reg;

	reg = HFA384X_INB(HFA384X_PCICOR_OFF);
	printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);

	/* linux-wlan-ng uses extremely long hold and settle times for
	 * COR sreset. A comment in the driver code mentions that the long
	 * delays appear to be necessary. However, at least IBM 22P6901 seems
	 * to work fine with shorter delays.
	 *
	 * Longer delays can be configured by uncommenting following line: */
/* #define PRISM2_PCI_USE_LONG_DELAYS */

#ifdef PRISM2_PCI_USE_LONG_DELAYS
	int i;

	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
	mdelay(250);

	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
	mdelay(500);

	/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
	i = 2000000 / 10;
	while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
		udelay(10);

#else /* PRISM2_PCI_USE_LONG_DELAYS */

	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
	mdelay(2);
	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
	mdelay(2);

#endif /* PRISM2_PCI_USE_LONG_DELAYS */

	if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
		printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
	}
}

/*
 * Genesis-mode reset: magic COR/HCR write sequence (values 0x00C5/0x0045
 * and caller-supplied @hcr) with 10 ms settles between writes.
 * NOTE(review): the specific register values appear to come from vendor
 * reference code — confirm against the hardware docs before touching.
 */
static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
{
	struct net_device *dev = local->dev;

	HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
	mdelay(10);
	HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
	mdelay(10);
	HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
	mdelay(10);
}

/* Bus-specific helper hooks handed to the generic hostap core.
 * card_present is NULL: a PCI device cannot be ejected like a PC Card. */
static struct prism2_helper_functions prism2_pci_funcs = {
	.card_present	= NULL,
	.cor_sreset	= prism2_pci_cor_sreset,
	.genesis_reset	= prism2_pci_genesis_reset,
	.hw_type	= HOSTAP_HW_PCI,
};

/*
 * PCI probe: enable the device, reserve and map BAR 0, create the hostap
 * net_device, soft-reset the card, hook the (shared) interrupt and run the
 * first hardware configuration. On any failure, unwinds exactly the
 * resources acquired so far via the fail/err_out_* labels and returns
 * -ENODEV (-ENOMEM only for the initial allocation).
 */
static int prism2_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	unsigned long phymem;
	void __iomem *mem = NULL;
	local_info_t *local = NULL;
	struct net_device *dev = NULL;
	static int cards_found /* = 0 */;
	int irq_registered = 0;
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;

	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
	if (hw_priv == NULL)
		return -ENOMEM;

	if (pci_enable_device(pdev))
		goto err_out_free;

	phymem = pci_resource_start(pdev, 0);

	if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
		printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
		goto err_out_disable;
	}

	mem = pci_ioremap_bar(pdev, 0);
	if (mem == NULL) {
		printk(KERN_ERR "prism2: Cannot remap PCI memory region\n");
		goto fail;
	}

	dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
				     &pdev->dev);
	if (dev == NULL)
		goto fail;
	iface = netdev_priv(dev);
	local = iface->local;
	local->hw_priv = hw_priv;
	cards_found++;

	dev->irq = pdev->irq;
	hw_priv->mem_start = mem;
	dev->base_addr = (unsigned long) mem;

	/* Reset the card before hooking the IRQ or touching firmware. */
	prism2_pci_cor_sreset(local);

	pci_set_drvdata(pdev, dev);

	if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
			dev)) {
		printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
		goto fail;
	} else
		irq_registered = 1;

	/* pri_only cards skip full hw config (primary firmware only). */
	if (!local->pri_only && prism2_hw_config(dev, 1)) {
		printk(KERN_DEBUG "%s: hardware initialization failed\n",
		       dev_info);
		goto fail;
	}

	printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
	       "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);

	return hostap_hw_ready(dev);

 fail:
	if (irq_registered && dev)
		free_irq(dev->irq, dev);

	if (mem)
		iounmap(mem);

	release_mem_region(phymem, pci_resource_len(pdev, 0));

 err_out_disable:
	pci_disable_device(pdev);
	/* prism2_free_local_data() may be called with dev == NULL here;
	 * presumably it tolerates that — TODO confirm in hostap core. */
	prism2_free_local_data(dev);

 err_out_free:
	kfree(hw_priv);

	return -ENODEV;
}

/*
 * PCI remove: quiesce the hardware (soft reset + interrupt disable) before
 * releasing the IRQ, then tear down the net_device and the PCI resources
 * in reverse order of acquisition. mem_start is saved before
 * prism2_free_local_data() because that call frees the structures the
 * pointer was reached through.
 */
static void prism2_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev;
	struct hostap_interface *iface;
	void __iomem *mem_start;
	struct hostap_pci_priv *hw_priv;

	dev = pci_get_drvdata(pdev);
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;

	/* Reset the hardware, and ensure interrupts are disabled. */
	prism2_pci_cor_sreset(iface->local);
	hfa384x_disable_interrupts(dev);

	if (dev->irq)
		free_irq(dev->irq, dev);

	mem_start = hw_priv->mem_start;
	prism2_free_local_data(dev);
	kfree(hw_priv);

	iounmap(mem_start);

	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
/* Legacy PM suspend: stop the queue, detach netdev, let the core save
 * state, then put the device in D3hot. */
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_stop_queue(dev);
		netif_device_detach(dev);
	}
	prism2_suspend(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/* Legacy PM resume: re-enable the device, restore config space, re-run
 * hardware configuration, then reattach the netdev. */
static int prism2_pci_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);
	prism2_hw_config(dev, 0);
	if (netif_running(dev)) {
		netif_device_attach(dev);
		netif_start_queue(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */

MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);

static struct pci_driver prism2_pci_driver = {
	.name		= "hostap_pci",
	.id_table	= prism2_pci_id_table,
	.probe		= prism2_pci_probe,
	.remove		= prism2_pci_remove,
#ifdef CONFIG_PM
	.suspend	= prism2_pci_suspend,
	.resume		= prism2_pci_resume,
#endif /* CONFIG_PM */
};

static int __init init_prism2_pci(void)
{
	return pci_register_driver(&prism2_pci_driver);
}

static void __exit exit_prism2_pci(void)
{
	pci_unregister_driver(&prism2_pci_driver);
}

module_init(init_prism2_pci);
module_exit(exit_prism2_pci);
gpl-2.0
croniccorey/cronmod-kernel
crypto/anubis.c
9191
28481
/* * Cryptographic API. * * Anubis Algorithm * * The Anubis algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. * * See * * P.S.L.M. Barreto, V. Rijmen, * ``The Anubis block cipher,'' * NESSIE submission, 2000. * * This software implements the "tweaked" version of Anubis. * Only the S-box and (consequently) the rounds constants have been * changed. * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, October 28, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 #define ANUBIS_MAX_KEY_SIZE 40 #define ANUBIS_BLOCK_SIZE 16 #define ANUBIS_MAX_N 10 #define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N) struct anubis_ctx { int key_len; // in bits int R; u32 E[ANUBIS_MAX_ROUNDS + 1][4]; u32 D[ANUBIS_MAX_ROUNDS + 1][4]; }; static const u32 T0[256] = { 0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U, 0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U, 0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U, 0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U, 0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU, 0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U, 0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U, 0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U, 0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU, 0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U, 0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U, 0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU, 0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 
0x14285078U, 0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U, 0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U, 0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U, 0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U, 0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U, 0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U, 0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U, 0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U, 0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U, 0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U, 0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U, 0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU, 0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU, 0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU, 0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U, 0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU, 0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU, 0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U, 0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U, 0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U, 0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U, 0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U, 0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U, 0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU, 0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU, 0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU, 0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU, 0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU, 0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU, 0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU, 0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U, 0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU, 0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U, 0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU, 0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU, 0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U, 0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U, 0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U, 
0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU, 0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU, 0xc59733a4U, 0x11224466U, 0x77eec12fU, 0x7cf8ed15U, 0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U, 0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U, 0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U, 0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU, 0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU, 0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U, 0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU, 0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU, 0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU, 0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U, }; static const u32 T1[256] = { 0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU, 0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U, 0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U, 0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU, 0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U, 0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U, 0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU, 0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U, 0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U, 0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U, 0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU, 0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U, 0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U, 0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U, 0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U, 0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU, 0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U, 0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U, 0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU, 0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU, 0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U, 0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U, 0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U, 0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U, 0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 
0x6a35bed4U, 0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU, 0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U, 0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U, 0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U, 0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU, 0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U, 0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U, 0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU, 0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U, 0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU, 0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU, 0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU, 0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U, 0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU, 0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU, 0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU, 0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U, 0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U, 0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U, 0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U, 0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U, 0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U, 0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU, 0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U, 0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U, 0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U, 0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U, 0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U, 0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU, 0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U, 0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U, 0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U, 0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U, 0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU, 0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU, 0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U, 0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU, 0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U, 
0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U, }; static const u32 T2[256] = { 0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U, 0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU, 0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U, 0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U, 0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU, 0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U, 0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU, 0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U, 0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU, 0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU, 0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U, 0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U, 0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U, 0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU, 0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U, 0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U, 0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U, 0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U, 0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU, 0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU, 0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U, 0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU, 0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU, 0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U, 0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU, 0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U, 0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U, 0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU, 0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U, 0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U, 0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU, 0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U, 0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU, 0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U, 0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU, 0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U, 0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 
0x9f5eeec1U, 0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U, 0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U, 0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U, 0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U, 0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U, 0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U, 0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU, 0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU, 0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU, 0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU, 0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U, 0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U, 0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U, 0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U, 0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU, 0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU, 0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U, 0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U, 0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU, 0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U, 0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU, 0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U, 0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U, 0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 0xb4ee2d5aU, 0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U, 0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU, 0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U, }; static const u32 T3[256] = { 0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U, 0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU, 0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU, 0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU, 0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U, 0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U, 0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U, 0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU, 0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU, 0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU, 0xf9ae57a5U, 0x800b8bcbU, 
0x67b1d66bU, 0x596e3795U, 0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U, 0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U, 0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU, 0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU, 0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU, 0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU, 0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU, 0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U, 0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU, 0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U, 0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U, 0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U, 0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U, 0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U, 0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU, 0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU, 0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U, 0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U, 0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U, 0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U, 0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU, 0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U, 0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU, 0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U, 0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U, 0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU, 0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U, 0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U, 0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU, 0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U, 0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U, 0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU, 0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU, 0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U, 0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU, 0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U, 0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU, 0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 
0xb8d06834U, 0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U, 0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U, 0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U, 0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U, 0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU, 0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU, 0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U, 0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U, 0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U, 0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U, 0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U, 0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU, 0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U, 0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU, 0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U, }; static const u32 T4[256] = { 0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U, 0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU, 0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU, 0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU, 0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U, 0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U, 0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U, 0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU, 0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU, 0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU, 0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U, 0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U, 0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U, 0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU, 0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU, 0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU, 0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU, 0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU, 0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U, 0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU, 0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U, 0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U, 0x79797979U, 0xddddddddU, 
0x3d3d3d3dU, 0x16161616U, 0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U, 0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U, 0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU, 0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU, 0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U, 0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U, 0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U, 0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U, 0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU, 0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U, 0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU, 0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U, 0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U, 0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU, 0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U, 0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U, 0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU, 0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U, 0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U, 0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU, 0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU, 0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U, 0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU, 0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U, 0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU, 0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U, 0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U, 0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U, 0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U, 0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U, 0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU, 0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU, 0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U, 0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U, 0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U, 0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U, 0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U, 0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 
0x2d2d2d2dU, 0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U, 0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU, 0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U, }; static const u32 T5[256] = { 0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U, 0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U, 0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U, 0x0c182860U, 0x0d1a2e68U, 0x0e1c2470U, 0x0f1e2278U, 0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U, 0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U, 0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U, 0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U, 0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U, 0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U, 0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U, 0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U, 0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U, 0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U, 0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U, 0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U, 0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U, 0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U, 0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U, 0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U, 0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U, 0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U, 0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U, 0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U, 0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU, 0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU, 0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU, 0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU, 0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU, 0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU, 0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU, 0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU, 0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU, 0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU, 0x880d1734U, 0x890f113cU, 
0x8a091b24U, 0x8b0b1d2cU, 0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU, 0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU, 0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU, 0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU, 0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU, 0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U, 0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U, 0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U, 0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U, 0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U, 0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U, 0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U, 0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U, 0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U, 0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U, 0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U, 0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U, 0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U, 0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U, 0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U, 0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U, 0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU, 0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU, 0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU, 0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU, 0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU, 0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU, 0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU, 0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU, }; static const u32 rc[] = { 0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU, 0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU, 0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U, 0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU, 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, }; static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; u32 
*flags = &tfm->crt_flags; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; switch (key_len) { case 16: case 20: case 24: case 28: case 32: case 36: case 40: break; default: *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } ctx->key_len = key_len * 8; N = ctx->key_len >> 5; ctx->R = R = 8 + N; /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) kappa[i] = be32_to_cpu(key[i]); /* * generate R + 1 round keys: */ for (r = 0; r <= R; r++) { u32 K0, K1, K2, K3; /* * generate r-th round key K^r: */ K0 = T4[(kappa[N - 1] >> 24) ]; K1 = T4[(kappa[N - 1] >> 16) & 0xff]; K2 = T4[(kappa[N - 1] >> 8) & 0xff]; K3 = T4[(kappa[N - 1] ) & 0xff]; for (i = N - 2; i >= 0; i--) { K0 = T4[(kappa[i] >> 24) ] ^ (T5[(K0 >> 24) ] & 0xff000000U) ^ (T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K0 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K0 ) & 0xff] & 0x000000ffU); K1 = T4[(kappa[i] >> 16) & 0xff] ^ (T5[(K1 >> 24) ] & 0xff000000U) ^ (T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K1 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K1 ) & 0xff] & 0x000000ffU); K2 = T4[(kappa[i] >> 8) & 0xff] ^ (T5[(K2 >> 24) ] & 0xff000000U) ^ (T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K2 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K2 ) & 0xff] & 0x000000ffU); K3 = T4[(kappa[i] ) & 0xff] ^ (T5[(K3 >> 24) ] & 0xff000000U) ^ (T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K3 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K3 ) & 0xff] & 0x000000ffU); } ctx->E[r][0] = K0; ctx->E[r][1] = K1; ctx->E[r][2] = K2; ctx->E[r][3] = K3; /* * compute kappa^{r+1} from kappa^r: */ if (r == R) break; for (i = 0; i < N; i++) { int j = i; inter[i] = T0[(kappa[j--] >> 24) ]; if (j < 0) j = N - 1; inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T3[(kappa[j ] ) & 0xff]; } kappa[0] = inter[0] ^ rc[r]; for (i = 1; i < N; i++) kappa[i] = inter[i]; } /* * generate inverse key schedule: K'^0 = K^R, K'^R = * K^0, K'^r = theta(K^{R-r}): */ for 
(i = 0; i < 4; i++) { ctx->D[0][i] = ctx->E[R][i]; ctx->D[R][i] = ctx->E[0][i]; } for (r = 1; r < R; r++) { for (i = 0; i < 4; i++) { u32 v = ctx->E[R - r][i]; ctx->D[r][i] = T0[T4[(v >> 24) ] & 0xff] ^ T1[T4[(v >> 16) & 0xff] & 0xff] ^ T2[T4[(v >> 8) & 0xff] & 0xff] ^ T3[T4[(v ) & 0xff] & 0xff]; } } return 0; } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], u8 *ciphertext, const u8 *plaintext, const int R) { const __be32 *src = (const __be32 *)plaintext; __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 inter[4]; /* * map plaintext block to cipher state (mu) * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; /* * R - 1 full rounds: */ for (r = 1; r < R; r++) { inter[0] = T0[(state[0] >> 24) ] ^ T1[(state[1] >> 24) ] ^ T2[(state[2] >> 24) ] ^ T3[(state[3] >> 24) ] ^ roundKey[r][0]; inter[1] = T0[(state[0] >> 16) & 0xff] ^ T1[(state[1] >> 16) & 0xff] ^ T2[(state[2] >> 16) & 0xff] ^ T3[(state[3] >> 16) & 0xff] ^ roundKey[r][1]; inter[2] = T0[(state[0] >> 8) & 0xff] ^ T1[(state[1] >> 8) & 0xff] ^ T2[(state[2] >> 8) & 0xff] ^ T3[(state[3] >> 8) & 0xff] ^ roundKey[r][2]; inter[3] = T0[(state[0] ) & 0xff] ^ T1[(state[1] ) & 0xff] ^ T2[(state[2] ) & 0xff] ^ T3[(state[3] ) & 0xff] ^ roundKey[r][3]; state[0] = inter[0]; state[1] = inter[1]; state[2] = inter[2]; state[3] = inter[3]; } /* * last round: */ inter[0] = (T0[(state[0] >> 24) ] & 0xff000000U) ^ (T1[(state[1] >> 24) ] & 0x00ff0000U) ^ (T2[(state[2] >> 24) ] & 0x0000ff00U) ^ (T3[(state[3] >> 24) ] & 0x000000ffU) ^ roundKey[R][0]; inter[1] = (T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^ roundKey[R][1]; inter[2] = (T0[(state[0] >> 8) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 8) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 8) & 0xff] & 
0x000000ffU) ^ roundKey[R][2];
	/* Last-round row 3: keep only the S-box byte of each T-table entry
	 * (masked), i.e. apply gamma without the diffusion layer theta. */
	inter[3] = (T0[(state[0]      ) & 0xff] & 0xff000000U) ^
		   (T1[(state[1]      ) & 0xff] & 0x00ff0000U) ^
		   (T2[(state[2]      ) & 0xff] & 0x0000ff00U) ^
		   (T3[(state[3]      ) & 0xff] & 0x000000ffU) ^
		   roundKey[R][3];

	/*
	 * map cipher state to ciphertext block (mu^{-1}):
	 */
	for (i = 0; i < 4; i++)
		dst[i] = cpu_to_be32(inter[i]);
}

/* Encrypt one ANUBIS_BLOCK_SIZE-byte block with the encryption schedule E. */
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
	anubis_crypt(ctx->E, dst, src, ctx->R);
}

/*
 * Decrypt one block: reuses anubis_crypt() unchanged, but with the
 * inverse key schedule D built by anubis_setkey().
 */
static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
	anubis_crypt(ctx->D, dst, src, ctx->R);
}

/* Cipher descriptor registered with the kernel crypto API. */
static struct crypto_alg anubis_alg = {
	.cra_name		=	"anubis",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	ANUBIS_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct anubis_ctx),
	.cra_alignmask		=	3,	/* callers must give 4-byte aligned buffers */
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(anubis_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	ANUBIS_MIN_KEY_SIZE,
	.cia_max_keysize	=	ANUBIS_MAX_KEY_SIZE,
	.cia_setkey		= 	anubis_setkey,
	.cia_encrypt		=	anubis_encrypt,
	.cia_decrypt		=	anubis_decrypt } }
};

/* Module init: register the Anubis cipher; returns crypto core's status. */
static int __init anubis_mod_init(void)
{
	int ret = 0;

	ret = crypto_register_alg(&anubis_alg);
	return ret;
}

/* Module exit: unregister the cipher. */
static void __exit anubis_mod_fini(void)
{
	crypto_unregister_alg(&anubis_alg);
}

module_init(anubis_mod_init);
module_exit(anubis_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
gpl-2.0
zarboz/Ville-5.0.1
arch/x86/um/delay.c
10215
1171
/* * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> * Mostly copied from arch/x86/lib/delay.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/param.h> void __delay(unsigned long loops) { asm volatile( "test %0,%0\n" "jz 3f\n" "jmp 1f\n" ".align 16\n" "1: jmp 2f\n" ".align 16\n" "2: dec %0\n" " jnz 2b\n" "3: dec %0\n" : /* we don't need output */ : "a" (loops) ); } EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { int d0; xloops *= 4; asm("mull %%edx" : "=d" (xloops), "=&a" (d0) : "1" (xloops), "0" (loops_per_jiffy * (HZ/4))); __delay(++xloops); } EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__ndelay);
gpl-2.0
Flipkart/linux
drivers/xen/swiotlb-xen.c
232
19880
/* * Copyright 2010 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> * * This code provides a IOMMU for Xen PV guests with PCI passthrough. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by * the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * PV guests under Xen are running in an non-contiguous memory architecture. * * When PCI pass-through is utilized, this necessitates an IOMMU for * translating bus (DMA) to virtual and vice-versa and also providing a * mechanism to have contiguous pages for device drivers operations (say DMA * operations). * * Specifically, under Xen the Linux idea of pages is an illusion. It * assumes that pages start at zero and go up to the available memory. To * help with that, the Linux Xen MMU provides a lookup mechanism to * translate the page frame numbers (PFN) to machine frame numbers (MFN) * and vice-versa. The MFN are the "real" frame numbers. Furthermore * memory is not contiguous. Xen hypervisor stitches memory for guests * from different pools, which means there is no guarantee that PFN==MFN * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are * allocated in descending order (high to low), meaning the guest might * never get any MFN's under the 4GB mark. 
* */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/bootmem.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <xen/swiotlb-xen.h> #include <xen/page.h> #include <xen/xen-ops.h> #include <xen/hvc-console.h> #include <asm/dma-mapping.h> #include <asm/xen/page-coherent.h> #include <trace/events/swiotlb.h> /* * Used to do a quick range check in swiotlb_tbl_unmap_single and * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this * API. */ #ifndef CONFIG_X86 static unsigned long dma_alloc_coherent_mask(struct device *dev, gfp_t gfp) { unsigned long dma_mask = 0; dma_mask = dev->coherent_dma_mask; if (!dma_mask) dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); return dma_mask; } #endif static char *xen_io_tlb_start, *xen_io_tlb_end; static unsigned long xen_io_tlb_nslabs; /* * Quick lookup value of the bus address of the IOTLB. */ static u64 start_dma_addr; /* * Both of these functions should avoid PFN_PHYS because phys_addr_t * can be 32bit when dma_addr_t is 64bit leading to a loss in * information if the shift is done before casting to 64bit. 
 */
/*
 * Translate a guest physical address to a bus (machine) address:
 * swap the PFN for the corresponding MFN, keep the in-page offset.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;

	dma |= paddr & ~PAGE_MASK;

	return dma;
}

/* Inverse of xen_phys_to_bus(): machine address back to guest physical. */
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~PAGE_MASK;

	return paddr;
}

/* Kernel virtual address -> bus address (direct-mapped addresses only). */
static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

/*
 * Return 1 if the machine frames backing [pfn<<PAGE_SHIFT + offset,
 * + length) are consecutive, 0 otherwise.  Needed because under Xen
 * PFN+1 does not imply MFN+1.
 */
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	/* number of pages the range touches, rounding the tail up */
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

/*
 * Return 1 if the byte range [p, p+size) crosses into a machine frame
 * that is not contiguous with the first one — i.e. it cannot be handed
 * to a device as a single DMA region.
 */
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;	/* entirely inside one page: trivially fine */
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;	/* crosses pages, but machine-contiguous */
	return 1;
}

/* Does @dma_addr fall inside our own bounce buffer [start, end)? */
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
*/ if (pfn_valid(pfn)) { paddr = PFN_PHYS(pfn); return paddr >= virt_to_phys(xen_io_tlb_start) && paddr < virt_to_phys(xen_io_tlb_end); } return 0; } static int max_dma_bits = 32; static int xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) { int i, rc; int dma_bits; dma_addr_t dma_handle; phys_addr_t p = virt_to_phys(buf); dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; i = 0; do { int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); do { rc = xen_create_contiguous_region( p + (i << IO_TLB_SHIFT), get_order(slabs << IO_TLB_SHIFT), dma_bits, &dma_handle); } while (rc && dma_bits++ < max_dma_bits); if (rc) return rc; i += slabs; } while (i < nslabs); return 0; } static unsigned long xen_set_nslabs(unsigned long nr_tbl) { if (!nr_tbl) { xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); } else xen_io_tlb_nslabs = nr_tbl; return xen_io_tlb_nslabs << IO_TLB_SHIFT; } enum xen_swiotlb_err { XEN_SWIOTLB_UNKNOWN = 0, XEN_SWIOTLB_ENOMEM, XEN_SWIOTLB_EFIXUP }; static const char *xen_swiotlb_error(enum xen_swiotlb_err err) { switch (err) { case XEN_SWIOTLB_ENOMEM: return "Cannot allocate Xen-SWIOTLB buffer\n"; case XEN_SWIOTLB_EFIXUP: return "Failed to get contiguous memory for DMA from Xen!\n"\ "You either: don't have the permissions, do not have"\ " enough free memory under 4GB, or the hypervisor memory"\ " is too fragmented!"; default: break; } return ""; } int __ref xen_swiotlb_init(int verbose, bool early) { unsigned long bytes, order; int rc = -ENOMEM; enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; unsigned int repeat = 3; xen_io_tlb_nslabs = swiotlb_nr_tbl(); retry: bytes = xen_set_nslabs(xen_io_tlb_nslabs); order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT); /* * Get IO TLB memory from any location. 
*/ if (early) xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes)); else { #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order); if (xen_io_tlb_start) break; order--; } if (order != get_order(bytes)) { pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", (PAGE_SIZE << order) >> 20); xen_io_tlb_nslabs = SLABS_PER_PAGE << order; bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; } } if (!xen_io_tlb_start) { m_ret = XEN_SWIOTLB_ENOMEM; goto error; } xen_io_tlb_end = xen_io_tlb_start + bytes; /* * And replace that memory with pages under 4GB. */ rc = xen_swiotlb_fixup(xen_io_tlb_start, bytes, xen_io_tlb_nslabs); if (rc) { if (early) free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes)); else { free_pages((unsigned long)xen_io_tlb_start, order); xen_io_tlb_start = NULL; } m_ret = XEN_SWIOTLB_EFIXUP; goto error; } start_dma_addr = xen_virt_to_bus(xen_io_tlb_start); if (early) { if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose)) panic("Cannot allocate SWIOTLB buffer"); rc = 0; } else rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs); return rc; error: if (repeat--) { xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */ (xen_io_tlb_nslabs >> 1)); pr_info("Lowering to %luMB\n", (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20); goto retry; } pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); if (early) panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc); else free_pages((unsigned long)xen_io_tlb_start, order); return rc; } void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { void *ret; int order = get_order(size); u64 dma_mask = DMA_BIT_MASK(32); phys_addr_t phys; dma_addr_t dev_addr; /* * Ignore region specifiers - the kernel's ideas of * pseudo-phys memory layout has 
nothing to do with the * machine physical layout. We can't allocate highmem * because we can't return a pointer to it. */ flags &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) return ret; /* On ARM this function returns an ioremap'ped virtual address for * which virt_to_phys doesn't return the corresponding physical * address. In fact on ARM virt_to_phys only works for kernel direct * mapped RAM memory. Also see comment below. */ ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs); if (!ret) return ret; if (hwdev && hwdev->coherent_dma_mask) dma_mask = dma_alloc_coherent_mask(hwdev, flags); /* At this point dma_handle is the physical address, next we are * going to set it to the machine address. * Do not use virt_to_phys(ret) because on ARM it doesn't correspond * to *dma_handle. */ phys = *dma_handle; dev_addr = xen_phys_to_bus(phys); if (((dev_addr + size - 1 <= dma_mask)) && !range_straddles_page_boundary(phys, size)) *dma_handle = dev_addr; else { if (xen_create_contiguous_region(phys, order, fls64(dma_mask), dma_handle) != 0) { xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); return NULL; } } memset(ret, 0, size); return ret; } EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr, struct dma_attrs *attrs) { int order = get_order(size); phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); if (dma_release_from_coherent(hwdev, order, vaddr)) return; if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; /* do not use virt_to_phys because on ARM it doesn't return you the * physical address */ phys = xen_bus_to_phys(dev_addr); if (((dev_addr + size - 1 > dma_mask)) || range_straddles_page_boundary(phys, size)) xen_destroy_contiguous_region(phys, order); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); } EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); /* * 
Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. * * Once the device is given the dma address, the device owns this memory until * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed. */ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { phys_addr_t map, phys = page_to_phys(page) + offset; dma_addr_t dev_addr = xen_phys_to_bus(phys); BUG_ON(dir == DMA_NONE); /* * If the address happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce * buffering it. */ if (dma_capable(dev, dev_addr, size) && !range_straddles_page_boundary(phys, size) && !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) && !swiotlb_force) { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed * by the function. */ xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); return dev_addr; } /* * Oh well, have to allocate and map a bounce buffer. */ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir); if (map == SWIOTLB_MAP_ERROR) return DMA_ERROR_CODE; xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, size, dir, attrs); dev_addr = xen_phys_to_bus(map); /* * Ensure that the address returned is DMA'ble */ if (!dma_capable(dev, dev_addr, size)) { swiotlb_tbl_unmap_single(dev, map, size, dir); dev_addr = 0; } return dev_addr; } EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must * match what was provided for in a previous xen_swiotlb_map_page call. All * other usages are undefined. * * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. 
 */
/*
 * Common unmap path: release the bounce-buffer slot if one was used,
 * otherwise just handle CPU-visibility for DMA_FROM_DEVICE transfers.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* arch hook: cache maintenance etc. before tearing down the mapping */
	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		/* copy-back + free of the bounce slot is done by swiotlb */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

/* Public unmap entry point; thin wrapper over xen_unmap_single(). */
void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* CPU-direction arch hook runs before the swiotlb copy-back */
	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr!
*/ if (is_xen_swiotlb_buffer(dev_addr)) swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); if (target == SYNC_FOR_DEVICE) xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); if (dir != DMA_FROM_DEVICE) return; dma_mark_clean(phys_to_virt(paddr), size); } void xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu); void xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above xen_swiotlb_map_page * interface. Here the scatter gather list elements are each tagged with the * appropriate dma address and length. They are obtained via * sg_dma_{address,length}(SG). * * NOTE: An implementation may be able to use a smaller number of * DMA address/length pairs than there are SG table elements. * (for example via virtual mapping capabilities) * The routine returns the number of addr/length pairs actually * used, at most nents. * * Device ownership issues as mentioned above for xen_swiotlb_map_page are the * same here. 
*/ int xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { phys_addr_t paddr = sg_phys(sg); dma_addr_t dev_addr = xen_phys_to_bus(paddr); if (swiotlb_force || xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) || !dma_capable(hwdev, dev_addr, sg->length) || range_straddles_page_boundary(paddr, sg->length)) { phys_addr_t map = swiotlb_tbl_map_single(hwdev, start_dma_addr, sg_phys(sg), sg->length, dir); if (map == SWIOTLB_MAP_ERROR) { dev_warn(hwdev, "swiotlb buffer is full\n"); /* Don't panic here, we expect map_sg users to do proper error handling. */ xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, attrs); sg_dma_len(sgl) = 0; return 0; } xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, sg->length, dir, attrs); sg->dma_address = xen_phys_to_bus(map); } else { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed * by the function. */ xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), dev_addr, paddr & ~PAGE_MASK, sg->length, dir, attrs); sg->dma_address = dev_addr; } sg_dma_len(sg) = sg->length; } return nelems; } EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); /* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_page() above. */ void xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); } EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs); /* * Make physical memory consistent for a set of streaming mode DMA translations * after a transfer. 
* * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules * and usage. */ static void xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, enum dma_sync_target target) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nelems, i) xen_swiotlb_sync_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, target); } void xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu); void xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device); int xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return !dma_addr; } EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error); /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during bus mastering, then you would pass 0x00ffffff as the mask to * this function. */ int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; } EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported); int xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; return 0; } EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
gpl-2.0
haitend/u-boot-for-mpc8315
board/esd/ar405/fpgadata_xl30.c
232
199742
0x00,0x09,0x0f,0xf0,0x0f,0xf0,0x0f,0xf0,0x0f,0xf0,0x00,0x00,0x01,0x61,0x00,0x0d, 0x70,0x70,0x63,0x5f,0x61,0x72,0x30,0x31,0x2e,0x6e,0x63,0x64,0x00,0x62,0x00,0x0b, 0x73,0x33,0x30,0x78,0x6c,0x70,0x71,0x32,0x34,0x30,0x00,0x63,0x00,0x0b,0x32,0x30, 0x30,0x34,0x2f,0x31,0x32,0x2f,0x31,0x34,0x00,0x64,0x00,0x09,0x31,0x37,0x3a,0x30, 0x33,0x3a,0x32,0x33,0x00,0x65,0xe2,0x01,0x00,0x00,0x97,0xf2,0xff,0x30,0xe6,0x03, 0xe5,0x01,0x01,0x01,0xe5,0xe6,0x04,0x01,0x02,0x11,0x09,0x09,0x02,0x04,0x04,0x06, 0x09,0x07,0x09,0x04,0x04,0x04,0x04,0x09,0x09,0x0b,0x09,0x09,0x04,0x04,0x04,0x04, 0x04,0x09,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x03,0x07,0x08,0x01,0xe5, 0x0c,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x13,0x03,0x02,0x02,0x03,0x02,0x08, 0x09,0x11,0x03,0x11,0x02,0x06,0x03,0x05,0x03,0x21,0x33,0x11,0xe5,0xe5,0xe3,0x17, 0x0b,0xe5,0x01,0x0f,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x04,0x04,0x02, 0x06,0x01,0xe5,0x05,0x01,0x02,0x02,0xe6,0x03,0x04,0x09,0x09,0x07,0xe6,0x03,0x06, 0x01,0x07,0x09,0x04,0x04,0x09,0x09,0x06,0x02,0x04,0x04,0x04,0x04,0x09,0x09,0x09, 0x12,0x14,0x09,0x09,0x03,0x05,0x03,0x05,0x09,0x09,0x03,0x05,0x09,0xe5,0x01,0x05, 0xe5,0x07,0xe5,0xe6,0x04,0x0b,0xe5,0x07,0xe5,0x07,0x03,0x05,0xe5,0x01,0x05,0xe5, 0x01,0x05,0xe6,0xe5,0x04,0x03,0x05,0x03,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5, 0x07,0x09,0x03,0x0e,0x05,0x03,0x05,0x03,0x05,0x13,0x03,0x05,0x03,0x08,0x31,0xe5, 0xe5,0x37,0x44,0x01,0x01,0x01,0x79,0x8a,0xe5,0x01,0x10,0x09,0x09,0x1d,0x09,0x38, 0x04,0x36,0x45,0xe7,0xe5,0x11,0x09,0x09,0x13,0x09,0x3e,0x09,0x3b,0x1d,0x18,0x05, 0x01,0x01,0x06,0x73,0x12,0x77,0xe5,0xe5,0xe5,0xa2,0xe5,0x41,0x1c,0xe8,0x84,0x1f, 0x25,0x3a,0xe5,0xe6,0x3c,0x70,0x4e,0x0c,0x10,0x09,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x03,0x07,0x01,0x07,0x09,0x09,0x09,0x09,0x09,0xe5,0x07,0x09, 0x09,0x09,0x01,0x07,0x0d,0xe5,0x01,0x10,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09, 0x05,0x03,0x05,0x03,0x05,0x03,0x07,0x03,0x09,0x05,0x03,0x05,0x03,0x09,0x09,0x09, 
0x05,0x03,0x09,0x01,0x07,0x09,0x09,0x08,0x06,0x01,0x0d,0xe5,0x05,0x01,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x04,0x01,0xe6,0x07,0xe5,0x03,0x02,0xe6,0x07,0xe5,0x01, 0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x01,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x01,0x05, 0xe5,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04, 0xe5,0x07,0xe5,0x01,0x0c,0xe8,0x0f,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x05,0x03, 0x01,0x07,0x09,0xe5,0x07,0xe5,0x01,0x02,0xe5,0x02,0x09,0x09,0x09,0x09,0x01,0x01, 0x05,0x09,0x09,0x09,0x09,0x09,0x09,0x06,0x0a,0x05,0xe5,0x05,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x02,0x06,0xe5,0x03,0x03,0xe5,0x03,0x03,0xe5,0x04,0x02, 0xe5,0x02,0x01,0x02,0xe5,0x01,0x05,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x03, 0x03,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x07,0x02,0x03,0x03,0x0f,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x08,0x07,0x01,0x0d,0x09,0x09,0x09,0x09,0x04,0x01,0x02,0x09,0x09, 0x09,0x09,0x09,0x05,0x03,0x09,0x01,0x03,0x05,0x09,0x09,0x08,0xe5,0x06,0x01,0x09, 0x08,0xe5,0x08,0x09,0x09,0x09,0x0a,0x07,0xe6,0x01,0x03,0x06,0x02,0xe5,0x04,0x02, 0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02, 0x04,0x01,0x02,0x05,0xe5,0x01,0x08,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x04,0x01, 0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06, 0x06,0x02,0xe5,0x02,0x37,0xe5,0x04,0x09,0x31,0x05,0x05,0x13,0x09,0x04,0x24,0x07, 0x09,0x17,0x02,0x02,0x01,0x01,0x03,0x42,0x01,0x13,0x13,0x10,0x0c,0x01,0x09,0x07, 0x01,0x06,0x05,0x24,0x06,0x0a,0x0f,0x0f,0x01,0xe6,0xe5,0x1d,0x1b,0x21,0x09,0x09, 0x13,0x01,0x13,0x1d,0x05,0x17,0x2b,0x05,0xe6,0x33,0x04,0x05,0xe5,0x05,0x03,0x06, 0x2c,0x0b,0x04,0x1d,0x59,0xe6,0x0f,0x09,0x09,0x09,0x09,0x07,0x01,0x09,0x09,0x09, 0x09,0x09,0x09,0x01,0x05,0x03,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09, 
0x09,0x0d,0xe5,0x01,0x2b,0x02,0x1a,0x13,0x02,0x09,0x06,0x0c,0x08,0xe5,0xe5,0x05, 0x02,0xe5,0x07,0x06,0x01,0xe5,0x09,0x0f,0x01,0xe5,0x0f,0x09,0x03,0x03,0x01,0x1b, 0xe5,0xe5,0x2a,0x12,0x2f,0xe5,0x08,0x09,0x0b,0x16,0x06,0x16,0x12,0x06,0x03,0x08, 0x1c,0x01,0x2b,0x1e,0x03,0x0e,0xe5,0x07,0x0a,0x03,0x04,0x0c,0x04,0x03,0xe5,0x07, 0x6a,0xe5,0x01,0x59,0x06,0x03,0x1c,0x0b,0x0b,0x18,0x0d,0x13,0x2b,0x06,0xe7,0x14, 0x0a,0x09,0x2c,0x15,0x64,0x34,0x01,0xe5,0x13,0x16,0x09,0x01,0x17,0x0b,0x01,0x09, 0x01,0x01,0x2a,0x0e,0x01,0x04,0x02,0x01,0x09,0x07,0x01,0x02,0x04,0x01,0x07,0xe6, 0x01,0xe5,0x02,0xe7,0x05,0x01,0x13,0x05,0xe7,0x2d,0x06,0x02,0x0a,0x0c,0x0f,0x06, 0x01,0x0d,0x0e,0x1e,0x01,0x07,0x02,0x07,0x09,0xe5,0x08,0x08,0x08,0x0a,0x01,0x19, 0x01,0x54,0xe5,0x26,0x03,0x01,0x12,0x10,0x02,0x13,0x0e,0x06,0x08,0x23,0x04,0x01, 0x01,0x10,0x13,0xe5,0x02,0x04,0x09,0x09,0x09,0x02,0x06,0x15,0x09,0x01,0x02,0x02, 0x07,0x05,0x11,0x06,0x01,0x0a,0x13,0x0d,0x02,0x30,0xe6,0x01,0x0f,0x34,0x23,0x11, 0xe5,0x28,0x30,0x0b,0x1b,0x07,0xe5,0x26,0x12,0x11,0xe5,0x26,0x25,0xe6,0xe5,0x0b, 0x05,0x03,0x01,0x4d,0x02,0xe5,0x10,0x5a,0x14,0xe6,0x18,0x2d,0x10,0x13,0x17,0x01, 0x33,0x33,0x01,0x04,0x02,0x01,0x09,0x08,0x02,0xe6,0xe6,0x03,0x04,0x02,0x01,0x04, 0x04,0x07,0x01,0x0a,0x06,0x01,0x07,0x01,0x0a,0x08,0x15,0x0d,0xe7,0x19,0x04,0x13, 0x21,0x01,0x11,0x09,0x07,0xe6,0x09,0x03,0x05,0xe5,0x01,0x10,0x06,0x02,0x0b,0x05, 0x01,0x1d,0x06,0x1b,0x01,0xe6,0x24,0x2f,0x13,0x08,0x13,0x0c,0x14,0x07,0x30,0x01, 0x02,0x21,0x01,0xe6,0x1f,0x1f,0xe5,0x25,0xe5,0x0e,0x0a,0x03,0x09,0x06,0x02,0xe5, 0x11,0xe5,0x07,0xe5,0x04,0x06,0xe5,0x03,0x0b,0x27,0xe7,0xe6,0x01,0x01,0x2d,0x01, 0x0e,0x20,0x01,0x01,0x12,0x0b,0xe5,0x02,0x0b,0x01,0xe5,0x0f,0xe5,0xe6,0x01,0xe5, 0x03,0xe5,0x01,0x07,0x03,0xe5,0x0b,0x09,0x14,0xe5,0x08,0x01,0x04,0x01,0x04,0x40, 0xe5,0x01,0x01,0x1d,0x01,0x0f,0x04,0x01,0xe5,0x09,0xe5,0xe5,0xe5,0x06,0x03,0xe5, 0x0a,0xe5,0x0c,0x07,0x10,0x01,0xe5,0x05,0xe5,0x02,0x0e,0x01,0x02,0x09,0x05,0x01, 
0xe5,0x01,0x43,0x02,0x01,0x2f,0x06,0x0b,0x02,0x01,0x07,0x10,0x0c,0x0b,0x0e,0x02, 0x06,0xe5,0x11,0x02,0x01,0x0b,0x02,0x02,0xe5,0x46,0x0d,0x13,0x19,0x02,0x02,0x05, 0xe5,0x09,0x09,0x01,0x05,0x03,0x09,0x06,0xe5,0x10,0x06,0x03,0x09,0x08,0x03,0x02, 0xe5,0x01,0x08,0xe5,0xe5,0x01,0x4a,0x05,0x03,0x01,0x02,0x0e,0x04,0x0e,0x07,0x03, 0x03,0xe5,0x03,0x05,0x03,0x05,0x01,0x01,0x05,0x03,0x04,0xe5,0x02,0x03,0x03,0x01, 0x01,0x02,0x0a,0x04,0x01,0xe7,0x04,0x01,0xe5,0x0b,0x02,0x01,0x01,0x04,0x03,0xe8, 0x42,0x04,0x01,0x09,0x13,0x05,0x1e,0xe7,0xe5,0x03,0x01,0xe5,0x08,0x01,0x0f,0x03, 0x07,0x0e,0x01,0x01,0x05,0x06,0x13,0x0d,0x03,0xe5,0x48,0x01,0x07,0x3a,0x02,0x01, 0x04,0x02,0x09,0x01,0x11,0x0b,0x0d,0xe5,0x01,0x01,0x1b,0x01,0x0e,0xe8,0x08,0x02, 0x6a,0xe5,0x07,0x0d,0x07,0xe6,0x10,0x02,0x06,0x09,0x1d,0x02,0x06,0x1e,0xe5,0xe6, 0x09,0xe5,0x75,0xe6,0x12,0x09,0x09,0x0b,0x07,0x01,0x11,0x09,0x06,0x04,0x11,0x09, 0x02,0xe6,0x0d,0x09,0x09,0x09,0x09,0x09,0x09,0x02,0x06,0x09,0x09,0x02,0x03,0x02, 0x09,0x0b,0x09,0x03,0x02,0x02,0x03,0xe5,0xe5,0x01,0x05,0x03,0x06,0x02,0x03,0x05, 0x02,0x01,0x01,0x02,0x03,0x02,0x02,0x05,0x03,0x04,0x01,0x02,0x06,0x02,0x03,0x09, 0x02,0x01,0x02,0x6a,0x02,0x1f,0x08,0x0a,0x10,0x02,0x13,0x08,0xe5,0x1b,0xe5,0x0d, 0x02,0x03,0x6e,0x1e,0x14,0x12,0x13,0x0b,0x1d,0x13,0x02,0x21,0x01,0x2e,0x02,0x11, 0x13,0x0a,0x0a,0x01,0x07,0x01,0x07,0x01,0x06,0x02,0x09,0x07,0x08,0xe5,0xe5,0x06, 0x01,0x06,0x0c,0x07,0x01,0x04,0x09,0xe8,0x20,0x01,0xe5,0x0c,0x22,0xe5,0x0f,0x02, 0x09,0x06,0x02,0x08,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x07,0xe5, 0x05,0x03,0x05,0x02,0x03,0x02,0x01,0xe5,0x05,0x01,0x01,0x02,0x02,0x01,0xe6,0xe5, 0x01,0xe5,0x01,0x06,0x01,0x01,0x05,0xe5,0x02,0x02,0xe5,0xe5,0x1f,0xe5,0xe5,0x30, 0x11,0x29,0x01,0x07,0x01,0x06,0xe5,0xe5,0x0d,0x02,0x01,0x06,0xe5,0x08,0x01,0x04, 0x02,0x01,0x13,0x09,0x03,0x0a,0x02,0xe5,0x14,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe5, 
0x06,0xe6,0xe6,0x04,0xe5,0xe6,0xe5,0x02,0xe5,0xe6,0xe5,0x02,0xe5,0xe6,0x04,0xe5, 0xe5,0x05,0xe5,0xe6,0xe5,0x05,0xe5,0x04,0x02,0xe5,0xe5,0x04,0x01,0x01,0x05,0xe5, 0x07,0xe6,0x06,0xe6,0x06,0xe7,0x02,0x02,0x01,0xe5,0x7a,0x06,0x07,0x03,0x02,0x01, 0x03,0x03,0x01,0xe5,0x05,0x01,0xe5,0x02,0x04,0x04,0x04,0x07,0x09,0x01,0x05,0x01, 0x01,0x09,0x07,0x0b,0x0e,0x01,0x01,0x7b,0x08,0x01,0xe5,0x04,0x02,0xe5,0xe5,0x05, 0xe5,0xe6,0x02,0x01,0xe5,0xe5,0x02,0x02,0x02,0x03,0x02,0xe5,0x07,0xe5,0x08,0x01, 0x06,0xe5,0xe5,0x08,0x09,0x07,0x10,0xe5,0xe6,0x70,0x12,0x05,0x04,0xe5,0x01,0x04, 0xe6,0x01,0x03,0x01,0xe6,0xe6,0x03,0xe5,0x07,0xe5,0x01,0x05,0xe6,0x04,0x01,0x03, 0x05,0x15,0x0b,0x0e,0x01,0xe6,0x37,0x4e,0x02,0x02,0xe5,0x01,0x01,0x04,0x02,0x01, 0xe5,0x02,0x02,0x01,0xe5,0x01,0x06,0x02,0x03,0x04,0xe5,0x02,0x05,0x03,0x01,0xe5, 0x05,0x04,0x09,0x06,0x07,0x04,0x0b,0xe6,0xe5,0x66,0x13,0x02,0x07,0x14,0xe5,0xe5, 0x37,0xe5,0xe5,0x08,0x10,0x02,0x0d,0xe8,0x10,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x04,0x02,0x01, 0x07,0x01,0x01,0x02,0x02,0x01,0xe5,0x07,0x01,0xe5,0xe5,0x03,0x01,0x04,0x02,0x01, 0x07,0x01,0x01,0x05,0x01,0xe6,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x05, 0xe6,0xe5,0x04,0x01,0x01,0x05,0x01,0x02,0x08,0xe5,0xe5,0x1f,0x20,0x0d,0x0b,0x1b, 0x03,0x05,0x07,0xe5,0x01,0x13,0x05,0x0a,0x08,0xe6,0x0d,0x05,0x05,0x02,0x05,0x03, 0x0f,0xe5,0x01,0x0d,0xe8,0x09,0x12,0x1f,0x1d,0x08,0x04,0x04,0x05,0x09,0x08,0x16, 0x0f,0x25,0x2f,0xe5,0xe8,0x3e,0x0c,0x03,0xe5,0x34,0x0d,0x0f,0x05,0x06,0x03,0xe6, 0xe5,0x0b,0x12,0x0d,0x1b,0xe5,0x02,0xe5,0x11,0xe5,0x0b,0x01,0x2f,0xe6,0x1a,0x0c, 0x12,0x01,0x01,0x02,0x02,0x09,0x0d,0x07,0x01,0x02,0x09,0x02,0x01,0x04,0x08,0x05, 0x07,0xe6,0x08,0x05,0x07,0x02,0x01,0x2f,0x01,0x34,0x0c,0x06,0x08,0x01,0xe5,0x0c, 0x08,0x09,0x0c,0x14,0x05,0x0f,0x01,0x08,0x01,0x0d,0x09,0x01,0x03,0x2f,0x23,0x15, 0x09,0x02,0x01,0x0c,0xe5,0x05,0x03,0xe6,0x0d,0x16,0x11,0x01,0x0a,0xe5,0x0b,0x0a, 
0x03,0x05,0x0a,0xe5,0xe6,0x0f,0x02,0x08,0x13,0x31,0x04,0x02,0x0d,0x05,0x05,0xe5, 0x0d,0x06,0x02,0x13,0xe5,0x07,0x13,0x06,0x01,0x02,0x0d,0x08,0xe5,0x05,0x0b,0x02, 0x0a,0xe5,0x2d,0x2c,0x02,0x09,0x18,0xe5,0x08,0x02,0x0d,0x22,0x08,0x30,0x01,0x01, 0x1d,0x04,0x04,0x35,0x01,0x0b,0x08,0x03,0x08,0x02,0xe5,0x04,0xe6,0x01,0x04,0xe5, 0x02,0xe7,0x01,0x03,0xe5,0x07,0xe7,0x03,0x01,0x09,0x44,0xe5,0xe5,0x21,0x48,0x19, 0x17,0x07,0x0b,0x0e,0x41,0x03,0xe5,0x01,0x02,0x6b,0x05,0x0d,0x04,0x01,0x04,0x09, 0x09,0x0e,0x0a,0x23,0x17,0x11,0x6c,0x01,0x04,0x1a,0x1d,0x04,0x1a,0x39,0xe6,0xe5, 0x04,0x6d,0x12,0x25,0x02,0x01,0x0d,0x2a,0x1c,0x01,0xe7,0x07,0x01,0x05,0x01,0xe5, 0x04,0xe5,0xe5,0xe5,0x03,0xe5,0xe5,0xe5,0x02,0xe5,0x44,0x10,0x02,0x18,0xe5,0x03, 0xe5,0xe6,0x04,0xe5,0x01,0xe5,0x03,0xe5,0x0b,0x01,0x11,0xe5,0x05,0x01,0x01,0x04, 0xe5,0x08,0x09,0x01,0xe6,0xe5,0x01,0x01,0x04,0xe5,0x11,0xe5,0x04,0x02,0xe5,0x4b, 0x02,0x01,0xe5,0x03,0x06,0xe5,0x07,0x09,0x06,0xe5,0xe5,0x01,0x06,0xe5,0x07,0x09, 0x05,0xe5,0x01,0x1a,0xe5,0x16,0x01,0x02,0x01,0xe6,0x03,0x16,0x13,0x15,0xe5,0x01, 0x01,0x25,0x06,0x01,0x02,0x0b,0x1d,0x02,0x06,0x01,0x07,0x01,0x02,0xe5,0x02,0x04, 0xe5,0x02,0x04,0x01,0x18,0x11,0x04,0xe6,0xe5,0x01,0x43,0xe5,0xe5,0xe5,0x21,0xe5, 0xe5,0x13,0xe5,0x08,0x09,0x06,0xe5,0x0a,0x06,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x25, 0x06,0x04,0xe6,0x11,0x09,0x09,0x3b,0x04,0x08,0xe5,0xe5,0x0c,0x02,0x04,0x09,0x03, 0x02,0xe5,0x0e,0x02,0x02,0x05,0xe5,0x05,0x02,0x06,0x02,0x06,0x02,0xe5,0xe5,0x02, 0x02,0x05,0x09,0x0c,0x07,0x02,0xe6,0x01,0x4a,0x1b,0x01,0x02,0xe7,0xe5,0x06,0x03, 0x02,0x01,0x02,0x03,0x01,0x02,0x04,0x02,0x01,0x04,0x02,0x01,0xe5,0x02,0x02,0x01, 0xe5,0x02,0x03,0xe6,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5, 0xe5,0xe5,0x01,0x01,0x1f,0x02,0x02,0x01,0xe7,0x12,0x09,0x06,0x0a,0x18,0x01,0x15, 0x02,0x01,0xe5,0x08,0x0b,0x0b,0x09,0x07,0x0b,0x09,0x05,0x03,0x09,0x09,0x07,0x01, 0x0e,0x01,0x04,0x02,0x01,0x10,0xe5,0x01,0xe5,0x10,0xe8,0x04,0xe5,0xe6,0x05,0x22, 
0x01,0x16,0x01,0x04,0x07,0x04,0x10,0x27,0x29,0x0b,0xe5,0xe5,0x06,0x01,0x15,0xe6, 0x08,0x02,0x03,0x09,0x04,0x04,0x3b,0x04,0x07,0x02,0x10,0x02,0x08,0xe5,0x07,0x02, 0x06,0x02,0x06,0x02,0x06,0xe5,0x07,0xe5,0x11,0xe5,0x0e,0x02,0x01,0x04,0x04,0x07, 0x02,0x06,0xe5,0x02,0x0a,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x39,0xe5,0x0b,0x06,0x2c, 0x09,0x13,0xe5,0x1b,0x05,0xe5,0x01,0xe6,0x02,0xe5,0x0b,0x09,0x01,0x01,0xe6,0x06, 0x05,0xe5,0x07,0xe5,0x07,0xe6,0x06,0x09,0x09,0x09,0x09,0x09,0xe6,0x06,0x02,0xe5, 0x01,0x02,0x09,0x04,0x01,0x04,0x09,0x09,0x02,0x02,0x03,0x04,0xe6,0x01,0x06,0x02, 0x09,0x02,0x06,0x02,0x01,0x01,0x02,0x03,0x05,0xe5,0x07,0xe6,0x06,0x05,0x07,0x02, 0x01,0x02,0x6c,0xe5,0x08,0x15,0x09,0x09,0x12,0x0a,0x09,0x27,0x0e,0x05,0xe5,0x10, 0x09,0x53,0x08,0x14,0x09,0x09,0x1d,0x09,0x1b,0x0d,0x12,0xe8,0x0f,0x01,0x07,0xe6, 0x06,0x01,0x39,0x01,0x10,0x02,0x03,0xe5,0x09,0x0a,0x08,0xe5,0x08,0x09,0x01,0x07, 0x09,0x09,0x13,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x16,0x01,0x11,0x09,0x09,0x3b, 0x08,0x06,0x03,0x01,0x05,0xe5,0x0a,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x05,0x01,0xe5, 0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x09,0x05,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01, 0x01,0x09,0x02,0xe9,0x19,0x47,0x02,0x0c,0x09,0x15,0x09,0x04,0x04,0x06,0xe5,0xe5, 0x1c,0x09,0x03,0x0d,0x01,0x09,0x03,0x0b,0xe5,0xe5,0x12,0xe7,0x05,0xe7,0x05,0xe7, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x07,0xe6,0x06, 0xe6,0x04,0xe7,0xe5,0x06,0xe7,0x04,0xe8,0x04,0xe8,0x05,0xe7,0xe5,0x02,0xe7,0x05, 0x01,0xe5,0x05,0x01,0xe6,0x04,0xe8,0x06,0xe5,0x05,0xe5,0xe6,0xe5,0x02,0xe5,0xe5, 0x01,0x05,0xe5,0x06,0x01,0x01,0x1a,0x45,0x06,0x01,0x09,0x0e,0x01,0x09,0x01,0x06, 0x02,0x03,0x03,0x01,0x07,0x01,0xe5,0xe6,0x04,0x02,0x03,0x02,0x02,0x03,0x09,0xe5, 0xe5,0x09,0x01,0x0a,0xe5,0x06,0x0c,0x02,0x02,0x13,0x02,0x06,0x02,0x06,0x02,0x35, 0x02,0x03,0x01,0x09,0x02,0x0a,0x02,0x01,0x08,0xe5,0x07,0xe5,0x02,0x02,0x01,0xe5, 0x07,0xe6,0xe6,0x02,0xe7,0x06,0xe6,0x06,0x09,0xe5,0x09,0x01,0x03,0x02,0x06,0x02, 
0x02,0x02,0x04,0xe5,0x03,0xe5,0xe5,0xe5,0x10,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04, 0x02,0xe5,0x36,0x03,0x0d,0x0f,0x03,0x07,0x08,0xe5,0x08,0x09,0x03,0x03,0x01,0x09, 0x09,0x09,0x03,0x01,0x09,0x02,0xe6,0x03,0x02,0xe6,0x01,0x07,0x06,0xe6,0xe5,0x12, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x37,0x01,0x0d,0x0a,0x02,0x01,0x09,0x09, 0xe5,0x04,0x02,0x06,0x02,0xe5,0x01,0x01,0xe5,0x02,0x08,0xe5,0x0e,0x02,0x03,0x0f, 0x09,0x05,0x08,0x03,0x01,0xe5,0x01,0x6b,0x05,0x09,0x02,0x01,0x10,0xe5,0xe5,0x05, 0xe5,0xe5,0x05,0xe5,0xe5,0xe5,0x11,0x05,0xe5,0xe6,0x04,0xe5,0xe5,0x05,0x02,0x03, 0xe5,0x26,0x02,0x11,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0x01,0x05,0x01,0x06,0xe5, 0xe5,0x06,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x05,0xe6,0x06,0x01, 0xe5,0x02,0x04,0x01,0x03,0x03,0x01,0x02,0x01,0x02,0x01,0xe5,0x02,0x02,0x01,0x07, 0x01,0xe5,0x05,0x01,0x03,0x03,0x01,0x03,0x03,0x01,0x04,0x02,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x0a,0x01,0x01,0x0b,0xe5,0x02,0x04,0x03,0xe5,0x07,0xe5,0x29,0x0f, 0xe5,0x03,0x0c,0x02,0x05,0xe5,0x36,0x03,0x06,0x17,0x04,0x06,0x01,0x07,0x14,0x03, 0xe5,0x01,0xe5,0x03,0x08,0x13,0x0f,0x09,0x0a,0x16,0x07,0x09,0x08,0x0a,0x0b,0x04, 0xe6,0xe5,0x08,0x09,0x19,0xe5,0x01,0x0a,0x0c,0x05,0x05,0x04,0x03,0x0a,0x01,0x01, 0x02,0x13,0x02,0xe5,0xe5,0x07,0x3b,0x19,0x01,0x09,0x05,0x0d,0x05,0x02,0x07,0x01, 0xe5,0x06,0x12,0x09,0x1a,0x04,0x0a,0x09,0xe9,0x18,0x45,0x06,0x01,0xe5,0x05,0x02, 0x06,0x04,0xe5,0x03,0xe6,0x04,0x01,0x02,0x07,0x06,0x02,0x0b,0x04,0x04,0xe5,0x02, 0x02,0x01,0x04,0x02,0x06,0x07,0x02,0x0b,0x01,0x07,0x01,0x0c,0x02,0xe5,0xe5,0x0a, 0x1c,0x3a,0x01,0x02,0x01,0x09,0x04,0x04,0x02,0x01,0x01,0x07,0x03,0x0c,0x05,0x03, 0x06,0x15,0x0a,0x1c,0x01,0x07,0x0e,0xe6,0xe6,0x21,0x0d,0x3c,0xe5,0x02,0x03,0x0d, 0x03,0x09,0x02,0x05,0xe5,0xe5,0x02,0x07,0x09,0x03,0x05,0x01,0x01,0x01,0x02,0x02, 0x06,0x0a,0x05,0x07,0x09,0x04,0x0c,0xe7,0x11,0x02,0x05,0xe5,0x1d,0x0d,0x04,0x09, 0x09,0x09,0x07,0x10,0x01,0x04,0x05,0x08,0x10,0x16,0xe6,0x0a,0x08,0x06,0xe5,0x07, 
0x05,0x03,0x11,0xe7,0x23,0x26,0x18,0x09,0x05,0x03,0x0d,0x0f,0x06,0x02,0x02,0x1c, 0x11,0xe5,0x0e,0x04,0x04,0x04,0x02,0x10,0xe5,0xe6,0x09,0x05,0x09,0x09,0x02,0x53, 0x01,0x07,0x01,0xe5,0x01,0x03,0x01,0xe6,0x03,0x10,0x08,0x06,0xe5,0x0d,0x0e,0x0b, 0x05,0x03,0x09,0x0d,0xe5,0x01,0x46,0x39,0x04,0x04,0x08,0x22,0x02,0x01,0x01,0x1b, 0x0b,0x21,0xe5,0xe5,0x08,0x05,0x01,0x07,0x01,0x09,0x0d,0x3a,0x05,0x03,0x01,0x0c, 0xe6,0x08,0x09,0x04,0x01,0x09,0x31,0xe5,0xe6,0x09,0x03,0x04,0x05,0x07,0x02,0xe5, 0x0f,0xe5,0x0d,0x02,0x27,0x2d,0x08,0x0a,0x03,0x01,0x01,0x03,0x03,0x03,0x08,0x12, 0x04,0x19,0x06,0x09,0x06,0x10,0xe6,0xe6,0x0b,0xe5,0x08,0x03,0x06,0x02,0x02,0x21, 0x16,0x0b,0x14,0x0b,0x02,0x05,0x02,0xe6,0x33,0x11,0x04,0x03,0x02,0x02,0x13,0x01, 0x01,0x01,0x01,0x5d,0xe5,0x06,0x10,0x02,0xe5,0x05,0x01,0x0b,0x09,0x09,0xe5,0x11, 0xe5,0x04,0x02,0xe5,0x12,0x02,0xe5,0x03,0xe5,0x1d,0xe5,0x01,0x01,0x02,0x03,0x1f, 0x33,0x01,0x04,0x05,0xe5,0x02,0x10,0xe5,0x11,0x04,0x04,0x04,0x01,0x01,0x01,0xe5, 0x07,0xe5,0x07,0xe6,0x05,0xe6,0xe5,0x05,0xe5,0x0a,0x23,0x01,0x01,0x02,0xe5,0xe6, 0x03,0x01,0x02,0xe5,0x01,0x06,0x01,0x02,0x04,0x01,0xe5,0x23,0x01,0x05,0x1a,0x07, 0x09,0x04,0x05,0x05,0x0b,0x01,0xe5,0xe5,0x08,0x01,0x07,0x09,0x07,0x01,0x09,0x15, 0x02,0xe5,0x02,0x04,0xe5,0x0a,0x01,0x05,0x02,0xe5,0xe5,0x05,0xe5,0x01,0x09,0x2e, 0x20,0x06,0x02,0x0b,0x13,0x06,0x02,0x01,0x09,0x09,0x09,0x07,0x01,0x09,0x0d,0xe5, 0x11,0xe5,0x0f,0x02,0x01,0xe7,0xe5,0x0c,0x02,0x02,0x03,0x03,0x01,0x03,0x05,0x1c, 0x1a,0x06,0x05,0xe6,0xe5,0x06,0x08,0x02,0x01,0x07,0x06,0xe6,0x05,0x02,0x02,0x09, 0x09,0x13,0x0c,0x06,0xe5,0x02,0x01,0x03,0x02,0x02,0x11,0xe9,0x03,0x0a,0x05,0xe5, 0x01,0x06,0x02,0x03,0xe6,0x21,0x19,0x03,0x03,0x01,0x01,0x09,0x01,0x03,0x03,0x01, 0x01,0x09,0x04,0x04,0x04,0x02,0x01,0x07,0x01,0x07,0x01,0x09,0x07,0x01,0x12,0x05, 0x03,0x05,0x0b,0x02,0x02,0xe5,0xe6,0x0e,0x05,0x0b,0x27,0x17,0x07,0x07,0x05,0xe5, 0x01,0x0b,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x03,0xe5,0x02,0x04,0x04,0x04,0x01,0x02, 
0x04,0x0e,0x0c,0x04,0x01,0x05,0x01,0x01,0x05,0x03,0x08,0x04,0xe6,0x01,0xe5,0x0b, 0x02,0x06,0x02,0x06,0x02,0x24,0x18,0xe6,0x01,0x05,0xe5,0x01,0x04,0x06,0x09,0x01, 0x0e,0xe7,0xe5,0x03,0xe6,0x01,0x05,0x03,0x04,0x04,0x05,0x0d,0x0b,0xe5,0xe5,0x01, 0x02,0x03,0x01,0xe5,0x01,0x1a,0xe5,0x08,0x02,0x03,0x02,0xe5,0x04,0x09,0x02,0x1d, 0x1a,0x2c,0x27,0xe6,0x10,0x02,0x06,0x02,0x03,0x04,0x04,0x02,0xe5,0x04,0x0c,0x02, 0x07,0xe5,0x01,0x14,0x13,0x01,0x1b,0xe6,0x16,0xe5,0x12,0x18,0x01,0x11,0x27,0x0f, 0xe5,0x1c,0x02,0x06,0x02,0xe6,0xe6,0x0a,0x02,0xe5,0x01,0x02,0x02,0xe5,0xe5,0x02, 0x02,0xe5,0x01,0x05,0x09,0x09,0x09,0x09,0x09,0xe6,0x06,0x06,0x02,0x09,0x06,0x04, 0x03,0x02,0x02,0x09,0x03,0x05,0x09,0x02,0x06,0x09,0x03,0x05,0x09,0xe5,0x04,0x02, 0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x05,0x05,0x07,0x03,0xe5,0x01,0x06,0x02,0x06,0x09, 0x2a,0x24,0x02,0x12,0xe5,0x07,0x02,0x4c,0x0b,0xe5,0x08,0x0e,0x02,0x03,0x0b,0x39, 0x28,0x14,0x0b,0x4b,0x0b,0x09,0x16,0xe6,0x0c,0x02,0x09,0x09,0x19,0xe5,0xe5,0x06, 0x01,0x14,0xe6,0x0a,0x04,0x02,0x03,0x0b,0xe5,0xe5,0x2d,0x1e,0x01,0xe7,0xe5,0x04, 0x09,0x15,0x01,0x02,0xe5,0x0e,0x2f,0x01,0xe5,0x05,0x03,0x13,0x0f,0x03,0x01,0x02, 0x03,0x05,0x02,0x01,0xe5,0x05,0x02,0x0a,0x04,0x03,0x05,0x03,0x09,0x05,0x03,0x09, 0x06,0x01,0xe6,0x04,0x0b,0x07,0x01,0x01,0x01,0x0d,0xe5,0x0c,0x09,0x01,0x2c,0x02, 0x01,0x0e,0x18,0x0d,0x05,0x01,0x06,0x07,0x12,0x03,0x01,0x11,0x01,0x07,0x01,0x07, 0x01,0xe5,0x1b,0x0e,0x01,0x01,0x15,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe6,0x06,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe6,0x06,0xe5,0x06,0xe6,0xe6, 0xe5,0x03,0x01,0xe5,0x04,0xe5,0xe5,0x06,0xe6,0x05,0xe8,0xe5,0x02,0xe7,0x05,0xe8, 0xe5,0x02,0x01,0xe6,0xe5,0x02,0xe8,0xe5,0x02,0x01,0xe6,0x06,0xe5,0x01,0x05,0xe5, 0x01,0x04,0xe6,0x07,0x02,0x23,0x1b,0x01,0x1e,0xe5,0x07,0x10,0x01,0x04,0x04,0x01, 0xe5,0x07,0xe5,0x0f,0x01,0xe6,0xe5,0x05,0xe5,0xe5,0x02,0x01,0xe6,0xe5,0x08,0x09, 0xe5,0x04,0xe5,0x04,0x02,0xe5,0x04,0x12,0x07,0x02,0xe5,0x0e,0x13,0x1b,0x01,0x34, 
0x04,0x01,0x06,0x01,0xe5,0xe6,0x02,0x01,0x03,0x02,0x02,0x05,0x03,0xe5,0xe5,0x03, 0x01,0x07,0x01,0xe5,0xe5,0x03,0x01,0x13,0x06,0x01,0x01,0x0b,0x07,0x01,0x02,0x01, 0x02,0xe5,0x03,0xe9,0x08,0x0f,0x09,0x46,0x10,0x01,0x06,0x04,0x05,0xe5,0x01,0x04, 0xe6,0x01,0x04,0xe5,0xe5,0xe5,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05, 0x01,0x01,0x05,0x21,0x07,0x06,0x03,0x21,0x2c,0x13,0x07,0x10,0x01,0x09,0x01,0xe5, 0x01,0xe5,0x04,0x02,0x05,0x03,0x01,0xe6,0xe6,0x01,0xe8,0xe6,0x02,0xe7,0xe6,0x01, 0xe8,0xe5,0x04,0x01,0x01,0x03,0x01,0xe5,0x04,0xe5,0x05,0x12,0x04,0x03,0x03,0x05, 0x0d,0x01,0xe5,0x05,0x01,0xe5,0x05,0x02,0x24,0x04,0x22,0x14,0x0c,0xe5,0x05,0xe5, 0xe5,0x05,0x0c,0x06,0x01,0xe5,0x0f,0x07,0x0b,0x09,0x16,0x03,0x01,0x01,0x0e,0x02, 0x01,0x02,0x01,0x02,0x01,0x03,0x03,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04,0x01, 0x07,0x01,0x07,0x01,0xe5,0x05,0x01,0x01,0x05,0x01,0x03,0x03,0x01,0x01,0x05,0x01, 0xe5,0x06,0xe5,0xe5,0xe5,0x04,0x01,0x04,0x02,0x01,0x07,0x01,0x07,0x01,0x04,0x02, 0x01,0xe5,0x05,0x01,0x01,0x02,0x02,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0xe6,0x04, 0x01,0xe6,0x04,0x01,0x0a,0x03,0x13,0x0c,0xe5,0x27,0x02,0x11,0xe5,0x08,0x2b,0x0a, 0xe5,0x02,0x04,0x02,0xe5,0x04,0x09,0x05,0x04,0xe5,0x0c,0xe6,0x05,0x09,0x04,0x14, 0x02,0xe6,0x09,0x09,0x30,0x15,0x01,0x09,0x06,0x0a,0x08,0x07,0x04,0x09,0x06,0x02, 0x06,0x09,0x02,0x06,0x02,0x06,0x02,0x0b,0xe5,0xe5,0x1f,0x08,0xe5,0x02,0x02,0x19, 0x42,0x0b,0x02,0x09,0x07,0x09,0x02,0x08,0x09,0x09,0x09,0x0f,0x0f,0x11,0x20,0x01, 0xe6,0x15,0x01,0x07,0xe5,0x04,0x06,0x19,0x01,0x0b,0x1d,0x05,0x01,0x04,0x04,0x01, 0x07,0x0b,0xe5,0x05,0xe5,0xe5,0x08,0x06,0x01,0xe5,0x02,0x05,0x10,0xe6,0x06,0x01, 0x07,0x01,0x04,0x15,0xe6,0x0b,0x0a,0x02,0x18,0x18,0x18,0x0a,0x07,0xe5,0xe5,0x01, 0x01,0x02,0x01,0x05,0x01,0x01,0x03,0x07,0x10,0x08,0x04,0x01,0x0e,0x0b,0x06,0x05, 0x09,0x04,0x08,0x0a,0x01,0xe6,0x2b,0x03,0x05,0x11,0x06,0x18,0xe5,0x07,0x0d,0x03, 0x11,0x06,0x0c,0x0a,0x08,0x09,0x09,0x0d,0x09,0x0b,0x02,0x0b,0x03,0x2b,0x01,0x2a, 
0x0d,0xe5,0xe5,0x09,0x09,0x06,0x0e,0x0d,0x11,0x07,0x13,0x0c,0x06,0x05,0x1c,0x02, 0x0b,0x22,0x09,0x14,0x1c,0x02,0xe5,0x04,0x02,0x0a,0x01,0xe5,0x19,0x13,0x27,0x03, 0x05,0x11,0x05,0x07,0x03,0x09,0x01,0x11,0x3d,0x1c,0x0d,0x03,0x02,0x02,0xe6,0x04, 0x02,0xe6,0x06,0xe5,0x02,0x01,0x0c,0xe5,0x02,0x02,0x01,0xe5,0x03,0x03,0xe5,0xe6, 0x1e,0x01,0x12,0x03,0x02,0xe5,0x18,0x44,0x17,0x08,0x01,0xe5,0x09,0xe5,0x02,0x06, 0x07,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x1d,0x0f,0x03,0xe6,0xe5,0x11,0x0d, 0x0c,0x0a,0x12,0x28,0xe5,0x06,0x03,0xe5,0x08,0xe5,0x08,0x05,0x15,0x02,0x22,0xe5, 0x02,0x09,0x0f,0x0d,0xe6,0xe5,0x27,0x02,0x01,0x2c,0x0c,0x01,0x02,0x01,0x0a,0x01, 0xe6,0x01,0x0a,0x11,0x02,0xe5,0x0a,0x03,0x01,0xe5,0x23,0x22,0x08,0x03,0xe5,0x04, 0x25,0x1d,0x0f,0x07,0x09,0x1a,0x14,0x01,0x01,0x11,0x25,0xe5,0x04,0x27,0x02,0xe7, 0x0e,0xe5,0x07,0x09,0x1c,0x01,0x15,0x01,0xe5,0x02,0xe5,0x06,0x1b,0x04,0xe7,0x05, 0x01,0xe5,0x05,0x05,0x03,0x02,0x01,0x0e,0x09,0x02,0x01,0x04,0x08,0xe5,0x01,0xe5, 0x04,0xe5,0x07,0x05,0x0e,0xe7,0x01,0x02,0x01,0x05,0x01,0x04,0x02,0x01,0x04,0x02, 0x01,0x04,0x23,0x09,0xe5,0x03,0xe5,0x01,0x03,0x05,0x0f,0xe5,0x07,0xe5,0x03,0xe5, 0x01,0x15,0xe5,0x01,0x02,0x03,0xe5,0xe5,0x0b,0x09,0x02,0xe5,0x04,0x0a,0x01,0xe5, 0x01,0xe5,0x01,0xe5,0xe5,0xe5,0x03,0x05,0x0b,0x01,0x03,0x02,0x04,0x26,0x29,0x07, 0x06,0x01,0x07,0xe5,0xe5,0x08,0x01,0x0b,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x0c, 0x20,0xe5,0x03,0x09,0x11,0x0b,0x05,0x02,0x02,0x24,0xe5,0x01,0x1d,0x09,0x09,0x09, 0x06,0xe5,0x06,0xe5,0x01,0x01,0x08,0xe5,0x04,0xe5,0x01,0x01,0x04,0xe5,0xe5,0xe5, 0x06,0x09,0xe5,0xe5,0x23,0x09,0x13,0x0d,0x04,0x01,0x12,0x09,0x09,0x07,0x02,0x02, 0xe5,0x17,0xe5,0x01,0x02,0x06,0xe5,0x03,0x07,0x03,0x01,0xe5,0xe5,0x01,0x01,0x01, 0x05,0x08,0x02,0x09,0x09,0x04,0x04,0x07,0x0b,0x09,0x09,0x07,0x08,0x02,0x03,0x05, 0x06,0x02,0x0b,0xe5,0xe6,0x01,0x28,0x02,0x08,0x04,0x01,0x0a,0x04,0x01,0x01,0xe5, 0x02,0x03,0xe6,0x02,0x08,0x08,0x01,0xe5,0x02,0x04,0x07,0x03,0x09,0x09,0x05,0x03, 
0x05,0x02,0xe5,0xe5,0x01,0x21,0x05,0xe5,0x02,0x03,0xe5,0x0b,0x02,0x08,0x02,0x01, 0xe7,0x11,0xe5,0x06,0x08,0x0f,0x10,0x01,0x0c,0x05,0x01,0x03,0x02,0x01,0x02,0x04, 0x04,0x05,0x01,0x01,0x0b,0x04,0x01,0xe5,0xe5,0x06,0x01,0x02,0x04,0x18,0x0b,0x01, 0x07,0x01,0xe5,0x06,0x03,0x05,0x05,0x07,0x0d,0xe5,0x01,0xe5,0x10,0xe5,0xe5,0x05, 0xe5,0x08,0xe6,0x01,0x02,0x08,0x0d,0xe5,0x01,0x09,0x09,0x04,0x01,0x01,0xe5,0x01, 0x03,0xe6,0x01,0x07,0x01,0x01,0xe5,0xe5,0x03,0x03,0xe5,0xe5,0xe6,0xe5,0xe6,0x03, 0x01,0x01,0xe5,0x03,0x03,0xe5,0x03,0x02,0x0a,0xe5,0x07,0xe5,0xe5,0x05,0xe5,0xe5, 0x01,0x02,0x06,0x03,0xe5,0xe5,0x06,0xe6,0x03,0x0b,0x02,0xe6,0xe5,0x04,0x0a,0x04, 0x04,0x09,0x09,0x02,0x09,0x02,0x10,0xe5,0x0e,0x09,0x02,0x10,0x04,0x06,0x02,0xe5, 0x04,0x09,0x09,0x09,0x02,0x06,0x09,0x04,0x04,0x02,0x01,0x04,0x02,0x06,0x04,0x04, 0x14,0x02,0xe6,0xe5,0x0f,0xe5,0x07,0xe5,0x07,0xe5,0x0b,0xe6,0x03,0x02,0x13,0xe5, 0x0d,0xe5,0x0b,0x01,0x07,0xe5,0x03,0xe5,0x09,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x07, 0xe5,0x08,0x02,0x01,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x07,0x01,0x03,0xe5,0x07, 0xe5,0x12,0x03,0xe5,0xe6,0x0d,0xe5,0x07,0xe5,0x07,0xe6,0x01,0x01,0x02,0xe5,0x07, 0x03,0x05,0x09,0x09,0x09,0xe7,0xe5,0xe5,0x01,0xe5,0x01,0x05,0x09,0xe5,0x09,0xe5, 0x03,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x01,0x02,0x01,0x01,0x05,0xe5,0x07,0xe5, 0x07,0xe5,0x04,0x02,0xe5,0xe6,0x04,0xe5,0x07,0xe6,0x06,0x0d,0x03,0xe5,0x01,0x27, 0x3b,0x47,0x24,0x2f,0x02,0xe8,0x10,0x09,0x0b,0x3d,0x19,0x0b,0x09,0x09,0x09,0x03, 0x0f,0x09,0x09,0x13,0x21,0xe7,0x0f,0xe6,0x06,0xe6,0x06,0xe6,0x06,0x03,0x13,0x04, 0x14,0xe6,0x06,0x0d,0x04,0xe5,0xe5,0x04,0xe5,0xe8,0x03,0x01,0xe7,0xe5,0x03,0xe5, 0xe5,0xe5,0x04,0xe6,0x03,0x01,0xe5,0x08,0xe6,0x06,0xe6,0xe5,0x01,0x01,0xe7,0x06, 0x09,0xe6,0x06,0xe6,0xe5,0x12,0x02,0xe5,0x09,0x06,0x09,0x09,0x13,0x01,0x0a,0x04, 0x01,0x13,0x0c,0x05,0xe5,0x06,0xe6,0x06,0x01,0xe6,0x03,0xe5,0xe7,0x03,0x02,0xe6, 0x08,0x05,0x01,0x01,0x09,0x09,0x05,0x01,0xe6,0x06,0x0b,0x09,0x09,0x05,0xe5,0x02, 
0x01,0xe9,0x0f,0x09,0x0b,0x0d,0x01,0x13,0x21,0x03,0x06,0x04,0x03,0x09,0x05,0x01, 0xe5,0x01,0x05,0x0d,0xe5,0x01,0x01,0x0d,0x07,0x01,0x09,0x03,0x19,0xe5,0x0a,0x0a, 0xe5,0x12,0xe7,0x05,0xe7,0x02,0x02,0xe7,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x02,0x02,0x01,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x03,0xe6,0x01, 0x06,0xe6,0xe5,0xe5,0x02,0xe6,0x02,0x03,0xe6,0x03,0x02,0xe6,0xe5,0xe5,0x02,0x01, 0xe5,0x02,0x02,0xe7,0x05,0x01,0xe5,0x02,0x02,0xe7,0x01,0x05,0xe5,0x02,0x02,0xe7, 0x02,0x02,0xe7,0x07,0xe7,0x02,0x05,0x24,0xe5,0x06,0x1d,0x14,0xe5,0x06,0xe5,0x07, 0x09,0x03,0x05,0x01,0x05,0x04,0x03,0xe5,0x02,0xe5,0x01,0x01,0x04,0x02,0x01,0x01, 0x01,0x06,0x03,0x05,0x03,0x06,0x01,0xe6,0x03,0x0d,0xe5,0x07,0xe5,0x04,0x01,0x0e, 0x03,0x13,0x02,0x06,0x02,0x06,0x02,0xe5,0xe5,0x1c,0x17,0x05,0x07,0x04,0x06,0x06, 0x01,0xe5,0xe5,0x03,0x01,0x06,0x02,0x02,0xe5,0x04,0x07,0x01,0xe5,0xe5,0x04,0xe5, 0x05,0x01,0xe5,0x07,0x01,0x01,0x03,0x01,0xe5,0x01,0x0d,0x02,0x06,0x02,0xe5,0xe5, 0x01,0x02,0x01,0xe5,0x03,0x04,0x11,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x0a, 0x01,0x01,0x11,0x01,0x13,0x03,0x03,0xe5,0x07,0x09,0x01,0x02,0xe5,0x06,0x01,0x01, 0x04,0xe5,0x02,0x03,0x01,0x02,0xe5,0x03,0xe5,0x02,0x03,0x01,0x01,0x01,0x04,0xe5, 0x01,0xe5,0x03,0xe5,0xe5,0xe5,0x04,0x02,0xe5,0x0e,0x02,0xe5,0x04,0x02,0xe5,0xe5, 0xe5,0x06,0x06,0xe6,0xe5,0x08,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6, 0x08,0x01,0x2b,0x01,0x01,0x02,0x06,0x0b,0x05,0x09,0xe5,0x07,0x05,0x0e,0x02,0x01, 0x03,0xe5,0x07,0xe5,0x11,0x13,0x01,0xe5,0x05,0x01,0xe6,0xe5,0x02,0x04,0x03,0x02, 0xe8,0x0f,0x09,0x0a,0x04,0x01,0xe5,0x2d,0x09,0x03,0x06,0x01,0x01,0x02,0x05,0x0c, 0x0c,0x0b,0x02,0x01,0x04,0x02,0x24,0xe5,0xe5,0x19,0x02,0x0d,0x01,0x01,0x11,0x01, 0x02,0x04,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x04,0x02,0x01,0xe5,0x05,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x02,0x01,0x04,0x01, 0xe5,0x05,0xe6,0xe5,0x04,0x01,0x03,0x03,0x01,0x02,0xe5,0x02,0x01,0x06,0xe7,0x06, 
0x01,0x07,0x01,0x03,0x03,0x01,0x02,0x04,0x01,0x07,0x01,0xe5,0x05,0x01,0x0a,0x01, 0x01,0x02,0x0c,0xe5,0x01,0x05,0xe5,0x07,0xe6,0x05,0x0c,0x20,0x04,0xe5,0x06,0xe7, 0x0b,0x04,0xe5,0x09,0xe5,0xe6,0x04,0xe5,0x08,0x06,0x01,0xe5,0x05,0x01,0xe5,0x01, 0x06,0xe5,0x01,0x05,0x0f,0x02,0xe5,0x07,0xe6,0xe5,0xe5,0x02,0xe5,0x01,0x03,0xe5, 0x01,0x0c,0x01,0xe7,0xe5,0x04,0x35,0x27,0x01,0x04,0xe5,0x0c,0x02,0x03,0x03,0x06, 0x03,0x03,0x04,0x03,0x03,0x02,0x08,0x08,0x01,0x03,0x04,0x06,0x05,0xe5,0x0a,0x0c, 0x1a,0xe5,0x02,0x01,0x0b,0x01,0x02,0x04,0x01,0x07,0x01,0x04,0x10,0x23,0x01,0x09, 0x0c,0x05,0x01,0x03,0x01,0x01,0x01,0x07,0x02,0x01,0x04,0x03,0x05,0x02,0xe5,0xe5, 0x05,0x02,0x22,0x01,0x07,0x01,0x02,0x04,0x01,0x18,0x03,0x28,0x02,0x01,0x17,0x0a, 0x06,0x02,0x09,0x02,0x01,0x01,0x05,0xe6,0x17,0x01,0x02,0x01,0x18,0x04,0xe5,0x1d, 0x07,0xe6,0x07,0x09,0x03,0x04,0x04,0x09,0x01,0x01,0x2c,0x01,0xe5,0x01,0x0a,0x27, 0x02,0x01,0x06,0x07,0x09,0x01,0xe5,0x07,0x01,0x02,0x03,0x1f,0x06,0x0b,0x1b,0x0b, 0x12,0x01,0x01,0x13,0x07,0x01,0x09,0x04,0xe5,0x02,0x01,0x02,0xe5,0x14,0x01,0x04, 0xe5,0x14,0x01,0xe5,0x07,0x06,0x01,0x04,0xe5,0x02,0x0c,0x09,0xe5,0x0b,0x05,0x13, 0x09,0x03,0x04,0x0b,0x08,0x03,0x01,0x03,0x0c,0xe6,0x1c,0x0d,0x09,0x01,0x0e,0x0c, 0x20,0x0f,0xe5,0x0d,0x15,0x13,0x11,0x14,0x0f,0xe5,0x0f,0x02,0x1c,0xe5,0x07,0x06, 0x02,0x1e,0x05,0x0b,0x0c,0x06,0x0c,0x0c,0x10,0x01,0x11,0x08,0x37,0x10,0x03,0x0d, 0xe5,0x07,0xe5,0x35,0x24,0x03,0x02,0xe5,0x03,0x02,0xe5,0xe6,0x01,0x02,0x01,0xe6, 0x02,0x03,0xe5,0x04,0x02,0xe6,0x03,0x02,0x09,0x09,0x07,0x02,0x01,0x11,0xe5,0x02, 0x04,0xe5,0x18,0x01,0xe6,0x05,0x09,0x09,0x3e,0x08,0x12,0x05,0x02,0xe5,0x04,0x01, 0x02,0x06,0x02,0x06,0x09,0x02,0x21,0x01,0x0f,0x07,0x09,0x17,0x02,0xe5,0x0d,0x09, 0x07,0x06,0x05,0x04,0x04,0x04,0x09,0x02,0x01,0x09,0x19,0x0a,0xe5,0xe5,0x01,0x01, 0x03,0x0c,0x06,0x06,0x05,0x04,0x12,0x14,0x09,0x01,0x01,0x09,0x08,0x01,0x0f,0x01, 0xe6,0x18,0x04,0x09,0x02,0xe5,0x08,0x11,0x01,0x04,0x0f,0x0b,0x10,0x0c,0x02,0x05, 
0x01,0x08,0x05,0x02,0x01,0x08,0x05,0x02,0x0a,0x02,0x1e,0x05,0x02,0x11,0x01,0x01, 0x01,0xe5,0x2c,0x04,0x02,0xe5,0x1b,0x28,0x17,0xe5,0x11,0x12,0x10,0x1b,0x17,0x05, 0xe6,0xe7,0x0e,0x09,0x02,0xe5,0x04,0xe5,0x0d,0x24,0x01,0x04,0x01,0xe5,0x01,0x17, 0x0b,0x02,0xe5,0xe5,0x02,0x02,0xe5,0x05,0x01,0x01,0x04,0x02,0xe5,0x06,0xe5,0x01, 0xe5,0x0b,0x02,0x01,0x04,0x05,0x06,0x01,0x04,0x09,0xe5,0x12,0xe5,0xe8,0x01,0x01, 0x0b,0x09,0x03,0xe5,0x04,0x0f,0xe6,0x21,0xe5,0xe6,0x01,0x05,0x09,0xe5,0x0a,0x07, 0x03,0x02,0xe5,0x01,0x02,0x02,0xe5,0x08,0x01,0xe6,0xe5,0x01,0xe5,0x0b,0x04,0x01, 0x01,0x04,0x01,0xe5,0x05,0x05,0x05,0xe5,0x04,0x09,0x12,0x01,0x02,0x01,0x01,0x04, 0x23,0x01,0x06,0xe5,0x01,0x01,0x18,0xe5,0x0c,0x02,0x04,0x06,0xe5,0x02,0x07,0x01, 0x03,0xe5,0x03,0x01,0x07,0x09,0x01,0x09,0x03,0xe5,0x01,0x01,0x04,0xe5,0x05,0x0e, 0x01,0x07,0x29,0x05,0x02,0x02,0x25,0x09,0x02,0x01,0x18,0xe5,0x0a,0x09,0x06,0xe5, 0xe5,0x08,0x01,0x09,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x06,0x09,0x01,0x04,0x16,0x01, 0x07,0x19,0xe5,0x0f,0x03,0xe5,0xe5,0x05,0x0b,0x09,0x09,0x02,0x01,0x02,0x04,0xe5, 0x01,0x1b,0x02,0x05,0x02,0x02,0x03,0xe5,0x03,0x04,0x01,0x02,0x01,0x01,0x05,0x0b, 0x04,0x04,0x09,0x06,0x02,0x01,0x01,0x05,0x02,0x01,0x01,0x02,0x01,0x01,0x05,0x09, 0x04,0x0e,0x09,0x07,0x05,0x07,0xe6,0xe5,0x01,0x2b,0x03,0xe7,0x02,0x02,0x01,0x17, 0x03,0xe7,0x02,0x03,0xe6,0x02,0x05,0x01,0x02,0x08,0x06,0x04,0x05,0x03,0x09,0x05, 0x03,0x01,0x02,0x04,0x05,0x04,0x01,0x01,0x0e,0x05,0x03,0x19,0x02,0x03,0xe6,0xe5, 0x01,0x03,0xe5,0xe6,0x03,0xe5,0x0c,0x09,0x06,0x02,0x02,0x01,0x07,0x01,0x1b,0x01, 0x09,0x01,0x03,0x01,0x05,0xe5,0x01,0x01,0x07,0x01,0x01,0xe5,0x05,0x01,0x02,0x01, 0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x04,0x02,0x04,0x01,0x04,0x04,0x0e,0x01,0xe5, 0xe5,0x06,0x0e,0x08,0x02,0x02,0x0f,0x01,0x01,0xe5,0x05,0x0a,0xe8,0x04,0xe8,0x05, 0xe7,0xe5,0x01,0x06,0x01,0x1b,0x02,0x06,0x04,0x01,0x01,0xe5,0x02,0xe5,0x03,0x09, 0x01,0x02,0x01,0x01,0x02,0x01,0x01,0xe5,0xe5,0xe6,0x02,0xe5,0xe5,0xe6,0xe5,0xe6, 
0x01,0x01,0x03,0xe5,0x03,0x01,0x01,0xe5,0x01,0x01,0x0d,0xe5,0xe5,0x01,0x01,0x01, 0xe5,0x03,0x0d,0xe5,0xe5,0xe5,0x04,0xe7,0xe5,0xe6,0x0d,0xe7,0x0f,0x09,0x09,0x09, 0x02,0x09,0x02,0x17,0x09,0x09,0x13,0x0b,0x09,0x09,0x09,0x09,0x13,0x02,0x01,0x04, 0x13,0x09,0x09,0x02,0x0b,0x02,0x10,0xe5,0x07,0xe5,0x07,0xe5,0x0b,0x01,0x07,0x1d, 0x05,0xe5,0x07,0xe5,0x11,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0x09,0xe5,0x03,0x03,0xe5,0x11,0xe5,0x07,0xe5,0x0b,0x01,0x08,0xea,0x0b,0xe5, 0x07,0xe5,0x07,0xe6,0x01,0x01,0x02,0xe5,0xe6,0x04,0x03,0x05,0x09,0x06,0x02,0xe5, 0x03,0x03,0xe6,0x06,0xe5,0x07,0x09,0xe6,0x03,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x09,0xe5,0x07,0xe6,0x03, 0x02,0xe5,0x01,0x08,0xe8,0xe5,0x01,0x4c,0x02,0x2e,0x02,0x32,0xe5,0x1c,0x1a,0x02, 0x0e,0x04,0x01,0x11,0x09,0x35,0x19,0x17,0x07,0x09,0x09,0x09,0x09,0x01,0x11,0x09, 0x01,0x11,0x0c,0x17,0x0a,0x05,0xe6,0x06,0xe6,0x06,0xe6,0x03,0x02,0x1c,0x07,0x02, 0x09,0xe6,0x06,0xe6,0x03,0x06,0x04,0xe7,0xe5,0x02,0x02,0xe7,0xe5,0x03,0xe7,0xe5, 0x01,0x01,0xe7,0xe5,0x03,0xe5,0xe5,0x03,0x01,0xe7,0x03,0x01,0x04,0x02,0x01,0xe7, 0xe5,0x01,0x01,0xe7,0x05,0x04,0x05,0xe6,0x06,0x01,0x04,0x02,0x0d,0xe5,0xe6,0x10, 0x09,0x09,0x05,0x0d,0x11,0xe5,0x07,0x0b,0x02,0x06,0x02,0x04,0x01,0x07,0xe6,0x06, 0x02,0xe5,0x03,0x02,0xe6,0x04,0x01,0xe6,0x06,0xe6,0x04,0x01,0xe6,0x06,0xe6,0x04, 0x01,0xe6,0x03,0xe5,0xe7,0x04,0x02,0x0a,0x09,0x05,0x09,0xe5,0x02,0x02,0xe8,0x07, 0x07,0x09,0x1b,0x10,0x02,0x07,0x10,0x0c,0x08,0x02,0x01,0x09,0x01,0x07,0x03,0xe5, 0xe6,0xe6,0x07,0x06,0xe5,0xe5,0x02,0x03,0x01,0x0b,0x01,0x03,0x01,0x03,0x05,0x2c, 0xe5,0x01,0x13,0xe7,0x05,0xe7,0x02,0x02,0xe8,0x06,0xe5,0x01,0x05,0xe5,0x07,0xe5, 0x01,0x05,0xe5,0x07,0xe5,0x02,0x02,0x01,0xe5,0x02,0x04,0xe5,0x01,0x05,0xe5,0x01, 0x03,0xe7,0xe6,0x04,0xe7,0x01,0xe5,0x01,0x01,0xe6,0xe5,0x02,0xe7,0x01,0x03,0xe8, 0xe6,0x01,0xe7,0x01,0x03,0xe7,0x05,0x01,0xe6,0xe6,0x01,0xe8,0x06,0xe5,0x02,0x02, 
0xe7,0x05,0xe8,0x06,0xe7,0x02,0x01,0x01,0x01,0x10,0x09,0x09,0xe5,0x24,0x14,0xe5, 0x07,0x12,0xe5,0x01,0x02,0x04,0xe5,0x01,0x02,0x02,0xe6,0x03,0x06,0x02,0x02,0x06, 0x03,0x02,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x09,0x05,0x0d,0xe5,0x07,0xe5,0x17,0x01, 0x13,0x02,0x06,0x02,0x06,0x02,0x20,0x09,0x0d,0x12,0x06,0x04,0x01,0x01,0xe5,0x05, 0xe6,0x01,0x03,0x01,0xe5,0x07,0xe5,0x01,0x04,0xe6,0x07,0xe6,0xe5,0x02,0x01,0xe6, 0xe5,0x04,0x04,0x02,0x01,0xe5,0x0f,0x02,0x06,0x02,0x0a,0xe5,0x03,0xe5,0xe8,0x0e, 0x03,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x0c,0x13,0x19,0x04,0x12,0xe5,0x03,0x05,0xe5, 0x03,0x03,0x05,0x08,0xe5,0x02,0x03,0x01,0x04,0x03,0xe5,0xe5,0xe5,0x03,0xe5,0x06, 0x01,0x08,0xe5,0x11,0xe5,0x07,0xe5,0x0a,0x06,0xe5,0x01,0x12,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe7,0x0a,0x02,0x10,0x16,0xe5,0xe5,0x05,0x10,0x02,0x01,0xe5,0x02, 0x04,0x01,0x04,0x02,0xe6,0x06,0x06,0x02,0xe6,0x06,0x04,0x01,0xe5,0xe7,0x06,0x09, 0xe5,0x11,0xe5,0xe6,0x04,0xe5,0xe7,0x09,0x03,0x02,0x02,0x11,0x19,0xe5,0xe5,0xe5, 0x08,0x18,0x02,0x0a,0x05,0x01,0xe6,0x06,0xe5,0x0d,0x02,0x0d,0x0e,0x17,0xe5,0x0d, 0x0e,0x01,0x1a,0xe5,0x0c,0x02,0xe5,0x08,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x03, 0x03,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x01, 0x05,0x01,0x01,0x05,0x01,0xe6,0x06,0x01,0xe5,0x05,0x01,0x01,0x05,0x01,0x07,0x01, 0x06,0xe7,0x06,0xe6,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x07,0x01,0x07,0xe6, 0x06,0x01,0x0a,0xe6,0xe5,0x0e,0xe5,0x01,0x05,0xe6,0x06,0xe5,0x03,0x0f,0x19,0xe5, 0x05,0xe5,0x01,0xe6,0x05,0xe6,0xe5,0x07,0x07,0xe5,0x09,0x03,0x03,0x19,0x06,0xe6, 0x02,0x02,0x0a,0x02,0x06,0x06,0x0f,0x05,0xe6,0x02,0x0f,0x03,0x02,0xe5,0x06,0x02, 0x01,0x01,0x05,0x01,0x01,0x03,0x02,0x01,0x04,0x20,0x10,0x04,0x09,0x07,0x07,0x02, 0x01,0x0c,0x17,0x06,0x02,0x03,0x03,0x08,0x05,0x06,0x02,0x0d,0x0d,0x01,0x01,0x17, 0xe5,0xe5,0xe5,0x04,0x0c,0x0b,0x04,0x04,0x07,0x2e,0x08,0x07,0x05,0x03,0x09,0x04, 0x06,0x27,0x02,0x3d,0x0d,0x02,0x2d,0x25,0xe5,0x06,0x03,0x03,0x06,0x02,0x02,0x01, 
0x0c,0xe5,0x03,0x07,0x0b,0xe5,0x05,0x09,0x04,0x02,0x02,0x01,0x01,0x06,0x0c,0xe5, 0x05,0x01,0x0e,0x09,0x04,0x0f,0xe7,0x27,0x03,0x10,0x28,0x08,0x03,0x05,0x09,0x01, 0x09,0x08,0x04,0x11,0x17,0x05,0x09,0x01,0x04,0x17,0x0f,0xe5,0x01,0x09,0x09,0x09, 0x08,0x1d,0x06,0x03,0x01,0x08,0x12,0x03,0x06,0x06,0xe5,0x03,0x05,0x29,0x31,0x0b, 0x04,0x0f,0xe6,0x12,0x35,0x27,0x0c,0x0d,0x07,0x03,0x0f,0x04,0x0f,0x1c,0x09,0x09, 0x0c,0x02,0x04,0x02,0x01,0x01,0x06,0x15,0xe5,0x01,0x28,0x02,0x0d,0x13,0x02,0x10, 0x02,0xe5,0x0e,0x08,0x02,0x08,0xe6,0x18,0x1e,0x15,0x02,0x06,0x03,0x01,0x06,0x19, 0x04,0x01,0x04,0x17,0x1d,0x02,0xe5,0x03,0x09,0x02,0x09,0xe5,0x01,0x02,0x01,0x02, 0xe6,0x04,0x01,0xe6,0x06,0x0a,0x01,0x07,0x05,0x0d,0xe5,0x0b,0x2c,0x01,0x1e,0x03, 0x0f,0x37,0x16,0x03,0x03,0x09,0x07,0x01,0x1b,0x0d,0x07,0x01,0x02,0x24,0x0e,0xe6, 0xe5,0xe5,0x06,0x0e,0x0b,0xe5,0x04,0x3a,0xe5,0xe6,0xe5,0x03,0x04,0x01,0xe5,0xe5, 0x03,0x0a,0xe5,0xe5,0xe5,0x03,0x07,0x01,0xe5,0x03,0xe5,0x04,0xe5,0xe5,0x01,0x0a, 0x05,0x04,0x03,0x0a,0x05,0xe5,0x03,0x0a,0x11,0x07,0xe5,0x31,0x05,0x14,0x09,0xe5, 0x07,0x05,0xe5,0x08,0x09,0x04,0x09,0x09,0x0a,0x04,0xe5,0x03,0xe5,0x08,0x0e,0x12, 0x02,0x25,0x02,0xe6,0x01,0x09,0x1b,0x05,0x0d,0x14,0x17,0x01,0x08,0xe5,0x01,0x07, 0x26,0x01,0x1c,0x05,0x0a,0x02,0xe5,0x30,0xe6,0x0e,0x02,0x01,0x04,0x02,0xe5,0x04, 0xe5,0x0d,0x20,0xe5,0x01,0xe5,0x04,0x0f,0x06,0xe5,0x04,0x06,0x04,0x02,0xe5,0xe5, 0x02,0xe5,0x07,0x0c,0x01,0x02,0x07,0x10,0x01,0x04,0x0c,0xe5,0x05,0x01,0x01,0x04, 0xe5,0x06,0x01,0x09,0xe6,0x01,0xe5,0x01,0x01,0x05,0xe5,0x04,0x02,0xe5,0x04,0xe5, 0xe5,0xe5,0x03,0x0f,0x13,0xe6,0x03,0x01,0x07,0xe5,0x04,0x05,0xe5,0x0c,0x08,0x0b, 0x01,0xe5,0x01,0x02,0x01,0x08,0x0b,0xe5,0x0b,0x0f,0xe5,0x04,0x0d,0xe5,0x07,0x01, 0x0b,0xe5,0x08,0x01,0x02,0xe5,0xe6,0x03,0x0f,0xe5,0xe5,0xe5,0x0c,0xe5,0x03,0x07, 0x15,0x0b,0x11,0x04,0x01,0x0a,0x01,0x03,0xe5,0x03,0x01,0x07,0x09,0x01,0x04,0xe5, 0xe5,0x05,0x01,0xe5,0xe5,0x06,0x06,0x01,0xe5,0x0f,0x01,0x18,0x07,0x09,0x06,0x01, 
0x02,0x14,0x01,0x0d,0xe5,0x01,0x09,0x13,0x01,0x1b,0x06,0xe5,0x02,0x04,0xe5,0xe5, 0xe5,0x07,0xe5,0xe5,0x06,0x05,0xe5,0x01,0x01,0x04,0xe5,0x0a,0x01,0x07,0x06,0x02, 0x10,0x02,0x1f,0x0b,0x04,0x01,0x12,0x09,0x09,0x07,0x02,0x02,0xe5,0x12,0x01,0x04, 0x01,0x0f,0x06,0x02,0x03,0x07,0x01,0x05,0x0b,0x04,0x04,0x09,0x09,0xe5,0x01,0x05, 0x04,0x04,0x02,0x01,0x0e,0x02,0x07,0x02,0x05,0x09,0x06,0x0e,0x03,0x02,0x18,0x12, 0x05,0xe5,0x02,0x0f,0x01,0x01,0x19,0x03,0x03,0x05,0x04,0x04,0x0b,0x05,0x03,0x09, 0x0a,0x01,0x01,0x04,0x05,0x03,0x05,0x17,0x04,0x16,0x01,0x09,0x03,0xe8,0x15,0x01, 0x04,0x01,0x04,0x05,0x07,0x01,0x13,0x01,0x15,0x07,0x03,0xe5,0x09,0x01,0x01,0x01, 0xe5,0x05,0x01,0x02,0x01,0x01,0xe5,0x05,0x02,0x01,0x01,0x02,0x01,0xe5,0x08,0x01, 0x07,0x03,0x01,0xe5,0x1d,0x08,0x01,0x04,0x01,0xe5,0x03,0x04,0x01,0x01,0x04,0xe9, 0x10,0xe5,0x03,0x01,0x01,0xe5,0xe5,0xe5,0x04,0x04,0x02,0x06,0x13,0x01,0x16,0x04, 0x03,0xe5,0x05,0x07,0x01,0x02,0xe6,0x03,0x01,0x01,0xe5,0xe5,0xe6,0x02,0xe5,0xe6, 0xe5,0xe5,0x01,0xe6,0x0b,0x01,0x01,0xe5,0xe5,0x01,0x04,0x12,0xe5,0x11,0xe5,0xe5, 0xe5,0x04,0x01,0x04,0x03,0xe5,0x09,0x01,0xe5,0x0f,0x09,0x02,0x06,0x09,0x31,0x02, 0x06,0x0c,0x02,0x03,0x0b,0x09,0x02,0x01,0x04,0x13,0x04,0x04,0x13,0x0c,0x06,0x09, 0x04,0x07,0x02,0x08,0x02,0x02,0x0d,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0x39,0xe5, 0x01,0xe6,0x02,0xe5,0x11,0xe5,0x09,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0x11,0xe5, 0x07,0xe5,0x08,0x08,0xe5,0x0b,0x01,0x03,0xe5,0x07,0xe5,0x0b,0xe5,0x05,0x01,0xe6, 0xe6,0x0d,0xe5,0x07,0xe5,0x03,0x03,0xe6,0x03,0x02,0xe5,0xe5,0x05,0x09,0x06,0x02, 0x02,0x06,0x09,0xe6,0x06,0xe5,0x04,0x02,0x09,0xe6,0x08,0xe5,0x07,0xe5,0x03,0x03, 0xe6,0x03,0x02,0x09,0xe5,0x07,0xe7,0x05,0x09,0xe5,0x04,0x02,0x03,0x05,0xe5,0x07, 0xe6,0x06,0x0d,0xe6,0x01,0x02,0x24,0x02,0x1c,0xe5,0x25,0xe5,0x31,0xe5,0x2f,0xe5, 0x2b,0x03,0xe7,0x10,0x09,0x0d,0x1d,0x23,0x03,0x1b,0x09,0x0d,0x0f,0x1d,0x03,0x0f, 0x20,0x01,0xe6,0x0f,0xe6,0x02,0xe5,0xe8,0x06,0xe6,0x03,0x02,0x2e,0x01,0xe7,0xe5, 
0x03,0xe5,0xe5,0x03,0x01,0x09,0xe7,0x04,0xe5,0xe8,0x02,0xe5,0xe8,0x03,0x01,0xe7, 0x03,0x01,0x09,0xe7,0xe5,0x01,0x01,0xe7,0x03,0x0c,0xe6,0x10,0x01,0x01,0x05,0x01, 0x03,0x11,0xe5,0xe6,0x10,0x05,0x01,0xe6,0x08,0x07,0x1f,0x0c,0x02,0x01,0xe6,0x06, 0xe6,0x04,0x01,0xe6,0x01,0x04,0xe6,0x01,0x01,0x02,0x01,0xe6,0x01,0x02,0x01,0xe6, 0x04,0x01,0xe6,0x04,0x03,0x04,0x02,0xe6,0x04,0x01,0xe6,0x04,0x03,0x01,0x07,0x04, 0xe5,0xe7,0x08,0x09,0x08,0x06,0xe5,0x02,0x01,0xe5,0x01,0xe5,0x0f,0x09,0x02,0x2c, 0x0d,0x02,0x06,0x05,0x04,0x02,0x01,0x09,0x01,0x07,0x01,0x01,0x0f,0x01,0xe5,0x01, 0x03,0x01,0x03,0x05,0xe5,0x01,0xe5,0x01,0x01,0xe5,0x02,0x02,0x10,0x04,0x1b,0x01, 0x0e,0x03,0x13,0xe7,0x05,0xe7,0x02,0x02,0xe7,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe6, 0x06,0xe5,0x07,0xe5,0x02,0x02,0x01,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe5,0x01,0x03, 0xe7,0xe6,0xe5,0x02,0xe7,0x02,0x02,0x01,0xe6,0xe5,0x02,0xe8,0x04,0x01,0xe5,0x01, 0x03,0xe8,0xe5,0x02,0xe8,0x04,0x01,0xe5,0x02,0x02,0xe8,0x05,0xe6,0x05,0xe7,0x05, 0xe8,0xe5,0x04,0xe7,0x02,0x02,0xe7,0x16,0x01,0x0a,0x06,0x31,0x01,0xe6,0x06,0xe5, 0x07,0x09,0x03,0x08,0x02,0x03,0x01,0xe6,0x07,0xe5,0xe5,0x04,0x09,0x03,0x06,0x02, 0x10,0x0d,0x01,0x09,0x03,0xe5,0x03,0x13,0x01,0x13,0x02,0xe5,0xe5,0x02,0x02,0x03, 0x02,0x02,0x32,0x01,0x03,0x05,0xe5,0x05,0x04,0x06,0x04,0x01,0x05,0x03,0x01,0xe5, 0xe5,0x02,0x02,0x05,0x03,0xe5,0x03,0x03,0x02,0x06,0x09,0x05,0x0a,0x02,0x05,0x0a, 0x01,0xe5,0x05,0x01,0xe5,0x09,0xe5,0x03,0xe5,0x02,0x11,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x20,0x01,0x13,0x03,0x03,0x01,0x11,0x01,0x03,0xe5,0xe5,0x03,0x03, 0x05,0x03,0x08,0xe5,0x02,0x03,0x01,0x03,0x01,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x06, 0x01,0x04,0xe5,0x01,0xe5,0x0e,0x02,0xe5,0x04,0x02,0xe5,0xe5,0xe5,0x06,0x06,0x01, 0x01,0x13,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x1d,0x19,0x01,0x03,0x07,0x0b, 0x04,0xe5,0x09,0xe6,0x05,0xe7,0x06,0xe7,0x04,0xe7,0x01,0x05,0xe5,0x05,0x01,0xe5, 0x05,0x01,0x07,0x01,0xe5,0x0f,0x09,0x03,0x01,0x08,0x03,0x02,0x02,0xe5,0x0f,0x09, 
0x09,0x08,0x06,0x01,0xe5,0x0f,0x45,0x01,0x13,0x02,0x0a,0x08,0x06,0x02,0x0b,0x04, 0x01,0xe5,0x09,0x09,0x05,0x02,0x01,0x08,0x03,0xe7,0x01,0x03,0x0a,0xe6,0x01,0x04, 0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x03,0x03,0x01,0x07,0x01,0x03,0x03,0x01,0x07, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x07,0x01,0xe6, 0x01,0x02,0x01,0xe5,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x01,0x01,0x03, 0x01,0x07,0x01,0x04,0x02,0xe6,0x06,0x01,0x07,0x01,0x02,0x04,0x01,0x0a,0xe5,0xe6, 0x0e,0xe6,0x06,0xe5,0x07,0xe5,0x05,0x12,0x20,0xe5,0x04,0x02,0xe5,0x01,0x02,0x15, 0x06,0x08,0x06,0xe6,0x01,0x01,0xe6,0x01,0x04,0x0a,0x02,0x06,0x03,0x01,0x05,0x06, 0x07,0x04,0x03,0x05,0xe5,0x05,0xe5,0x0f,0xe5,0xe6,0x09,0x08,0x13,0x09,0x15,0x13, 0x0c,0x1a,0x01,0x03,0x05,0x08,0x02,0xe5,0xe5,0x14,0x03,0x05,0x09,0x02,0x02,0x03, 0xe5,0x0a,0x08,0x01,0x01,0x04,0x0a,0x07,0x02,0xe6,0x02,0x09,0x01,0x02,0x04,0x01, 0x07,0x01,0x0c,0x07,0x24,0x01,0x07,0x06,0x06,0xe5,0xe5,0x08,0x01,0x0b,0x17,0x0b, 0x13,0x0d,0x09,0x02,0x06,0x05,0x0b,0x07,0x02,0xe5,0xe5,0x0a,0x12,0x09,0x02,0x17, 0x16,0x09,0x22,0x01,0x0e,0x02,0x02,0x08,0x11,0xe6,0x06,0xe5,0x11,0xe5,0x02,0x1e, 0x0a,0xe5,0x01,0x08,0x16,0x0d,0x0f,0x05,0x29,0x01,0x07,0xe5,0xe5,0x09,0x02,0xe5, 0x05,0x08,0x03,0x04,0x10,0x06,0x06,0x1d,0x0a,0x1b,0x02,0xe5,0x08,0x09,0x09,0x09, 0x08,0x0f,0xe5,0x20,0x0f,0x0b,0x01,0x03,0x03,0x09,0x02,0x07,0x06,0x01,0xe5,0x07, 0x01,0x07,0x0c,0x0f,0xe6,0x07,0x03,0x09,0x09,0x0e,0x01,0x18,0xe5,0x07,0x1a,0x01, 0x02,0x09,0xe5,0x07,0x07,0x0b,0x04,0x0c,0x08,0x06,0x02,0x02,0x03,0x0c,0x01,0x07, 0x09,0x01,0x0f,0x06,0x04,0x09,0xe5,0xe5,0x02,0x08,0x06,0x08,0x04,0x02,0xe5,0x07, 0x09,0x01,0x07,0x16,0x12,0x13,0x07,0x08,0x01,0xe5,0x01,0x02,0x05,0x04,0x04,0xe5, 0xe5,0x01,0x06,0x02,0x02,0x10,0x0b,0x07,0x17,0x04,0x07,0x1a,0x0a,0x02,0x06,0x10, 0xe5,0x06,0x2e,0x11,0x09,0xe5,0xe5,0x05,0x09,0x02,0x02,0xe5,0x0a,0x05,0x02,0xe5, 0x02,0x0a,0xe5,0x08,0x16,0xe5,0x07,0xe5,0x0c,0x04,0xe6,0x17,0x02,0xe5,0x02,0x13, 
0x02,0x0d,0x01,0x41,0x18,0x0d,0x06,0xe6,0x06,0x2b,0x06,0x01,0x12,0x19,0x02,0xe5, 0x02,0x0b,0x22,0x09,0x09,0x05,0x03,0x04,0x02,0xe6,0x03,0x03,0x0f,0x03,0xe5,0xe5, 0x05,0x05,0x01,0x09,0xe5,0x05,0x01,0x04,0x04,0x03,0x02,0x06,0x05,0x03,0xe6,0x06, 0x04,0x04,0xe5,0xe5,0x01,0x04,0x01,0x16,0x0b,0x03,0x01,0xe6,0x01,0x06,0x02,0xe6, 0x17,0x04,0x26,0xe5,0x03,0x02,0xe6,0xe5,0x0c,0xe5,0x0b,0x02,0x05,0x02,0x02,0xe5, 0x03,0x03,0x06,0x07,0x01,0xe5,0x09,0x05,0x01,0x19,0x02,0xe5,0x1c,0x0d,0x01,0xe7, 0x0b,0x0e,0x10,0x20,0x07,0x1b,0x0a,0x0b,0x13,0x08,0x03,0x09,0x05,0x1c,0x02,0x04, 0x06,0x12,0x0e,0xe9,0x07,0xe5,0x04,0x09,0x02,0x01,0x0d,0xe5,0x07,0xe6,0x03,0x02, 0x17,0xe5,0x2c,0xe6,0x01,0xe5,0x03,0xe5,0x13,0x04,0x0b,0x02,0x01,0x01,0xe5,0xe7, 0x04,0x01,0x18,0x01,0x02,0x01,0x05,0x01,0x0c,0xe8,0xe5,0x01,0x01,0x05,0x01,0x04, 0x08,0x02,0xe5,0x0f,0x02,0x01,0x0a,0xe6,0x17,0xe5,0xe6,0x0e,0xe5,0x1a,0x02,0x01, 0x14,0x09,0xe6,0x06,0xe5,0x04,0x01,0xe7,0x08,0x18,0xe5,0x04,0x05,0x0b,0x01,0x02, 0xe6,0xe5,0x03,0x34,0x02,0xe5,0xe5,0xe5,0x03,0x01,0x02,0x04,0xe5,0x0c,0x03,0xe5, 0x03,0x04,0xe5,0x07,0x01,0x02,0x03,0xe5,0x08,0x01,0xe5,0x01,0x02,0x02,0x01,0xe5, 0xe5,0x01,0x01,0x01,0x02,0x04,0x01,0xe5,0xe5,0x06,0x06,0xe5,0x04,0x05,0x10,0x01, 0xe5,0x05,0x01,0xe5,0xe5,0x06,0x0b,0x05,0x02,0x02,0x39,0xe5,0xe5,0xe5,0x03,0x04, 0x11,0x06,0xe5,0xe5,0x04,0xe5,0x03,0x04,0xe5,0xe5,0x10,0xe5,0x08,0xe5,0xe5,0xe5, 0x03,0xe5,0xe5,0xe5,0x03,0x02,0x01,0x04,0xe5,0xe5,0x05,0x0e,0x0e,0xe5,0x07,0x02, 0x01,0x07,0x0d,0x02,0xe5,0xe6,0x11,0x09,0x09,0x18,0x01,0x05,0x06,0x0a,0x02,0x02, 0x02,0x0b,0x01,0xe5,0x05,0x01,0xe5,0x03,0x01,0x01,0x04,0x03,0x02,0x0f,0x06,0x02, 0xe5,0xe7,0xe5,0x01,0x04,0x04,0x02,0x01,0x01,0x07,0x0b,0xe5,0xe5,0xe5,0xe5,0x07, 0x06,0x04,0x0a,0x06,0xe7,0x01,0x3c,0x03,0x0a,0x0d,0xe5,0x02,0x02,0x01,0x09,0xe5, 0x02,0x04,0x02,0x02,0xe5,0x01,0x10,0x02,0x01,0x04,0x05,0x04,0x04,0x03,0x02,0x01, 0xe5,0x03,0x01,0x01,0xe5,0xe5,0xe5,0x06,0x01,0x01,0x02,0xe5,0x01,0x05,0x03,0x0a, 
0x05,0x03,0x09,0x02,0xe6,0xe6,0x12,0x01,0x07,0x06,0x02,0x01,0x05,0x0e,0x01,0x02, 0x01,0x01,0x02,0x0f,0x03,0x01,0xe5,0xe5,0x03,0x02,0x02,0x01,0x04,0x02,0x01,0x01, 0x0e,0x05,0x03,0x04,0x01,0x04,0x04,0x07,0x01,0x04,0x01,0xe5,0x0d,0x04,0x0b,0x02, 0x02,0x0b,0x01,0x01,0x05,0x06,0x08,0x01,0xe5,0xe5,0x10,0xe7,0x05,0xe5,0x01,0x06, 0x01,0x16,0x01,0x04,0x01,0x02,0x09,0x07,0x04,0x01,0xe7,0x08,0x09,0x01,0x08,0x14, 0x01,0x01,0xe5,0x01,0x01,0x09,0x01,0xe6,0xe6,0x01,0x15,0x0a,0xe5,0x0e,0x01,0x02, 0x04,0x05,0xe6,0x08,0xe7,0x0f,0x09,0x09,0x04,0x36,0x0c,0x09,0x02,0x12,0x05,0x03, 0x06,0x13,0x27,0x0c,0x06,0x0c,0x02,0x07,0x02,0xe5,0x0f,0xe5,0x07,0xe5,0x07,0xe5, 0x1f,0x10,0x08,0xe5,0x08,0x02,0xe6,0x06,0x09,0x07,0xe5,0x03,0x07,0x01,0x03,0xe5, 0x1f,0x13,0x09,0xe5,0x04,0x02,0x01,0x03,0xe5,0x0b,0xe5,0x08,0x02,0xe5,0x0d,0xe5, 0x07,0xe5,0x07,0xe6,0x06,0x09,0x09,0x05,0x03,0x09,0x09,0xe6,0x06,0x06,0x02,0x05, 0x03,0x05,0x02,0x02,0xe5,0x07,0x02,0xe5,0x04,0xe5,0x04,0x02,0x09,0x06,0x02,0x05, 0x03,0x09,0x03,0x05,0xe5,0x02,0x01,0x02,0xe6,0xe5,0x04,0xe7,0x02,0x02,0x02,0x0a, 0x03,0xe5,0x01,0x6a,0x02,0x13,0x0b,0x12,0xe5,0x08,0x08,0xe5,0x1c,0x1c,0xe5,0x0d, 0x02,0xe5,0x01,0x11,0x09,0x52,0x12,0x04,0x08,0x03,0x0b,0x03,0x07,0x0b,0x1c,0x1e, 0x13,0xe5,0xe5,0x09,0x04,0xe7,0xe5,0x04,0x01,0x01,0x02,0x01,0xe5,0xe5,0x05,0x07, 0x01,0x1a,0xe5,0xe5,0x08,0xe7,0x19,0x09,0x01,0x01,0x11,0xe5,0xe5,0x05,0x13,0x1d, 0xe5,0x0c,0x05,0xe6,0x02,0xe5,0xe5,0x0e,0x01,0xe5,0x0e,0xe6,0x08,0x04,0xe5,0xe7, 0x03,0x02,0xe5,0x05,0x01,0xe5,0x09,0x09,0x02,0x02,0x01,0xe6,0xe5,0x04,0xe6,0x08, 0x01,0x07,0x02,0x04,0x01,0x07,0x01,0xe6,0xe5,0x06,0x07,0xe6,0x04,0x03,0x08,0xe5, 0x04,0x03,0x08,0x03,0x05,0x06,0x03,0x02,0x05,0xe5,0x08,0x02,0x02,0x01,0xe5,0x05, 0xe5,0x02,0x02,0xe5,0xe6,0x0d,0x01,0x0f,0x01,0x09,0x07,0x01,0x0e,0x08,0x03,0x01, 0x04,0x04,0x03,0x03,0x01,0x13,0x08,0x02,0x01,0x01,0x0f,0x03,0xe5,0x03,0x0e,0x02, 0x01,0x11,0x01,0x11,0x01,0x0a,0x06,0x01,0x0e,0x01,0x01,0x0f,0x05,0xe5,0x07,0xe6, 
0xe5,0x04,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5, 0x01,0xe5,0x01,0x01,0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe6,0x06,0xe5,0x01,0x01,0x05, 0xe5,0x07,0xe5,0x01,0x05,0xe6,0x05,0xe5,0x06,0xe6,0xe5,0x04,0x01,0x07,0x01,0xe6, 0xe5,0x02,0xe6,0x02,0x05,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x03,0x02, 0xe7,0x08,0x05,0x0a,0xe5,0x10,0x11,0x01,0x07,0x01,0x09,0x09,0xe5,0x03,0x17,0x09, 0x01,0x0a,0x08,0xe5,0x05,0x01,0x07,0x02,0xe5,0x02,0x03,0x05,0x01,0x01,0xe5,0x01, 0x01,0x04,0x02,0xe6,0x02,0xe5,0x19,0x01,0xe6,0x05,0x05,0x02,0xe5,0x08,0x05,0x02, 0x09,0x09,0x06,0x11,0x01,0x02,0x04,0x01,0x02,0x06,0x02,0x06,0x02,0xe5,0x07,0x08, 0x07,0x01,0x07,0x01,0x02,0x08,0x07,0xe6,0x04,0x04,0x03,0x02,0x02,0x03,0xe5,0x03, 0x03,0xe5,0xe5,0x09,0x05,0xe5,0x01,0xe5,0x08,0x0a,0x04,0x01,0x07,0xe5,0x03,0x01, 0xe5,0xe6,0x0d,0x1d,0x1b,0x01,0x09,0x09,0xe5,0x03,0x17,0x08,0xe5,0xe5,0x09,0x08, 0x09,0x04,0x02,0x05,0xe6,0x02,0x03,0x01,0x01,0x01,0xe5,0x03,0x01,0x01,0x05,0x0d, 0x11,0x09,0x07,0x01,0xe5,0x01,0x0c,0x1d,0x1b,0x01,0x09,0x09,0x03,0x10,0x06,0x0b, 0x01,0x13,0x13,0xe5,0x01,0xe5,0x08,0x02,0x01,0xe5,0x05,0x01,0xe5,0x01,0x06,0x06, 0x13,0x01,0xe5,0x0a,0x01,0x02,0xe5,0x2e,0x12,0x06,0x02,0x06,0x02,0x14,0x05,0x01, 0xe6,0x10,0x02,0x09,0x10,0xe5,0xe5,0x03,0x01,0x02,0x06,0xe5,0xe5,0x13,0x05,0x02, 0x06,0x02,0x10,0xe5,0xe5,0x06,0x05,0xe5,0xe6,0x0a,0x05,0x01,0x07,0x01,0x01,0x05, 0x01,0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x01,0x03,0x01,0x07,0x01,0x07,0x01, 0x07,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x05,0x03,0x01,0x02,0x04,0x01,0xe5,0x05, 0x01,0x03,0x03,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x03,0x03,0x01, 0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0xe6,0x01,0x02,0x02,0xe5,0xe6,0x0f,0x01, 0x02,0x03,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x0d,0x01,0x03,0xe5,0x03,0x09,0xe5,0x01, 0x03,0x03,0xe5,0x09,0x01,0x02,0x04,0x10,0xe5,0xe6,0xe5,0x03,0x01,0xe5,0x01,0x05, 0xe6,0xe5,0x0c,0xe5,0x01,0x06,0x05,0x04,0xe5,0x01,0x0c,0xe5,0x03,0xe6,0x01,0xe5, 
0x03,0xe5,0x0c,0x04,0x0c,0xe7,0x01,0x08,0xe5,0x07,0xe5,0x07,0x01,0x05,0x0a,0x14, 0x07,0x09,0x03,0x04,0x0a,0x0f,0x08,0x02,0x03,0x03,0x01,0x04,0x01,0x08,0x1d,0x01, 0xe5,0xe5,0x03,0x11,0x0b,0x08,0x0a,0x01,0x04,0xe5,0xe5,0xe5,0x01,0x01,0x06,0x06, 0x05,0x09,0x08,0x01,0x17,0x01,0x0e,0x07,0x02,0xe5,0x20,0x04,0x01,0x01,0x01,0x03, 0x01,0x01,0x04,0x11,0x09,0x09,0x19,0x13,0x0f,0x02,0x03,0xe7,0x09,0x0f,0x06,0x13, 0x01,0x03,0x0a,0x01,0xe6,0x05,0x03,0x0e,0x02,0x01,0x06,0xe5,0xe5,0x01,0x07,0x01, 0x06,0x01,0x09,0x04,0x05,0xe5,0x01,0x01,0xe8,0x07,0x07,0x01,0xe6,0xe5,0x02,0xe6, 0x04,0x0b,0x01,0x07,0x01,0x11,0x01,0x01,0x06,0x02,0x03,0x02,0x02,0x08,0x13,0x03, 0x12,0x01,0x16,0x09,0x07,0x07,0x07,0x01,0x03,0x02,0x0a,0x03,0x02,0xe5,0x17,0x08, 0x07,0x03,0x03,0x04,0x10,0x01,0x01,0x09,0x05,0x09,0x01,0x02,0x07,0x05,0x01,0xe6, 0x08,0x01,0x07,0x09,0x09,0x15,0x03,0x08,0xe5,0x06,0x01,0x02,0x07,0x01,0x02,0x01, 0x01,0x05,0x02,0xe5,0x08,0x07,0x01,0x01,0x07,0x01,0x03,0x01,0x03,0x03,0x0e,0xe5, 0x03,0x02,0x01,0x09,0x01,0x03,0x03,0x0e,0x02,0x05,0x05,0x03,0x05,0x03,0x0e,0x0c, 0x0d,0x03,0x1b,0x0e,0x01,0x07,0xe5,0x07,0x03,0x05,0xe6,0x01,0x04,0x05,0x03,0xe7, 0x07,0x05,0x03,0x02,0x05,0xe5,0x0b,0x05,0x0a,0x04,0x03,0xe5,0x01,0x06,0x04,0x01, 0x05,0x10,0x02,0xe5,0x03,0xe5,0x09,0x06,0x01,0xe5,0x11,0x01,0x06,0xe5,0x24,0x05, 0x0c,0x02,0x04,0x04,0xe5,0x08,0x03,0x07,0x0b,0x06,0x01,0xe6,0x01,0xe6,0x03,0x02, 0xe5,0xe5,0x07,0x09,0x27,0x0a,0x03,0x0e,0x02,0x07,0x04,0xe5,0x07,0x02,0x0b,0xe6, 0x06,0x01,0x07,0xe6,0x0a,0x06,0x03,0x1c,0x04,0x07,0x09,0x0b,0xe5,0x0a,0xe5,0x02, 0x01,0x02,0xe5,0x04,0x02,0xe5,0xe5,0x01,0x03,0xe5,0xe7,0x02,0x01,0x06,0xe7,0x03, 0x02,0xe6,0x03,0xe5,0x05,0x04,0xe7,0x06,0xe6,0x02,0x08,0x0c,0xe7,0xe5,0x0e,0x08, 0x13,0xe5,0x11,0x29,0x18,0x05,0x04,0x03,0x02,0x03,0x01,0x01,0x03,0xe6,0xe5,0x04, 0xe6,0xe5,0x03,0x03,0x04,0x05,0x07,0x02,0x13,0xe5,0xe6,0x03,0x1a,0xe6,0x01,0x36, 0x03,0x03,0xe6,0x06,0x1d,0x06,0x02,0x03,0x05,0xe5,0x01,0x07,0x08,0x08,0x01,0xe5, 
0x09,0x02,0x08,0x05,0x08,0x03,0x08,0xe5,0x05,0x06,0x11,0xe5,0x11,0x02,0xe6,0x19, 0x25,0x01,0x07,0x01,0x01,0x01,0x0a,0x0b,0xe5,0x04,0x03,0x02,0x04,0xe6,0xe6,0x07, 0xe5,0xe5,0x10,0x1c,0xe5,0x07,0x03,0x07,0x04,0x03,0x1e,0x0b,0xe6,0xe6,0x3e,0xe5, 0x07,0xe5,0x03,0x07,0x10,0x08,0xe5,0x16,0x16,0x09,0x0c,0x03,0x08,0x01,0x0f,0x2e, 0x01,0xe8,0x07,0xe5,0x02,0x01,0xe5,0x0a,0x01,0x04,0x08,0xe5,0x15,0xe5,0x03,0xe6, 0xe5,0xe6,0x04,0xe5,0x06,0x14,0xe5,0x04,0x0f,0xe5,0x04,0x02,0xe5,0x04,0x0a,0x01, 0xe5,0x04,0x08,0xe6,0x03,0x02,0xe6,0x03,0xe5,0x01,0x0a,0x01,0x01,0x04,0xe5,0xe6, 0x04,0x01,0xe5,0x05,0x0a,0xe6,0xe5,0xe5,0x01,0x01,0x05,0x01,0x04,0x05,0xe5,0x03, 0xe5,0x05,0x08,0x10,0xe5,0x04,0xe5,0x06,0xe5,0x03,0x03,0xe5,0x15,0xe5,0x03,0xe5, 0x0c,0x05,0x01,0x01,0x03,0x03,0x01,0x04,0x02,0xe5,0x0e,0x08,0x03,0xe5,0xe7,0x06, 0xe5,0x01,0x05,0x04,0x01,0xe5,0x03,0x03,0x01,0x03,0x0a,0x07,0x01,0x02,0xe6,0xe5, 0x03,0x14,0x21,0xe5,0x08,0xe5,0x04,0x05,0x1a,0xe5,0x02,0x03,0xe5,0x08,0xe5,0x02, 0x06,0xe5,0x01,0x01,0x03,0xe5,0x01,0x01,0x02,0xe5,0x01,0x01,0x1b,0x01,0x01,0xe5, 0x03,0x01,0x03,0xe5,0x03,0x16,0x10,0x04,0x02,0xe5,0x01,0x14,0x23,0xe5,0x08,0x09, 0xe5,0x1a,0xe5,0x01,0x0f,0xe5,0x09,0xe5,0x07,0xe5,0x08,0x02,0x01,0x0d,0xe5,0x08, 0x02,0x01,0x04,0xe5,0xe5,0xe5,0x02,0xe5,0x01,0x19,0xe5,0x03,0x0b,0x03,0x01,0xe5, 0x11,0x06,0x02,0xe5,0x01,0x05,0x1a,0x04,0xe6,0x09,0x01,0x04,0x01,0xe5,0xe5,0x01, 0x04,0x01,0x02,0x09,0xe5,0x01,0x02,0x10,0x01,0x04,0xe6,0x01,0x04,0xe5,0x01,0x06, 0x04,0x01,0x02,0x0c,0x01,0x01,0x07,0x01,0x02,0x03,0x09,0x05,0x04,0x01,0x02,0x09, 0x03,0x09,0xe6,0x01,0x14,0x03,0x01,0x02,0x1e,0x01,0x01,0x01,0x02,0x02,0x01,0x01, 0x02,0xe5,0xe5,0xe5,0x03,0xe5,0xe5,0xe5,0x04,0x01,0x01,0x0a,0x01,0x01,0x04,0x06, 0x04,0x04,0x04,0x09,0x02,0x01,0x04,0x05,0x01,0x01,0x09,0x02,0x01,0xe5,0x02,0x02, 0x01,0xe5,0xe5,0xe5,0x02,0x01,0x03,0x02,0xe6,0x09,0x01,0x01,0x09,0x03,0xe5,0x03, 0x02,0xe5,0xe7,0x12,0x01,0x02,0x04,0x06,0x1b,0x11,0x04,0x02,0x01,0x01,0x01,0x02, 
0x01,0xe5,0xe5,0xe5,0x01,0x01,0xe5,0xe5,0x19,0xe6,0xe5,0x01,0x02,0x03,0x02,0x05, 0x01,0x01,0x02,0x01,0xe5,0xe5,0xe5,0x04,0x09,0x01,0x07,0x01,0x04,0x09,0x01,0x0a, 0x01,0x01,0xe5,0x0b,0x07,0xe7,0xe5,0x10,0xe7,0x01,0x03,0xe5,0xe6,0x05,0x1a,0x11, 0x09,0x01,0x02,0x01,0x02,0x01,0x04,0x01,0xe5,0xe5,0x11,0x07,0x07,0x01,0xe5,0x05, 0x04,0x01,0x02,0x01,0x02,0x01,0x02,0x0e,0x01,0x07,0x01,0x01,0xe5,0xe8,0x0c,0xe5, 0x03,0x01,0x02,0x08,0xe5,0x0a,0xe6,0xe5,0x0f,0x09,0x04,0x04,0x20,0x02,0x06,0xe5, 0x07,0x02,0x03,0x04,0x1b,0x0b,0x01,0x03,0xe5,0x01,0x01,0x03,0xe5,0x01,0xe5,0x04, 0x04,0x04,0x0c,0x02,0x06,0xe5,0x04,0x02,0xe5,0x0e,0x09,0x09,0x02,0x0a,0xe5,0x01, 0x10,0xe5,0x07,0xe5,0x07,0xe5,0x1f,0x13,0x05,0xe5,0x21,0x0b,0x03,0xe5,0x01,0x01, 0x03,0xe5,0x01,0xe5,0x03,0xe5,0x07,0xe5,0x0b,0x09,0x05,0xe5,0x01,0x09,0xe5,0x03, 0xe5,0x07,0xe5,0x07,0xe5,0x03,0x07,0x03,0xe5,0x0d,0xe5,0x07,0xe5,0x07,0xe6,0x06, 0x09,0x09,0x03,0x02,0x02,0x06,0x02,0x02,0xe5,0x04,0xe6,0x06,0x02,0x01,0x01,0x02, 0x06,0x02,0x06,0x01,0x02,0x09,0xe6,0xe5,0x01,0x02,0xe6,0x06,0xe7,0x05,0xe7,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x04,0x01,0x02,0xe5,0x04, 0x02,0xe6,0x06,0xe5,0x0a,0xe5,0x02,0xe5,0x01,0x42,0x09,0x02,0x24,0x09,0x0e,0x06, 0x1f,0xe5,0x05,0x0b,0xe5,0x05,0x02,0x09,0x08,0xe5,0x17,0x02,0xe5,0x01,0x11,0x09, 0x35,0x34,0x06,0x05,0x09,0x19,0x13,0x05,0x03,0x08,0x06,0x02,0x10,0x10,0x01,0x0e, 0xe6,0x06,0x01,0x07,0xe6,0x20,0xe5,0xe5,0x06,0x06,0x05,0xe6,0x05,0x07,0x01,0x06, 0xe5,0xe5,0x08,0x01,0x0a,0x01,0x07,0xe6,0x06,0xe6,0x03,0x01,0xe7,0x02,0xe5,0xe5, 0x08,0x07,0x01,0xe7,0x05,0x04,0x04,0xe7,0xe5,0x03,0xe7,0x06,0xe6,0x0a,0xe5,0x01, 0x11,0x09,0x02,0x06,0x16,0x06,0x05,0x02,0x08,0x01,0x09,0x07,0xe6,0x06,0x01,0x05, 0x01,0xe5,0x07,0x01,0xe5,0x02,0x06,0x02,0x06,0x09,0x05,0x01,0xe6,0x04,0x01,0xe5, 0x05,0x02,0x06,0x01,0xe6,0x06,0xe6,0x04,0x02,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x04, 0xe5,0x02,0x01,0xe6,0xe5,0x10,0x02,0x0f,0x02,0x19,0x07,0x01,0x07,0x01,0x10,0x02, 
0x07,0x01,0x03,0x03,0x01,0x03,0xe5,0xe5,0xe6,0xe5,0x13,0x03,0x05,0x06,0x01,0xe5, 0x01,0x03,0x01,0x09,0x03,0xe5,0x03,0x01,0x07,0x09,0x09,0x07,0x01,0x06,0x0a,0xe5, 0x12,0xe7,0x05,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe6, 0xe5,0x04,0xe5,0x02,0x02,0x01,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe6,0xe5,0x02,0x01, 0xe5,0x01,0x01,0x05,0xe5,0x05,0x01,0xe5,0x05,0x01,0x08,0xe5,0xe5,0xe5,0x03,0xe5, 0x02,0x03,0xe6,0x02,0x03,0xe8,0x06,0x03,0x05,0xe6,0x01,0x02,0xe8,0x01,0x02,0xe8, 0xe6,0x03,0xe6,0x03,0x01,0xe8,0x19,0x27,0x06,0x01,0x07,0x01,0x0a,0x08,0xe6,0x06, 0x09,0x09,0x01,0xe6,0x0c,0x09,0x09,0x08,0xe5,0xe5,0xe5,0x04,0x03,0xe5,0x02,0xe6, 0x02,0x04,0x04,0xe5,0x06,0xe5,0x07,0xe5,0x08,0xe5,0x05,0x06,0xe7,0x11,0xe5,0x01, 0x03,0x01,0xe5,0x01,0x05,0xe5,0x01,0x17,0x01,0x04,0x01,0x07,0x01,0x02,0x09,0xe5, 0x04,0x02,0x08,0x05,0x01,0x03,0x04,0xe5,0x0c,0x09,0x04,0x04,0x04,0x04,0x04,0xe5, 0xe5,0x05,0xe5,0x01,0x09,0x01,0x03,0xe5,0x02,0x05,0x04,0xe5,0x01,0xe5,0x03,0xe5, 0x01,0x05,0x04,0xe5,0x03,0xe9,0x13,0xe5,0x07,0xe5,0x07,0xe5,0x16,0x07,0x01,0x07, 0x01,0x0a,0x04,0x03,0x09,0x07,0x01,0x05,0x03,0x01,0x0d,0x01,0x07,0x01,0x08,0xe5, 0x08,0x01,0x01,0x03,0xe6,0x02,0x03,0x01,0x03,0x05,0x03,0x09,0xe5,0x02,0xe5,0x02, 0xe5,0x02,0xe5,0x0a,0x06,0x01,0xe6,0x12,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x15,0xe5,0x05,0x01,0x07,0x01,0xe5,0x0b,0x01,0x03,0x11,0x01,0xe5,0x01,0x01,0x03, 0x01,0xe5,0x0b,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0x03,0xe5,0x01, 0xe5,0x01,0x05,0xe5,0x04,0x06,0x05,0x01,0xe6,0x04,0x01,0xe5,0x0e,0x04,0xe5,0x13, 0x13,0x16,0x02,0x06,0xe5,0xe5,0x05,0x27,0xe5,0xe5,0x07,0x09,0xe5,0xe5,0x08,0x01, 0x13,0x02,0x01,0xe5,0xe6,0x04,0xe5,0xe5,0x05,0x09,0xe5,0xe5,0x1b,0xe5,0x06,0x05, 0xe5,0xe6,0xe6,0x06,0x06,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x01, 0x05,0x01,0x04,0x01,0xe5,0xe5,0xe5,0x04,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0xe6, 0x04,0x01,0x07,0x01,0x01,0x04,0xe5,0xe6,0x02,0xe5,0x01,0xe9,0xe5,0x02,0x01,0xe6, 
0x04,0x01,0xe6,0x04,0x01,0x07,0x01,0x01,0x02,0x02,0x01,0x04,0x02,0x01,0x01,0x02, 0x02,0x01,0xe6,0xe5,0x02,0x01,0xe6,0x04,0x01,0x06,0xe5,0xe5,0xe5,0x04,0x01,0x04, 0x02,0x02,0x03,0x0f,0xe5,0x01,0x08,0x02,0xe5,0x01,0xe5,0x19,0x05,0x04,0x09,0x06, 0x03,0xe5,0x02,0xe5,0x0e,0x06,0x16,0x03,0x03,0x01,0xe5,0x11,0x0c,0x08,0x07,0xe5, 0x0b,0x06,0x02,0x05,0x09,0xe5,0x0e,0x01,0xe5,0x06,0x03,0xe5,0xe5,0x05,0x0a,0x01, 0x10,0x0d,0x05,0x15,0x18,0x09,0xe5,0x09,0x02,0x06,0x13,0x02,0x02,0x0c,0x0d,0xe5, 0xe5,0x05,0x0a,0x07,0x05,0x04,0xe5,0xe5,0xe5,0x03,0x03,0x02,0x01,0x02,0x02,0x0f, 0x35,0x06,0x02,0x08,0x02,0x0f,0x24,0x06,0xe5,0xe6,0x04,0x09,0x09,0x09,0x09,0x09, 0x16,0x05,0x04,0xe5,0x0b,0x01,0x01,0x01,0x06,0xe6,0x18,0x09,0x14,0x03,0x01,0xe6, 0x06,0x01,0x01,0x03,0x05,0xe5,0x0c,0x02,0x01,0xe5,0x01,0x01,0xe7,0x07,0x01,0xe5, 0x02,0x04,0xe5,0x07,0x01,0x02,0x0c,0x01,0x02,0xe5,0x04,0xe5,0x09,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x02,0x05,0x0a,0x05,0xe7,0x0c,0x01,0xe6,0x0a,0x08,0x2e,0x03,0x0e, 0x03,0x01,0x11,0x12,0x03,0xe5,0x01,0x0d,0x09,0x0e,0x01,0x03,0x03,0x04,0x17,0x02, 0x0d,0x20,0xe5,0x0e,0x03,0x09,0x09,0x06,0x1a,0xe6,0x02,0x03,0xe6,0x08,0x08,0xe5, 0x04,0x01,0x04,0x03,0xe5,0x08,0x01,0x05,0x03,0x02,0x06,0x0c,0x02,0x15,0x03,0x03, 0x01,0x0d,0x0d,0x01,0x03,0x03,0x03,0x01,0x03,0x0a,0x01,0xe6,0x0c,0x03,0x01,0x2e, 0x0d,0x02,0x09,0x02,0x06,0x10,0x0d,0x13,0x28,0xe5,0x0f,0x06,0x0e,0xe6,0x0c,0x0b, 0x01,0xe5,0x02,0x18,0x02,0x24,0x0e,0x08,0xe5,0x01,0xe5,0x04,0x0d,0x1a,0x05,0x03, 0x07,0x09,0x16,0x10,0x05,0x01,0x01,0x09,0x0f,0x0b,0x01,0x01,0x01,0x0e,0x12,0xe5, 0x11,0x01,0x03,0x04,0x08,0xe6,0x02,0x12,0x01,0x08,0x02,0x03,0x07,0x08,0xe8,0x02, 0x03,0x05,0xe5,0x02,0x0b,0x02,0xe5,0x0e,0x02,0x09,0x07,0x01,0x09,0x07,0x01,0x03, 0xe6,0x02,0x06,0x0d,0xe8,0x02,0x0c,0x02,0x0d,0xe5,0x1c,0x0a,0x16,0x17,0x01,0x04, 0xe5,0x01,0x04,0x04,0x01,0x0b,0x05,0x09,0xe5,0x08,0x19,0x02,0x05,0x03,0x0c,0x19, 0xe6,0xe5,0x13,0x1d,0x09,0x04,0x04,0x02,0x01,0x09,0x0e,0x04,0xe5,0x02,0x15,0x04, 
0x03,0xe5,0x07,0x09,0x1b,0x01,0x07,0x01,0xe5,0x07,0xe5,0x01,0x01,0xe5,0x02,0x04, 0xe5,0x0a,0x07,0x09,0x01,0x01,0xe5,0xe7,0x11,0x07,0x0d,0x16,0xe5,0x08,0x06,0x01, 0xe5,0x01,0x01,0x0d,0x03,0x1a,0xe6,0x01,0x01,0x11,0x10,0xe6,0x02,0x05,0x0a,0x10, 0xe5,0x07,0x01,0xe5,0x1d,0xe5,0xe7,0x04,0x0b,0x20,0x0d,0x09,0x09,0x02,0xe5,0x2c, 0x05,0x24,0x27,0x06,0xe5,0xe5,0x05,0x0e,0x09,0x02,0xe5,0x01,0xe6,0x07,0xe5,0x04, 0x05,0x04,0x01,0x01,0x04,0x26,0xe6,0xe5,0xe5,0x06,0xe5,0x01,0x02,0x01,0xe5,0x01, 0x02,0xe5,0x09,0x01,0xe5,0x0b,0x04,0x09,0x09,0x02,0xe5,0x0a,0x03,0x05,0x17,0x32, 0xe5,0xe6,0xe5,0x01,0x01,0x05,0x01,0x03,0x01,0xe6,0x01,0x04,0xe6,0x04,0x26,0xe5, 0x02,0xe5,0x01,0xe5,0x02,0xe5,0x07,0xe5,0x06,0x04,0xe5,0x04,0x01,0x0c,0xe5,0x01, 0x09,0x05,0xe5,0x07,0xe5,0x07,0x03,0x02,0x01,0xe5,0x05,0x01,0x06,0xe5,0x12,0x01, 0xe6,0x1d,0x01,0x02,0x03,0x04,0x0d,0x04,0x04,0x1e,0xe5,0x01,0x01,0x04,0x01,0x05, 0xe6,0x03,0x0b,0x02,0x01,0x04,0xe5,0xe6,0x18,0x13,0x01,0x07,0x07,0x09,0x17,0x01, 0xe5,0x01,0x0b,0x0d,0xe5,0x04,0x08,0x05,0xe7,0x01,0x14,0x24,0x02,0x01,0x0d,0xe5, 0x03,0x0e,0x04,0x04,0x18,0x13,0x09,0x09,0x09,0x19,0xe5,0x0b,0x14,0x01,0x0a,0x03, 0x02,0x05,0x07,0x01,0x02,0x04,0x04,0x09,0x16,0x11,0x02,0x02,0xe5,0x04,0xe5,0xe5, 0x01,0x02,0x04,0x04,0xe5,0x01,0x03,0x02,0x05,0x08,0x02,0x03,0x05,0x06,0x02,0x01, 0x01,0x02,0x02,0x04,0x04,0x04,0x04,0x04,0x01,0x02,0x09,0x06,0x02,0x01,0x01,0x02, 0x02,0xe5,0x01,0xe5,0xe5,0x01,0x04,0x04,0x01,0x01,0x07,0x01,0x01,0x02,0x0a,0x01, 0x01,0x05,0x03,0x27,0x09,0x07,0x01,0x02,0xe7,0xe5,0xe5,0x04,0x02,0x02,0x01,0xe6, 0x02,0x10,0xe5,0x02,0x01,0x01,0xe5,0x09,0x03,0x01,0x02,0xe5,0x02,0x02,0x01,0xe5, 0x02,0x05,0x03,0x05,0x01,0x01,0x0f,0x03,0xe5,0x02,0xe5,0x02,0x01,0x02,0xe5,0xe5, 0xe5,0x0b,0x01,0x04,0x04,0x01,0xe5,0x0b,0x01,0x02,0x01,0x01,0xe5,0x04,0xe5,0x05, 0x19,0x01,0x1b,0x01,0x01,0x01,0xe5,0x03,0x04,0x01,0x07,0x01,0x0b,0x02,0x02,0x0a, 0xe5,0x03,0x04,0x01,0x02,0x01,0x05,0x09,0x05,0xe5,0x01,0x01,0x01,0xe7,0x07,0xe5, 
0x03,0x05,0x03,0x04,0x01,0xe5,0xe5,0xe5,0x01,0x06,0x01,0xe5,0x07,0xe5,0xe5,0xe5, 0x01,0x0a,0x01,0x01,0xe7,0x01,0x03,0xe5,0x08,0x18,0x01,0x13,0xe5,0x05,0x01,0x02, 0x01,0x05,0x03,0x08,0xe5,0x05,0xe5,0x05,0x03,0xe5,0xe5,0x05,0xe5,0x03,0x03,0xe5, 0xe5,0x01,0x04,0x01,0x02,0x03,0xe7,0x01,0x03,0xe5,0x03,0x01,0x02,0x08,0xe5,0xe5, 0x01,0x01,0x07,0x03,0xe5,0xe5,0x01,0x01,0x02,0x01,0xe5,0x03,0x02,0xe5,0x09,0xe5, 0xe5,0x01,0x0d,0x09,0x09,0x31,0x02,0x06,0x04,0x04,0x02,0x01,0x07,0x06,0x0b,0x04, 0x04,0x09,0x04,0x04,0x04,0x04,0x09,0x09,0x02,0xe5,0x04,0x04,0x07,0xe5,0x04,0x04, 0x04,0x02,0xe6,0x13,0x01,0x10,0xe5,0x07,0xe5,0x07,0xe5,0x29,0xe5,0x03,0x03,0x01, 0x03,0xe5,0x07,0xe5,0x03,0x04,0x02,0x01,0x03,0xe5,0x01,0x03,0x03,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x0f,0xe5, 0x07,0xe5,0x01,0x06,0x0d,0x02,0xe5,0x0d,0xe5,0x07,0xe5,0x07,0xe6,0x06,0x09,0x09, 0x09,0x06,0x02,0xe5,0x01,0x05,0xe6,0x03,0x02,0xe5,0x02,0x01,0x02,0x05,0x03,0xe5, 0x01,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe6, 0xe5,0x04,0xe5,0x01,0x02,0x02,0x02,0x06,0xe5,0x07,0xe6,0x02,0x03,0x0d,0xe5,0xe7, 0x01,0x45,0x06,0x02,0x12,0xe5,0x08,0x32,0x2f,0x02,0x2c,0x02,0xe5,0xe6,0x10,0x09, 0x29,0x0b,0x13,0x05,0x03,0x05,0x09,0x0b,0x09,0x09,0x13,0x09,0x13,0x03,0x0f,0x20, 0xe5,0xe6,0x0c,0x01,0xe7,0x06,0xe6,0x03,0x01,0xe5,0xe5,0x19,0x04,0x02,0x01,0x04, 0x04,0xe6,0x04,0x01,0xe7,0x06,0xe5,0x0d,0xe5,0xe8,0x04,0x02,0xe7,0x03,0x01,0xe7, 0x03,0x01,0xe7,0x05,0xe7,0x05,0xe7,0x06,0x01,0x04,0x01,0xe5,0xe5,0x03,0x01,0xe5, 0xe5,0x02,0x02,0x09,0xe5,0xe5,0x02,0xe5,0xe8,0x14,0x01,0xe6,0x0c,0x01,0xe6,0x08, 0x05,0x01,0xe6,0x1a,0xe5,0x05,0x01,0xe5,0x09,0x02,0x01,0xe5,0xe7,0x04,0x02,0xe5, 0x01,0x06,0x05,0x01,0xe6,0x03,0x04,0xe6,0x04,0x01,0xe6,0x03,0xe5,0xe7,0x03,0xe5, 0x01,0xe5,0x01,0x04,0xe6,0x08,0x05,0x01,0xe6,0x04,0x01,0xe6,0x06,0xe6,0x06,0xe6, 0x04,0x01,0xe6,0x07,0xe5,0xe5,0x02,0xe5,0x02,0x03,0x01,0xe5,0x0b,0x01,0x01,0x0e, 
0xe5,0xe5,0x02,0x16,0x02,0x07,0x01,0x06,0x0a,0x01,0x01,0x02,0x01,0x02,0x14,0x02, 0x07,0xe5,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0xe5,0x07,0x09,0x01,0x18,0x0c,0x06, 0x02,0x0a,0x02,0x16,0xe6,0x0c,0x01,0x03,0xe7,0x02,0x02,0xe8,0xe5,0x02,0xe7,0x07, 0xe5,0x07,0xe5,0x01,0x05,0xe6,0x06,0xe5,0x02,0x04,0xe6,0xe5,0x02,0x01,0xe5,0x02, 0x04,0xe5,0x07,0xe5,0x07,0xe5,0xe5,0x01,0x03,0xe8,0xe5,0x02,0xe8,0xe5,0x02,0xe8, 0x01,0x02,0xe7,0x01,0x03,0xe7,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0xe8,0xe5,0x04, 0xe5,0x01,0x03,0xe7,0x05,0xe7,0x07,0xe7,0x02,0x02,0x01,0xe5,0x15,0x03,0x05,0x09, 0x0d,0x08,0xe5,0x05,0x01,0xe5,0x12,0x06,0x02,0xe5,0x07,0xe5,0x04,0x01,0x0b,0xe6, 0x01,0x05,0x09,0xe5,0x06,0xe5,0x02,0x04,0xe5,0x02,0x0c,0x01,0x07,0x01,0xe6,0x10, 0x06,0xe5,0xe6,0x04,0xe5,0x01,0xe5,0x0b,0x03,0x11,0x01,0x01,0xe5,0x03,0x01,0x01, 0xe5,0x03,0x01,0x01,0xe5,0x0c,0x08,0xe5,0x05,0x01,0x0b,0x09,0x01,0x01,0x05,0x09, 0x05,0x01,0x01,0x03,0xe5,0x03,0x02,0xe5,0x01,0x03,0x01,0x03,0x05,0x03,0xe5,0x03, 0x03,0x02,0xe5,0xe5,0x02,0x05,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x02,0x01,0x02,0xe5, 0x01,0xe5,0x03,0x06,0x01,0x01,0x01,0xe6,0xe5,0xe5,0xe5,0xe6,0x01,0x02,0x04,0xe5, 0x03,0xe6,0xe6,0x13,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x16,0x07,0x01,0xe5,0x12,0x04, 0x0d,0x0f,0x05,0x05,0x07,0xe6,0x06,0xe6,0x02,0xe5,0x01,0x01,0x03,0x03,0x01,0x09, 0x09,0x08,0xe5,0x0c,0x04,0xe5,0x03,0x03,0xe5,0xe5,0x08,0x0a,0x13,0x09,0x09,0x10, 0x08,0xe5,0x05,0x01,0xe5,0x15,0x05,0x07,0x0f,0xe5,0xe5,0x03,0x03,0x01,0xe5,0x05, 0x01,0xe5,0x06,0xe7,0x06,0xe5,0x01,0x04,0xe6,0x05,0xe7,0x05,0x01,0x07,0x01,0xe5, 0x0b,0x03,0x09,0x05,0x08,0x03,0x01,0xe8,0x16,0x02,0x30,0xe5,0x03,0x01,0xe5,0xe5, 0x0f,0x02,0x3e,0xe5,0x03,0x02,0x08,0xe5,0x19,0x02,0x1c,0xe5,0x0c,0x01,0xe6,0xe5, 0x08,0x05,0x01,0x05,0x01,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x01, 0x05,0xe6,0xe5,0x04,0x01,0x01,0x01,0x03,0x01,0x01,0x05,0x01,0x01,0x02,0x02,0x01, 0x01,0x05,0x01,0xe6,0x04,0x01,0x02,0x06,0x01,0x01,0x05,0xe6,0x06,0x01,0x02,0x04, 
0x01,0x02,0xe5,0x02,0x01,0x07,0xe6,0x06,0x01,0x07,0x01,0x01,0x05,0xe6,0xe5,0x03, 0xe7,0xe5,0x04,0x01,0x07,0x01,0x06,0x03,0xe6,0xe5,0x14,0x03,0xe5,0x07,0xe5,0x04, 0x18,0x08,0x0d,0xe5,0x02,0xe5,0x05,0x05,0x1b,0xe5,0x07,0xe5,0xe5,0x09,0x05,0xe5, 0x05,0x01,0xe5,0x01,0x05,0xe5,0x04,0x02,0xe5,0x04,0x02,0x07,0xe5,0x01,0x02,0x04, 0x01,0x07,0x09,0x0b,0x02,0x02,0xe6,0x0a,0x04,0x04,0xe5,0xe5,0x04,0x02,0x1f,0x06, 0x03,0x0b,0x02,0x01,0x07,0x0f,0xe6,0x07,0x01,0x03,0x01,0x02,0x01,0x02,0x03,0x04, 0x06,0xe5,0x01,0x09,0x04,0xe5,0x03,0x09,0x03,0x06,0x0b,0xe5,0x02,0x02,0x01,0x04, 0x0c,0x04,0x05,0x02,0x02,0xe6,0x08,0x18,0x16,0x03,0xe5,0x0c,0x0c,0x04,0x0b,0x03, 0x01,0x09,0x08,0x06,0x08,0x02,0x10,0xe5,0x06,0x0a,0xe5,0x04,0x02,0x01,0x0a,0x19, 0x03,0x0b,0x05,0x04,0xe7,0x07,0x01,0x02,0xe6,0x09,0x06,0xe6,0x1e,0x05,0x01,0x03, 0x03,0xe6,0x06,0x01,0x02,0x07,0x06,0x01,0x04,0x05,0x07,0x09,0xe8,0x01,0x03,0x01, 0xe5,0x08,0x0d,0x05,0x17,0x04,0x0b,0x0f,0x01,0x0e,0x02,0xe5,0xe5,0x0b,0x19,0x1e, 0x01,0x02,0x06,0x19,0x03,0x01,0x05,0x0b,0x0c,0x08,0x02,0x19,0x06,0x1b,0x03,0x09, 0x07,0x01,0x04,0x0b,0x05,0x10,0x09,0x05,0xe5,0x15,0x0b,0x06,0xe5,0x08,0x01,0x0a, 0x1a,0xe5,0x04,0x03,0x09,0xe5,0x07,0x0c,0x05,0xe5,0x09,0x07,0xe5,0x0c,0x03,0x05, 0x06,0xe5,0x0e,0x05,0x0b,0xe7,0x24,0xe6,0x0f,0x10,0xe5,0x14,0x13,0x04,0x0b,0x04, 0xe5,0x01,0x0f,0x0e,0x01,0x02,0x53,0xe8,0x4a,0x36,0x02,0xe5,0x02,0x09,0x09,0x0d, 0x02,0x0d,0x09,0x0b,0x01,0x05,0x0a,0x0a,0x07,0x08,0x04,0x01,0xe5,0x34,0xe6,0x02, 0x13,0x03,0xe5,0x15,0xe5,0xe5,0x08,0x02,0xe5,0x06,0xe5,0xe6,0x06,0x08,0xe5,0x06, 0x01,0x01,0x04,0x02,0x06,0x03,0xe5,0x06,0xe6,0x17,0xe5,0x01,0x01,0x01,0x01,0x06, 0x06,0xe5,0x01,0x0c,0x01,0xe6,0xe5,0x35,0x0b,0x0c,0x01,0x01,0x16,0x0c,0x01,0xe5, 0x02,0x06,0xe5,0x1e,0x0e,0xe6,0xe5,0x02,0x03,0x01,0x01,0x01,0xe5,0xe5,0x1a,0x0c, 0x01,0x07,0x07,0x01,0xe5,0x01,0x08,0x35,0x04,0x02,0x01,0x0a,0x01,0x06,0x01,0x02, 0x1c,0xe5,0x02,0x13,0x06,0x02,0x09,0xe5,0xe5,0x01,0xe5,0x04,0x09,0x13,0x09,0x0e, 
0x04,0x0e,0xe5,0x01,0xe6,0x08,0x19,0x1b,0x09,0x01,0x11,0x0b,0x0a,0x07,0x0d,0x02, 0x18,0x08,0x04,0x02,0xe6,0x04,0x03,0x05,0x09,0x02,0xe5,0x06,0xe6,0x01,0x11,0x0c, 0x04,0x06,0x0a,0x09,0x09,0x01,0x11,0x05,0xe5,0x03,0x02,0x02,0x11,0xe5,0x03,0x14, 0x07,0x0f,0x05,0x09,0x0f,0x12,0xe5,0x13,0x0e,0x06,0x05,0x05,0x04,0x03,0x02,0x02, 0xe6,0x11,0x01,0x0a,0x16,0x0d,0x01,0xe5,0x01,0xe6,0x03,0x02,0xe6,0xe5,0xe5,0x02, 0xe5,0x01,0xe5,0x03,0xe5,0xe6,0x05,0x09,0x06,0x04,0x05,0xe5,0x01,0x05,0x03,0x05, 0x09,0x03,0x16,0x01,0x11,0xe5,0x23,0xe6,0x02,0x02,0x01,0x1c,0x1d,0xe5,0x03,0xe5, 0x01,0x01,0x01,0x02,0x01,0xe5,0x0f,0xe5,0xe6,0x01,0x06,0x08,0xe5,0xe5,0x07,0xe5, 0xe5,0x05,0x03,0x05,0x03,0x05,0x0d,0x01,0xe5,0x12,0xe5,0x0a,0xe5,0x11,0xe5,0x13, 0x01,0x04,0x01,0x04,0x1c,0x0f,0xe5,0x01,0x01,0x09,0x07,0x09,0x03,0xe5,0x07,0x07, 0x03,0xe5,0x01,0x01,0x01,0x05,0x0d,0x04,0x01,0xe5,0x05,0xe5,0xe5,0x08,0x04,0x04, 0x24,0x01,0x02,0x0a,0x03,0x01,0xe5,0xe5,0x09,0x08,0x05,0x01,0xe5,0x01,0x1e,0x10, 0xe5,0xe5,0xe5,0x06,0x09,0x01,0x07,0x13,0x09,0x05,0xe5,0x01,0x0b,0x09,0x09,0x09, 0x2e,0xe5,0xe5,0x09,0x05,0x0d,0x02,0x09,0x02,0x02,0xe5,0x20,0x1f,0x06,0xe5,0xe5, 0x03,0x01,0xe5,0x14,0x06,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x01,0xe5,0x03, 0xe5,0x01,0x05,0x09,0x04,0x04,0x04,0x01,0x02,0x09,0x04,0x01,0x02,0x09,0x04,0x01, 0xe5,0xe5,0xe5,0xe5,0x02,0xe5,0x02,0x03,0x02,0xe5,0x01,0x03,0x0f,0xe6,0x01,0x1e, 0x03,0x13,0x05,0x03,0x03,0xe6,0xe5,0xe5,0xe5,0x01,0xe5,0x02,0x0f,0x03,0x03,0xe5, 0x04,0x03,0xe5,0x03,0x03,0x02,0x04,0x03,0x05,0x09,0x04,0x03,0x05,0x04,0x0e,0x01, 0x01,0x0f,0x02,0x09,0x06,0x04,0x01,0x01,0x03,0x0a,0x02,0xe5,0xe7,0x1f,0x13,0x01, 0x04,0x04,0x05,0x01,0x01,0x07,0x04,0x09,0x06,0x05,0x01,0x01,0x05,0x01,0x07,0x02, 0xe5,0x06,0x01,0x01,0x07,0x01,0x07,0x06,0x01,0xe5,0xe5,0x0a,0x01,0xe5,0x01,0x01, 0x01,0x01,0xe5,0x06,0xe6,0x01,0x01,0x0a,0x06,0x01,0x02,0x14,0xe5,0x02,0x02,0x1e, 0x13,0x01,0x05,0x01,0x09,0x01,0x07,0x13,0x09,0x04,0x04,0x04,0x06,0x04,0x04,0x01, 
0x02,0x04,0x01,0x02,0x04,0x03,0xe5,0xe5,0x01,0x04,0x09,0x04,0x01,0x02,0x08,0xe5, 0x03,0x01,0xe6,0x07,0x03,0x01,0x1c,0xe5,0xe5,0x01,0x2e,0x02,0x10,0x02,0x06,0xe5, 0x1b,0xe5,0x07,0xe5,0x2e,0x02,0x01,0xe5,0x05,0x02,0x03,0x09,0x09,0x09,0x09,0x02, 0x13,0xe5,0x0c,0x32,0xe5,0x39,0xe5,0x15,0x21,0xe5,0x01,0xe5,0x07,0x05,0xe5,0x07, 0xe5,0x07,0xe5,0x0b,0x09,0x01,0x0d,0xe5,0x01,0x09,0xe6,0xe6,0x0a,0x02,0x09,0x09, 0x09,0x09,0x04,0x01,0x02,0x04,0x01,0x02,0x09,0x09,0x09,0x02,0x03,0x02,0x06,0x02, 0x0b,0x09,0x06,0x02,0x09,0xe5,0x04,0x02,0x05,0x03,0xe6,0x06,0xe6,0x06,0xe5,0x02, 0x01,0x02,0xe5,0x01,0x05,0xe5,0x03,0xe5,0x01,0x04,0x01,0x02,0x01,0x03,0x07,0x01, 0x02,0x02,0x08,0xe5,0x30,0x09,0x24,0x02,0x08,0xe5,0x08,0x0b,0x06,0x02,0x12,0x28, 0x12,0xe5,0x08,0x0e,0x03,0xe5,0xe5,0x09,0x31,0x0a,0x25,0x09,0x0a,0x0b,0x09,0x10, 0x27,0x01,0x15,0x07,0x18,0x0f,0x1b,0x01,0x07,0x01,0x07,0x01,0x18,0x04,0x07,0x01, 0x09,0x02,0x01,0x02,0x01,0x02,0x05,0x02,0x02,0x06,0x02,0x04,0x01,0x02,0x01,0x02, 0x01,0xe5,0xe5,0x05,0x02,0x06,0xe7,0x05,0xe7,0x03,0x01,0xe7,0x06,0x06,0x01,0xe6, 0x04,0x0c,0xe5,0x0b,0x01,0x01,0x0d,0x02,0x1a,0x01,0xe5,0x04,0xe5,0xe6,0x05,0x01, 0xe5,0x08,0x12,0xe5,0x04,0xe5,0xe6,0x05,0x03,0x05,0x03,0x07,0x01,0x01,0x05,0x01, 0x01,0x05,0x03,0x05,0x01,0xe6,0x06,0x01,0x07,0xe6,0x06,0xe6,0x03,0xe5,0xe7,0x08, 0x04,0xe5,0x01,0x08,0x01,0x02,0x02,0x01,0x01,0x05,0xe5,0x02,0x03,0xe7,0x0d,0x04, 0x15,0x07,0x02,0x01,0x03,0x03,0x01,0x0e,0x0e,0x0d,0xe5,0x01,0x01,0x03,0x03,0x01, 0x03,0x07,0x03,0x05,0x03,0x03,0x01,0x03,0x0f,0x03,0x02,0x02,0xe5,0x04,0x02,0xe5, 0x02,0x13,0x02,0x01,0xe5,0x02,0x02,0x05,0x05,0xe5,0x04,0x07,0xe5,0x01,0x0d,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07, 0xe5,0x01,0x05,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x06,0xe5,0xe6,0xe5,0x04,0xe6,0xe5, 0x04,0xe6,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x02,0x04, 0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe7,0x02,0x02,0xe7,0x0e, 
0x1b,0x01,0x0a,0x06,0x01,0x14,0x08,0xe5,0x05,0x01,0x09,0x09,0x1f,0x07,0x01,0xe5, 0x07,0x09,0x09,0xe5,0x05,0x01,0xe5,0x0f,0x01,0x09,0x09,0x0e,0x01,0x01,0x0d,0x1d, 0x01,0x0a,0x06,0x01,0x14,0x08,0x07,0x01,0x07,0x03,0x09,0x07,0x03,0x05,0x03,0x09, 0x05,0x01,0xe5,0xe5,0x05,0x01,0x07,0x02,0x06,0x02,0x04,0x01,0x02,0x09,0x04,0x01, 0x07,0x04,0x06,0x02,0x04,0xe5,0x03,0xe5,0xe8,0x07,0x02,0x02,0x31,0x07,0x15,0xe5, 0x1b,0x1f,0x13,0x09,0x09,0xe5,0x08,0x08,0x07,0x01,0x09,0x09,0x07,0x09,0xe5,0x01, 0x0a,0x0c,0x1d,0x06,0x01,0x0a,0x09,0x08,0xe5,0x08,0x1c,0x09,0x1d,0x0b,0x09,0x1b, 0x01,0xe5,0x05,0x0b,0x0c,0x03,0x01,0x0b,0x15,0xe5,0xe5,0x0d,0x15,0x01,0xe5,0x05, 0xe5,0xe5,0x1b,0xe5,0x05,0x02,0x08,0x09,0x01,0xe5,0x0f,0x01,0xe5,0xe5,0x03,0x01, 0xe5,0x1e,0x04,0x02,0x10,0x02,0x04,0x08,0x03,0x07,0x01,0x11,0x01,0xe6,0x04,0x01, 0xe5,0x02,0x02,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0xe5,0x05,0x01, 0x04,0x02,0x01,0x01,0x05,0x01,0x01,0xe5,0x03,0xe6,0xe5,0x04,0x01,0x04,0x01,0xe5, 0xe7,0x02,0x03,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x04,0x01,0xe5,0xe6,0xe5,0xe5, 0x01,0x01,0x07,0x01,0x07,0x01,0x06,0xe5,0xe5,0x03,0x02,0x01,0x07,0x01,0x03,0x03, 0x01,0x07,0x01,0x02,0x07,0xe5,0x01,0x4a,0xe6,0x0b,0x05,0xe5,0xe5,0x05,0xe5,0x0b, 0x03,0x05,0x0b,0x05,0xe5,0x01,0x05,0xe5,0x01,0x0f,0xe5,0x01,0x0b,0x03,0x09,0x07, 0x13,0x03,0x08,0x0a,0x03,0x02,0xe5,0x0a,0x09,0x09,0x1d,0x08,0x01,0x03,0x04,0x09, 0x04,0x06,0x0e,0x02,0x06,0x02,0x08,0x02,0xe5,0x01,0x02,0x03,0x05,0x07,0x01,0x03, 0x05,0xe5,0x07,0x09,0x0c,0x09,0x09,0x06,0x02,0xe5,0x05,0x08,0xe5,0xe7,0xe5,0x01, 0x03,0x01,0x08,0x37,0x03,0x07,0x05,0x01,0x01,0x06,0x01,0x01,0x0b,0x01,0x15,0x01, 0x04,0x02,0x01,0x0c,0x06,0x13,0x09,0x05,0x06,0xe5,0x01,0x27,0xe7,0x0a,0x10,0x08, 0xe5,0x06,0x08,0x07,0x01,0x07,0x02,0x0b,0x09,0x0a,0x03,0x01,0x05,0x01,0x01,0x02, 0x09,0x02,0x03,0x01,0x01,0x05,0x01,0xe5,0x0c,0x01,0x04,0xe5,0x06,0x1a,0x01,0x07, 0xe6,0x06,0xe5,0x01,0xe5,0x05,0x0e,0x03,0x35,0x09,0x02,0x09,0x03,0x01,0x09,0x07, 
0x07,0x06,0x09,0x07,0x08,0x09,0x04,0x18,0xe5,0x07,0x19,0x05,0xe5,0x06,0x11,0x0c, 0x01,0x13,0x09,0x0f,0x02,0x06,0x01,0x02,0x04,0x09,0x01,0x1f,0x03,0x05,0x03,0xe6, 0x02,0x03,0x03,0x03,0x09,0x03,0x04,0x0a,0x06,0x09,0x01,0x0a,0x05,0xe5,0x05,0x05, 0x09,0x0d,0x03,0x0d,0x01,0xe5,0x08,0x1a,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6, 0x06,0x02,0x03,0x02,0x01,0x07,0x02,0x04,0x01,0x01,0xe5,0x08,0x03,0x01,0xe5,0xe6, 0x01,0x01,0x02,0xe5,0x01,0x04,0x01,0x09,0x09,0x09,0x09,0x09,0xe6,0x36,0x01,0xe5, 0x02,0x11,0x1c,0x11,0x10,0x0b,0x07,0x04,0x1a,0x02,0x04,0x03,0x04,0xe5,0x08,0x03, 0xe5,0x03,0x04,0x04,0x03,0x05,0x0e,0xe5,0x0a,0x06,0x1b,0x05,0x01,0x0d,0xe5,0x01, 0x05,0xe5,0x07,0xe5,0x08,0x03,0x1f,0x0c,0x09,0x01,0x0a,0x0b,0x04,0x0a,0x15,0x04, 0x0e,0x07,0x09,0xe6,0x12,0x04,0x03,0x0f,0x0a,0xe5,0xe6,0x02,0x06,0x02,0x01,0x07, 0x01,0x07,0x01,0x08,0x0c,0x09,0x18,0xe5,0x19,0x07,0x04,0x20,0x13,0x0f,0x02,0xe5, 0x14,0xe5,0x04,0x16,0x04,0x01,0xe5,0x16,0x01,0x07,0x01,0x02,0x06,0x06,0x02,0x06, 0x02,0x09,0x13,0x06,0x0c,0x07,0x01,0x06,0x02,0x01,0x06,0x13,0x09,0x25,0x02,0x01, 0x03,0xe5,0xe5,0x05,0x09,0x15,0x02,0xe5,0x06,0x10,0x09,0x07,0xe6,0x06,0xe6,0x06, 0xe6,0x06,0xe6,0x05,0x02,0x02,0x06,0x02,0x06,0x02,0x03,0x02,0xe5,0xe5,0x01,0xe5, 0x01,0xe5,0x03,0x05,0xe5,0xe5,0x01,0x01,0x01,0xe5,0xe5,0x01,0xe6,0xe8,0x01,0x01, 0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x01,0xe6,0xe5,0x01,0x04,0x01,0x13,0x2b,0xe6, 0x01,0x02,0x02,0x0d,0x04,0x09,0x21,0x08,0x07,0x02,0x04,0x01,0x09,0x01,0x05,0x01, 0x08,0x09,0x02,0x0b,0x27,0x09,0x10,0x0e,0x0c,0x04,0x0c,0xe6,0xe7,0x06,0xe5,0x05, 0x05,0x03,0x05,0x01,0x01,0x05,0x06,0xe5,0xe5,0xe5,0xe5,0x01,0xe5,0xe5,0x02,0x02, 0x01,0xe5,0x02,0x02,0xe5,0xe5,0x02,0x05,0x01,0x01,0x05,0x03,0x05,0x01,0x01,0x02, 0xe5,0xe5,0x02,0x02,0xe5,0x02,0x01,0x01,0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x02, 0xe5,0xe6,0xe5,0x02,0x05,0x03,0x01,0xe5,0x01,0x01,0x01,0x02,0xe5,0x04,0x05,0x01, 0x01,0x06,0x08,0x20,0x01,0x03,0xe6,0x01,0x01,0x05,0xe5,0x03,0x03,0x01,0xe5,0x01, 
0x03,0x01,0xe5,0x01,0x08,0x04,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x01,0xe5,0x03, 0x04,0xe5,0x02,0x01,0x07,0xe5,0x02,0xe5,0x02,0x09,0x01,0x02,0x01,0x02,0x04,0x01, 0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0xe5,0x07,0x01,0x02,0x01, 0x02,0x08,0xe5,0xe5,0x04,0x02,0x06,0xe5,0x04,0x01,0xe5,0x20,0x02,0x03,0x04,0x12, 0x09,0x06,0x04,0x04,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x01,0xe5,0xe5,0x03,0x04, 0x04,0x04,0x04,0x01,0xe5,0xe5,0x03,0x02,0x01,0x04,0x01,0xe5,0xe5,0x05,0x01,0xe5, 0xe5,0x03,0x01,0x02,0x04,0x01,0x02,0x04,0x04,0x04,0x04,0x04,0x01,0x02,0x02,0x04, 0x04,0x0b,0x07,0x06,0xe5,0xe5,0xe5,0x17,0xe8,0x01,0x14,0x09,0x06,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0xe5,0xe5,0x08, 0x09,0x05,0xe5,0x01,0x01,0x12,0x02,0x01,0xe5,0x02,0x12,0x04,0x04,0x04,0x04,0x06, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02, 0x02,0x01,0x01,0x04,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02, 0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0xe5,0x01,0xe5,0x03,0x09,0x06,0x04, 0x01,0xe5,0x03,0x09,0x14,0x04,0x02,0x14,0x03,0x05,0x03,0x05,0x03,0x02,0x01,0xe5, 0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5, 0x02,0x02,0x01,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0x01, 0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x04,0xe5, 0x02,0x01,0x02,0xe5,0x02,0x02,0x01,0xe5,0x02,0x0a,0x04,0x03,0x01,0x02,0xe5,0x02, 0x09,0x13,0x01,0x01,0xe8,0x10,0xe6,0x01,0x04,0xe6,0x01,0x03,0xe7,0x01,0x03,0x02, 0x02,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x09,0x03,0x05,0x03,0x05,0x0b,0x04, 0x04,0x09,0x03,0x05,0x07,0x01,0x09,0x0b,0x02,0x01,0x02,0x01,0x09,0x07,0x03,0x01, 
0xe5,0x01,0x01,0x01,0xe5,0xe5,0xe5,0x0f,0xe6,0x01,0x11,0xe5,0xe5,0xe6,0x02,0xe5, 0xe5,0xe6,0x03,0x01,0x01,0x02,0x02,0x01,0xe6,0x01,0x02,0x01,0x01,0x02,0x02,0x01, 0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x01,0xe5,0xe5,0xe5,0x01,0x02, 0x06,0x02,0xe6,0x03,0x01,0xe7,0x05,0x01,0xe7,0x03,0x01,0xe7,0x03,0x02,0xe6,0x03, 0x01,0xe5,0x05,0x01,0xe7,0x03,0x01,0xe5,0x05,0x01,0xe5,0xe6,0xe5,0xe5,0x01,0x04, 0x09,0x04,0x04,0x01,0x02,0x14,0xe7,0xe5,0x0f,0x09,0x09,0x06,0x02,0x02,0x03,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x01,0x07,0xe5,0x0e,0x09,0x17, 0x01,0xe6,0x0f,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0xe6,0x04, 0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x06,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x18,0xe5,0x07,0xe5,0x19,0xe5,0x08, 0x01,0x02,0xe5,0x07,0xe5,0xe5,0x05,0xe6,0x01,0x01,0xe5,0xe8,0x01,0x01,0x01,0xe6, 0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04, 0x01,0xe6,0x04,0x01,0xe5,0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x01,0xe5,0x04,0x02,0x09,0x09,0xe7, 0x05,0xe6,0x06,0x0d,0xe5,0x02,0x02,0x09,0x1d,0xe5,0x08,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0x07,0xe5,0x08,0x2c,0x03,0x01,0xe5, 0x08,0x07,0x09,0x0d,0x01,0x09,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0xe5, 0x04,0x02,0x06,0x02,0x06,0x02,0xe5,0x05,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02, 0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x02,0x08,0x31,0x03, 0x0c,0x02,0xe7,0x05,0xe7,0x05,0xe7,0x05,0xe5,0xe5,0x03,0x02,0xe6,0x03,0x02,0xe6, 0x02,0xe5,0x01,0x01,0x04,0x02,0xe6,0x03,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0xe6, 
0x03,0x02,0xe6,0x05,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x02, 0xe5,0x01,0xe6,0x03,0x02,0x01,0x04,0x02,0xe6,0x05,0x02,0x06,0x0a,0xe6,0x06,0xe6, 0x14,0x01,0xe6,0x0c,0x02,0xe5,0x06,0xe6,0x06,0xe6,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x01,0x02, 0x02,0xe5,0x03,0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe5,0x03,0x02,0x02,0xe5,0x03,0xe5, 0x01,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5, 0x04,0x02,0xe5,0x04,0x03,0x05,0x01,0xe5,0x02,0x06,0x09,0x17,0x02,0xe5,0x08,0x04, 0x01,0x07,0x01,0x07,0x10,0x02,0x09,0x09,0x09,0x09,0x06,0x02,0x09,0xe5,0x02,0x04, 0xe5,0x06,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0x04,0x04,0xe5,0x07,0x04,0x04, 0x09,0x03,0xe5,0x03,0x0a,0x02,0xe5,0x04,0x17,0x01,0x01,0x0d,0x05,0xe7,0x01,0x03, 0xe7,0x01,0xe5,0x01,0xe7,0x05,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x01, 0xe5,0xe5,0x01,0x03,0xe7,0x01,0xe5,0x01,0xe7,0x01,0x03,0xe7,0x01,0x04,0xe6,0x01, 0x03,0xe7,0x01,0x03,0xe7,0x03,0x03,0xe7,0x01,0x03,0x01,0xe5,0x01,0x04,0xe6,0x01, 0xe5,0x01,0xe7,0x01,0x04,0xe6,0x01,0x03,0xe7,0x01,0xe5,0x03,0xe5,0x01,0x03,0x01, 0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x05,0xe7,0x07,0xe5,0x07,0x01,0xe5,0x0e,0x06,0x02, 0x06,0x02,0xe6,0x03,0xe5,0xe6,0x07,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6, 0x06,0xe5,0x07,0xe6,0x06,0xe6,0x08,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe5,0x07,0xe6, 0x06,0xe5,0x07,0xe6,0x06,0x07,0x01,0xe5,0x0e,0x03,0x05,0x0c,0x05,0xe7,0x0c,0x04, 0xe5,0xe6,0x01,0x02,0xe5,0xe6,0x01,0x02,0xe5,0xe7,0xe6,0xe6,0x01,0x02,0x02,0xe5, 0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x01, 0xe6,0x01,0xe5,0xe5,0x02,0x02,0x02,0xe5,0x01,0x02,0x02,0x02,0xe5,0x03,0x02,0x01, 0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x03, 0x02,0xe5,0xe5,0xe5,0xe5,0x01,0x02,0x04,0x04,0x03,0xe5,0xe5,0x0b,0x02,0xe5,0x04, 0xe5,0xe6,0x10,0x01,0xe6,0x0e,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0xe5,0x05, 
0xe7,0x05,0xe7,0x05,0xe7,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x04,0x01,0x01,0x06,0xe5, 0xe5,0x05,0xe5,0xe5,0x04,0xe6,0x01,0xe5,0x04,0x01,0x01,0x07,0x01,0x07,0x01,0x02, 0x02,0x01,0x01,0x06,0xe5,0xe5,0x04,0x01,0x01,0x07,0x03,0x03,0x01,0x01,0x01,0xe5, 0x0d,0x04,0x03,0xe5,0x11,0xe5,0xe6,0x0c,0x05,0x05,0x03,0x05,0x03,0x05,0x03,0x03, 0x05,0x03,0x05,0x03,0x05,0x01,0xe6,0x04,0x01,0xe6,0x04,0xe5,0xe6,0x04,0x01,0xe6, 0x04,0x01,0xe6,0x04,0x02,0x02,0x05,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0x01, 0xe6,0x04,0xe8,0x04,0x01,0x03,0x03,0xe8,0xe6,0x0b,0xe5,0x07,0x14,0xe8,0x18,0xe5, 0x05,0xe5,0xe5,0xe5,0x01,0x3b,0x09,0x2f,0x23,0x09,0xe5,0xe5,0x0c,0x02,0x02,0x25, 0x01,0x0f,0x01,0x07,0x01,0x03,0x03,0x01,0x07,0x01,0x07,0x01,0xe5,0x05,0x01,0x07, 0x01,0x07,0x01,0xe5,0x05,0xe6,0x06,0x01,0x07,0x01,0x07,0x01,0x09,0x01,0x07,0x01, 0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x04,0xe5,0xe5,0xe5,0x04,0x01,0x02,0xe5,0x02, 0x01,0x01,0x01,0x03,0x01,0x01,0x05,0x01,0xe6,0x02,0x01,0x01,0x07,0x01,0x01,0x05, 0x01,0x0b,0x01,0xe5,0x01,0xe5,0x20,0x04,0x03,0x01,0x07,0x02,0x06,0x02,0x05,0xe5, 0x01,0x06,0x02,0x05,0xe5,0x07,0xe5,0x01,0x06,0x0f,0x01,0x10,0x09,0x05,0xe5,0x01, 0x01,0xe5,0x01,0x03,0xe5,0x03,0xe5,0x07,0x03,0x07,0x01,0x05,0x05,0x0d,0x15,0x04, 0xe7,0x0a,0x09,0x04,0x04,0x0b,0x07,0x01,0x04,0x01,0xe5,0xe5,0x04,0xe5,0xe5,0xe6, 0x03,0xe6,0x04,0x01,0xe5,0xe5,0x06,0xe5,0x09,0x07,0x0f,0x0f,0x09,0x01,0x11,0xe5, 0x04,0x06,0xe5,0xe5,0x01,0x0e,0xe5,0x0c,0x1a,0xe5,0x02,0x0c,0x04,0x1f,0x0c,0x07, 0x01,0x07,0x07,0x06,0x02,0x01,0x04,0x03,0x02,0x06,0x02,0x05,0x03,0x01,0x09,0x05, 0x01,0x0e,0x02,0x06,0x02,0x03,0x01,0x02,0x01,0xe5,0x03,0x03,0x0e,0x14,0x1a,0xe6, 0x08,0x08,0x22,0x06,0x02,0x09,0x09,0x07,0x01,0x01,0x07,0x09,0xe5,0x05,0x01,0xe5, 0x05,0x01,0x01,0xe5,0x05,0x01,0x01,0x05,0x01,0xe5,0x11,0xe5,0xe5,0x07,0x1b,0x01, 0x01,0x25,0xe5,0x02,0xe5,0x01,0x11,0x18,0x0c,0x0a,0x09,0x03,0xe5,0x03,0x09,0x02, 0xe6,0x02,0x0d,0x09,0x03,0xe5,0x0a,0x01,0x0e,0xe5,0x07,0x09,0x0a,0x26,0x09,0x0f, 
0xe6,0x01,0x01,0x08,0x11,0x08,0x03,0x05,0x03,0xe6,0x02,0x03,0x04,0xe6,0x06,0xe5, 0x02,0x04,0xe5,0x02,0x03,0x01,0x01,0x0a,0x06,0x0d,0x03,0x03,0x03,0x09,0x09,0xe5, 0x07,0x04,0xe6,0x01,0x03,0x01,0xe5,0xe5,0x0f,0x06,0x24,0x09,0xe5,0x02,0x02,0x0e, 0x0f,0x09,0x02,0x06,0x06,0x02,0x01,0x05,0x01,0x09,0x02,0x06,0x04,0x02,0x0b,0x15, 0x0d,0x0f,0x09,0x09,0x05,0x03,0x04,0x04,0x2e,0x08,0xe6,0x01,0x03,0x27,0x02,0x01, 0x02,0xe5,0x0d,0xe5,0x05,0x07,0x03,0x02,0x13,0x16,0x1a,0x04,0x03,0xe5,0x03,0x15, 0x04,0x1b,0x1b,0x03,0xe5,0x01,0x13,0x0e,0x02,0x05,0xe5,0x01,0x05,0xe6,0x05,0x01, 0x11,0xe6,0x06,0x04,0x01,0x02,0x09,0xe5,0x0f,0x02,0xe5,0x02,0x0b,0x05,0x07,0x03, 0x03,0x01,0xe5,0x04,0x03,0x06,0x0c,0xe5,0xe5,0x02,0x0c,0xe5,0x04,0x01,0x0b,0x04, 0x09,0x01,0xe5,0xe5,0x02,0x1f,0x07,0x02,0x04,0x04,0x02,0x04,0x12,0xe5,0x0e,0x0c, 0x05,0xe5,0xe5,0x04,0xe5,0x02,0xe5,0x08,0x06,0x05,0x03,0x0d,0x09,0x0f,0x03,0xe5, 0x0f,0x03,0x10,0x02,0x0a,0x03,0xe5,0x01,0x0b,0x06,0x10,0x02,0x0b,0x04,0x0d,0x06, 0x06,0x01,0x02,0x01,0x01,0x02,0x03,0x02,0x02,0x03,0x05,0x15,0x0b,0xe5,0x01,0x08, 0x01,0x02,0x06,0x03,0x03,0x04,0x0a,0x01,0x01,0x04,0x06,0x0b,0x07,0x0c,0x0e,0xe9, 0xe5,0x1f,0x13,0x22,0x04,0x08,0xe5,0xe5,0x06,0x03,0x09,0x02,0x04,0x03,0x0c,0x04, 0x01,0x04,0xe5,0x02,0x08,0xe5,0x04,0x02,0x10,0xe5,0x0d,0x16,0xe5,0xe5,0x0b,0x03, 0x1b,0x08,0x13,0x21,0x05,0x07,0x0b,0x15,0x1c,0x0c,0x07,0x13,0x0c,0x12,0xe5,0x03, 0x03,0x03,0x09,0xe8,0x20,0x01,0x01,0xe5,0x04,0xe5,0x08,0x01,0xe5,0x04,0xe5,0x01, 0x01,0x03,0xe5,0x08,0x09,0xe5,0x06,0xe5,0x08,0x01,0xe5,0x01,0x02,0x03,0xe5,0x06, 0x05,0x02,0xe5,0x01,0x01,0x04,0x0c,0xe5,0x03,0xe5,0x08,0xe5,0x06,0x0a,0x02,0xe5, 0x02,0xe5,0xe5,0x06,0xe5,0x11,0xe6,0x08,0x04,0xe5,0x01,0x01,0x1d,0x02,0xe5,0x0e, 0x02,0xe5,0xe5,0xe7,0x04,0xe5,0x0d,0x07,0x02,0x05,0xe5,0x01,0x06,0x02,0x02,0x01, 0xe5,0x05,0xe5,0x03,0x05,0x01,0xe5,0x02,0x01,0xe5,0x01,0xe5,0xe5,0x05,0xe5,0x04, 0xe5,0xe6,0x03,0xe5,0x02,0x02,0x10,0x02,0xe5,0xe6,0x0d,0x10,0xe5,0x01,0x07,0x01, 
0x02,0x02,0xe5,0x03,0x04,0xe5,0x01,0x01,0x13,0x03,0xe5,0x01,0x01,0x01,0x02,0xe5, 0x02,0x09,0x01,0x01,0xe5,0x01,0x01,0x01,0x01,0xe5,0x01,0x02,0x03,0xe5,0x02,0x02, 0x04,0x01,0x04,0x01,0x02,0x02,0x04,0x01,0x04,0x01,0x04,0x01,0xe5,0x03,0x01,0x09, 0x04,0xe5,0x02,0x09,0x01,0x01,0xe5,0x03,0x04,0x01,0xe5,0xe5,0x03,0xe5,0x02,0x03, 0xe5,0x01,0x01,0x1d,0x03,0xe5,0x03,0x05,0xe7,0x01,0x06,0xe5,0x01,0x01,0x0d,0xe5, 0x07,0xe5,0x01,0x01,0x04,0xe5,0x0a,0x05,0xe5,0x01,0x01,0x07,0xe5,0x01,0x02,0x0c, 0x01,0x11,0x01,0x04,0xe5,0x0c,0x01,0x07,0x05,0xe5,0x01,0x09,0x13,0x01,0x04,0x08, 0xe5,0x01,0x01,0x1b,0x05,0xe5,0x05,0x02,0x02,0xe5,0x25,0x01,0x01,0xe5,0xe5,0x0b, 0x01,0x01,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x08,0xe5,0xe5,0x01,0x06,0xe5,0x01,0x02, 0x04,0x01,0x05,0xe5,0x01,0xe5,0x0f,0x01,0x01,0xe5,0x0a,0x02,0xe5,0x01,0x05,0x04, 0x01,0x04,0x01,0x05,0xe5,0x01,0xe5,0xe5,0x04,0x06,0x01,0x01,0x20,0x04,0x01,0x06, 0x01,0x01,0xe5,0x01,0x0e,0x13,0x04,0xe5,0xe5,0xe5,0x08,0x04,0xe5,0xe5,0xe5,0x01, 0x01,0xe5,0xe5,0xe5,0x06,0x0b,0x05,0x03,0x02,0x01,0x04,0x02,0x01,0xe5,0x02,0x0b, 0x04,0xe5,0x02,0x05,0x03,0x0a,0x01,0x01,0xe5,0xe5,0xe5,0x01,0x01,0x04,0x05,0x01, 0x01,0x09,0x04,0x04,0x19,0x03,0x09,0x03,0xe5,0xe6,0x0b,0x01,0x13,0x02,0x01,0xe5, 0xe5,0xe5,0x01,0x06,0x02,0x02,0x01,0x01,0x07,0x01,0x07,0x01,0x09,0x07,0x01,0x02, 0x06,0x04,0x02,0x01,0x02,0xe6,0x05,0x02,0x02,0x01,0x01,0x09,0x01,0x01,0xe5,0x01, 0x01,0x02,0x04,0x01,0x09,0x07,0x01,0x04,0x04,0x02,0x02,0x01,0x01,0x04,0x01,0x16, 0x0d,0x01,0x01,0xe5,0x0c,0x01,0x13,0x01,0xe5,0xe8,0xe5,0x08,0x01,0xe5,0x03,0x01, 0x07,0x01,0x07,0x01,0x01,0xe5,0x05,0x01,0xe5,0xe5,0xe6,0xe5,0x03,0x01,0x02,0x01, 0xe5,0xe6,0xe5,0xe5,0x0a,0x01,0xe5,0xe5,0xe6,0xe5,0x06,0x04,0x01,0x01,0xe5,0xe5, 0x01,0x01,0x02,0x01,0x09,0x01,0xe5,0x03,0x01,0x03,0xe6,0x02,0x01,0xe7,0x01,0x01, 0x04,0x16,0x0d,0x04,0x01,0x24,0x13,0x02,0x24,0x13,0x02,0x01,0xe5,0x0e,0x02,0x02, 0x0d,0x0c,0xe6,0x06,0x02,0x03,0x02,0x09,0x02,0x03,0x02,0xe5,0x09,0x23,0x01,0x01, 
0xe7,0x23,0xe5,0x01,0x0f,0xe5,0x01,0x01,0x17,0xe5,0x01,0xe5,0x03,0xe5,0x11,0xe5, 0x01,0x11,0xe5,0x01,0x0f,0xe5,0x15,0x05,0xe5,0x03,0x07,0x05,0xe5,0x33,0xe6,0xe6, 0x08,0x01,0x02,0x09,0x06,0x02,0xe5,0x03,0x03,0x06,0x02,0xe5,0x03,0x03,0x06,0x02, 0x09,0xe5,0x07,0xe5,0x07,0x09,0xe5,0x03,0x03,0x06,0x04,0xe5,0x02,0xe6,0x01,0x09, 0xe6,0x06,0x09,0x05,0xe5,0x01,0xe5,0x07,0x05,0x03,0xe5,0x07,0x09,0x09,0x09,0x0d, 0xe5,0xe5,0xe5,0x01,0x09,0x12,0xe5,0x08,0x08,0xe5,0x08,0x06,0x3d,0xe5,0x0a,0x13, 0x12,0xe5,0x49,0x02,0xe5,0xe6,0x09,0x14,0x05,0x02,0x0a,0x05,0x02,0x24,0x13,0x0d, 0x02,0x04,0x03,0x13,0x13,0x05,0x13,0x34,0xe8,0xe5,0x0c,0x14,0xe6,0x03,0x01,0x0a, 0xe6,0x03,0x01,0x07,0x01,0x09,0xe7,0x06,0xe6,0x03,0x01,0x0a,0xe6,0x02,0xe5,0xe5, 0x0b,0xe6,0x03,0x01,0x07,0x01,0xe7,0x05,0x02,0x03,0xe5,0xe5,0x09,0xe6,0x03,0x01, 0x0a,0xe6,0x23,0x0e,0xe8,0x0c,0x02,0x13,0xe5,0x04,0x01,0xe5,0x04,0x03,0xe5,0x01, 0x02,0x01,0xe5,0x05,0x01,0xe5,0x07,0xe6,0xe6,0x04,0xe5,0x04,0x01,0xe5,0x02,0x05, 0xe5,0x04,0x01,0xe5,0x0a,0xe5,0x04,0x01,0xe5,0x02,0x02,0x01,0xe6,0x01,0x04,0x01, 0x05,0x01,0xe5,0x04,0x03,0xe5,0x01,0x02,0x01,0xe5,0x02,0x01,0x03,0xe5,0x24,0xe5, 0x0c,0x02,0xe5,0x0e,0x0e,0x06,0x05,0x01,0x04,0x05,0x06,0x01,0x07,0x01,0x0b,0x08, 0x06,0x01,0x0b,0x01,0xe5,0x01,0x01,0x04,0x03,0x03,0x06,0x01,0x07,0x01,0x04,0x04, 0x07,0x01,0x04,0x0c,0x01,0x0a,0x16,0x0f,0x0e,0x02,0xe5,0x0c,0x07,0xe5,0x07,0xe5, 0x07,0xe6,0xe5,0x02,0xe7,0x05,0x01,0xe6,0xe5,0x02,0xe8,0xe5,0x02,0xe7,0x05,0x01, 0xe5,0x07,0xe6,0xe5,0x02,0xe7,0x05,0x01,0xe6,0xe5,0x03,0xe6,0x09,0xe6,0xe5,0x02, 0xe8,0xe6,0x01,0xe8,0x04,0xe8,0xe5,0x02,0xe7,0x02,0x04,0xe6,0xe5,0x02,0x01,0xe5, 0x06,0xe6,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x06,0x01,0x01,0x0f,0x13, 0xe5,0x07,0xe5,0x01,0xe5,0xe5,0x01,0xe6,0x03,0x02,0xe5,0x01,0xe5,0xe5,0x01,0xe5, 0x01,0xe5,0xe5,0x01,0x06,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe5,0x04,0x02,0x04, 0x01,0x04,0xe6,0x03,0x02,0x04,0x01,0x03,0xe5,0x01,0x01,0x02,0x06,0xe5,0xe6,0x02, 
0x01,0x02,0xe5,0x04,0xe5,0xe6,0x01,0x05,0xe6,0x03,0x20,0xe5,0x0d,0x01,0xe5,0x0c, 0x13,0x01,0xe5,0xe5,0x03,0x02,0x04,0xe6,0xe5,0x01,0x02,0xe6,0x01,0x04,0xe6,0x06, 0xe5,0x01,0x02,0x02,0xe6,0xe5,0x01,0x03,0x02,0x03,0x01,0xe6,0xe6,0xe5,0x01,0xe6, 0x07,0x02,0x01,0x02,0x02,0xe6,0x06,0xe5,0x04,0x02,0xe5,0x01,0x02,0x02,0xe6,0xe6, 0x03,0xe6,0xe6,0xe5,0x01,0xe6,0xe5,0x04,0x01,0x01,0x02,0x02,0xe5,0x1f,0xe5,0x0c, 0xe6,0xe5,0x0e,0x1d,0x05,0x07,0x01,0x03,0x05,0x03,0xe5,0x03,0x07,0x01,0x0d,0x04, 0xe5,0x06,0x01,0x03,0x08,0x08,0x03,0x03,0x01,0x07,0x01,0x03,0x03,0xe6,0x06,0x01, 0x09,0x04,0x04,0x08,0xe5,0x20,0x0e,0xe5,0x01,0x0d,0x13,0x09,0x02,0x03,0x02,0x05, 0xe5,0x01,0x02,0x03,0x02,0x02,0x03,0x08,0xe5,0x01,0x06,0x04,0xe5,0x01,0xe5,0x01, 0x05,0xe5,0x01,0x02,0x08,0x05,0xe5,0x01,0x02,0x0c,0xe5,0x01,0x05,0xe5,0x04,0x02, 0x03,0x05,0xe5,0x04,0x06,0x05,0xe5,0x04,0x1c,0xe5,0x0c,0xe6,0xe5,0x1e,0x02,0x01, 0x11,0x01,0x0a,0x11,0xe5,0x06,0x01,0x11,0x01,0x06,0x02,0x08,0xe5,0xe5,0xe5,0x03, 0x0e,0x09,0x0e,0x02,0x10,0x03,0x13,0xe5,0x1f,0xe5,0xe5,0xe6,0x0d,0x01,0x01,0x05, 0x01,0x07,0x01,0xe6,0x04,0xe8,0x04,0xe8,0x03,0xe7,0x06,0xe7,0xe5,0x03,0x01,0x01, 0x05,0x01,0x02,0x04,0xe6,0xe5,0x04,0x01,0x01,0x05,0x01,0xe5,0xe5,0x01,0x03,0x01, 0x02,0x01,0x01,0xe5,0xe5,0x06,0x01,0x07,0x01,0x01,0x05,0x01,0xe6,0x01,0x02,0x01, 0x07,0x01,0x01,0x01,0x03,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05, 0x01,0x0b,0x01,0xe5,0x23,0x01,0x11,0x12,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0x09, 0xe5,0x05,0xe5,0x08,0x0c,0xe5,0x01,0x0b,0x09,0x07,0x03,0x01,0xe5,0x04,0x02,0xe5, 0x07,0xe5,0x01,0x07,0x14,0x08,0x0c,0x03,0xe5,0x07,0x02,0x09,0x09,0xe5,0x11,0xe5, 0x07,0x09,0xe5,0xe5,0x01,0x05,0x08,0x01,0x03,0x01,0x05,0x05,0x01,0xe5,0x0b,0x01, 0x0b,0x01,0x01,0x01,0x01,0x01,0x09,0x0a,0x09,0x02,0x07,0x01,0xe6,0x03,0x02,0x0d, 0x02,0x09,0x09,0x04,0x04,0x04,0x01,0x01,0xe7,0xe6,0x2b,0x14,0x0d,0x02,0x0c,0x02, 0x02,0x06,0x08,0x01,0x06,0x02,0x11,0x07,0x03,0x09,0x0b,0xe5,0x03,0x07,0xe5,0x02, 
0x01,0x3a,0x01,0x09,0x12,0x04,0xe6,0x01,0x04,0x01,0xe5,0x02,0xe5,0xe7,0x01,0x01, 0x02,0x01,0xe5,0x05,0x01,0x02,0x01,0x07,0x01,0x02,0xe6,0x03,0xe5,0xe5,0xe6,0x05, 0xe6,0x06,0x01,0x04,0x02,0x01,0xe7,0x04,0xe5,0xe5,0x09,0x05,0x0a,0x01,0x01,0x08, 0x03,0x01,0x0a,0x01,0x06,0x21,0xe5,0xe5,0x08,0x01,0xe7,0x0a,0x05,0x0f,0x05,0x01, 0x01,0x02,0x05,0xe5,0x06,0x04,0x05,0x02,0x05,0x06,0x03,0xe5,0x04,0x02,0xe5,0x01, 0x06,0x07,0x01,0x02,0x06,0x02,0x08,0x02,0x0a,0x0c,0x01,0x02,0x05,0x12,0x04,0x03, 0xe5,0x07,0x0b,0x09,0x06,0x0b,0xe5,0xe6,0x08,0x0f,0x01,0x02,0x04,0x0d,0x04,0xe5, 0x02,0x03,0xe6,0x02,0xe5,0x01,0xe6,0x02,0x09,0x04,0xe5,0x0c,0x05,0xe5,0x07,0x03, 0x03,0x03,0xe5,0x07,0xe5,0x0b,0x06,0x01,0x04,0xe6,0x01,0x07,0x0c,0x03,0xe6,0x08, 0x0d,0x04,0xe5,0x0b,0x08,0x01,0xe5,0x01,0xe5,0xe5,0x06,0xe5,0x06,0x05,0x08,0x04, 0x07,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6,0x02,0x01,0x09,0x01,0x0a,0x13, 0x07,0x0d,0x09,0x0c,0xe5,0x09,0x06,0x09,0x04,0x01,0x07,0x0c,0x04,0x01,0x09,0x10, 0x01,0xe6,0x07,0x09,0x09,0x09,0x09,0x0b,0x09,0x0c,0x06,0x0b,0x07,0x05,0x09,0x06, 0xe5,0x02,0x01,0x07,0x01,0x0b,0x1d,0x06,0xe5,0xe5,0x0a,0x05,0xe6,0x06,0x02,0x01, 0x09,0x06,0x05,0x03,0xe6,0xe5,0x08,0x07,0x01,0x09,0x03,0xe6,0xe5,0x0f,0x07,0x06, 0x02,0x05,0x04,0xe5,0x01,0xe5,0x02,0xe6,0x01,0x02,0x0b,0x04,0x01,0x02,0x09,0xe5, 0x04,0x04,0x06,0x02,0xe5,0xe5,0x01,0xe5,0x01,0xe6,0x0d,0x16,0x05,0x04,0xe5,0x07, 0xe5,0x04,0x0c,0xe5,0x0e,0xe8,0x02,0x10,0x0f,0x02,0x01,0xe5,0x07,0xe5,0x01,0x02, 0xe5,0x13,0x02,0x02,0x06,0x02,0x02,0x1e,0x02,0x07,0xe5,0xe5,0x13,0x01,0x10,0x0f, 0x09,0x07,0x08,0x11,0x02,0x09,0x03,0xe5,0xe6,0x16,0x01,0x04,0x04,0xe5,0x02,0x09, 0x04,0x06,0x06,0x01,0x0f,0x04,0x02,0x0e,0x15,0x0e,0x04,0x01,0x09,0x06,0x02,0xe5, 0xe5,0x03,0x04,0x04,0x09,0x02,0x01,0x03,0xe5,0x02,0xe5,0x03,0x03,0x07,0x02,0x02, 0x03,0x02,0xe5,0x03,0x08,0x01,0xe7,0x02,0x03,0x0e,0x01,0x09,0x02,0x07,0x07,0x0d, 0x07,0x02,0x02,0xe5,0xe6,0x03,0x03,0x02,0x03,0x0f,0x0b,0x09,0x06,0xe5,0x1c,0x08, 
0x06,0x06,0x0f,0x05,0x05,0x03,0x02,0x02,0x07,0x05,0x05,0x02,0x03,0x16,0x0d,0x15, 0x12,0x09,0x05,0x01,0x2d,0x02,0xe6,0x0c,0x1b,0x1d,0xe6,0x17,0x01,0x07,0x07,0xe7, 0x02,0x0c,0x0b,0x09,0x04,0x04,0x10,0xe5,0x06,0x0b,0x07,0x12,0x03,0xe5,0x03,0x0d, 0x0b,0xe5,0x12,0x04,0x0c,0x01,0x0e,0x03,0xe5,0x05,0x09,0x16,0x0b,0xe6,0xe5,0x04, 0x01,0xe5,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x04, 0x02,0xe5,0x07,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x09,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x06, 0x03,0x08,0xe5,0x07,0x0d,0x09,0x03,0xe6,0x07,0x01,0x03,0xe6,0x06,0x08,0x01,0x01, 0xe5,0x0d,0x01,0xe5,0x12,0x08,0x09,0x1e,0x16,0xe5,0x05,0x08,0xe5,0x01,0xe5,0x0d, 0x09,0xe5,0x0b,0x01,0x0a,0x02,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x08,0x0a,0xe5,0xe5, 0xe6,0x01,0x01,0x04,0xe5,0x06,0x0a,0xe5,0x02,0x04,0xe5,0x0b,0x02,0x06,0x16,0x09, 0x25,0x10,0xe5,0x01,0xe5,0x0a,0x02,0xe5,0x08,0xe5,0x05,0x04,0xe5,0x0e,0xe5,0x01, 0x08,0x03,0x02,0x01,0xe7,0x03,0xe5,0x04,0x07,0x01,0x02,0x01,0xe6,0x03,0x05,0xe5, 0xe5,0xe5,0xe5,0x0b,0xe5,0x02,0x01,0xe7,0x01,0x01,0x17,0xe5,0x01,0x06,0xe5,0xe5, 0xe5,0x08,0x04,0xe5,0x02,0x33,0x01,0x05,0x01,0x04,0xe5,0x01,0x01,0x03,0xe5,0xe5, 0xe5,0x08,0x01,0x07,0x04,0x01,0x02,0x07,0x01,0x07,0x01,0x01,0x02,0xe5,0x01,0x01, 0x08,0x04,0x01,0xe6,0x01,0x0a,0x01,0x18,0x02,0x01,0x07,0x09,0x06,0x02,0x06,0xe5, 0xe5,0xe5,0x02,0xe5,0x01,0x05,0xe5,0x01,0x33,0x06,0x02,0x01,0x04,0xe5,0x04,0x02, 0x02,0x01,0x07,0x06,0x02,0x10,0x02,0x01,0x03,0xe5,0x01,0x01,0x04,0x10,0x03,0xe5, 0xe5,0x16,0x01,0x0c,0x02,0x0d,0x02,0xe5,0x01,0xe5,0x03,0xe5,0xe6,0x01,0x02,0x06, 0x02,0x01,0x01,0xe5,0xe5,0x01,0xe5,0x01,0x05,0x04,0x09,0x0d,0x09,0x06,0x02,0x02, 0xe5,0xe6,0x11,0x0d,0x05,0xe6,0x04,0x02,0x05,0xe5,0xe7,0x08,0x07,0x05,0xe6,0x01, 0x01,0x05,0xe8,0x01,0x0e,0x05,0x01,0x01,0x09,0x09,0x05,0x01,0x02,0x03,0xe5,0xe5, 0x01,0x06,0x02,0x04,0x04,0x01,0x01,0xe5,0xe5,0x01,0x01,0x01,0x05,0x04,0x09,0x0a, 
0xe5,0x04,0x02,0xe5,0x05,0xe7,0x02,0x02,0x01,0x04,0x09,0x01,0x02,0x04,0x05,0x03, 0x03,0xe5,0x01,0x0b,0x02,0x01,0xe5,0x02,0x05,0x01,0x01,0x02,0x01,0x04,0x09,0x02, 0xe9,0x0b,0x01,0x03,0xe5,0x01,0x01,0x02,0x06,0x07,0x01,0x04,0x01,0x02,0x05,0x01, 0x06,0x02,0x01,0x04,0x04,0x09,0x02,0x06,0x02,0x04,0x09,0x0d,0x09,0x09,0x01,0xe5, 0xe5,0x01,0x01,0x08,0x08,0x01,0x09,0x05,0x01,0x01,0x09,0x04,0x02,0x01,0x07,0x01, 0x08,0x0e,0x04,0x0d,0x01,0x04,0x01,0xe5,0xe5,0x03,0x01,0x02,0x01,0xe5,0x03,0x01, 0x02,0x01,0x01,0xe5,0xe5,0x01,0x04,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x04, 0x04,0x04,0x04,0x09,0x09,0x0a,0xe5,0x08,0x09,0x04,0x01,0xe5,0xe5,0xe5,0x10,0x01, 0x04,0x01,0xe5,0x08,0x01,0x02,0x01,0x04,0x01,0xe5,0xe8,0xe5,0x06,0x01,0x0b,0xe5, 0xe5,0xe5,0x06,0xe8,0x23,0x02,0x1d,0xe5,0x07,0x09,0x02,0x41,0x04,0x0d,0x03,0xe5, 0x07,0x15,0x04,0x02,0xe5,0x07,0x09,0x02,0x02,0xe5,0x01,0xe5,0x0a,0x01,0x24,0xe5, 0x03,0x04,0x02,0x1d,0x01,0x07,0x1a,0x09,0x0b,0x09,0x08,0xe5,0x15,0xe5,0x07,0x01, 0x0e,0x08,0xe5,0x01,0x09,0xe6,0x06,0x05,0xe5,0x01,0x09,0xe6,0xe7,0x07,0x01,0x02, 0x09,0x02,0x03,0x02,0xe5,0x04,0x02,0x05,0x03,0x06,0x02,0x06,0x02,0x03,0x05,0x04, 0xe6,0x01,0x09,0x09,0x09,0x08,0x02,0x09,0x06,0x02,0xe6,0x06,0x06,0x02,0x04,0x01, 0x02,0x05,0x03,0x04,0x01,0x02,0x09,0xe5,0xe6,0x01,0x02,0x04,0x01,0x02,0x03,0x02, 0x02,0xe5,0x03,0xe5,0x05,0xe5,0xe5,0xe5,0x01,0x09,0x12,0xe5,0x07,0x0a,0x06,0x02, 0x06,0x02,0x13,0x09,0x29,0x06,0x02,0x10,0x0c,0x13,0x09,0x06,0x0c,0x06,0x02,0x06, 0x02,0x04,0x02,0xe5,0x01,0x0a,0x14,0x05,0x09,0x01,0x0a,0x08,0x15,0x08,0x21,0x07, 0x0a,0x17,0x05,0x13,0x09,0x05,0x0d,0x07,0x05,0x01,0x02,0x09,0xe5,0x01,0x01,0x0a, 0x02,0x09,0x06,0x03,0x01,0x06,0x02,0x01,0x04,0x02,0x01,0x02,0x01,0x02,0x01,0x04, 0x02,0x01,0x02,0x01,0x02,0x01,0x04,0x02,0x06,0x09,0x09,0x0b,0x09,0x09,0xe7,0x05, 0x11,0x01,0x09,0x04,0x04,0x02,0x01,0x05,0xe6,0x03,0x01,0x07,0x01,0x0a,0xe6,0x0a, 0xe6,0xe5,0x01,0x07,0x02,0x02,0x08,0xe5,0x08,0xe5,0x01,0x02,0x03,0x07,0x01,0x05, 
0x01,0x01,0x05,0x03,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x03,0x07,0x01,0x01,0x05, 0x01,0x01,0x07,0x01,0x01,0x07,0x01,0x03,0x02,0xe5,0x06,0x01,0x09,0x05,0x01,0xe5, 0x04,0xe5,0x01,0x06,0x01,0x01,0x01,0x06,0xe5,0x01,0x02,0x01,0xe5,0x04,0xe5,0xe6, 0x09,0x05,0xe5,0x02,0x01,0xe8,0x0e,0x09,0x04,0x0e,0x03,0x05,0x03,0x05,0x03,0x05, 0x03,0x05,0x03,0x05,0x09,0x09,0x03,0x05,0x04,0x03,0x02,0x03,0x05,0x03,0xe5,0x03, 0xe5,0x07,0x11,0x01,0x06,0x02,0x09,0x03,0x09,0x05,0x07,0x01,0x03,0x14,0x02,0xe5, 0x0c,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe6,0x04,0xe7,0x01,0x03,0xe8,0x04,0xe8,0x04, 0xe7,0x01,0x03,0xe8,0xe5,0x02,0xe8,0x06,0xe5,0x01,0x03,0x01,0xe5,0x01,0x04,0xe6, 0x03,0x04,0xe6,0x07,0xe6,0x04,0xe7,0x01,0x03,0xe7,0x05,0xe8,0xe5,0x02,0xe8,0x04, 0x01,0xe6,0xe5,0x02,0xe7,0x02,0x04,0xe6,0x06,0xe6,0xe5,0x04,0xe5,0x02,0x04,0xe7, 0x02,0x02,0x01,0xe5,0xe5,0x0c,0xe6,0x06,0x09,0xe5,0x05,0x01,0x09,0x11,0x01,0x07, 0x3f,0x09,0xe6,0xe6,0x07,0x0d,0x01,0x09,0xe5,0x11,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x08,0x0d,0xe8,0x0c,0x0b,0x07,0x01,0xe5,0xe5,0x03,0x01,0x02,0xe5,0x01,0xe5, 0x03,0xe5,0x01,0x05,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x02, 0x02,0xe5,0x01,0x04,0x04,0x04,0x04,0x04,0x04,0x06,0x04,0x04,0x04,0xe5,0x03,0x03, 0x04,0x04,0x04,0x04,0xe5,0xe5,0x02,0x02,0xe5,0x01,0x02,0x05,0x02,0xe5,0x01,0xe5, 0xe6,0xe5,0x03,0x01,0xe5,0x07,0x0a,0xe5,0x0d,0x01,0x0f,0x09,0x18,0xe5,0x02,0x04, 0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x08,0x07,0x01,0x08,0x03,0x07, 0xe5,0x02,0x05,0x03,0x05,0x08,0xe5,0x06,0xe6,0xe5,0xe5,0x03,0xe5,0x02,0x05,0x08, 0xe5,0x0c,0x07,0x01,0xe5,0x16,0x02,0xe5,0x0c,0x02,0x08,0x07,0x0f,0x01,0xe6,0x04, 0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0xe7,0x05,0xe7, 0x05,0xe7,0x06,0x01,0x08,0xe7,0x05,0xe8,0x05,0xe6,0x07,0xe5,0x05,0xe7,0x01,0xe5, 0x01,0x01,0xe6,0x01,0x02,0x01,0x03,0x03,0x01,0xe6,0x08,0x02,0x06,0x01,0xe5,0x11, 0x07,0xe5,0x1a,0x08,0x10,0xe5,0xe5,0x05,0x02,0x06,0xe5,0xe5,0x17,0x05,0xe5,0x03, 
0x04,0x04,0x03,0xe5,0x21,0x0b,0x02,0xe6,0x08,0x09,0x08,0xe5,0x03,0x04,0x04,0x09, 0x02,0x06,0x02,0x01,0x06,0x05,0xe5,0xe5,0xe5,0x0e,0x01,0x07,0x01,0x01,0x05,0x01, 0xe6,0x03,0xe5,0xe7,0x04,0x01,0x02,0x01,0x01,0xe5,0xe7,0x04,0x01,0xe6,0x04,0x01, 0xe5,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x09,0x01,0x01,0x05,0xe6,0x01, 0x04,0xe8,0x03,0xe5,0xe6,0x03,0x01,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01, 0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe6,0x01,0x02,0x01,0xe5,0x03,0x01,0x01, 0x06,0x03,0x03,0x02,0xe5,0x03,0x18,0x01,0xe5,0x0d,0x02,0x04,0xe6,0xe5,0x09,0x0e, 0x18,0x09,0x02,0x06,0x06,0x08,0x09,0x08,0x07,0xe5,0x2f,0xe5,0x01,0x03,0x01,0xe5, 0x01,0x05,0xe5,0x08,0xe5,0x01,0x01,0x01,0xe5,0x03,0x06,0x09,0x09,0xe5,0x17,0x09, 0x09,0x11,0x02,0x02,0x08,0x0a,0x15,0x05,0x07,0x10,0x01,0x05,0x04,0xe5,0xe5,0x01, 0x06,0x01,0x07,0x07,0xe5,0xe5,0x02,0x07,0x01,0x07,0x02,0x04,0x08,0x01,0x01,0xe5, 0xe5,0x0f,0x0d,0x02,0x01,0xe5,0x0c,0x04,0x1f,0x09,0xe5,0x06,0x0a,0x07,0xe5,0x18, 0x0d,0x01,0x07,0x17,0x18,0x09,0x05,0x01,0x04,0x0b,0xe5,0x01,0x15,0x0d,0x04,0x02, 0xe6,0x06,0xe6,0x06,0x01,0x02,0x01,0x02,0xe6,0x01,0x08,0x01,0xe5,0x01,0x01,0x05, 0x01,0x03,0x04,0x02,0x05,0x0c,0xe5,0xe5,0x04,0x08,0x06,0x04,0xe5,0x0a,0x02,0x02, 0x03,0x04,0x13,0xe5,0xe5,0x04,0x19,0x0d,0xe9,0x07,0x18,0x05,0x0b,0x09,0x03,0x10, 0x08,0x01,0x01,0x04,0x05,0x0d,0x09,0x01,0xe5,0xe5,0xe5,0x1c,0x03,0x07,0x02,0x02, 0x10,0x01,0x0c,0x05,0x1d,0x05,0x02,0xe5,0xe6,0x08,0x05,0x08,0xe6,0x07,0x07,0xe6, 0x06,0x01,0x07,0x05,0x03,0x05,0x05,0x03,0x05,0xe5,0x06,0xe5,0x06,0x01,0x09,0xe5, 0x05,0x03,0x02,0x05,0xe5,0x02,0x05,0x03,0x01,0x0b,0x0c,0x0c,0x12,0x07,0x04,0x05, 0x01,0x01,0x01,0x04,0x0a,0xe6,0xe6,0x0c,0xe5,0x02,0x04,0x09,0x03,0x05,0x04,0x04, 0x02,0x04,0x0e,0x02,0x03,0x07,0x01,0xe5,0x07,0x07,0x02,0x03,0x04,0x06,0x05,0x08, 0x11,0x01,0x12,0xe5,0x08,0x05,0x03,0x02,0x05,0xe5,0x08,0x03,0x05,0x02,0x06,0x06, 0x07,0x02,0xe7,0xe6,0x06,0x02,0x06,0x02,0x05,0x0d,0x10,0x06,0x01,0xe5,0x0a,0x02, 
0xe5,0x01,0x03,0x08,0xe5,0x0e,0xe6,0xe5,0x01,0x01,0x04,0x05,0xe5,0x03,0x03,0x04, 0x03,0x01,0x02,0x03,0x04,0x0c,0x09,0x03,0x05,0x09,0x09,0x04,0x04,0x09,0x04,0xe5, 0x09,0x01,0xe6,0x07,0xe5,0x02,0xe5,0x0d,0x03,0xe5,0x05,0x0c,0x01,0x02,0x15,0x02, 0x06,0x07,0x0a,0x01,0x01,0x09,0x07,0xe5,0xe5,0x05,0x01,0xe7,0x01,0x01,0x02,0x03, 0x01,0x04,0x01,0x06,0xe6,0x04,0xe7,0x03,0x01,0xe8,0x04,0xe5,0x08,0xe6,0x03,0xe5, 0xe6,0x07,0xe6,0xe5,0x01,0x0a,0x0a,0xe7,0x0f,0x0a,0xe5,0x03,0x01,0x12,0x10,0xe5, 0x0c,0x1b,0x07,0x01,0x08,0x01,0x0b,0x07,0x0b,0x05,0x02,0x03,0x06,0x0a,0xe5,0x01, 0x0e,0x07,0x03,0x05,0x02,0x13,0x03,0xe6,0xe5,0xe5,0xe5,0x09,0xe5,0x04,0x08,0x04, 0x03,0x08,0x01,0x01,0x02,0x04,0x01,0x07,0x04,0x09,0x09,0x04,0x0b,0x02,0x01,0x0e, 0x04,0x09,0x09,0x01,0x04,0x08,0x03,0x06,0x01,0xe5,0x02,0x03,0x01,0x03,0x05,0x03, 0xe5,0x02,0xe5,0x02,0x05,0x03,0xe5,0x03,0x06,0x02,0x0e,0x02,0xe6,0x0c,0x14,0x0b, 0x03,0x03,0x13,0x05,0x03,0x06,0x02,0x0a,0x04,0x02,0xe5,0x01,0x02,0x06,0x08,0x01, 0xe6,0x04,0xe5,0x03,0xe5,0x15,0x03,0x04,0xe5,0x01,0x09,0x09,0x08,0xe5,0xe5,0x06, 0x01,0x05,0xe6,0x07,0x0e,0x01,0x1e,0x05,0x01,0x11,0xe5,0x0e,0x01,0x07,0x01,0x01, 0x05,0xe5,0x0e,0x04,0x01,0x11,0x01,0x07,0xe5,0x04,0x02,0x05,0x0d,0x01,0x09,0x04, 0x09,0x02,0x03,0xe5,0x03,0x09,0x09,0x09,0x03,0x0c,0xea,0x0d,0xe6,0x03,0x02,0x0a, 0xe5,0x03,0x01,0x09,0x02,0x06,0x09,0x07,0x06,0xe5,0xe5,0x01,0xe5,0x01,0x01,0xe5, 0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0x08,0x01,0xe6,0x04,0x02,0x03,0xe5,0xe5,0x01, 0xe5,0x01,0xe5,0x03,0xe5,0x01,0x01,0xe5,0xe5,0x03,0x01,0x04,0x08,0xe5,0x07,0xe5, 0x01,0xe5,0x02,0x01,0x01,0xe5,0x04,0xe5,0x01,0xe5,0x19,0x01,0xe5,0xe6,0x01,0x01, 0x0b,0x01,0x04,0x05,0xe5,0x02,0x08,0x0a,0x08,0x09,0x10,0x01,0x07,0xe5,0x04,0x08, 0x11,0x03,0x0d,0xe5,0x03,0x03,0xe5,0x05,0xe6,0x01,0x06,0xe5,0x03,0x0a,0x09,0x02, 0x01,0x03,0x0b,0x01,0x01,0x07,0x01,0x0c,0x01,0x02,0x01,0xe6,0x03,0x12,0x05,0xe5, 0x03,0x01,0x05,0x0b,0x18,0x09,0x06,0xe5,0xe5,0x08,0x07,0x01,0x09,0x0b,0x07,0x01, 
0x10,0x05,0x06,0x01,0x01,0x01,0x03,0xe5,0x01,0x05,0x01,0x01,0x01,0x02,0xe5,0x02, 0x01,0x03,0xe5,0x01,0x01,0xe5,0x05,0x01,0x03,0x01,0x01,0x01,0x02,0xe5,0x03,0x05, 0x01,0xe5,0x01,0x14,0x06,0xe5,0x0a,0x13,0x09,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01, 0xe5,0x12,0x0d,0x02,0x10,0x01,0xe5,0x05,0x02,0x09,0xe5,0x04,0x0c,0x06,0x02,0x02, 0xe5,0x0c,0x01,0x07,0x01,0x0c,0xe5,0x01,0xe5,0xe5,0x0b,0xe5,0x01,0x06,0x02,0x06, 0x02,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x05,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x03,0x01, 0xe5,0xe5,0x02,0x02,0x02,0x01,0x04,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x02,0x02, 0xe5,0xe5,0x06,0x04,0x09,0x01,0xe6,0x06,0x01,0x0f,0xe5,0xe6,0x03,0x01,0x02,0x05, 0xe5,0x01,0x04,0x01,0x01,0xe5,0x05,0xe5,0x01,0xe5,0x01,0x0a,0x01,0x01,0x05,0x01, 0x01,0x09,0x05,0x01,0x01,0x09,0x05,0x09,0x06,0x01,0x02,0x01,0x04,0x02,0x01,0x02, 0xe6,0x01,0x01,0x07,0x01,0x01,0x02,0x02,0x01,0x06,0x02,0x01,0x01,0x02,0x02,0x01, 0x01,0x02,0x02,0x01,0x04,0xe5,0xe5,0xe5,0x04,0x03,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x03,0xe5,0xe6,0x0b,0x01,0x07,0x01,0x04,0x01,0x02,0x07,0x01,0x09,0x03, 0xe5,0x10,0x06,0x05,0x03,0x09,0x09,0x02,0x06,0x03,0xe5,0x02,0x02,0x04,0x01,0x02, 0x09,0x05,0x01,0x01,0x07,0x05,0xe5,0x02,0x02,0x02,0x03,0x03,0x01,0x03,0x05,0x01, 0x01,0x02,0xe5,0xe5,0xe5,0xe5,0x02,0x05,0x02,0x01,0xe5,0x02,0x0e,0x01,0x02,0x02, 0x0a,0x01,0x07,0x01,0x04,0x04,0x01,0xe7,0x01,0x01,0x09,0x01,0xe5,0x03,0x09,0x09, 0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x09,0x01,0x03,0xe5,0x01,0x01,0x04, 0x01,0xe5,0xe5,0x06,0x01,0x07,0x03,0xe5,0x08,0x02,0x09,0x08,0x07,0x01,0x09,0x07, 0x0a,0xe5,0x05,0xe5,0xe5,0x02,0x02,0x1a,0x01,0xe5,0x02,0x0c,0x02,0x03,0x02,0x02, 0x06,0xe5,0x0b,0x09,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x05,0x05,0x03,0x05,0x03, 0x05,0x09,0x06,0x08,0xe5,0x01,0xe5,0x03,0x03,0xe5,0x03,0x03,0xe6,0x02,0xe5,0x01, 0xe6,0x02,0x05,0x03,0xe5,0x07,0x03,0xe5,0x07,0x02,0xe6,0x1d,0x05,0xe5,0x0b,0xe5, 
0x03,0xe5,0x01,0x06,0x02,0xe5,0x04,0x02,0x12,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x02,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x03,0xe5,0x04,0xe5,0x01,0x05,0xe5,0x01, 0xe5,0x07,0xe5,0x07,0xe5,0x03,0xe5,0x01,0xe5,0x07,0xe5,0x03,0xe5,0x01,0xe5,0x07, 0x0a,0xe5,0xe6,0x0d,0x09,0x02,0x02,0x03,0xe5,0x07,0x06,0x02,0xe5,0x01,0x05,0x09, 0x03,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5, 0xe5,0x05,0x02,0x01,0x06,0xe5,0xe5,0x05,0xe5,0xe5,0x08,0x09,0xe5,0x01,0x02,0x02, 0xe6,0x03,0x02,0x06,0x02,0x06,0x02,0xe6,0x03,0x02,0x06,0x02,0xe6,0x03,0x02,0x05, 0xe5,0x05,0xe5,0xe5,0xe5,0x01,0x27,0x08,0xe5,0x1b,0xe6,0x06,0xe6,0x07,0xe5,0x04, 0x02,0xe5,0x04,0x02,0xe5,0x04,0x05,0x05,0x03,0x05,0x03,0x19,0x02,0x06,0x09,0x09, 0x0b,0x09,0x07,0x02,0x06,0x07,0x02,0x01,0x01,0x25,0x01,0x0b,0x05,0x17,0x09,0x04, 0x03,0x05,0x03,0x05,0x02,0x06,0x0b,0x09,0x09,0x14,0x02,0x04,0x09,0x09,0x09,0x09, 0x09,0x03,0x05,0x0e,0x02,0xe5,0xe5,0x09,0x02,0x07,0x01,0x0a,0xe6,0x03,0x01,0x0a, 0xe6,0x03,0x01,0x09,0x0e,0x04,0x09,0x09,0x06,0x02,0x06,0x01,0x02,0x09,0x06,0x02, 0x07,0x01,0x07,0x01,0xe7,0x06,0xe6,0x1a,0xe6,0x10,0xe6,0x14,0xe5,0x01,0x0f,0xe5, 0x02,0x02,0x01,0xe5,0x02,0x01,0x03,0xe5,0x04,0x01,0xe5,0x08,0xe5,0x04,0x01,0x01, 0x01,0x05,0x01,0x01,0x05,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x04,0xe5,0xe6,0x05, 0x01,0xe5,0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x02,0x01, 0xe5,0xe7,0x08,0x09,0x09,0x02,0x06,0x09,0x02,0x06,0x09,0x0c,0xe5,0x01,0x0f,0x07, 0x01,0x0a,0x08,0x04,0x01,0x03,0x57,0x1e,0x01,0xe5,0x02,0x06,0x01,0x1b,0x02,0x0f, 0x02,0x15,0x01,0xe5,0x0e,0x05,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x01,0x05, 0xe5,0x01,0x05,0xe5,0xe5,0x01,0x04,0xe6,0x01,0x03,0x01,0xe5,0x01,0x03,0xe7,0x05, 0xe8,0xe5,0x02,0xe7,0x05,0xe7,0x05,0x01,0xe5,0x06,0xe6,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x08,0x01,0x0f,0x07,0x01,0xe5,0x07,0xe6,0x04,0x01,0x09,0xe5,0x11, 
0x09,0xe5,0x08,0x04,0x0e,0x09,0x05,0x05,0xe5,0x03,0x03,0x05,0x09,0xe5,0xe6,0x04, 0xe5,0xe5,0x03,0x01,0x07,0x01,0x03,0xe5,0xe5,0x05,0xe5,0x01,0x01,0x0d,0x19,0x08, 0x01,0xe5,0x0e,0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x0a,0x06,0x02, 0x04,0x01,0xe5,0x07,0x09,0x09,0x09,0xe5,0x04,0x04,0x04,0xe6,0x01,0xe5,0x01,0x01, 0xe5,0x01,0x04,0xe7,0xe6,0x01,0x01,0xe6,0xe5,0xe5,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0x06,0x02,0xe5,0x04,0x07,0x01,0x09,0x09,0x0e,0xe5,0x0e,0x0a,0x12,0x0a,0x1c, 0x09,0xe5,0x01,0x01,0x03,0x09,0xe5,0x07,0x08,0x02,0x05,0x03,0x05,0x03,0x03,0x01, 0x08,0xe5,0x02,0x03,0x01,0x07,0x01,0x09,0x09,0x31,0x02,0x0f,0x0a,0x06,0x09,0x09, 0x1d,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x07, 0x01,0xe5,0x02,0x02,0x01,0xe5,0x01,0x03,0x01,0xe5,0x02,0x05,0x02,0x03,0x01,0x03, 0x09,0x24,0x17,0x09,0xe8,0x02,0x13,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x05, 0xe5,0xe5,0x05,0x09,0xe5,0xe5,0x13,0x05,0x02,0x06,0xe5,0xe5,0x05,0x0b,0xe5,0xe5, 0x05,0xe5,0xe5,0x05,0xe5,0xe6,0x08,0x05,0x01,0xe5,0x05,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0x04,0x08,0xe8,0xe5, 0x0e,0x01,0x03,0x03,0x01,0x02,0xe5,0x02,0x01,0x01,0x01,0x03,0xe6,0xe5,0xe5,0x02, 0x01,0x03,0x03,0x01,0x02,0xe5,0x02,0x01,0x07,0x01,0x07,0xe7,0x05,0x01,0x01,0x01, 0x03,0x01,0xe5,0x02,0x02,0x01,0x06,0x02,0x01,0x01,0x02,0x01,0xe5,0xe5,0x03,0x02, 0x01,0x07,0x01,0x02,0x04,0x01,0x05,0x01,0x01,0xe5,0x03,0x01,0x01,0xe5,0x03,0x01, 0x01,0x05,0x01,0x01,0x01,0x03,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0xe6,0x06,0x02, 0x02,0xe5,0x18,0xe5,0x0a,0x01,0x0f,0x02,0x02,0x1a,0x02,0x02,0x07,0xe8,0xe5,0x05, 0x04,0x0a,0x01,0x06,0x22,0x07,0x02,0xe5,0x07,0x06,0x02,0xe5,0x04,0x02,0xe5,0x0e, 0x03,0x02,0x0c,0x07,0x02,0xe6,0x0a,0x03,0x05,0xe5,0xe5,0xe5,0x02,0x0f,0x05,0x06, 0x07,0x0a,0xe5,0x05,0x01,0x05,0xe5,0x04,0x01,0xe6,0x06,0x06,0x04,0x0b,0x05,0x03, 0x13,0xe5,0x02,0x08,0x05,0x02,0x06,0x01,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x03,0x02, 
0xe5,0x01,0x04,0xe5,0x01,0x07,0x08,0x02,0xe6,0x10,0x03,0xe5,0x0e,0x1b,0x0d,0x08, 0x13,0x07,0x08,0x07,0x2b,0x03,0x02,0x01,0x08,0x08,0x01,0x04,0x02,0x01,0x1f,0x09, 0xe5,0x03,0xe5,0x01,0x07,0x08,0x03,0x01,0x07,0xe6,0x09,0x06,0xe7,0x04,0xe5,0xe5, 0x03,0x0c,0x06,0x04,0x06,0xe5,0xe5,0x08,0x07,0xe5,0x05,0x05,0xe6,0x04,0xe7,0x05, 0xe5,0xe5,0x0e,0xe5,0xe6,0x04,0x02,0xe5,0x02,0x04,0x09,0x02,0x03,0x02,0x09,0x09, 0xe5,0x07,0x05,0x02,0x04,0xe7,0xe5,0x19,0x01,0x05,0x10,0x02,0x0a,0x0f,0x04,0x06, 0x0a,0x01,0x03,0xe5,0x01,0x07,0x04,0x02,0x02,0x0b,0x10,0x09,0x02,0x02,0x0b,0x1f, 0xe5,0x1a,0x04,0x05,0xe8,0x08,0x09,0xe5,0x01,0x04,0x09,0x06,0x02,0x06,0x07,0x01, 0x02,0x06,0x08,0xe5,0x02,0x09,0x1d,0x03,0x01,0x01,0x19,0x06,0xe5,0xe5,0x02,0xe5, 0x05,0x09,0x05,0x01,0x01,0x02,0x01,0x04,0x01,0x07,0x05,0x03,0x05,0xe5,0x01,0x0f, 0xe7,0xe5,0x08,0x06,0x13,0x06,0x07,0x01,0x09,0x03,0x05,0x02,0x06,0x02,0x0e,0x06, 0x1a,0x1a,0x02,0x09,0x0c,0x1a,0x0d,0x0b,0x0e,0x05,0xe5,0xe6,0x02,0x1a,0x06,0x20, 0x0e,0x01,0x1d,0x02,0x01,0x06,0x03,0xe5,0x04,0x0a,0x07,0xe6,0x02,0x03,0x04,0x06, 0x02,0x06,0x0c,0x04,0x0b,0x0b,0x06,0x08,0x02,0x0b,0xe6,0x07,0xe6,0x01,0xe6,0x02, 0xe5,0x01,0xe5,0x12,0x01,0x0e,0x02,0x07,0x06,0xe5,0xe5,0xe5,0x04,0x08,0x01,0x01, 0xe6,0xe5,0x0a,0x03,0x01,0x04,0x01,0xe5,0x09,0x09,0xe5,0x07,0x06,0x01,0x01,0x1b, 0x07,0x01,0x01,0x08,0x03,0x04,0x01,0xe6,0x07,0x01,0x0f,0xe6,0x02,0x0c,0xe5,0x07, 0x02,0x09,0xe5,0x03,0x0e,0x05,0x01,0x11,0x05,0x04,0x02,0x03,0x04,0xe5,0xe5,0x0f, 0x03,0x15,0x10,0x02,0x11,0x13,0x01,0x04,0x11,0x07,0x0b,0x04,0xe7,0x04,0x12,0x05, 0x0e,0x06,0x02,0x06,0x02,0x06,0x02,0x0e,0x01,0x02,0x13,0xe5,0x02,0x02,0x01,0x06, 0x04,0x04,0x09,0x01,0x09,0x05,0xe5,0x04,0x0e,0x0b,0x0c,0x0a,0xe5,0x06,0x02,0x02, 0xe5,0x12,0xe7,0xe5,0x02,0x08,0x15,0x01,0x05,0x01,0x01,0x05,0x01,0x07,0xe5,0x01, 0x05,0x02,0x01,0x05,0x01,0x09,0x08,0x08,0x05,0x02,0xe6,0x08,0x05,0x17,0x01,0x04, 0x02,0x05,0x09,0x03,0x03,0x07,0x07,0x0f,0x03,0x16,0x02,0xe5,0xe5,0x04,0x08,0x1f, 
0x09,0x09,0x09,0xe5,0x0e,0x06,0x01,0x04,0x05,0x0b,0x0c,0x1c,0x02,0x07,0x12,0x14, 0x0d,0x05,0x1a,0xe7,0x07,0x01,0x03,0xe5,0x04,0x01,0xe7,0x03,0x01,0xe5,0x07,0x01, 0xe5,0x07,0x01,0xe5,0x05,0x01,0xe5,0x06,0x04,0x02,0xe5,0x04,0x06,0xe5,0x06,0xe5, 0x01,0x03,0x02,0xe5,0xe5,0x03,0x01,0x01,0x01,0x01,0x02,0x01,0xe5,0x01,0x09,0x03, 0x02,0xe5,0x04,0xe5,0x07,0x02,0x01,0x04,0x09,0xe5,0x03,0x03,0x07,0x01,0x02,0xe5, 0xe5,0xe6,0x07,0xe6,0xe6,0x04,0xe5,0x09,0x03,0x01,0x02,0x01,0x05,0x01,0xe6,0xe5, 0x05,0x03,0xe5,0x03,0xe5,0x03,0xe5,0x02,0x08,0x03,0x01,0xe5,0x01,0x03,0x01,0xe5, 0x01,0x04,0x04,0xe7,0x05,0x04,0x04,0xe5,0x04,0x01,0xe5,0x02,0x02,0x01,0xe5,0x04, 0xe5,0x03,0x06,0x01,0xe5,0x08,0x03,0xe7,0x05,0x05,0x02,0x02,0xe5,0x07,0xe5,0x07, 0xe5,0x01,0x0a,0x02,0x05,0x04,0x08,0xe5,0x03,0xe5,0xe5,0x07,0x01,0x04,0x01,0x04, 0x0a,0x07,0x09,0x01,0x04,0xe5,0x02,0x01,0x01,0xe5,0x07,0xe5,0x08,0x01,0x07,0x01, 0xe5,0xe5,0x06,0x0b,0x07,0x09,0x06,0xe5,0x01,0x04,0x05,0x09,0x10,0x01,0x05,0x08, 0x09,0x05,0x03,0x02,0x01,0x02,0x02,0x04,0x01,0x13,0x03,0xe5,0x03,0x05,0xe5,0xe5, 0x01,0x0a,0x09,0x09,0x0f,0xe5,0x01,0x06,0x02,0x09,0x09,0x09,0x09,0x09,0x09,0x0b, 0x09,0x09,0x13,0x1d,0x05,0xe5,0x0b,0x01,0x11,0x06,0xe5,0x04,0x04,0x01,0x02,0x0c, 0x04,0x01,0xe5,0x08,0x01,0x06,0x05,0xe5,0x01,0x05,0x01,0xe6,0x04,0xe5,0x01,0x05, 0xe5,0x01,0xe5,0xe5,0x06,0x04,0x06,0x02,0x04,0x04,0x04,0x04,0x0b,0xe5,0x01,0xe5, 0x03,0x04,0x04,0x04,0x01,0x02,0x09,0xe5,0x01,0x05,0x09,0x04,0x04,0x09,0xe5,0x01, 0xe5,0x14,0x06,0x06,0xe5,0x01,0xe5,0x0c,0x03,0x04,0xe5,0x02,0x05,0x03,0x01,0x01, 0xe5,0x03,0x09,0x04,0x04,0x04,0x04,0x05,0x01,0x01,0x01,0x02,0xe5,0x02,0x05,0x03, 0x04,0xe5,0x02,0x05,0x03,0x0b,0x02,0x01,0xe5,0x02,0x05,0x03,0x05,0x01,0x01,0x09, 0x02,0x01,0x14,0x03,0x09,0x02,0x01,0xe5,0x02,0x0f,0x03,0x02,0xe6,0x03,0x03,0xe7, 0x0e,0x02,0x02,0x01,0x09,0x01,0x04,0x04,0x07,0x04,0x02,0x01,0x03,0xe5,0x01,0x01, 0x06,0x01,0xe5,0x05,0x02,0x0b,0x01,0xe5,0xe6,0xe5,0x05,0x02,0x04,0xe6,0x03,0x06, 
0x02,0x06,0x02,0x03,0x01,0xe5,0x01,0x01,0x07,0x10,0x0c,0x06,0x04,0x07,0x01,0x03, 0xe5,0x0d,0x0d,0xe6,0xe6,0x0c,0x09,0x09,0x05,0xe6,0x02,0x01,0xe7,0x01,0x03,0xe7, 0x01,0x04,0x04,0x03,0xe7,0x01,0x05,0xe6,0xe5,0x02,0x05,0x04,0x01,0xe5,0xe5,0x02, 0xe5,0xe6,0xe5,0x02,0xe5,0x04,0xe5,0x02,0xe5,0x01,0x01,0x03,0xe5,0xe8,0x03,0x01, 0x02,0x01,0x01,0xe5,0x03,0x03,0xe5,0x07,0xe8,0x04,0xe5,0xe5,0xe6,0x02,0xe5,0xe7, 0x01,0x01,0xe7,0x01,0x01,0x04,0x01,0x0a,0x0d,0x03,0x01,0xe5,0x26,0x02,0x03,0x02, 0xe5,0x04,0x02,0x06,0x09,0x02,0x02,0x0d,0x02,0x06,0x04,0x04,0x02,0x01,0xe5,0x02, 0x02,0xe5,0x06,0x02,0x06,0x09,0x04,0x04,0x09,0x02,0x06,0x09,0x02,0xe5,0x04,0x09, 0x02,0xe5,0x11,0x02,0x0f,0xe5,0xe5,0xe6,0x09,0x1d,0x05,0xe5,0x01,0xe5,0x03,0xe5, 0x01,0x01,0x03,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x13,0x01,0x03,0xe5,0x07,0xe5,0x01, 0xe5,0x03,0xe5,0x09,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x03, 0x03,0xe5,0x07,0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x0f,0x03,0x09,0x07,0x02, 0x02,0xe5,0x09,0x03,0x09,0x09,0x05,0x03,0xe5,0x04,0x02,0xe5,0x01,0x02,0x02,0xe7, 0x05,0xe5,0x03,0x03,0x02,0x03,0x02,0x03,0x05,0xe6,0x06,0xe5,0xe5,0x02,0x02,0xe5, 0x04,0x01,0x02,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x04, 0x02,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x02,0xe6,0x01,0x09,0xe5,0x03,0x03,0x04,0xe6, 0x05,0xe6,0x01,0x02,0x1d,0x12,0xe5,0x05,0x02,0x13,0x08,0x1d,0xe5,0x07,0xe5,0x28, 0x12,0xe5,0x11,0xe5,0x08,0x1d,0x04,0x02,0x02,0xe5,0x1d,0x10,0x03,0x05,0x01,0x11, 0x01,0x25,0x03,0x05,0x03,0x02,0x04,0x09,0x13,0x03,0x05,0x09,0x03,0x05,0x09,0x03, 0x05,0x03,0x1b,0x0a,0x01,0xe6,0x0e,0x07,0x01,0x07,0x01,0x0a,0xe6,0x05,0xe7,0x03, 0x01,0xe7,0x05,0xe5,0xe5,0x03,0x09,0x01,0xe6,0x04,0x01,0xe7,0x02,0xe5,0xe8,0x02, 0xe5,0xe8,0x07,0xe7,0x03,0x01,0xe5,0xe5,0x02,0xe5,0xe8,0x05,0xe7,0xe5,0x03,0xe5, 0xe5,0x05,0xe7,0x02,0xe5,0xe6,0xe5,0x03,0x01,0xe7,0x06,0xe6,0xe5,0xe6,0xe5,0x06, 0x01,0x01,0x05,0x01,0x0e,0x01,0x01,0x0c,0x02,0xe5,0x04,0xe5,0xe6,0x05,0x01,0xe5, 
0x02,0x01,0x03,0xe5,0x03,0xe5,0xe7,0x03,0xe5,0x01,0xe5,0x01,0x02,0x01,0xe6,0x06, 0xe5,0x04,0xe5,0xe7,0x04,0x01,0xe6,0x01,0x02,0x01,0xe6,0x04,0x01,0xe6,0x06,0x01, 0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x06,0xe6,0x03,0xe5,0xe7,0x04,0x02,0xe5,0x01, 0x02,0x01,0xe6,0x03,0xe5,0xe7,0x07,0xe5,0x04,0x01,0xe5,0x05,0x01,0xe5,0x02,0x02, 0x01,0xe5,0x0c,0xe5,0x01,0xe5,0x06,0xe5,0x04,0x07,0x01,0x04,0x04,0x0b,0x02,0x06, 0x07,0xe5,0x0c,0x02,0x06,0x02,0x01,0x07,0x01,0xe5,0x01,0x03,0x01,0x04,0x02,0x01, 0xe5,0x02,0x03,0x04,0x01,0xe5,0x01,0x01,0x03,0x06,0x03,0x04,0xe5,0x0b,0xe5,0x03, 0x01,0x02,0x02,0x01,0x0b,0x02,0x05,0x02,0x03,0x01,0x07,0x01,0xe5,0x17,0xe8,0x0d, 0x05,0xe6,0xe5,0x04,0xe6,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x01, 0x05,0xe6,0xe6,0x03,0xe6,0xe5,0x04,0xe6,0xe6,0x03,0xe6,0xe5,0x03,0xe6,0x01,0x05, 0x01,0xe6,0xe5,0x02,0xe7,0x06,0xe6,0x01,0x04,0xe7,0x05,0xe7,0x04,0xe8,0xe5,0x02, 0x01,0xe5,0x05,0xe7,0x07,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x05,0x01,0xe5,0x06, 0x02,0xe5,0x0e,0x0a,0x06,0x01,0x09,0xe5,0x23,0x02,0xe5,0x04,0x01,0xe5,0x0f,0x01, 0xe6,0x04,0x01,0x0c,0x0c,0xe5,0x01,0x01,0xe6,0x06,0x0a,0xe5,0x06,0xe5,0x01,0x03, 0x01,0xe6,0x04,0x01,0xe5,0x07,0xe6,0x06,0x07,0x01,0x07,0x01,0x03,0x0b,0xe5,0xe5, 0xe5,0x0c,0x0a,0x06,0x01,0x07,0x01,0xe5,0xe5,0x08,0x09,0x09,0x06,0x07,0x01,0x01, 0x0a,0x04,0x01,0x02,0x04,0x01,0x02,0x03,0x07,0x03,0x05,0x03,0xe5,0xe5,0x01,0x01, 0x01,0x02,0x02,0x01,0x01,0x05,0x03,0xe5,0x03,0x03,0xe5,0xe5,0x01,0x04,0x01,0x02, 0xe5,0x01,0xe5,0xe5,0x01,0x04,0x09,0x01,0x07,0x01,0x05,0x0c,0x08,0x06,0x13,0x2f, 0x0b,0xe5,0x11,0x09,0x04,0x03,0x03,0x02,0x01,0x09,0x09,0x03,0x05,0x08,0xe5,0x02, 0xe5,0x02,0xe5,0x02,0x03,0x01,0x04,0x03,0xe5,0x0c,0x09,0x19,0x01,0xe5,0x0e,0xe5, 0x08,0x06,0x02,0x06,0x29,0xe5,0x05,0x01,0xe5,0x0f,0x01,0x07,0x01,0x06,0x08,0xe7, 0x05,0x01,0xe5,0x05,0xe7,0x01,0x03,0xe7,0x05,0xe8,0x04,0xe8,0xe5,0x04,0x07,0x01, 0xe6,0x08,0x02,0x08,0x0a,0x02,0x0a,0xe5,0xe7,0x15,0x02,0x13,0x06,0xe5,0xe5,0x03, 
0x04,0x06,0x04,0x02,0x01,0x01,0xe5,0x07,0xe5,0xe5,0x0e,0x01,0x06,0x0b,0x02,0x09, 0x06,0x01,0xe5,0x09,0x05,0x0e,0x0c,0x01,0xe5,0xe5,0xe5,0x03,0x02,0x06,0x02,0x01, 0x1f,0xe5,0xe6,0x10,0x01,0x04,0x02,0x01,0x01,0x05,0xe6,0xe5,0x04,0x01,0xe6,0xe5, 0x02,0x01,0x01,0x05,0xe6,0x06,0x01,0x04,0x02,0x01,0x07,0x01,0x02,0x04,0xe7,0x05, 0x01,0x07,0x01,0x07,0x01,0x01,0xe5,0x05,0x01,0xe5,0x02,0x02,0x01,0x07,0x01,0x04, 0x02,0x01,0x02,0x04,0x01,0x07,0x01,0x03,0x03,0x01,0xe6,0xe5,0x02,0x01,0x04,0x02, 0x01,0xe5,0x05,0x01,0x02,0x04,0x01,0x02,0x01,0x02,0x02,0x02,0xe5,0x01,0x0e,0xe5, 0x03,0x01,0xe5,0x0f,0x05,0x06,0x02,0xe5,0x01,0x02,0xe5,0x05,0xe6,0x02,0x0e,0xe5, 0x09,0x04,0x02,0xe6,0x01,0x03,0x09,0x02,0xe5,0x01,0x03,0x01,0xe5,0x03,0x03,0xe5, 0x07,0xe5,0x07,0x09,0xe5,0x04,0x02,0x0a,0xe5,0x04,0x02,0x05,0xe5,0x02,0x07,0x18, 0xe9,0x01,0x04,0x03,0x09,0xe5,0xe5,0x02,0x02,0x04,0xe5,0x02,0x0b,0x09,0x05,0x02, 0x03,0x02,0x06,0x04,0x0d,0xe5,0x04,0x01,0x03,0x09,0x01,0x02,0xe5,0x04,0xe5,0x01, 0x0b,0x02,0x06,0x01,0x07,0x07,0x01,0x09,0x03,0xe5,0x01,0x0a,0x11,0x02,0x01,0x02, 0x12,0x04,0x12,0x17,0x05,0x03,0x09,0x09,0x01,0x07,0x03,0x06,0x01,0x01,0x02,0x06, 0x16,0x06,0x06,0x02,0x06,0xe5,0x05,0x02,0x05,0xe5,0x04,0x05,0x05,0x0c,0x07,0x02, 0x0e,0x15,0x0a,0xe9,0x12,0x02,0x15,0x07,0xe6,0x08,0xe5,0x02,0x02,0x01,0x11,0x01, 0xe5,0x05,0xe6,0x06,0xe5,0xe5,0x07,0xe5,0x06,0xe5,0xe5,0x08,0x03,0xe5,0x0a,0x0a, 0x11,0x03,0x11,0xe5,0x01,0x03,0x01,0x01,0x07,0xe8,0x0b,0x08,0xe6,0x01,0x01,0x11, 0x02,0x02,0x06,0x0a,0x07,0x02,0x06,0x01,0x0c,0x04,0x0a,0x02,0x04,0x01,0xe5,0x12, 0x08,0xe5,0x10,0x01,0x17,0x03,0x1f,0x0d,0x08,0x05,0x06,0x0c,0x03,0xe6,0x05,0x10, 0x09,0x04,0x04,0x09,0x03,0xe5,0x03,0x02,0x04,0x01,0xe5,0x02,0x04,0x04,0x02,0x01, 0x09,0x02,0x06,0x09,0x07,0x03,0x01,0x01,0x03,0x0b,0x03,0x05,0x05,0x03,0x09,0xe6, 0x04,0x01,0x01,0x01,0xe5,0x03,0x09,0x07,0x01,0xe5,0x07,0x17,0x01,0x02,0x09,0x0d, 0x02,0x06,0x02,0x06,0x02,0x06,0xe7,0x05,0xe6,0x06,0xe6,0x04,0x01,0xe6,0x06,0xe6, 
0x06,0xe6,0xe5,0x04,0xe7,0x02,0x02,0xe6,0x01,0x06,0xe7,0x05,0x09,0xe6,0x06,0xe6, 0x05,0xe7,0x02,0xe5,0x01,0xe6,0x06,0xe6,0x01,0x04,0xe6,0x06,0x09,0x09,0x03,0x08, 0x0e,0xe5,0xe5,0x05,0x0c,0x28,0x06,0x0c,0x06,0x02,0x09,0x09,0x08,0x02,0x04,0x05, 0x0b,0x19,0x08,0x1e,0x29,0x08,0x03,0x08,0x0f,0x01,0x05,0xe7,0x05,0xe7,0x02,0x02, 0xe6,0x07,0xe5,0x02,0x01,0x07,0x0e,0x01,0x06,0x01,0x07,0x07,0x0b,0x02,0xe8,0x03, 0xe8,0x05,0xe7,0x01,0x02,0x09,0x06,0x04,0x13,0x03,0x04,0xe5,0xe6,0x04,0xe5,0x07, 0xe5,0xe5,0x17,0x01,0xe5,0x02,0x11,0x02,0x09,0x09,0x0a,0x02,0x03,0x02,0x02,0x1c, 0x06,0x1c,0x02,0x09,0x06,0xe5,0xe5,0x1a,0x02,0x13,0x07,0x01,0x03,0x03,0x01,0x0a, 0x18,0xe5,0xe6,0x0a,0x02,0x02,0x06,0x06,0x02,0x09,0x08,0xe5,0x07,0xe5,0x05,0x02, 0x04,0x01,0x02,0x04,0x04,0x04,0x04,0x04,0xe6,0x01,0x03,0x05,0x03,0xe5,0xe5,0x03, 0x03,0x02,0x05,0xe5,0x04,0x06,0x02,0x02,0x06,0x09,0x03,0x05,0x09,0x09,0x03,0x05, 0x08,0xe5,0x0e,0x04,0xe5,0x02,0x02,0x01,0x0d,0x09,0x05,0x03,0x01,0x07,0x07,0xe6, 0x06,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x05,0xe7,0x08,0xe6, 0x06,0x01,0x07,0xe7,0x05,0xe6,0x06,0xe6,0x01,0x04,0xe6,0x06,0xe7,0x05,0xe6,0x08, 0x06,0x02,0xe5,0xe5,0x05,0x14,0x03,0xe5,0x01,0x10,0x09,0xe5,0x07,0x09,0x33,0x08, 0x18,0x0b,0x05,0x12,0x0a,0x15,0x0f,0x09,0xe6,0x06,0x03,0x10,0x02,0x01,0xe8,0x07, 0xe5,0x03,0x14,0xe5,0x07,0xe5,0x03,0x03,0x01,0xe5,0x01,0x03,0x05,0x03,0x01,0xe5, 0x01,0x09,0x03,0x05,0x06,0xe5,0xe5,0x02,0x05,0x01,0x04,0x01,0x02,0x03,0x02,0xe5, 0xe5,0x02,0x02,0xe5,0xe5,0x02,0x02,0x01,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x02, 0x05,0x0d,0x09,0x09,0x09,0x02,0xe5,0xe5,0x0d,0xe6,0xe7,0x01,0x01,0x0d,0x04,0x06, 0x01,0xe5,0x01,0x06,0x02,0x08,0xe5,0x01,0xe5,0x03,0xe5,0x07,0x01,0x01,0xe5,0x03, 0x02,0x06,0xe5,0x07,0x04,0x01,0x02,0x03,0xe5,0x03,0x04,0xe5,0x04,0xe5,0xe5,0x05, 0xe5,0xe5,0x05,0xe5,0x02,0xe5,0x02,0x09,0x09,0xe5,0xe5,0xe5,0xe5,0x01,0x08,0xe6, 0xe5,0xe5,0xe6,0x02,0x02,0x01,0xe5,0x01,0x06,0x03,0x02,0xe5,0xe5,0x0a,0x01,0x02, 
0x03,0x04,0x05,0xe5,0xe5,0xe5,0x21,0x04,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x04, 0x04,0x04,0x04,0x04,0x04,0x01,0xe5,0xe5,0x03,0x02,0x01,0x06,0x04,0x04,0x04,0x04, 0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x04,0x04,0x02,0x01,0x02,0x06,0x01, 0x11,0xe5,0x0a,0x0b,0x04,0x01,0x01,0x02,0x07,0x02,0x01,0x04,0xe5,0xe5,0x08,0x09, 0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x08,0x09,0x05,0xe5,0x01,0x09,0x0d,0x05,0xe5,0x0a,0x10,0x01,0x01,0x05,0xe5,0x01, 0x05,0xe5,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02, 0x02,0xe5,0x01,0x04,0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x03,0x02, 0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x03,0xe5,0x03,0x09,0x01,0x01,0x05,0xe5, 0x01,0x05,0x01,0x01,0xe5,0x0f,0x03,0x02,0x0e,0x09,0x04,0x04,0x09,0x05,0x03,0x05, 0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5, 0x02,0x07,0x03,0x02,0xe7,0x02,0x04,0xe5,0x02,0x01,0x02,0xe5,0x02,0x02,0xe7,0x02, 0x02,0xe7,0x02,0x05,0x03,0x02,0xe7,0x02,0x09,0x04,0x04,0x02,0x01,0x04,0x04,0xe5, 0x02,0x09,0x02,0xe5,0x01,0xe5,0x0b,0x01,0x04,0x02,0x04,0x01,0xe5,0xe5,0x05,0x02, 0x06,0x02,0x03,0xe5,0x03,0x06,0x02,0x03,0xe5,0x03,0x06,0x01,0xe5,0x02,0xe5,0x03, 0x06,0x02,0x03,0xe5,0x03,0x06,0x04,0x04,0x04,0x05,0xe6,0xe5,0x08,0x06,0x02,0x06, 0x02,0x03,0x05,0x06,0x04,0x04,0x01,0xe5,0x03,0x02,0x01,0x09,0x04,0x01,0xe7,0x0e, 0x04,0x02,0x0a,0x01,0x04,0xe5,0xe5,0x02,0xe5,0xe8,0x02,0xe5,0x01,0x01,0x03,0xe5, 0xe6,0x02,0x02,0x01,0x01,0x02,0x01,0xe5,0xe7,0x01,0x02,0x01,0x01,0x02,0x01,0xe7, 0x03,0x02,0x01,0x01,0x02,0x01,0xe5,0xe7,0x01,0x02,0x01,0x01,0x02,0x01,0xe5,0xe7, 0x03,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x01,0xe5,0x05,0x01,0xe5,0xe7,0x01,0x01, 
0xe5,0xe7,0x01,0x02,0x06,0x01,0xe5,0xe6,0x02,0x01,0xe5,0xe5,0xe6,0x02,0xe5,0x03, 0x03,0xe5,0x03,0x03,0xe5,0xe5,0x01,0x0d,0x02,0x02,0xe5,0xe5,0x12,0x04,0x02,0x06, 0x09,0x02,0x01,0x01,0x02,0x02,0x02,0xe5,0x01,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x05,0x02,0x02,0x03, 0x02,0x02,0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 0x02,0x02,0x01,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0x06,0x04,0x0f,0x02,0x02,0xe5, 0x13,0xe5,0x03,0xe5,0x01,0x01,0x03,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x01,0x03,0xe5, 0x01,0x05,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x06,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04, 0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0x07,0xe5,0x01,0x01,0x03,0xe5,0x01,0x14,0xe5,0xe6,0x0a,0x02,0x06,0x02,0xe5,0xe6, 0x01,0x02,0xe5,0x04,0x02,0xe5,0xe6,0x02,0x01,0xe8,0x02,0x01,0xe5,0x05,0x01,0xe7, 0x03,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x07, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x01,0x02,0x02, 0xe5,0x01,0x02,0x02,0x02,0x01,0x01,0x05,0xe5,0xe5,0x01,0x02,0x06,0x02,0x08,0xe5, 0x07,0xe5,0x07,0xe5,0x08,0xe5,0x08,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09, 0x09,0x09,0x09,0x09,0x09,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x08,0x04,0x03, 0xe7,0x08,0x0b,0x05,0x03,0x05,0x03,0x05,0x02,0x02,0x09,0x02,0xe5,0x04,0x02,0x06, 0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x05,0xe5,0x01,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0x06,0x02, 0xe5,0x04,0x02,0xe5,0x02,0x05,0x03,0x05,0x03,0x05,0x01,0x09,0x0a,0x03,0x0c,0xe5, 0xe5,0x05,0x02,0xe5,0xe5,0x05,0xe7,0x05,0xe5,0xe5,0x05,0xe7,0x03,0x02,0xe6,0x03, 0x02,0xe6,0x03,0x02,0x01,0x01,0x02,0x02,0xe6,0x03,0x02,0x01,0x01,0x02,0x02,0x01, 
0x04,0x02,0xe6,0x05,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03, 0x02,0xe6,0xe5,0x01,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0x01,0x06,0xe7,0x05,0xe7, 0x05,0xe7,0x02,0x02,0x0f,0xe7,0x0c,0x01,0xe5,0x05,0x01,0xe6,0x03,0xe5,0xe7,0x04, 0x01,0xe6,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0xe5,0x04,0x01,0xe6,0x03,0xe5,0xe7,0x04,0x01,0xe6,0x04,0x02,0x06,0xe5,0x02, 0x03,0xe7,0x0d,0x04,0x09,0x09,0x01,0x07,0x01,0x04,0x07,0x01,0x02,0x01,0x02,0x01, 0x07,0x09,0x01,0x04,0x02,0x09,0x06,0x02,0x08,0x02,0x09,0x01,0x02,0x04,0x06,0x02, 0x09,0x09,0x09,0x09,0x04,0x09,0x08,0xe5,0x05,0x07,0x12,0x0d,0x01,0x03,0xe5,0xe6, 0x04,0xe5,0xe6,0x01,0x02,0xe5,0xe6,0x04,0x01,0xe5,0x06,0xe6,0x01,0x03,0x01,0xe5, 0x01,0x04,0xe6,0x01,0x03,0x01,0xe5,0x01,0x03,0xe7,0x01,0x03,0x01,0xe5,0x01,0x03, 0xe7,0x01,0xe5,0x03,0xe5,0x03,0xe5,0x01,0xe7,0x01,0x03,0xe7,0x01,0xe5,0x01,0xe7, 0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0x01,0xe5,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01, 0x01,0xe5,0x01,0x03,0xe8,0x01,0x04,0xe6,0x01,0x02,0xe6,0xe5,0x04,0xe6,0xe5,0x06, 0xe7,0x02,0x01,0x01,0x01,0x08,0x04,0x01,0x0a,0x09,0xe5,0x07,0xe5,0x03,0xe5,0xe5, 0x05,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0xe6,0x03,0x02, 0xe5,0x03,0xe5,0x03,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x07,0xe6,0x03,0x03,0x09,0x04,0x04,0x04,0x03,0x0f, 0x02,0x09,0x03,0x01,0x03,0x02,0x03,0x01,0x03,0x05,0x03,0x05,0x02,0xe6,0xe5,0x01, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02, 0x02,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x03,0x04,0x02, 0xe5,0xe6,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0xe5,0xe6,0x01,0x02, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x03,0x02,0xe5,0xe6,0x02,0x01,0x09, 
0x03,0x03,0x01,0x03,0xe5,0x0f,0xe6,0xe5,0x0b,0xe5,0xe5,0x03,0xe6,0x05,0xe7,0x05, 0xe7,0x05,0x01,0x01,0x06,0xe5,0xe5,0x04,0x01,0x01,0x06,0xe5,0xe5,0x04,0x01,0x01, 0x05,0xe6,0xe5,0x04,0x01,0x01,0x05,0xe6,0xe5,0x01,0x04,0x02,0xe5,0x01,0x03,0xe5, 0xe5,0x04,0xe6,0xe5,0x01,0x02,0xe6,0xe5,0x01,0x03,0xe5,0xe5,0x01,0x02,0x01,0x01, 0x02,0x02,0xe6,0xe5,0x01,0x02,0x01,0x01,0x06,0xe5,0x0d,0x02,0xe6,0x06,0xe6,0x02, 0x11,0xe5,0x0c,0x01,0xe5,0x01,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x03,0xe5,0x01, 0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x06,0x02,0x05,0xe5, 0x01,0x05,0xe5,0x03,0x05,0xe5,0x01,0x05,0x03,0x06,0x02,0x05,0xe5,0x01,0x06,0x02, 0x06,0x02,0x09,0x05,0x13,0xe5,0x07,0xe5,0x01,0x02,0x11,0xe5,0x01,0x0d,0x08,0x06, 0xe5,0xe5,0x05,0x02,0x28,0x13,0x1f,0x13,0x09,0x23,0x09,0x02,0x06,0x07,0x01,0x02, 0xe5,0x07,0xe5,0x0b,0x01,0x01,0x0b,0x05,0x01,0x01,0x05,0x01,0x02,0xe5,0x02,0x01, 0xe6,0x04,0x01,0x07,0x01,0x07,0xe6,0xe5,0x04,0x01,0x01,0x04,0xe5,0xe5,0x06,0x01, 0x01,0x05,0x01,0xe5,0x05,0xe6,0x01,0x04,0xe6,0x08,0xe6,0x06,0x01,0x07,0xe6,0x01, 0x04,0x01,0x07,0x01,0x07,0x01,0x01,0x05,0xe6,0x05,0xe5,0xe6,0x01,0x03,0x01,0xe6, 0x01,0x02,0x01,0x01,0x04,0xe5,0xe5,0x06,0x01,0x0a,0x02,0xe5,0x01,0x10,0x03,0x01, 0xe5,0x01,0xe5,0x07,0x03,0xe6,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x04,0x07,0x01,0xe6, 0x03,0xe5,0xe5,0x09,0x05,0x09,0xe5,0xe5,0x05,0x02,0x05,0x02,0xe5,0x02,0x08,0xe5, 0x03,0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe5,0x03,0x03,0x05,0xe5,0x02,0x01, 0x01,0xe5,0x03,0x02,0x02,0x02,0x05,0xe5,0x05,0xe5,0xe5,0x05,0x10,0xe6,0xe6,0x11, 0x09,0x09,0x02,0x0f,0xe5,0x0b,0x01,0x03,0x04,0x07,0x06,0x0a,0x04,0x03,0x10,0x04, 0x03,0x04,0x03,0x01,0x07,0x01,0x02,0x04,0x02,0x05,0x07,0x03,0x01,0x0a,0xe6,0x01, 0x02,0x03,0x01,0x02,0x01,0x07,0xe5,0x02,0x0e,0x06,0xe5,0xe7,0x11,0x04,0x01,0x0c, 0x03,0x02,0x02,0x0d,0x05,0x0f,0x03,0x0b,0x04,0x09,0x09,0x06,0x04,0x0c,0x06,0x09, 0x06,0x02,0x09,0x09,0x0a,0x12,0xe5,0x04,0x12,0xe5,0x06,0x03,0x0d,0xe6,0x08,0x07, 
0xe6,0x03,0x04,0xe5,0x03,0x01,0x0c,0x03,0xe5,0x02,0x03,0x05,0xe5,0x02,0xe5,0x02, 0x05,0x01,0x01,0xe5,0x05,0x01,0x06,0xe8,0x05,0x01,0x01,0x06,0xe5,0xe5,0x05,0xe5, 0xe5,0x05,0xe5,0xe5,0x02,0x01,0xe6,0xe5,0x04,0xe6,0xe5,0x06,0x01,0x07,0x01,0x07, 0xe5,0x07,0x06,0x04,0x06,0x07,0x06,0x01,0xe9,0x07,0x05,0x06,0x01,0x08,0x01,0x04, 0x0c,0x02,0x03,0x07,0x0b,0x01,0x07,0x0a,0x02,0x02,0x09,0x06,0x06,0xe5,0xe5,0xe5, 0xe5,0x08,0x07,0x01,0x03,0x03,0x01,0x02,0x04,0x01,0x07,0x01,0x02,0x01,0x02,0x01, 0x07,0x01,0x05,0x05,0x07,0x10,0x08,0x0c,0x09,0x09,0x08,0x0e,0x01,0x08,0xe5,0x07, 0xe6,0x05,0x03,0x05,0xe6,0x06,0xe6,0x02,0x04,0xe5,0x04,0x01,0xe6,0x08,0x03,0x03, 0x03,0x04,0xe5,0xe5,0x04,0xe6,0x01,0xe5,0x04,0x07,0x05,0x04,0x07,0x05,0xe5,0x02, 0x04,0xe6,0x15,0x03,0x05,0x11,0x01,0x03,0xe5,0x01,0x0d,0x03,0x20,0x01,0x0f,0x0b, 0x09,0x05,0x03,0x01,0x02,0x0e,0x0b,0x01,0x02,0x04,0x09,0x01,0x07,0x0e,0x0e,0x01, 0xe5,0x05,0x0c,0x09,0x09,0x02,0x07,0xe5,0x0a,0x03,0x02,0x10,0x07,0x01,0x07,0x08, 0x0b,0x01,0x07,0x09,0x04,0x07,0x01,0x03,0x09,0x0c,0x01,0x07,0x02,0x04,0xe5,0x04, 0x0d,0x02,0x04,0x01,0x08,0xe5,0x01,0x07,0x08,0x07,0x01,0x0a,0x0d,0x07,0x08,0x06, 0x03,0x01,0xe6,0x07,0xe5,0x0b,0x09,0x12,0xe5,0x05,0x09,0x01,0x08,0x06,0x01,0x06, 0x04,0xe5,0x06,0x01,0x06,0x02,0xe8,0x05,0xe5,0x05,0x06,0x01,0xe5,0xe5,0x04,0x0e, 0x06,0x08,0xe5,0x01,0x04,0x04,0x09,0xe6,0x07,0xe5,0x08,0x01,0x15,0x01,0xe5,0xe6, 0x05,0x04,0x06,0x14,0x01,0x19,0x1d,0x13,0x08,0x03,0xe7,0x04,0x03,0xe5,0x06,0x09, 0xe5,0x07,0x06,0xe5,0xe7,0x03,0x02,0x09,0x0e,0x03,0x02,0x02,0x09,0x0b,0x0d,0x02, 0x02,0x0c,0x1b,0x01,0x06,0xe5,0xe6,0x01,0x02,0x09,0x06,0x02,0xe5,0xe5,0x01,0x09, 0xe5,0x02,0x01,0x05,0xe5,0x04,0xe5,0x04,0x09,0x01,0x03,0x03,0x01,0xe5,0x04,0x10, 0x03,0x01,0x07,0x01,0x03,0x0d,0x01,0x08,0x06,0x02,0xe6,0x0b,0x15,0x02,0xe5,0xe5, 0x0b,0x0b,0xe5,0x07,0xe5,0x05,0x03,0x05,0x0a,0x06,0x01,0x02,0x06,0xe5,0x04,0x0c, 0xe5,0x11,0xe5,0x09,0x0a,0x06,0x01,0x03,0x05,0x09,0xe5,0x0f,0x01,0xe5,0x0b,0x09, 
0x05,0x01,0x09,0x07,0xe5,0x02,0x09,0x03,0x10,0x03,0x17,0x09,0x1d,0x13,0x15,0x0a, 0x06,0x16,0x09,0x2f,0xe5,0x10,0x02,0x0c,0x01,0xe5,0x01,0xe6,0x0d,0xe5,0x01,0x01, 0x04,0x02,0xe5,0x0e,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x09, 0xe5,0x04,0x02,0xe5,0x04,0x04,0x09,0x09,0x09,0x09,0x09,0xe5,0xe6,0x04,0x13,0xe5, 0x07,0x08,0xe5,0x01,0xe5,0x03,0xe5,0x09,0x05,0x02,0x01,0x08,0xe5,0x01,0x02,0xe5, 0x18,0xe5,0x07,0x09,0x06,0x02,0x09,0x02,0x01,0x04,0x09,0x05,0xe5,0x01,0x05,0x03, 0x07,0xe5,0x01,0x06,0x02,0x09,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x02, 0xe5,0x14,0xe5,0xe5,0xe5,0xe5,0xe7,0xe5,0x08,0x03,0xe5,0x0c,0x01,0x02,0x03,0x04, 0x0a,0x02,0x01,0xe5,0xe5,0xe5,0xe5,0x04,0x01,0x0e,0xe5,0xe5,0x05,0xe5,0xe5,0x04, 0xe5,0x01,0x01,0x04,0x01,0x02,0x04,0xe5,0x02,0xe5,0x05,0x10,0xe5,0x05,0x01,0xe5, 0x01,0x0c,0x04,0xe5,0xe5,0x04,0xe5,0x03,0x03,0xe5,0x08,0x01,0xe5,0x04,0xe5,0x03, 0x01,0x05,0x0b,0x09,0x04,0xe5,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0xe6,0x05,0x07,0xe5, 0x01,0x0a,0x06,0x02,0x01,0x04,0xe5,0xe5,0xe5,0x0c,0xe5,0x01,0x06,0x02,0x06,0x02, 0x06,0x02,0x05,0xe5,0x01,0xe5,0xe5,0x02,0xe5,0xe5,0x04,0xe5,0x07,0xe5,0x01,0x05, 0xe5,0x13,0xe5,0x01,0x09,0x13,0x09,0x06,0x02,0x09,0x09,0x06,0x02,0x01,0x07,0x01, 0x0b,0x06,0x05,0x09,0x05,0x06,0x09,0x04,0x01,0x07,0x01,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x01,0xe7,0x03,0x04,0x01,0xe5,0xe5,0x05,0x09,0x02,0xe5,0x01,0x05,0xe5, 0x01,0x04,0x02,0x01,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x01,0x09,0xe5,0x01,0x02, 0x02,0xe5,0x01,0x02,0x02,0x01,0x01,0xe5,0xe5,0x01,0xe5,0xe6,0x01,0x02,0x06,0x02, 0x03,0x02,0x02,0xe5,0xe6,0x13,0x05,0xe5,0x01,0xe5,0x01,0x0a,0x03,0x04,0x04,0x09, 0x05,0x01,0x01,0x05,0x01,0x02,0x06,0x02,0x09,0x06,0x02,0x04,0x01,0x04,0x01,0x02, 0x07,0x04,0x03,0x04,0x07,0x04,0x04,0x09,0x01,0x02,0x09,0x03,0xe5,0x03,0x03,0xe5, 0x03,0x04,0x01,0x02,0x01,0x01,0x02,0x02,0x04,0x03,0x03,0x01,0x03,0x02,0x01,0x04, 0x09,0x09,0x05,0xe6,0x0d,0x05,0x01,0x01,0x01,0x02,0x01,0xe5,0xe5,0xe5,0x01,0x02, 
0x01,0x02,0x01,0x02,0x09,0x01,0x02,0x04,0x01,0x07,0x01,0x02,0x06,0x04,0x02,0x01, 0x07,0x01,0x05,0x01,0x06,0x01,0x02,0x01,0x07,0x01,0x02,0x04,0x09,0x01,0x05,0x01, 0x01,0x02,0x02,0x01,0x0b,0x02,0x04,0x01,0x04,0x04,0x01,0x07,0x07,0x01,0x07,0x01, 0x0d,0xe5,0xe7,0x0c,0x09,0x01,0x02,0x01,0x02,0x01,0x02,0x02,0x01,0x01,0x02,0x01, 0xe5,0xe5,0x03,0x04,0x01,0x02,0x04,0x04,0x04,0x01,0x02,0x04,0x03,0xe5,0xe6,0xe5, 0xe5,0x01,0x04,0x01,0x02,0x04,0x03,0xe5,0xe5,0x03,0x01,0x02,0x04,0x04,0x04,0x04, 0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x04,0x04,0x04,0x04,0x01,0x02,0x02,0x01,0x04, 0x04,0x03,0xe5,0x03,0x01,0x07,0x01,0x0e,0x02,0xe5,0x19,0x04,0x04,0x0e,0x11,0x02, 0x06,0x02,0x06,0x02,0x06,0xe6,0x10,0x02,0x03,0x04,0x13,0x02,0x2e,0xe5,0x07,0x02, 0x03,0x09,0x20,0xe6,0xe6,0x13,0xe5,0x03,0xe5,0x07,0xe5,0x08,0x16,0x09,0xe5,0x07, 0x05,0xe5,0x01,0xe5,0x04,0x12,0xe5,0x14,0x02,0x13,0x13,0xe5,0x07,0x09,0x05,0xe5, 0x07,0xe5,0x1c,0x02,0x03,0xe5,0x0a,0x02,0x04,0x01,0x02,0xe6,0x06,0xe6,0x06,0x06, 0x02,0x09,0x05,0x03,0x09,0x05,0x03,0xe5,0x07,0x09,0x06,0x02,0xe5,0x04,0x04,0x09, 0x05,0x03,0x09,0x03,0x02,0x02,0x06,0x02,0x06,0x02,0x03,0x05,0x03,0x05,0xe6,0x06, 0xe5,0x07,0x09,0x0d,0x01,0xe7,0x01,0x06,0x02,0x09,0x13,0x06,0x02,0x09,0x27,0x09, 0x06,0x09,0x18,0x09,0x06,0x0b,0x07,0x43,0x02,0xe5,0x01,0x09,0x0a,0x12,0x07,0x02, 0x09,0x26,0x09,0x11,0x06,0x11,0x0a,0x41,0x20,0xe5,0xe6,0x0c,0x01,0x09,0xe7,0x03, 0x01,0xe5,0xe5,0x05,0x02,0x06,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x01,0x04,0x02, 0x04,0x01,0xe7,0x05,0x02,0x06,0x02,0x04,0x01,0xe7,0x07,0x02,0x01,0x02,0x01,0x02, 0x01,0x04,0x02,0x06,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x06,0x02,0x04,0x01,0x02, 0x01,0x05,0xe6,0x05,0xe5,0xe5,0x0f,0x0e,0xe5,0x01,0x0d,0x01,0xe5,0x04,0xe5,0x01, 0xe5,0x03,0xe5,0xe7,0x01,0x02,0x03,0x02,0x02,0x03,0x05,0x01,0x01,0x05,0x01,0x01, 0x05,0x03,0x04,0xe5,0xe7,0xe5,0x02,0x03,0x01,0xe5,0x01,0x03,0x05,0x02,0xe5,0x06, 0x03,0x05,0x01,0x01,0x01,0x03,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x07,0x01, 
0x05,0x01,0x01,0x05,0x01,0x01,0x09,0x04,0x02,0xe6,0x10,0xe5,0x0c,0x01,0x01,0x0d, 0x01,0x09,0x01,0x05,0x01,0x09,0x03,0x05,0x04,0x18,0x0b,0x07,0x09,0x03,0x02,0x02, 0x01,0x02,0x03,0x02,0x09,0x04,0x04,0x03,0xe5,0x08,0x09,0x04,0x04,0x09,0x18,0x13, 0x03,0x0b,0xe8,0x0d,0x05,0xe6,0x06,0xe6,0xe5,0x04,0xe6,0x04,0xe8,0x04,0xe7,0x05, 0xe7,0x05,0xe7,0x01,0x03,0xe7,0x06,0xe7,0x05,0xe7,0x04,0xe7,0x07,0xe5,0x01,0x05, 0xe7,0x01,0x04,0xe7,0xe5,0x02,0xe8,0x04,0x01,0xe5,0x01,0x03,0xe8,0x04,0x01,0xe5, 0x01,0x03,0x01,0xe5,0x05,0xe7,0x02,0x04,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0x05, 0xe5,0x06,0x01,0xe6,0x0c,0x01,0x09,0xe5,0x11,0xe6,0x06,0x09,0x09,0x09,0x13,0x07, 0x01,0x0a,0x0a,0x1d,0x09,0x09,0x09,0x09,0x0a,0x08,0x13,0x0f,0xe7,0x0c,0x01,0x07, 0x04,0x09,0x06,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x02,0x02, 0xe5,0x01,0xe5,0x03,0xe5,0x01,0x05,0x01,0x01,0x02,0x02,0x01,0x01,0xe5,0xe5,0x01, 0xe5,0x01,0x03,0x01,0x03,0x04,0x02,0x03,0x05,0x03,0x02,0x02,0xe5,0x01,0xe5,0xe5, 0x01,0x03,0xe5,0xe5,0x01,0xe5,0x01,0x02,0x02,0x03,0xe5,0xe5,0x01,0xe5,0x04,0x02, 0x01,0x01,0x05,0x06,0x02,0x10,0x0f,0x01,0xe6,0x0b,0x01,0x09,0xe5,0x11,0x04,0xe5, 0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x08,0x03,0x05,0x08, 0xe5,0x0b,0x02,0x03,0xe6,0x02,0x04,0xe5,0x07,0xe5,0x06,0x01,0x08,0xe5,0x02,0x03, 0x01,0x09,0x03,0x03,0x01,0x04,0x08,0x13,0x11,0xe5,0x02,0x09,0x01,0xe5,0x05,0x15, 0x03,0x01,0xe5,0x01,0x03,0x01,0xe6,0x04,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03, 0x01,0xe5,0x05,0xe7,0x01,0x03,0xe7,0x05,0x01,0xe5,0x06,0x01,0x04,0x04,0xe6,0x01, 0x03,0x01,0xe5,0x01,0x03,0xe8,0x05,0xe6,0x01,0x03,0xe8,0x05,0xe6,0x01,0x03,0x01, 0x03,0x04,0xe6,0x0b,0x13,0x0e,0x02,0xe5,0x01,0x2f,0x02,0x02,0x08,0xe5,0x07,0xe5, 0x1b,0xe5,0x0f,0x02,0x12,0x09,0x01,0xe5,0x1c,0x0b,0x04,0xe5,0xe5,0x31,0x02,0x03, 0x11,0x01,0x07,0x01,0x07,0xe6,0xe5,0x04,0xe6,0x06,0xe6,0xe5,0x04,0xe6,0x06,0xe6, 0x06,0xe6,0x06,0x01,0x07,0x01,0x07,0x01,0x05,0x01,0x01,0x01,0x07,0x01,0x03,0x03, 
0xe6,0xe5,0x04,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x02,0x01, 0x02,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01,0x01,0x09,0xe7,0x0f, 0x08,0xe5,0x01,0x05,0xe7,0xe5,0x03,0xe5,0xe5,0x03,0xe7,0x07,0xe6,0x06,0xe6,0x06, 0xe6,0x06,0xe5,0x01,0x05,0xe5,0x03,0x03,0xe5,0x08,0x03,0x03,0x02,0xe5,0x07,0xe6, 0x01,0xe5,0x01,0xe6,0xe5,0x05,0xe6,0x06,0xe5,0x05,0x01,0xe5,0x1b,0xe5,0x01,0x01, 0x03,0xe5,0x02,0x1a,0x02,0x02,0xe6,0x09,0x02,0x02,0x05,0x08,0xe5,0x03,0x0b,0x0b, 0x11,0x0b,0x12,0x0c,0x07,0x03,0x06,0x01,0x08,0xe5,0xe5,0x01,0x01,0x02,0x09,0x01, 0x02,0x01,0xe5,0xe5,0x06,0x06,0x02,0x0b,0x01,0xe5,0x06,0xe5,0x04,0x09,0x05,0x0b, 0x02,0x01,0x01,0x11,0x02,0x02,0x06,0x02,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x07,0x01,0x04,0x02,0x01,0x04,0x01,0xe5,0xe5,0x05,0xe5,0xe5,0x01,0x09,0x03,0x04, 0x09,0x05,0xe5,0x0b,0x06,0x09,0x02,0xe5,0x02,0xe5,0x09,0x05,0x03,0x09,0x04,0x0c, 0x06,0x0e,0x01,0x0d,0xe6,0x02,0x08,0x04,0x07,0x01,0x01,0x07,0x01,0x07,0x01,0x04, 0x02,0x01,0x11,0xe6,0x06,0x01,0x07,0x01,0xe5,0x03,0x01,0x01,0x07,0x01,0x01,0x04, 0x08,0x05,0x0a,0x13,0x12,0xe5,0x08,0x1d,0x04,0x03,0xe5,0x02,0xe5,0xe6,0x08,0x04, 0x02,0x03,0x09,0x04,0x08,0x09,0x03,0x03,0x02,0x06,0x02,0x02,0x01,0x11,0x01,0x01, 0x02,0x02,0x03,0x02,0x02,0x0b,0x01,0xe6,0xe5,0x02,0x02,0x08,0x01,0x05,0x0a,0xe5, 0x08,0x08,0x09,0x03,0x04,0x01,0x07,0xe6,0x10,0x09,0x09,0xe7,0xe5,0x0f,0x09,0x10, 0x01,0x03,0x03,0x01,0x01,0x07,0xe5,0x07,0x09,0x07,0x01,0x09,0x09,0x02,0x01,0x04, 0x03,0xe5,0x01,0x03,0x09,0x09,0x05,0x03,0x02,0x06,0x09,0xe5,0x07,0x03,0x2d,0x0d, 0xe5,0xe5,0xe5,0x0b,0x0e,0x10,0x01,0x07,0x05,0x03,0x09,0x09,0x01,0x04,0x02,0x09, 0x03,0x05,0x09,0x04,0x01,0x04,0x03,0x05,0x09,0x09,0x09,0x03,0x02,0x02,0x09,0x0d, 0x11,0x0c,0x02,0xe7,0xe5,0x0a,0xe9,0xe5,0x06,0xe5,0x06,0x0b,0x06,0x04,0x06,0x1d, 0x0c,0x1b,0x0a,0x01,0x0a,0x09,0x0e,0x03,0x0f,0x1d,0x09,0x13,0x08,0x03,0xe5,0x01, 0x01,0x06,0x0f,0x04,0x02,0xe6,0x03,0x0c,0x05,0x03,0x09,0x08,0xe5,0x09,0xe6,0xe5, 
0x04,0xe7,0x05,0xe6,0x13,0x03,0x22,0xe6,0x02,0x08,0x03,0x1e,0xe6,0x16,0x09,0xe9, 0x02,0x11,0x03,0x07,0x02,0x0e,0x12,0x09,0x0d,0x09,0x09,0x12,0x02,0x25,0x02,0x05, 0x14,0x11,0x05,0x03,0x1f,0xe6,0x09,0x09,0x09,0x01,0x02,0x03,0xe5,0x03,0x04,0x04, 0x03,0x05,0x06,0x02,0x06,0x02,0x04,0x04,0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x02, 0x04,0x06,0x01,0x01,0x05,0x02,0x03,0x02,0x03,0x05,0x09,0x03,0xe5,0xe5,0x01,0x04, 0x04,0x04,0x0b,0x02,0x19,0x0e,0x03,0x03,0xe5,0x0e,0x0b,0x05,0xe5,0xe6,0x02,0x01, 0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x05,0xe5,0x01,0xe5,0x03, 0xe5,0x01,0x05,0xe5,0x01,0x05,0xe6,0x01,0x04,0xe6,0x01,0x02,0x01,0x01,0xe5,0x07, 0xe6,0x02,0x03,0xe5,0x01,0x03,0x01,0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe6,0x06,0xe5, 0x0a,0x07,0x06,0x04,0x09,0x12,0x03,0x01,0xe6,0x1a,0x07,0x08,0x02,0x06,0x02,0x06, 0x02,0x06,0x09,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x0b,0x09,0x09,0x09,0x09, 0x03,0xe5,0x03,0x09,0x02,0x09,0x08,0x0a,0x20,0x01,0xe6,0x0c,0x06,0xe5,0xe5,0x02, 0x09,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x05,0x06,0xe5,0xe5,0x02,0x02,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5, 0x05,0xe5,0x02,0x03,0x01,0xe5,0x01,0x09,0x06,0xe5,0xe5,0xe5,0xe5,0x01,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0x02,0x01,0xe5,0x05,0x02,0xe5,0x0d,0xe5,0x08,0x05,0x0c,0xe5, 0x09,0x02,0xe5,0xe6,0xe5,0xe5,0x07,0x09,0x04,0x04,0xe5,0xe5,0x07,0x02,0x06,0x01, 0x02,0x01,0x02,0x09,0x01,0xe5,0x05,0x01,0x02,0x01,0x02,0xe5,0xe5,0x05,0xe5,0x02, 0x01,0x02,0xe5,0x02,0x01,0x02,0x02,0x08,0xe5,0xe5,0x05,0x03,0xe5,0x03,0x02,0x01, 0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x03,0xe5,0x02,0xe5,0x01,0xe6,0x0f,0x02, 0x01,0xe6,0x07,0x06,0x01,0x0c,0x01,0x04,0x01,0x04,0x08,0x09,0x03,0x07,0x04,0x04, 0x04,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x02, 0x01,0x04,0x02,0x01,0x04,0x04,0x06,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x01,0xe5, 0xe5,0x03,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x01,0x03,0x05,0x01,0x04,0x01,0xe5, 
0xe5,0x08,0x01,0x05,0x0d,0x02,0xe5,0x02,0x04,0x02,0xe5,0xe6,0x09,0x09,0x09,0x06, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x08, 0x01,0x07,0x01,0x07,0x09,0x10,0xe5,0x04,0x02,0xe9,0xe5,0x09,0x09,0x04,0x06,0x02, 0x06,0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0xe5,0x01,0x02, 0x02,0x01,0x01,0x02,0x02,0x06,0x02,0x01,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x08, 0x02,0x06,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x06,0x02,0x04,0x04,0x09, 0x10,0x02,0xe5,0x01,0xe5,0x08,0x01,0x0d,0x01,0xe5,0xe5,0x08,0x02,0xe5,0x02,0x05, 0x03,0x05,0x03,0x05,0x03,0x02,0xe7,0x02,0x05,0x03,0x04,0xe5,0x02,0x05,0x03,0x04, 0xe5,0x02,0x05,0x03,0x04,0xe5,0x02,0x05,0x03,0x01,0x02,0x02,0x03,0x05,0x03,0x02, 0xe7,0x02,0x02,0xe7,0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x09,0x09, 0x05,0x03,0x02,0x01,0xe5,0x02,0x05,0x01,0x01,0x09,0x02,0x01,0xe6,0x0c,0x09,0x03, 0xe5,0xe5,0x03,0x01,0x02,0x01,0xe5,0x02,0x02,0x02,0x03,0x05,0x03,0x02,0xe7,0x02, 0x05,0x03,0x01,0xe6,0xe5,0x08,0x03,0x03,0x01,0x03,0x05,0x06,0x04,0x04,0x04,0x09, 0x03,0x02,0x02,0x06,0xe5,0xe5,0x06,0x01,0x06,0xe5,0x02,0x02,0x01,0x02,0x01,0x03, 0xe5,0x01,0x01,0x04,0x04,0x01,0x05,0x09,0x01,0x0d,0x02,0xe6,0x01,0x0a,0x09,0x04, 0x02,0x01,0x04,0x01,0x01,0x02,0x02,0x02,0x03,0x02,0x06,0x02,0xe6,0x03,0x02,0x06, 0x02,0x01,0x04,0x01,0xe5,0x05,0x02,0x06,0x02,0x06,0x01,0xe5,0xe7,0x03,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x02,0x01,0xe6,0x01,0x01,0xe8,0x02,0x01,0xe5,0x05,0x01,0xe8, 0x02,0x01,0xe8,0xe5,0xe5,0xe6,0x03,0x01,0x04,0xe5,0xe5,0x03,0x04,0x09,0x01,0x0b, 0xe5,0x02,0x01,0xe5,0xe5,0x16,0x09,0x06,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 
0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x06,0x0c,0x01,0x04,0x20,0x01,0xe5,0xe5,0x19, 0xe5,0x07,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x06, 0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x04,0xe5,0x0d,0x03,0xe5,0x20,0xe8,0x0d,0x02,0x06,0xe6,0x06, 0xe6,0x01,0x01,0xe5,0xe7,0x02,0x01,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04, 0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x07,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x07,0xe5,0x04,0x02,0x02,0x06,0xe6,0x06,0x09,0x0d,0x01,0x01,0xe5, 0x29,0xe5,0x08,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09, 0x09,0x0f,0x02,0x2c,0x02,0x01,0x01,0x29,0x01,0x09,0x02,0x06,0x02,0x06,0x02,0x06, 0x02,0x06,0x02,0xe5,0x04,0x02,0x06,0x02,0x06,0x02,0xe5,0x05,0xe5,0x01,0xe5,0x04, 0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x08,0x01,0x32,0xe5,0xe6,0x0c,0x01,0x0a,0xe6,0x05,0xe5,0xe5,0x05,0xe7,0x03,0x02, 0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6, 0x03,0x02,0xe6,0x03,0x02,0xe6,0x05,0x02,0xe6,0x03,0x02,0x01,0x04,0x02,0xe6,0x03, 0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x01,0x01,0xe5,0xe6,0xe5, 0x03,0x01,0x07,0x01,0xe5,0xe5,0xe5,0x1d,0x01,0xe5,0x0c,0x01,0xe5,0x09,0x07,0xe6, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x21,0x01,0xe5,0x0a,0xe5,0xe5, 0x0a,0x07,0x13,0x09,0x09,0x09,0x09,0x03,0xe5,0x03,0x09,0x09,0x08,0x02,0x09,0x03, 0x05,0x03,0x05,0x04,0x04,0x09,0x04,0x04,0x07,0x01,0x03,0x03,0x01,0x06,0xe5,0xe5, 
0x22,0x01,0xe5,0x08,0x03,0x01,0x05,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x02,0x02,0xe7, 0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7, 0x01,0xe5,0x01,0xe7,0x01,0xe5,0x02,0xe6,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x02,0xe6, 0x03,0xe5,0x02,0xe6,0x01,0x03,0x01,0xe5,0x01,0xe5,0x01,0xe7,0x01,0x04,0xe6,0x01, 0x04,0xe6,0x01,0x03,0xe7,0x01,0x05,0xe6,0xe5,0x02,0x01,0xe6,0x06,0xe6,0xe5,0x04, 0xe5,0x07,0xe5,0x07,0xe5,0x06,0xe5,0x01,0x1a,0x08,0xe6,0x04,0x01,0xe5,0x04,0x02, 0xe6,0x03,0x02,0xe5,0x04,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x04,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02, 0xe6,0x03,0x02,0xe5,0x04,0x02,0xe6,0x03,0x02,0xe6,0x0d,0xe5,0xe7,0x07,0x09,0x09, 0x0d,0xe6,0xe5,0x19,0x01,0x06,0x02,0x04,0x01,0x02,0xe5,0xe6,0x01,0x02,0xe5,0xe6, 0x01,0x02,0xe5,0xe6,0x01,0x02,0xe5,0xe6,0x01,0x02,0xe5,0xe6,0x01,0x02,0x01,0xe6, 0x01,0x02,0x02,0xe5,0x01,0x02,0xe5,0xe6,0x01,0x02,0x03,0x04,0x02,0x01,0xe6,0x01, 0x02,0xe5,0xe6,0x01,0x02,0x01,0xe6,0x01,0x02,0x01,0xe6,0x01,0xe5,0xe5,0x01,0xe5, 0x01,0x02,0x01,0xe6,0x01,0x02,0x02,0x06,0x02,0xe6,0xe5,0x09,0x01,0x18,0xe5,0x03, 0xe5,0xe5,0xe6,0x21,0x07,0x02,0x03,0xe5,0xe5,0x05,0xe5,0xe5,0x01,0x03,0xe5,0xe5, 0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x01,0x02,0x01,0x01,0x02,0x03,0xe5,0xe5,0x01,0x03, 0xe5,0xe5,0x01,0x03,0x03,0xe5,0x01,0x04,0x01,0x07,0x01,0x02,0x02,0x01,0x01,0x07, 0x01,0x06,0xe5,0xe5,0x04,0x01,0x01,0x07,0x07,0x01,0x01,0x01,0x14,0x09,0x06,0x06, 0xe5,0x01,0x03,0x1f,0x09,0x03,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05, 0xe5,0x01,0x05,0x03,0x05,0x03,0x0b,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01, 0x05,0xe5,0x01,0x05,0x03,0x05,0xe5,0x01,0x05,0x09,0xe5,0x01,0x01,0xe5,0x12,0x09, 0x07,0x03,0x01,0x01,0xe6,0x0c,0x01,0xe5,0x05,0xe5,0xe5,0x3b,0x13,0x13,0x15,0x17, 0x0f,0x05,0x02,0x0a,0x1b,0xe5,0x16,0xe8,0x0d,0x02,0x01,0x01,0x01,0x03,0xe6,0x06, 0x01,0x02,0x04,0x01,0x01,0x05,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0x01,0x01,0x05,0xe6, 
0x06,0x01,0xe6,0x04,0xe6,0x06,0xe6,0xe5,0x04,0x01,0x02,0x06,0x01,0x01,0x05,0xe6, 0x06,0x01,0x02,0x04,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x01,0x02,0x01,0x02,0x04, 0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x03,0x03,0x02,0xe5,0xe6, 0x1a,0x04,0x02,0x03,0x05,0xe5,0x01,0xe5,0x03,0x09,0xe5,0xe5,0x06,0x09,0x08,0xe5, 0x01,0x05,0xe5,0x07,0xe5,0xe5,0xe6,0x02,0xe5,0x06,0x02,0xe5,0x01,0xe5,0x03,0xe5, 0x04,0x02,0xe5,0x01,0xe5,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x03,0x01,0xe5,0x04, 0x05,0x03,0x04,0x07,0x01,0x07,0xe5,0x02,0x04,0xe5,0x0e,0x01,0xe5,0x0a,0x09,0x07, 0x04,0x04,0x03,0x0c,0x06,0x07,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0xe5,0xe5,0x03, 0xe5,0x02,0x04,0x01,0xe5,0xe5,0x03,0xe5,0x03,0x02,0x04,0xe7,0x03,0xe5,0x09,0xe5, 0x04,0x02,0xe5,0x05,0xe5,0xe5,0x07,0x08,0x01,0xe5,0x20,0x03,0x11,0x06,0xe9,0xe5, 0x18,0x0c,0x01,0x02,0x06,0x02,0x05,0xe5,0xe5,0x02,0xe5,0x0b,0x05,0x10,0x02,0x03, 0x05,0x03,0x02,0x06,0x0e,0x14,0x03,0x05,0x07,0x05,0x08,0x08,0x07,0x03,0x03,0x09, 0x08,0x01,0x0f,0x01,0xe5,0x0c,0x01,0x0d,0x01,0x0b,0x01,0x01,0x05,0x01,0x07,0xe6, 0x03,0x04,0x02,0x04,0x01,0x11,0x01,0x07,0x01,0x09,0x02,0x01,0x02,0x01,0x01,0x05, 0x01,0x07,0xe8,0x01,0x04,0x01,0xe5,0x03,0x01,0x0b,0xe5,0x05,0x08,0xe7,0x08,0x02, 0xe5,0x04,0xe5,0x23,0xe6,0x0c,0xe5,0x04,0x0a,0x0a,0x01,0x03,0x03,0x01,0x19,0x01, 0x01,0x03,0x01,0xe5,0x03,0x01,0x01,0x01,0x01,0x05,0x01,0x01,0x05,0x0b,0x01,0x01, 0x03,0x02,0x02,0x02,0x01,0x08,0xe5,0x03,0x04,0x05,0x07,0x0b,0x07,0x06,0x09,0x01, 0x06,0x06,0x01,0x01,0x06,0x02,0x09,0xe5,0xe6,0x02,0x04,0x23,0x08,0xe5,0x06,0xe6, 0x06,0xe6,0x03,0x01,0x01,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x02,0xe5,0x02,0xe5,0x02, 0xe5,0x01,0xe6,0x02,0x03,0xe5,0xe6,0x07,0xe5,0x02,0x03,0xe6,0x04,0x01,0x01,0x03, 0x04,0xe5,0x02,0x04,0xe6,0x0b,0x0a,0x02,0x0a,0xe5,0x21,0x01,0x01,0xe5,0x01,0x0c, 0x09,0x0d,0x0c,0x01,0x07,0x01,0x02,0x02,0x01,0x04,0xe5,0x02,0x01,0x07,0x07,0x01, 0x01,0x07,0x01,0x07,0x0b,0x04,0x04,0x01,0x07,0x06,0x02,0x09,0x01,0x07,0x01,0x0d, 
0x08,0x09,0x09,0x08,0x08,0x11,0x0a,0x07,0x09,0x01,0x07,0x07,0xe5,0x09,0x0b,0x01, 0x02,0x02,0x10,0x06,0x0f,0x01,0x04,0x01,0x05,0xe5,0x01,0x02,0x03,0x04,0x07,0x01, 0x15,0x26,0x03,0x08,0x05,0x01,0x14,0xe9,0x0c,0x12,0xe5,0x08,0x02,0x05,0xe5,0x04, 0x02,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x07,0x01,0x05,0x01,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0x11,0x07,0x05,0xe6,0x03,0x01,0xe6,0x01,0xe6,0x01,0xe6,0x01,0xe5,0xe5,0xe5, 0x03,0x01,0x02,0x06,0x02,0xe6,0x01,0x1f,0x07,0x04,0x01,0x03,0x0b,0x13,0x01,0x02, 0xe5,0x02,0x04,0x04,0x09,0x01,0x07,0x04,0x04,0x0e,0x04,0x04,0x04,0x09,0x01,0x02, 0x02,0x0f,0x11,0x0a,0x09,0x17,0x02,0x01,0x2c,0xe5,0xe7,0x06,0x19,0x0a,0x02,0x03, 0xe7,0x06,0xe6,0x03,0x02,0xe6,0x05,0x01,0xe5,0x09,0x06,0xe6,0x05,0xe8,0x04,0xe7, 0x05,0x04,0xe5,0x05,0x01,0x06,0x03,0x08,0x06,0x01,0x01,0x03,0xe6,0xe5,0x03,0x09, 0xe5,0x07,0x02,0x16,0x02,0x08,0x0c,0x01,0x01,0x01,0x22,0x02,0x04,0xe5,0x02,0x0c, 0x02,0x0d,0x04,0xe5,0x0c,0x19,0x03,0x06,0x04,0x05,0x07,0x01,0x03,0x05,0x01,0x1e, 0x04,0x01,0x0c,0x08,0xe5,0x05,0x0a,0x0c,0xe5,0xe5,0xe5,0x04,0x27,0x13,0x06,0x09, 0x01,0x28,0x01,0x13,0x14,0x02,0x31,0x09,0x06,0x0f,0x04,0x03,0x01,0xe5,0x02,0x0c, 0xe5,0x04,0x01,0xe5,0xe5,0x06,0xe5,0x08,0x09,0xe5,0x07,0x09,0xe5,0x07,0x07,0x0b, 0x07,0x01,0xe5,0x07,0x0c,0x01,0xe5,0x02,0x01,0x0a,0xe6,0x05,0x09,0x09,0x09,0x02, 0x01,0x04,0x02,0xe5,0x02,0x01,0x01,0xe5,0x01,0x04,0x01,0x01,0x03,0xe5,0x13,0x04, 0xe6,0xe5,0x0c,0x06,0x0c,0x10,0x02,0x09,0x09,0x06,0x02,0x13,0x09,0x06,0x02,0x0f, 0xe5,0xe5,0x12,0x03,0x05,0x02,0x09,0x06,0x03,0x02,0x01,0xe5,0x0b,0x06,0xe5,0x04, 0xe5,0x0a,0xe5,0x02,0x06,0x01,0x02,0x03,0x02,0x14,0x17,0x0c,0xe5,0xe5,0x04,0xe5, 0x01,0x10,0xe5,0x02,0xe5,0x07,0x07,0x17,0x0b,0x02,0xe5,0xe5,0x05,0xe5,0x14,0x0b, 0x03,0xe5,0x01,0x03,0x02,0xe5,0xe5,0xe5,0x01,0x04,0x01,0x0b,0x07,0x09,0x04,0xe5, 0xe7,0x03,0x11,0x1d,0x06,0x02,0x06,0x02,0x09,0x05,0xe5,0x01,0xe5,0x01,0x01,0xe5, 0x01,0x06,0xe5,0xe5,0x08,0x05,0xe5,0x03,0x09,0x09,0x05,0xe5,0x01,0x09,0x06,0x02, 
0x09,0x01,0x03,0xe5,0x01,0x01,0x04,0xe5,0xe5,0xe5,0x06,0x13,0x05,0xe5,0x05,0x04, 0xe7,0x15,0x13,0x01,0x09,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x06,0x02,0xe5,0x01, 0xe5,0xe6,0xe5,0x03,0x01,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x06,0x01,0x02,0x06, 0x04,0x01,0x02,0x02,0x09,0x01,0x01,0x02,0x05,0x03,0x17,0x02,0x05,0x04,0x01,0x02, 0x03,0x0c,0x04,0x01,0x06,0xe5,0xe5,0xe6,0xe5,0x13,0x03,0x0f,0x01,0x01,0x05,0x01, 0x02,0x03,0x02,0x02,0x06,0x02,0x04,0x01,0x02,0x01,0x01,0xe5,0xe5,0x01,0x01,0x01, 0xe5,0xe5,0x01,0x06,0x02,0x04,0x01,0x02,0x01,0x01,0x02,0x01,0x02,0x01,0x01,0xe5, 0xe5,0x05,0x02,0x02,0x08,0x04,0xe5,0xe5,0x08,0x07,0x08,0xe5,0x0c,0x05,0x01,0x01, 0x03,0x0b,0x03,0x04,0x04,0x02,0x03,0x01,0x15,0x0e,0x04,0x01,0x09,0x05,0x01,0x01, 0x02,0x04,0x01,0x09,0x02,0x06,0x02,0x01,0x04,0x02,0x04,0x01,0x09,0x0b,0x04,0x04, 0x05,0x01,0x01,0x04,0x02,0x04,0x02,0x03,0x07,0x01,0x02,0x06,0x02,0x02,0x01,0x09, 0x01,0x02,0x04,0x01,0x04,0x0e,0x05,0x07,0x01,0xe5,0xe6,0x15,0x06,0x0c,0x01,0x02, 0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x04,0x02,0x01, 0x04,0x04,0x01,0x02,0x04,0x01,0x02,0x06,0x04,0x01,0xe5,0xe5,0xe5,0x01,0x04,0x01, 0x02,0x01,0xe5,0xe5,0x02,0xe5,0x03,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04, 0x01,0x02,0x04,0x01,0x01,0xe5,0x03,0x01,0x01,0xe5,0xe6,0x0a,0x0d,0x02,0x02,0xe5, 0x1e,0x2f,0x02,0x06,0x02,0x06,0xe5,0x29,0x07,0xe5,0x09,0x04,0x02,0xe5,0x11,0x09, 0xe5,0x07,0x02,0x03,0x09,0x02,0x01,0x19,0x01,0xe5,0xe7,0x2e,0x20,0xe5,0x07,0x05, 0xe5,0x01,0xe5,0x2e,0x12,0xe5,0x01,0xe5,0x04,0x09,0x04,0x04,0x02,0x06,0x02,0x05, 0xe5,0x07,0xe5,0x01,0x01,0x07,0x11,0x05,0xe5,0x0d,0x09,0x04,0x01,0x02,0x09,0x06, 0x02,0x09,0x09,0x06,0x02,0x04,0xe6,0x01,0xe6,0x06,0x09,0x06,0x02,0x04,0x01,0x04, 0x09,0x04,0x01,0x02,0x09,0xe5,0x07,0x04,0x01,0x02,0x04,0x01,0x02,0x03,0xe5,0xe5, 0x01,0x03,0x05,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0x03,0x05,0x04,0x01,0x06,0x01, 0x01,0xe5,0x1f,0x09,0x06,0x02,0x09,0x10,0x0c,0x09,0x09,0x08,0xe5,0x08,0x15,0x13, 
0x09,0x09,0x09,0x12,0xe5,0x05,0x02,0x13,0x04,0x05,0xe5,0x1c,0x09,0x07,0x02,0x09, 0x1e,0x07,0x09,0x09,0x0b,0x02,0x12,0x0f,0x02,0x0a,0x09,0x09,0x0f,0x03,0x05,0x02, 0x12,0x0a,0xe8,0x22,0x04,0x04,0x02,0x01,0x04,0x02,0x06,0x02,0x01,0x02,0x01,0x02, 0x01,0x04,0x02,0x06,0xe7,0xe5,0x03,0x02,0x06,0x02,0x06,0x02,0x01,0x04,0x01,0x02, 0x04,0x01,0x02,0x06,0x02,0x06,0xe7,0xe5,0x01,0x01,0x02,0x01,0x02,0x01,0x02,0x01, 0x02,0x01,0x02,0x01,0x04,0x02,0x04,0x01,0xe7,0x03,0x01,0xe5,0xe5,0x03,0x01,0x1a, 0x01,0x14,0x09,0x02,0x02,0x06,0x03,0x05,0x03,0x05,0x03,0x05,0x01,0x01,0x07,0x01, 0x02,0x01,0xe5,0x01,0xe5,0x04,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x09,0x01,0x05, 0x01,0x01,0x01,0x03,0x01,0x01,0x02,0x02,0x01,0xe6,0x04,0x01,0x01,0x01,0x03,0x01, 0x01,0x01,0x03,0x01,0x01,0x01,0xe5,0x01,0x01,0x01,0x01,0x03,0x01,0xe6,0x03,0xe5, 0xe7,0x01,0x02,0x02,0x19,0xe7,0x11,0x0f,0x04,0x04,0x04,0x04,0x09,0x0e,0x04,0x04, 0x05,0x03,0x04,0x04,0x04,0x09,0x04,0x03,0xe5,0xe5,0x03,0x04,0x09,0x35,0xe5,0x01, 0x01,0x04,0x0c,0x01,0x18,0x03,0x15,0xe5,0x07,0xe6,0x06,0xe6,0x04,0xe8,0x04,0xe8, 0x04,0xe7,0x05,0xe8,0x04,0xe7,0x05,0xe8,0x04,0xe8,0x04,0xe8,0x05,0xe6,0x03,0x04, 0xe6,0x01,0x03,0xe8,0xe5,0x03,0xe7,0x01,0x03,0xe6,0x05,0x01,0xe5,0x05,0xe7,0x05, 0x01,0xe5,0x01,0x03,0xe8,0xe6,0x03,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07, 0xe5,0xe5,0x19,0x08,0xe5,0x07,0xe5,0x07,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b, 0x1e,0x08,0x09,0x09,0x09,0x0a,0x08,0x07,0x01,0xe5,0x17,0xe7,0x20,0x0b,0xe5,0xe6, 0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5, 0x01,0xe5,0x03,0x03,0x02,0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x01,0x01,0x07, 0x03,0x05,0xe5,0x01,0x02,0x02,0x01,0x01,0x03,0x01,0x03,0xe5,0x03,0x03,0xe5,0x03, 0xe5,0x01,0xe5,0x03,0x04,0x01,0x02,0xe5,0x01,0x05,0x04,0x04,0x04,0x01,0x19,0x01, 0xe6,0x18,0x08,0xe5,0x07,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5, 0x02,0x04,0xe5,0x06,0xe6,0x02,0x03,0xe6,0x02,0x04,0xe5,0x02,0x08,0x02,0x04,0xe5, 
0x02,0x04,0xe5,0x08,0x08,0xe5,0x02,0x03,0x01,0x03,0x04,0xe5,0x02,0x03,0x01,0x08, 0xe5,0x03,0x08,0x07,0x01,0xe5,0x16,0x01,0xe6,0x02,0x16,0x06,0x02,0x08,0x03,0x01, 0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x01,0xe6,0x04,0x01,0xe5, 0x05,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x04,0x01,0x08,0x01, 0xe5,0x01,0x03,0xe7,0x01,0x03,0xe7,0x05,0xe8,0x05,0xe7,0x04,0xe8,0x06,0x03,0x03, 0x01,0xe5,0x09,0x09,0x01,0xe5,0x17,0x01,0xe5,0x16,0xe5,0xe5,0x0d,0x0b,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0xe5,0xe5,0x01,0xe5,0xe5,0x05,0x1d,0xe5,0xe5,0x05,0xe5,0xe5, 0x13,0xe5,0x07,0xe5,0x06,0x01,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x08,0x10,0x03,0x20, 0x02,0xe5,0x10,0x01,0xe6,0xe5,0x02,0x01,0xe5,0x04,0xe5,0xe7,0x04,0x01,0x07,0x01, 0x02,0x04,0x01,0x07,0x01,0x03,0x03,0x01,0x07,0xe6,0x01,0x04,0x01,0x07,0x01,0x06, 0xe5,0xe6,0x07,0xe6,0xe5,0x04,0x01,0x07,0x01,0x01,0x05,0xe6,0x06,0x01,0x07,0x01, 0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x01,0x01,0x03,0x01,0x01,0x04,0xe5,0xe5, 0x01,0x04,0x01,0x0b,0xe5,0xe5,0x18,0x09,0xe5,0x01,0x09,0x04,0x09,0x02,0x06,0x02, 0xe5,0x0a,0x01,0x02,0xe5,0x0a,0x01,0x01,0x01,0x02,0x06,0x02,0x06,0x0e,0x03,0x06, 0x06,0xe5,0x04,0x16,0xe5,0x05,0x0b,0xe5,0x08,0x08,0xe5,0x17,0xe5,0xe6,0x14,0x02, 0x06,0xe5,0x04,0x09,0x01,0x07,0x08,0x14,0x12,0x01,0x05,0x04,0x04,0x03,0x0a,0x0c, 0x03,0x05,0xe5,0x02,0x02,0x09,0x07,0x05,0x01,0x0b,0x0a,0x02,0xe5,0x04,0x03,0x03, 0x0c,0x08,0x01,0x01,0xe5,0x19,0x08,0x03,0x09,0x01,0x0e,0x0e,0x0a,0xe5,0xe5,0x05, 0x09,0x03,0x03,0x01,0x06,0x02,0x13,0x08,0xe5,0x03,0x1d,0x0a,0x06,0x01,0x01,0x09, 0x06,0x02,0xe5,0x02,0x09,0x07,0xe5,0x01,0x04,0x03,0x17,0x0a,0xe6,0x06,0xe6,0x01, 0x03,0xe7,0x03,0x02,0xe6,0x05,0x05,0x07,0x02,0x03,0xe6,0x06,0xe6,0x06,0xe6,0x03, 0x02,0xe5,0x01,0x07,0xe6,0x06,0x08,0xe7,0x02,0xe5,0x01,0x01,0xe5,0x05,0x01,0x07, 0x01,0x04,0x06,0x02,0x02,0x01,0x05,0x01,0x01,0x02,0x01,0x04,0x17,0xe5,0xe5,0xe6, 0x01,0x2d,0x15,0x1b,0x01,0x11,0x02,0x08,0x01,0x02,0x09,0x0a,0x07,0x09,0x02,0x04, 
0x01,0x02,0x06,0x02,0x03,0x08,0x01,0x02,0x01,0x05,0xe5,0xe5,0xe5,0x02,0x09,0x0f, 0x02,0x01,0x01,0xe5,0x08,0x0e,0x09,0x06,0xe5,0x04,0x03,0x02,0x01,0x04,0x09,0x06, 0x0e,0x04,0x02,0x02,0x06,0x01,0x07,0x05,0x03,0x07,0x05,0xe5,0xe5,0x01,0x07,0x09, 0x09,0x05,0x0d,0x0f,0x03,0x05,0x03,0x03,0x05,0x08,0x0a,0x04,0xe5,0x13,0x01,0x02, 0x02,0xe5,0xe5,0x13,0x01,0x01,0x02,0x0a,0x15,0x04,0x0e,0x04,0x0c,0x0d,0x03,0x02, 0x0f,0x0c,0x03,0xe5,0x0c,0x09,0x01,0x02,0x02,0x09,0x05,0x11,0x09,0x03,0x03,0x06, 0x07,0x11,0x0b,0x0a,0x12,0x03,0x07,0xe6,0x0e,0x0b,0x0c,0x09,0x07,0x08,0x03,0x05, 0x10,0x04,0x23,0x01,0x13,0x0c,0x01,0xe6,0x1f,0x01,0x01,0x04,0xe5,0x07,0x0a,0xe6, 0x03,0x02,0xe5,0x07,0xe7,0x05,0xe8,0x02,0x02,0x08,0x02,0x06,0xe5,0x07,0x01,0xe5, 0x0a,0x0d,0x02,0xe6,0x0b,0xe5,0x0d,0x01,0x09,0x02,0x06,0xe5,0x0d,0x09,0x14,0xe5, 0x02,0x05,0x21,0x15,0x07,0xe5,0x03,0x06,0x07,0x05,0x04,0x05,0x01,0x07,0xe6,0xe5, 0x01,0x01,0x02,0x01,0xe5,0x1e,0x1d,0x17,0x27,0x02,0xe6,0x11,0x05,0x03,0x09,0x09, 0x02,0x03,0x05,0x0d,0x03,0x02,0x01,0x03,0x05,0x03,0x02,0x06,0x05,0x03,0xe5,0x04, 0x05,0x08,0xe5,0x03,0x06,0x03,0x0c,0xe5,0x04,0x03,0x01,0x06,0x03,0x01,0x04,0x04, 0x06,0x09,0x08,0x03,0x0a,0x09,0x07,0x19,0x0b,0xe5,0x08,0x09,0x09,0x01,0x07,0x06, 0x01,0x07,0x0c,0x06,0x01,0x0d,0x06,0x01,0x06,0x02,0x07,0x01,0x09,0x03,0x03,0x01, 0x08,0xe5,0x05,0xe5,0xe5,0x04,0xe5,0x12,0x09,0x15,0x02,0xe5,0xe6,0x01,0x02,0x14, 0x0c,0x13,0x02,0x04,0x10,0x04,0x06,0x13,0x0a,0x01,0x0b,0x06,0x2f,0x05,0x03,0x13, 0x0b,0x0c,0x02,0x03,0x03,0xe5,0xe6,0x08,0xe5,0x04,0xe5,0x12,0xe5,0x07,0x02,0x01, 0x04,0x09,0xe5,0x03,0x0d,0xe5,0xe6,0x01,0x02,0xe5,0x07,0x06,0x03,0x04,0x03,0x02, 0xe5,0x01,0xe5,0x02,0xe5,0x07,0xe5,0x07,0x09,0xe5,0x11,0x09,0xe5,0x07,0x02,0x01, 0x07,0xe5,0xe5,0x02,0x01,0xe5,0x01,0x04,0x01,0x01,0x03,0xe5,0x09,0x04,0xe6,0x02, 0x05,0x01,0x17,0x0a,0x02,0x01,0x0a,0xe5,0xe5,0x05,0x05,0xe5,0x08,0xe5,0xe5,0x01, 0x06,0xe5,0x01,0x05,0x06,0x01,0xe5,0x02,0xe5,0xe6,0x02,0x03,0xe5,0xe6,0xe5,0x02, 
0xe5,0x07,0x09,0x05,0x06,0x01,0x04,0x05,0xe5,0x01,0x02,0x01,0x04,0x02,0xe5,0xe7, 0x03,0xe5,0xe7,0x03,0x01,0xe5,0x03,0xe6,0x0d,0x01,0x02,0x01,0xe6,0x03,0x0c,0x27, 0xe5,0x05,0x07,0x06,0x01,0xe5,0xe5,0xe5,0x09,0x06,0x03,0xe5,0x0b,0x0a,0xe5,0x19, 0xe5,0xe5,0xe5,0x03,0xe5,0x16,0x03,0xe5,0x01,0x01,0x03,0xe5,0x01,0x01,0x07,0x01, 0x01,0x05,0x0d,0x07,0x04,0x02,0xe5,0x13,0xe5,0x28,0x09,0x09,0x01,0x03,0xe5,0x01, 0x09,0x09,0x09,0x0b,0x05,0xe5,0x01,0x10,0x02,0x01,0x04,0x16,0x09,0x01,0x04,0xe5, 0xe5,0xe5,0x03,0x02,0x01,0x07,0x10,0xe5,0x04,0x02,0x03,0xe5,0x0a,0x0a,0x09,0x01, 0x02,0x09,0x04,0x01,0x02,0x06,0x02,0x03,0xe5,0x03,0x09,0x09,0xe5,0x01,0x02,0x02, 0xe5,0x01,0x05,0xe5,0x01,0xe5,0x03,0x06,0x04,0x09,0xe5,0x01,0x05,0x02,0x06,0x02, 0x0a,0xe5,0xe5,0x01,0x06,0x02,0x09,0x04,0x01,0x02,0x02,0x01,0x01,0x02,0x03,0xe5, 0x03,0x14,0xe5,0xe5,0xe5,0x01,0x07,0xe6,0x0d,0x02,0x01,0xe5,0xe5,0xe5,0x0e,0x01, 0x01,0x05,0x03,0x02,0xe7,0x02,0x09,0x09,0x05,0x03,0x02,0x01,0x04,0x05,0x03,0x07, 0x03,0x09,0x13,0x05,0x05,0x01,0xe6,0xe5,0xe5,0x04,0x03,0x01,0x02,0x04,0x05,0x01, 0x01,0x02,0x01,0xe5,0x02,0x03,0x01,0x04,0x12,0x04,0x01,0x02,0x1e,0x01,0x0c,0xe6, 0x01,0x01,0x03,0xe5,0x03,0x01,0x05,0x03,0x01,0xe5,0x01,0x01,0x03,0xe5,0x01,0x0b, 0x02,0x04,0x06,0x02,0x0b,0x09,0x06,0x0c,0x01,0x02,0x0e,0x01,0x02,0x01,0x01,0x02, 0x04,0x02,0x01,0x04,0x02,0x01,0x02,0x01,0x02,0x01,0x01,0x02,0x01,0xe5,0x05,0x12, 0x01,0xe5,0xe5,0x01,0x1e,0x01,0x01,0xe7,0x05,0xe5,0x03,0x01,0x01,0xe5,0x03,0x04, 0x01,0x02,0x04,0x04,0x01,0x01,0xe5,0xe5,0x01,0x03,0xe5,0xe5,0xe6,0x02,0xe5,0xe5, 0x01,0x03,0xe5,0xe8,0x02,0xe5,0x05,0x03,0xe5,0x03,0x03,0xe8,0x04,0xe5,0x03,0x01, 0x01,0xe5,0x0d,0x01,0x01,0xe5,0xe5,0x01,0x03,0xe5,0xe6,0xe5,0xe5,0xe6,0xe8,0xe5, 0xe6,0xe6,0xe5,0xe5,0x01,0xe6,0x01,0x04,0x01,0xe5,0x11,0x01,0x01,0xe6,0x1a,0x02, 0x03,0x09,0x09,0x09,0x04,0x04,0x02,0xe5,0x04,0x04,0x04,0x09,0x02,0x01,0x04,0x02, 0xe5,0x04,0x0b,0x09,0x02,0xe5,0x04,0x02,0x06,0x0c,0x06,0x04,0x04,0x04,0x04,0x09, 
0x02,0x01,0x04,0x0e,0x11,0x01,0x01,0xe5,0x1d,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x01,0x03,0xe5, 0x01,0xe5,0x03,0xe5,0x01,0x07,0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe6,0x02, 0xe5,0x08,0x02,0xe6,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x03,0xe5,0x07, 0xe5,0x01,0x13,0xe5,0x01,0xe5,0x0a,0x02,0x04,0x01,0x02,0x02,0xe5,0x04,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe7,0x05,0xe7,0x05,0xe5,0xe5,0x05,0xe5,0x03,0x03,0xe5,0xe6, 0x01,0x02,0xe5,0x07,0xe5,0x03,0x05,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x02,0x01,0x02, 0xe5,0x07,0x09,0xe5,0x07,0xe5,0xe5,0x05,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe6,0x03, 0x02,0xe5,0x03,0x03,0x04,0x01,0x06,0xe5,0x02,0x0b,0xe5,0x08,0x27,0x30,0xe5,0x1d, 0xe5,0x12,0x09,0x09,0x1a,0x02,0x06,0x09,0x02,0x18,0x05,0xe5,0x0a,0x07,0x11,0x09, 0x09,0x01,0x1b,0x09,0x09,0x03,0x05,0x09,0x06,0x04,0x03,0x05,0x09,0x01,0x07,0x03, 0x07,0x07,0x09,0x09,0x01,0x07,0x0c,0x06,0x16,0xe5,0xe6,0x0e,0x0e,0x02,0x01,0xe7, 0x02,0xe5,0xe8,0x03,0x01,0xe7,0x05,0xe7,0xe5,0x01,0x01,0xe7,0x02,0x02,0xe7,0x05, 0xe7,0x03,0x01,0xe7,0x05,0xe7,0x03,0x01,0xe7,0x02,0x02,0x01,0xe7,0x05,0xe7,0x02, 0xe5,0xe8,0x02,0x02,0xe7,0x0f,0xe7,0x02,0x02,0xe7,0x05,0xe7,0x05,0xe5,0xe5,0x05, 0xe7,0x03,0x01,0x02,0x15,0xe5,0xe6,0x0c,0x02,0x09,0x06,0x01,0xe6,0x04,0x01,0xe6, 0x01,0x02,0x01,0xe6,0x03,0xe5,0x01,0xe5,0x04,0x01,0xe6,0x06,0xe6,0x01,0x02,0x01, 0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x06,0x01,0xe6,0x04,0x01,0xe6, 0x04,0x01,0xe6,0x04,0x02,0xe5,0x08,0x01,0xe5,0x01,0x02,0xe5,0x06,0xe6,0x06,0xe6, 0x04,0x02,0xe5,0x04,0x01,0xe6,0x04,0x01,0xe6,0x16,0x02,0xe5,0x0d,0x03,0xe5,0x01, 0x09,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x02,0x04,0x01,0x04,0x03,0x08,0xe5, 0x13,0x02,0x05,0x0f,0x01,0xe5,0xe5,0x03,0x01,0x04,0x08,0x08,0xe5,0x11,0x01,0x07, 0xe5,0x07,0x01,0x01,0x02,0xe5,0xe5,0x08,0x01,0x02,0x02,0x01,0x18,0x02,0xe5,0x0c, 0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe6,0x06,0xe5,0x07, 
0xe5,0x01,0x05,0xe6,0x01,0x04,0xe5,0x07,0xe6,0x06,0xe5,0x02,0x03,0xe6,0x01,0x01, 0xe5,0x01,0xe8,0x06,0xe5,0x05,0x01,0xe6,0x04,0xe7,0x07,0xe6,0x04,0xe7,0x01,0x03, 0x01,0xe5,0x01,0x03,0xe8,0xe5,0x04,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x07,0xe5, 0x08,0x01,0x0f,0x0a,0x09,0x09,0x09,0x08,0x07,0x01,0xe5,0x07,0xe6,0x07,0xe5,0x04, 0x01,0xe5,0x08,0x06,0x01,0xe5,0x02,0x07,0x10,0x01,0xe6,0x06,0x04,0x02,0x0b,0x09, 0xe6,0x06,0x09,0x0a,0x08,0x18,0xe5,0xe6,0x0c,0x18,0x07,0x01,0x09,0x04,0x04,0x04, 0x01,0x02,0x06,0x02,0x09,0x04,0x01,0xe5,0xe5,0x08,0x04,0x01,0xe5,0xe5,0x02,0x05, 0x01,0x03,0x05,0x03,0xe5,0xe5,0x01,0x03,0xe5,0x03,0xe5,0x01,0x02,0x02,0x03,0xe5, 0x03,0xe5,0x01,0x02,0x02,0x06,0x02,0xe5,0x01,0xe5,0x03,0x04,0x04,0x04,0x01,0x02, 0x16,0xe8,0x0d,0x0a,0x09,0x13,0x08,0x0a,0x08,0x1e,0x11,0x06,0xe6,0x08,0x07,0x01, 0x03,0x05,0x09,0x03,0x04,0xe5,0x02,0x03,0x01,0x03,0x04,0xe5,0x0d,0x06,0x01,0x19, 0xe5,0xe5,0x02,0x09,0x0c,0x24,0x15,0x2c,0x01,0x08,0xe7,0x05,0xe7,0x05,0xe8,0x04, 0x01,0xe5,0x05,0xe8,0x04,0xe7,0x01,0x05,0x03,0x03,0x01,0xe5,0x09,0x09,0x01,0x18, 0x02,0xe5,0x16,0xe5,0xe5,0x03,0x0f,0x03,0x15,0xe5,0xe5,0x0f,0x02,0x10,0x02,0x06, 0x02,0x01,0x06,0xe5,0xe5,0x23,0x27,0xe5,0xe5,0x07,0xe6,0x04,0xe5,0xe5,0x16,0xe5, 0xe6,0x10,0x01,0x01,0x02,0x02,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04, 0x01,0x07,0x01,0x07,0x01,0x05,0x01,0xe7,0x04,0xe5,0xe5,0x04,0x01,0xe6,0x03,0x02, 0x01,0x05,0x03,0x01,0x07,0xe6,0x06,0xe6,0x06,0x01,0x03,0x03,0x01,0xe5,0x05,0x01, 0x01,0x05,0x01,0x07,0x01,0x02,0x01,0x02,0x01,0x07,0x01,0x03,0x03,0x01,0x01,0x04, 0xe7,0x01,0x07,0xe8,0x08,0x0f,0xe5,0x07,0xe5,0x08,0xe5,0xe6,0x03,0xe5,0x01,0x0e, 0xe5,0x01,0x1b,0xe5,0x01,0x05,0x01,0x04,0x02,0x07,0x03,0xe5,0x02,0x04,0x03,0x05, 0xe5,0x06,0xe5,0xe5,0xe5,0x05,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe6,0x06,0xe5,0x04, 0x03,0x08,0xe5,0x04,0x0e,0x02,0xe5,0x01,0xe5,0xe5,0x08,0x09,0x01,0x06,0x02,0x01, 0x05,0x01,0x02,0x03,0x02,0xe5,0xe5,0x0c,0x0a,0x06,0x01,0x0a,0x01,0x02,0x01,0x02, 
0xe5,0x07,0x01,0x06,0x04,0x04,0x02,0xe5,0x09,0x0a,0x02,0x02,0x0d,0xe5,0xe5,0x05, 0x02,0x02,0xe5,0x01,0x01,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x01,0x02,0xe5,0xe5, 0xe5,0xe5,0x0f,0x02,0xe7,0xe5,0xe5,0x15,0x0a,0xe5,0x01,0xe5,0x0a,0x09,0x09,0x02, 0x0f,0xe5,0x12,0x06,0x02,0x05,0x01,0x01,0x0b,0x09,0x07,0x01,0xe5,0x07,0x09,0xe5, 0x08,0x09,0x06,0xe5,0x02,0x0e,0x07,0x08,0x0c,0x01,0xe7,0x06,0xe5,0x05,0x08,0x04, 0x02,0x01,0x11,0xe6,0xe5,0x12,0x04,0x05,0x04,0x01,0xe6,0x01,0x07,0x04,0x01,0x11, 0xe5,0x01,0x01,0x05,0x01,0x0a,0x10,0x01,0xe5,0x02,0x12,0xe5,0x04,0x06,0xe5,0x05, 0x01,0x01,0x02,0x1e,0xe5,0xe5,0xe5,0x17,0x0a,0x04,0x01,0x08,0x01,0x07,0x09,0x1b, 0x01,0x1d,0x01,0x01,0x06,0x02,0x01,0x0f,0x0b,0x01,0x03,0x1f,0x08,0x01,0xe5,0x01, 0xe5,0x02,0x07,0x09,0x01,0x09,0xe8,0x11,0x0d,0x05,0x0f,0x04,0x04,0x04,0x04,0x02, 0x01,0xe5,0x02,0x02,0x06,0x01,0x07,0x09,0x03,0xe5,0x03,0x02,0x04,0x03,0x02,0x05, 0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x08,0x09,0x01,0x17, 0x09,0x11,0xe9,0x19,0x09,0x03,0x02,0x02,0x05,0xe5,0xe6,0x02,0x02,0x01,0xe6,0x04, 0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x03,0x03,0x01,0x07,0x01,0x06,0x02,0xe5, 0x03,0x03,0x09,0x09,0x09,0x04,0x01,0xe5,0x0a,0xe5,0x02,0x05,0x01,0x0b,0x07,0x0a, 0x0c,0x01,0x07,0xe5,0xe6,0x02,0x11,0x10,0x06,0x03,0x06,0x02,0x06,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0x01,0x07,0x03,0x05,0xe5,0x04,0x03,0x03,0x05,0x09,0x05, 0x03,0x06,0x02,0x02,0x01,0x09,0x04,0x20,0x20,0xe7,0x07,0x12,0x02,0x03,0x02,0x04, 0xe8,0x02,0x0f,0x02,0x06,0x06,0xe6,0x03,0x02,0xe6,0xe5,0xe5,0xe5,0x15,0x0b,0x0e, 0x01,0x09,0x02,0x0e,0x0e,0x09,0x09,0x06,0x01,0x01,0x04,0x02,0xe5,0x0c,0x0b,0x03, 0x11,0xe5,0x0e,0x09,0x07,0x09,0x11,0x03,0x09,0x05,0x08,0x07,0x07,0x04,0x02,0x03, 0x1d,0x15,0x07,0x09,0x08,0x12,0x01,0x02,0x16,0xe5,0xe6,0xe5,0x0b,0x24,0x02,0x01, 0x07,0x03,0x05,0x09,0x02,0x06,0x06,0x02,0x04,0x04,0x06,0x02,0x06,0x02,0x01,0x09, 0x02,0xe5,0x01,0x02,0x02,0xe5,0x04,0x03,0x01,0x03,0x06,0xe5,0xe5,0x02,0x03,0x01, 
0x09,0x04,0x01,0x02,0x04,0x01,0x06,0x09,0xe5,0x13,0xe7,0x0e,0x01,0x14,0x0e,0x01, 0x02,0x02,0x01,0x01,0x07,0x01,0x05,0x01,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01, 0x01,0x03,0x01,0x01,0xe5,0xe5,0x01,0x05,0x02,0x02,0x02,0xe5,0x01,0x05,0xe5,0x01, 0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x06,0x02,0x06,0xe5,0xe5,0x04,0xe5,0x06, 0x01,0xe5,0x07,0xe5,0x17,0x09,0xe5,0xe5,0x01,0x02,0x0a,0x02,0x21,0x09,0x09,0x09, 0x03,0x05,0x08,0xe5,0x08,0x09,0x0d,0x09,0x09,0x04,0x04,0x01,0x07,0x02,0x05,0x0a, 0x01,0x06,0x05,0x02,0x0a,0x03,0x22,0xe5,0xe5,0x06,0xe5,0x04,0xe6,0x1b,0x05,0x03, 0x05,0x09,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x05,0xe5,0x01,0x03,0x07,0x03, 0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x04,0xe5,0x01, 0x03,0x02,0xe5,0xe5,0x02,0x09,0x0f,0x02,0xe5,0x04,0x02,0xe5,0x04,0x0e,0x01,0xe5, 0x01,0xe5,0x02,0x05,0xe5,0xe6,0x1e,0x08,0x01,0x01,0xe5,0x03,0x09,0x01,0x01,0xe5, 0x03,0xe5,0x02,0xe5,0x02,0xe5,0x07,0x01,0x01,0xe5,0x03,0x03,0xe5,0x03,0xe5,0x09, 0xe5,0x01,0xe5,0x03,0x09,0x09,0xe5,0xe5,0x05,0x04,0x01,0x02,0x09,0xe5,0x02,0x01, 0x01,0xe6,0x0f,0x01,0xe5,0x05,0x03,0x05,0x0b,0x01,0x04,0xe6,0xe5,0x01,0x0a,0x01, 0x1e,0x01,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x02,0x01,0x04, 0x04,0x04,0x02,0x01,0x06,0x04,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x04,0x04,0x02, 0x01,0x04,0x01,0xe5,0xe5,0x03,0x02,0x01,0x02,0x04,0x0b,0xe5,0xe5,0xe5,0x01,0x04, 0x09,0x06,0xe5,0x02,0x04,0x01,0x01,0x0d,0x24,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x0f,0x02,0x01,0x07,0x09,0x06,0x06,0x02, 0xe6,0xe6,0x08,0x01,0x02,0x16,0x04,0x01,0x02,0x06,0x02,0xe5,0x01,0x02,0x02,0xe5, 0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x06,0x02,0x01,0x01,0x02,0x02,0x06,0x02,0x01, 0x01,0x02,0x02,0xe5,0x01,0x04,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x03,0x02,0x02, 
0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x01,0x02,0x02,0x04,0x04,0x09,0x02,0x01,0x08, 0xe5,0x08,0x07,0x07,0xe7,0xe5,0x08,0x02,0xe5,0x02,0x01,0x02,0x14,0x01,0x01,0x05, 0x03,0x05,0x03,0x02,0x01,0xe5,0x02,0x04,0xe5,0x02,0x05,0x03,0x05,0x03,0x05,0x03, 0x05,0x03,0x07,0x03,0x05,0x03,0x02,0x01,0xe5,0x02,0x02,0xe7,0x02,0x02,0xe7,0x02, 0x02,0xe7,0x02,0x01,0x02,0xe5,0x02,0x05,0x03,0x0f,0x01,0x01,0x03,0x01,0x03,0x05, 0x03,0x09,0x04,0x01,0x0f,0x16,0x01,0x02,0x01,0x07,0x03,0xe5,0x03,0x06,0x01,0xe5, 0x02,0x03,0x01,0x09,0x03,0x05,0x09,0x03,0xe5,0xe5,0x01,0x0b,0x04,0x04,0x09,0x09, 0x04,0x04,0x05,0xe5,0x01,0x04,0xe6,0x01,0x0b,0x0e,0x01,0xe5,0xe5,0x06,0x04,0x04, 0x09,0xe5,0x03,0x01,0xe7,0xe6,0x09,0x17,0xe5,0xe8,0xe5,0xe6,0xe5,0xe5,0x01,0x02, 0x01,0x01,0x02,0x01,0xe7,0x03,0x02,0x01,0x01,0x02,0x01,0xe5,0x05,0x02,0x06,0x01, 0xe5,0xe5,0xe5,0x01,0x02,0x01,0xe6,0x01,0x01,0xe5,0x07,0x01,0xe5,0xe5,0x03,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0xe5,0xe5,0x01,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x07,0xe5,0xe5,0xe6,0xe5,0x06,0x09,0x09,0x0a,0xe5,0x11, 0x09,0x02,0x03,0x09,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x05, 0x02,0x02,0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x06,0x0c,0xe5,0x1b,0x01,0xe5,0xe5,0x09,0x09, 0x01,0x07,0xe5,0x03,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x06,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04, 0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0x2d,0xe6,0x06,0x02,0x02,0x05, 0x03,0x09,0xe5,0x07,0xe5,0x01,0x02,0xe5,0xe7,0xe5,0x02,0x01,0xe5,0xe5,0x03,0x01, 0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5, 0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05, 
0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x07,0xe5,0x07,0x02,0x06,0x02,0x06,0x06, 0x06,0xe5,0x01,0xe5,0x08,0x02,0x24,0x02,0xe5,0x08,0x09,0x09,0x09,0x09,0x09,0x09, 0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x2d,0x02,0x04,0x04,0x01,0x09,0x1b,0x09, 0x03,0x01,0x09,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04, 0x02,0x06,0x02,0xe5,0x05,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x08,0x21,0x08, 0xe5,0x01,0x0d,0x01,0x07,0x01,0x06,0xe5,0xe8,0x03,0x01,0xe7,0x05,0xe7,0x03,0x02, 0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x02,0xe5,0x01,0x01,0x04,0x02,0xe6,0x03,0x02, 0x01,0x04,0x02,0x01,0x03,0x02,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02, 0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0x01,0x06,0xe5,0xe5, 0x03,0x01,0x04,0x1d,0xe5,0x01,0x0d,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6,0x04, 0x01,0xe6,0x04,0x02,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04, 0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x03,0x02,0xe6,0x04,0x01,0xe5,0x20,0x03,0xe5,0x08,0x02,0x01,0x0e, 0x02,0x01,0xe5,0x02,0x06,0x02,0x06,0x04,0x02,0x04,0x04,0x01,0x07,0x04,0x04,0x09, 0x01,0x02,0x04,0x09,0x08,0x02,0x01,0x07,0x04,0x04,0x09,0x09,0x06,0x02,0x01,0x07, 0x09,0x09,0x09,0x23,0x01,0xe5,0x0c,0x01,0x05,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x05, 0x01,0xe5,0x06,0xe6,0x01,0xe5,0x01,0x01,0xe5,0x01,0x04,0xe6,0x01,0xe5,0x01,0x01, 0xe5,0x01,0x03,0xe7,0x01,0x03,0x01,0xe5,0x01,0x04,0xe6,0x01,0x05,0xe5,0x03,0x03, 0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0x01,0xe5,0x01,0xe5,0x01,0xe7,0x01,0xe5, 0x01,0x01,0xe5,0x01,0x03,0xe7,0x01,0xe5,0x01,0x01,0xe5,0x01,0x03,0xe7,0x01,0x03, 0xe7,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0xe5,0x0c,0x01,0xe5,0x05,0x01, 0x0a,0x06,0x01,0xe5,0x04,0xe5,0xe5,0x05,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04, 
0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x04,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x07,0x06,0x02,0xe5, 0x04,0xe5,0x0b,0x17,0xe5,0xe6,0x0c,0x01,0x07,0x01,0x0c,0x04,0x01,0x02,0x02,0xe6, 0xe5,0x01,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5, 0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x03,0x04, 0x02,0xe5,0xe6,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0xe5,0xe6,0x01, 0x02,0x02,0xe5,0x01,0x02,0xe5,0xe6,0x01,0x02,0x02,0x03,0x02,0xe5,0xe6,0x01,0xe5, 0xe5,0xe6,0xe5,0x0b,0x17,0x01,0x01,0x09,0x03,0x01,0xe5,0x12,0x09,0x02,0x01,0x01, 0x06,0xe5,0xe5,0x01,0x02,0x01,0x01,0x06,0xe5,0xe5,0x01,0x02,0x01,0x01,0x05,0xe6, 0xe5,0x04,0x01,0x01,0x06,0xe5,0xe5,0x09,0xe5,0x01,0x03,0xe5,0xe5,0x01,0x02,0xe6, 0xe5,0x01,0x02,0x01,0x01,0x02,0x03,0xe5,0xe5,0x01,0x02,0x01,0x01,0x02,0x03,0xe5, 0xe5,0x01,0x02,0x01,0x01,0x06,0xe5,0x02,0x03,0x28,0x02,0xe5,0x01,0x0a,0x02,0x20, 0xe5,0x03,0x03,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5, 0x01,0x06,0x02,0x05,0xe5,0x01,0x06,0x04,0x05,0xe5,0x01,0x05,0x03,0x06,0x02,0x05, 0xe5,0x01,0x06,0x02,0x05,0xe5,0x01,0x09,0x05,0x05,0x04,0x0f,0x17,0x02,0xe6,0x41, 0x1d,0x6b,0x02,0xe5,0x0e,0x02,0x07,0x01,0x06,0xe5,0xe5,0x10,0x11,0x01,0x07,0x01, 0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x06,0xe5,0xe5,0xe5,0x04,0x01, 0xe6,0x04,0x01,0x07,0x01,0x07,0xe6,0x01,0x04,0x01,0x01,0x07,0xe6,0xe5,0x04,0xe6, 0xe5,0x04,0xe6,0x06,0x01,0x01,0x05,0xe6,0x01,0x04,0xe6,0x06,0x01,0x04,0x02,0x01, 0x01,0x05,0x01,0x04,0x02,0x01,0x07,0x01,0x07,0x01,0x0a,0x01,0x01,0x02,0x0c,0xe5, 0x04,0x0c,0xe5,0xe5,0x05,0xe5,0x01,0x05,0xe5,0x02,0x05,0x01,0x03,0x02,0x03,0xe5, 0x04,0x01,0x06,0xe5,0xe5,0xe5,0x04,0x02,0x02,0x02,0xe5,0xe5,0xe5,0x03,0xe5,0xe5, 0xe5,0x02,0x02,0x0a,0x08,0xe5,0x02,0x04,0xe5,0x01,0xe5,0x03,0xe5,0x02,0x04,0xe5, 0x07,0xe5,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x05,0x05,0x05,0x08,0x13,0xe5,0x0c, 
0x02,0x0e,0xe5,0xe5,0x03,0x02,0x01,0x03,0x02,0x01,0xe5,0x01,0x03,0xe5,0x0d,0x03, 0xe5,0x03,0x04,0x07,0x02,0x03,0x05,0x06,0x01,0x10,0x03,0x01,0x02,0x06,0x05,0x01, 0x01,0x03,0x03,0x01,0x09,0x03,0x01,0x03,0xe5,0x03,0x02,0x03,0x04,0xe5,0xe5,0x0f, 0x1a,0xe6,0x01,0x0e,0x19,0x07,0x07,0x05,0x05,0x06,0x06,0x0b,0x03,0x03,0x09,0x01, 0x09,0x03,0x08,0x13,0x09,0x09,0x09,0x13,0x1a,0x02,0x06,0x11,0xe5,0x01,0xe5,0x0b, 0x05,0x0d,0x01,0x0a,0x08,0x02,0x09,0x01,0x04,0x07,0x01,0x04,0x04,0x09,0x02,0x01, 0x04,0x05,0x03,0xe5,0x05,0x03,0x07,0x01,0x05,0x01,0x01,0xe5,0x03,0x07,0x03,0x01, 0xe5,0x05,0x10,0x02,0x10,0x25,0xe6,0xe7,0x0e,0x12,0xe5,0x0f,0x10,0x0b,0x02,0x01, 0x01,0x0c,0x0c,0x10,0x01,0x0a,0x02,0x06,0xe5,0xe5,0x05,0x06,0x01,0x01,0x01,0x0a, 0x05,0x06,0x02,0x02,0x05,0x13,0x1c,0xe5,0x01,0xe5,0x07,0x27,0x03,0xe7,0x06,0xe5, 0x04,0x01,0x06,0x08,0x03,0x06,0x01,0xe5,0x04,0x04,0xe5,0x06,0x01,0x03,0x03,0x01, 0x03,0x01,0x03,0xe6,0x02,0xe5,0x02,0xe5,0x11,0xe5,0x02,0x04,0xe5,0x02,0x08,0x14, 0x01,0x1d,0x06,0xea,0x39,0x04,0x01,0x0b,0xe5,0x0b,0x06,0xe5,0x02,0x09,0x01,0xe5, 0x06,0x0d,0x06,0x01,0x02,0x04,0x01,0xe5,0x0b,0x03,0x01,0xe5,0x05,0x01,0x20,0x11, 0x04,0x12,0xe5,0x01,0x08,0x02,0x23,0x0c,0x13,0x07,0x03,0x07,0x02,0x08,0x05,0x0e, 0x19,0x09,0xe5,0x07,0xe5,0xe5,0x05,0x02,0x0b,0x2d,0x01,0x0a,0x02,0x1b,0x03,0x04, 0x06,0x03,0x01,0x01,0x04,0x02,0x09,0x01,0xe5,0x04,0xe5,0x01,0x08,0x01,0x08,0x03, 0x02,0x01,0x05,0x03,0x04,0x02,0x01,0x01,0xe5,0x02,0x0c,0x01,0x05,0x09,0x01,0x07, 0x01,0x09,0x07,0x04,0x0a,0xe5,0x0b,0xe5,0x08,0x19,0x06,0x24,0x07,0x05,0x03,0xe5, 0x0e,0x08,0x05,0x0d,0x02,0x09,0x06,0x03,0x01,0x09,0x08,0x13,0x29,0x03,0x2c,0x02, 0xe6,0xe5,0x16,0x03,0x05,0x09,0xe5,0x09,0x0c,0xe5,0xe6,0x01,0x05,0x01,0x08,0x02, 0xe5,0xe5,0x04,0x04,0x01,0x02,0xe5,0x01,0x02,0x02,0x09,0x02,0x03,0x01,0xe5,0x04, 0x05,0x03,0x06,0x02,0x09,0x06,0x06,0x06,0x01,0x08,0x07,0x02,0x23,0xe7,0xe5,0x07, 0x37,0x08,0x08,0x01,0x08,0x0a,0x08,0x0a,0x11,0x03,0x06,0x01,0x09,0x07,0x01,0xe5, 
0x0f,0x01,0xe5,0x16,0x04,0x23,0x02,0x1d,0x2c,0x14,0x12,0x08,0x08,0x01,0x20,0x3c, 0x21,0x02,0xe7,0x28,0x03,0xe5,0x07,0x09,0x09,0x09,0x07,0x01,0x09,0x0a,0x08,0xe5, 0xe5,0xe5,0x03,0x01,0x09,0x07,0x01,0x09,0x09,0x09,0x09,0x09,0x02,0xe5,0x01,0x02, 0xe5,0x26,0x01,0x01,0xe6,0x01,0x01,0x0f,0xe5,0x14,0x03,0xe5,0x07,0x06,0x02,0x06, 0x02,0x06,0x02,0x05,0x03,0x09,0x0b,0xe5,0x02,0x05,0xe5,0x01,0xe6,0x07,0x03,0x09, 0x06,0x02,0x05,0x03,0x09,0x05,0xe5,0x01,0x06,0x02,0x01,0xe5,0x04,0x26,0x01,0x03, 0x01,0xe5,0x03,0x26,0x09,0x0b,0x03,0xe5,0x01,0x01,0x04,0x01,0xe5,0xe5,0x03,0x01, 0x02,0xe5,0x02,0x01,0x02,0x07,0x0b,0x01,0x09,0x01,0x0f,0x05,0xe5,0x01,0x01,0x11, 0x09,0x09,0x01,0x03,0xe5,0x01,0x01,0x22,0x01,0x02,0x04,0xe5,0xe6,0x2a,0x09,0x10, 0x02,0x09,0x09,0xe7,0x02,0x04,0x04,0xe5,0xe5,0x13,0xe5,0x08,0x05,0xe5,0x01,0x09, 0x09,0x06,0xe5,0xe5,0x04,0xe5,0x01,0x06,0xe5,0xe5,0x05,0xe5,0xe5,0xe5,0x21,0x06, 0x02,0x01,0x01,0x2b,0x0b,0x02,0x04,0x01,0x02,0x02,0x03,0x02,0x03,0x02,0x02,0x07, 0x01,0x02,0x01,0x0b,0x02,0x04,0x01,0x02,0x08,0x02,0x06,0x09,0x02,0x06,0x02,0x01, 0x01,0x02,0x02,0x06,0x02,0x06,0x02,0xe5,0x01,0x02,0x03,0x02,0x05,0x20,0x07,0xe6, 0xe6,0x01,0x28,0x03,0x07,0x02,0x04,0x04,0xe5,0x01,0x05,0xe5,0xe6,0x04,0xe5,0x01, 0xe5,0x03,0x01,0x01,0xe5,0x0a,0x02,0x04,0x04,0xe5,0x01,0x02,0x03,0x02,0x01,0xe5, 0xe5,0x03,0x01,0x02,0x02,0x09,0x04,0x01,0x02,0x06,0x02,0x04,0x04,0x03,0x09,0x04, 0x27,0x02,0xe8,0x01,0x11,0x01,0x14,0x04,0x04,0x01,0x07,0x01,0x02,0x04,0x01,0x07, 0x01,0x09,0x02,0x04,0x01,0x02,0x04,0x01,0x03,0xe5,0x01,0x01,0x0b,0x01,0x02,0x04, 0x07,0x01,0x07,0x01,0x09,0x02,0x04,0x01,0x07,0x04,0x02,0x01,0x01,0x05,0x01,0x01, 0x01,0x29,0x01,0xe7,0x01,0x11,0x01,0x14,0x04,0x04,0x01,0x02,0x04,0x04,0x04,0x04, 0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x04,0x06,0x04,0x01, 0xe7,0xe5,0x01,0x04,0x01,0x02,0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x04, 0x04,0x04,0x04,0x01,0x02,0x2b,0xe6,0xe5,0x06,0x27,0x02,0x06,0x02,0x06,0x02,0x10, 
0x13,0xe5,0x1a,0x0c,0x27,0x09,0xe5,0x07,0xe5,0x04,0x0c,0xe5,0x1b,0xe5,0x01,0xe5, 0x09,0x01,0x22,0x16,0x13,0x01,0x03,0xe5,0x08,0x1e,0xe5,0x08,0x02,0x01,0x27,0x07, 0x06,0x08,0xe5,0x0b,0xe5,0x1b,0xe9,0x07,0x05,0x09,0x09,0x09,0x04,0x01,0x02,0x09, 0x04,0xe6,0x01,0x09,0x03,0x02,0x02,0xe6,0x06,0x09,0x06,0x02,0x04,0x01,0x04,0xe6, 0x06,0x04,0xe6,0x01,0x04,0x01,0x02,0x09,0x09,0x09,0x03,0x05,0x09,0xe7,0x02,0x02, 0x09,0x09,0x04,0x01,0x06,0xe6,0x01,0x02,0x27,0x09,0x09,0x09,0x12,0xe5,0x08,0x09, 0x08,0xe5,0x08,0x15,0x09,0x13,0x24,0x02,0x1d,0x04,0x03,0xe5,0xe5,0x28,0x05,0x01, 0x09,0x09,0x15,0x09,0x09,0x07,0x0b,0x02,0x10,0x0b,0x13,0x26,0x1e,0x08,0x01,0x01, 0x01,0x08,0x22,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0xe7,0x05,0x02, 0x06,0x02,0x06,0x02,0x08,0xe5,0xe5,0x03,0x01,0x02,0x01,0x04,0x02,0x01,0x04,0x02, 0x01,0x02,0x01,0x02,0x04,0x01,0x02,0x01,0x02,0x01,0x02,0x04,0x01,0x02,0x04,0x01, 0xe5,0xe5,0x03,0x01,0x23,0xe5,0xe5,0x2a,0x03,0x02,0x02,0x03,0x05,0x03,0x05,0x03, 0x07,0x01,0x08,0xe5,0x04,0x03,0x01,0x03,0x03,0x05,0x03,0x07,0x02,0xe5,0x01,0x02, 0x01,0x01,0x01,0x03,0x03,0x02,0x04,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x02,0x02, 0x01,0x01,0x02,0x02,0x01,0x01,0x01,0x03,0x01,0xe6,0x04,0x01,0xe5,0x21,0xe5,0xe5, 0x13,0x18,0x09,0x03,0x05,0x09,0x03,0x05,0x03,0x05,0xe5,0x07,0x09,0x03,0x05,0x08, 0x02,0x0e,0x04,0x09,0x03,0xe5,0x11,0x0f,0x04,0x02,0x01,0x07,0x01,0x22,0xe8,0x14, 0xe5,0x07,0xe5,0x07,0xe6,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe7,0x01,0x03,0xe7, 0x06,0xe7,0x04,0xe8,0x04,0xe8,0x05,0xe6,0x01,0x05,0xe7,0x06,0xe7,0x04,0xe7,0x01, 0x03,0x01,0xe5,0x05,0xe7,0x05,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0xe8,0xe5,0x04, 0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x06,0xe6,0xe5,0x0f,0xe5,0x18,0x01,0xe5, 0x05,0x01,0x07,0x01,0x09,0x07,0x01,0x09,0x07,0x01,0x09,0x09,0x0b,0xe6,0xe6,0x03, 0x09,0x09,0x09,0x09,0x09,0x1b,0x01,0x25,0xe5,0x2a,0x01,0xe5,0xe6,0x01,0xe5,0xe5, 0x01,0xe5,0x01,0xe5,0xe5,0x01,0xe5,0x01,0x02,0x02,0xe5,0x01,0xe5,0xe5,0x01,0xe5, 
0x01,0x05,0x03,0xe5,0xe5,0x01,0x01,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x01,0x01, 0x02,0x04,0x03,0x02,0x02,0x03,0x02,0x02,0xe5,0x01,0xe5,0x03,0x03,0x02,0x02,0xe5, 0x01,0xe5,0x03,0x03,0x02,0x02,0xe5,0x07,0xe5,0x01,0x05,0x04,0x01,0x24,0xe6,0x31, 0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x06,0x01,0x03, 0x04,0xe5,0x02,0x08,0x02,0x05,0x03,0x04,0xe5,0x02,0x04,0xe5,0x02,0x03,0x01,0x03, 0x04,0xe5,0x02,0x03,0x01,0x03,0x05,0x03,0x04,0xe5,0x0a,0x24,0x01,0xe6,0x0f,0x09, 0x16,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0xe7,0x01,0x03,0x01,0xe5,0x01,0x04,0x01,0x02,0x05,0x01,0xe5, 0x01,0x03,0x01,0xe5,0x01,0x03,0xe8,0x05,0xe6,0x01,0x03,0xe8,0x05,0xe6,0x01,0x03, 0x01,0x03,0x03,0x01,0xe5,0x09,0x01,0x23,0xe8,0x01,0x27,0xe5,0xe5,0x05,0xe5,0xe6, 0x04,0xe5,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x05,0xe5,0xe5,0x05, 0xe5,0xe5,0x03,0x0d,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe6,0x04,0xe5,0xe5,0x05, 0xe5,0xe5,0x05,0x09,0xe5,0xe5,0x13,0x1d,0x03,0x02,0x06,0x04,0x05,0x01,0x07,0x01, 0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0x07,0xe7,0x05,0x01,0x02,0x04,0xe6, 0xe5,0x04,0xe6,0xe5,0x04,0x01,0x02,0x04,0xe6,0xe5,0x04,0xe6,0xe5,0x06,0x01,0x01, 0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x04,0xe5,0xe5,0x03,0x02, 0x01,0xe5,0xe5,0x02,0xe5,0xe5,0x01,0x04,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01, 0x0d,0xe5,0x15,0x15,0xe7,0x05,0xe6,0x06,0xe7,0x06,0xe5,0x01,0x05,0xe6,0x06,0xe6, 0x06,0xe5,0x01,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe6,0x05,0xe6,0x02,0xe5, 0x01,0xe6,0x06,0xe6,0x01,0x04,0xe6,0x06,0xe6,0x07,0xe5,0x06,0xe6,0x08,0x2d,0xe7, 0x06,0x24,0x09,0xe5,0x07,0x05,0x03,0x03,0x01,0x03,0x09,0x06,0x02,0x02,0xe5,0xe5, 0x09,0x0d,0xe5,0x05,0x02,0x06,0x02,0x02,0x03,0x02,0x09,0x02,0x06,0x05,0x03,0x02, 0x06,0x07,0x03,0x04,0x04,0x04,0x11,0x08,0xe5,0xe5,0xe5,0xe5,0x08,0x05,0x02,0x0f, 0x06,0x09,0x09,0x05,0x03,0x06,0x02,0x09,0x08,0xe5,0x07,0x0f,0x05,0x0a,0x05,0x03, 
0x07,0x01,0x03,0x02,0x02,0x03,0x05,0x09,0x09,0x0f,0x13,0x14,0xe5,0x01,0xe5,0x29, 0xe6,0x05,0xe7,0x06,0xe6,0xe5,0x01,0x02,0x01,0x07,0x01,0xe5,0x08,0x02,0x03,0x01, 0x04,0x02,0xe6,0x05,0xe7,0x06,0xe5,0x02,0x06,0xe6,0x06,0xe6,0x06,0xe7,0x05,0xe6, 0x06,0xe7,0x05,0xe6,0x03,0x02,0xe6,0x06,0x01,0x0a,0x04,0x0f,0x0b,0xe7,0xe5,0x1e, 0x1b,0x0d,0x02,0x02,0x01,0x01,0x02,0x03,0x09,0x02,0x02,0x02,0x13,0x20,0x08,0x09, 0x17,0x07,0x19,0x14,0x01,0xe5,0xe7,0x07,0x1a,0x06,0x02,0x05,0xe5,0xe5,0x03,0x02, 0x05,0x13,0x05,0x03,0x05,0x07,0x05,0x03,0x09,0x03,0x03,0xe5,0x01,0xe6,0x02,0x03, 0x01,0x03,0x03,0x07,0x01,0x01,0x07,0x09,0x02,0x1c,0x21,0x01,0x02,0x0a,0x34,0x01, 0xe5,0x02,0x07,0xe5,0x05,0x06,0x02,0x06,0xe5,0x02,0x08,0x02,0x0d,0xe5,0x03,0x04, 0x02,0x06,0x02,0x10,0xe5,0x06,0x17,0x10,0x0f,0x15,0xe7,0x01,0x12,0x22,0x14,0xe5, 0x08,0x10,0x04,0x0b,0x07,0x02,0x0a,0x02,0x02,0x13,0x06,0x01,0xe5,0x09,0xe5,0x0f, 0x11,0xe5,0xe5,0x08,0x15,0x01,0x01,0x2b,0xe6,0x0d,0x01,0x0a,0xe5,0x07,0x01,0x05, 0x01,0xe8,0x03,0xe6,0x02,0x05,0x04,0xe5,0x04,0x02,0x03,0xe7,0x08,0x08,0xe5,0x24, 0x09,0x07,0x05,0x2b,0x01,0xe6,0x07,0x23,0x0e,0x0c,0xe6,0x03,0x0e,0x08,0xe5,0x09, 0x11,0x04,0x07,0x08,0x01,0x18,0xe5,0x11,0x0b,0x2f,0xe9,0x3c,0x11,0x0a,0x03,0x06, 0xe5,0xe6,0x07,0xe5,0x01,0x09,0x06,0xe5,0x0f,0x02,0x03,0x02,0x09,0x06,0x0f,0x15, 0x02,0x01,0xe5,0xe5,0x03,0x1d,0xe5,0xe6,0x37,0x02,0x09,0x06,0x01,0xe5,0x07,0x03, 0x02,0x02,0x07,0xe5,0xe5,0x03,0xe5,0xe6,0x07,0x09,0x05,0x05,0x01,0x05,0x0a,0x13, 0x02,0x0b,0x15,0x01,0x22,0xe5,0xe6,0x01,0x06,0x25,0x13,0x0c,0x0f,0xe5,0x08,0x07, 0x12,0x03,0x0c,0x06,0x20,0x03,0x03,0x09,0x01,0x02,0x1a,0x14,0xe6,0xe5,0xe5,0xe5, 0x08,0x10,0xe5,0x17,0x03,0xe5,0x03,0x14,0x09,0xe5,0x0d,0x03,0x05,0x0f,0xe5,0x06, 0xe5,0x08,0x01,0xe5,0x04,0xe5,0x01,0xe5,0x03,0xe5,0x08,0x05,0x09,0x06,0xe5,0x1e, 0x0c,0x01,0x01,0x03,0xe5,0x0b,0x10,0xe5,0x19,0x15,0x01,0x04,0x06,0x01,0x01,0x0e, 0x03,0x05,0x06,0xe5,0x01,0x01,0x02,0x08,0x07,0x01,0x03,0xe5,0xe6,0x0e,0x01,0x15, 
0xe5,0x02,0xe5,0x06,0x18,0x0d,0x03,0x01,0xe5,0xe5,0x0a,0x45,0x01,0x02,0xe6,0x03, 0x11,0x09,0x06,0x01,0x01,0x02,0x02,0x04,0x01,0x04,0x01,0x02,0x07,0x01,0x01,0x09, 0x09,0x05,0x0b,0x07,0x01,0x1b,0x0b,0x05,0xe5,0xe6,0x03,0x07,0x45,0x01,0x1b,0x09, 0x15,0x01,0x04,0xe5,0x07,0x02,0x01,0x22,0xe5,0x02,0x04,0xe5,0xe5,0xe5,0x02,0xe5, 0x15,0x0d,0x01,0xea,0x0b,0x27,0x01,0x07,0x01,0x0c,0x09,0x09,0xe5,0x01,0xe5,0xe5, 0x01,0x04,0x04,0x04,0x04,0x0b,0xe5,0x01,0xe5,0xe5,0x0b,0x01,0xe6,0x01,0x04,0x01, 0xe5,0xe5,0x0b,0x04,0x05,0x02,0xe5,0xe5,0x01,0xe5,0x01,0x05,0x06,0x11,0x0f,0xea, 0x0b,0x03,0x23,0x01,0x01,0x05,0x01,0x01,0x13,0x09,0x05,0x01,0x01,0x05,0x03,0x05, 0x03,0x0b,0x05,0x01,0x01,0x09,0x04,0xe5,0x02,0x02,0x01,0xe5,0xe5,0xe5,0x10,0x09, 0x02,0x08,0x07,0x11,0x03,0x09,0x01,0xe5,0xe5,0xe5,0x0c,0x27,0x01,0x04,0x01,0xe5, 0xe5,0x01,0x01,0x01,0x07,0x02,0x01,0x02,0x06,0x03,0xe5,0x01,0x01,0x04,0x02,0x09, 0x03,0x09,0x07,0x01,0x02,0xe6,0x03,0x02,0xe7,0xe5,0xe5,0x03,0x02,0x01,0x0c,0x04, 0x09,0x01,0x07,0x01,0x01,0x02,0x16,0x05,0xe5,0x07,0xe7,0xe6,0x0b,0x27,0x01,0x04, 0x01,0xe5,0xe5,0x03,0x01,0x04,0xe5,0xe8,0xe5,0xe6,0x05,0x01,0xe5,0xe5,0x01,0x01, 0x01,0xe5,0xe8,0x02,0xe5,0xe5,0x01,0x0d,0x01,0xe5,0x03,0x01,0x03,0xe5,0x03,0x01, 0xe5,0xe5,0xe6,0xe5,0x03,0xe7,0xe5,0x0a,0xe7,0x03,0x09,0x07,0x01,0x02,0xe7,0x14, 0x06,0x06,0x01,0x01,0x01,0xe5,0x4b,0x09,0x09,0x02,0xe6,0x03,0x09,0x04,0x10,0x02, 0x02,0x0d,0x02,0x09,0x10,0x02,0xe5,0x07,0x10,0x22,0x06,0x02,0xe5,0xe5,0x45,0x05, 0xe5,0x07,0xe5,0x07,0xe5,0x01,0xe5,0x03,0xe5,0x07,0xe5,0x13,0xe5,0x01,0x0f,0xe5, 0x03,0x07,0xe6,0x03,0x08,0xe5,0x01,0x06,0x04,0x0d,0xe5,0x08,0x21,0xe5,0xe6,0x0d, 0x09,0x09,0x09,0x09,0x09,0x02,0x02,0x03,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0x0b,0xe5,0x03,0x03,0x06,0x02,0xe5,0xe5,0xe5,0xe5,0x01,0x09,0x09, 0xe5,0x03,0xe5,0x01,0x09,0x09,0xe7,0xe5,0xe5,0x01,0x09,0x09,0x06,0x06,0xe5,0xe7, 0x5a,0xe5,0x32,0x08,0xe5,0x08,0x1a,0x02,0x09,0x09,0x09,0x1c,0xe5,0x03,0x03,0xe7, 
0x4c,0x09,0x03,0x05,0x09,0x09,0x15,0x03,0x09,0x05,0x03,0x0f,0x09,0x01,0x09,0x0b, 0x09,0x1c,0x09,0x01,0x01,0x0d,0x01,0x18,0x09,0x09,0x02,0x01,0x04,0x02,0x01,0xe7, 0x06,0xe6,0x06,0x01,0x03,0x02,0xe7,0x05,0xe7,0x03,0x01,0x0c,0xe6,0x03,0x01,0x0a, 0xe6,0xe5,0x01,0x01,0x07,0x01,0x07,0x01,0xe7,0xe5,0x01,0x01,0x13,0xe7,0x0d,0x01, 0x07,0x01,0x0f,0xe7,0x0c,0x01,0xe5,0x20,0x0c,0x01,0xe5,0x04,0xe5,0xe7,0x08,0x08, 0xe5,0x01,0x04,0xe6,0x03,0x02,0xe6,0x04,0x01,0xe6,0x01,0x07,0xe5,0x04,0x01,0xe5, 0x08,0xe5,0x04,0x01,0xe5,0x04,0xe5,0xe7,0x04,0x01,0xe6,0x04,0x01,0x01,0x01,0x07, 0x05,0x02,0xe5,0x08,0x01,0x03,0x01,0xe5,0x05,0x01,0xe5,0x0d,0xe5,0xe5,0x0c,0x01, 0x39,0x01,0xe5,0x08,0x03,0x0e,0xe5,0x02,0x04,0x01,0x0b,0x08,0x03,0x01,0xe5,0xe5, 0x03,0x06,0x07,0x04,0x02,0x01,0x03,0xe5,0xe5,0x02,0x06,0x01,0x03,0xe5,0x08,0x01, 0x02,0x03,0x0c,0xe5,0xe5,0x03,0x02,0x01,0x09,0x05,0xe8,0x0b,0x01,0x05,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5, 0x01,0x05,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x09,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x06, 0xe6,0xe5,0x04,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x01,0x04,0xe5,0x07,0xe6, 0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x07,0xe7,0x2d,0x09,0x06,0x01,0x0a,0x12,0xe6,0x06, 0xe5,0x07,0xe5,0x05,0x01,0xe5,0x09,0x09,0xe6,0x06,0xe5,0x05,0x01,0xe6,0x07,0x06, 0x01,0xe6,0x04,0x01,0x09,0x09,0xe5,0x05,0x16,0xe5,0x0f,0x2e,0x10,0x01,0x1b,0x01, 0x09,0x09,0xe5,0x05,0x01,0x08,0xe5,0xe5,0x06,0x09,0x01,0x07,0x01,0x11,0x01,0x07, 0x01,0x07,0x09,0x31,0xe6,0x36,0x13,0x1c,0xe5,0x07,0x0a,0x14,0x0a,0x08,0x0a,0x12, 0x09,0x09,0xe5,0x05,0x25,0xe5,0xe5,0x02,0x20,0x09,0x09,0x09,0x0b,0x09,0x04,0x04, 0x06,0x02,0x06,0x02,0x09,0x06,0x04,0x04,0x02,0x06,0x04,0x04,0x02,0x0b,0x09,0x04, 0x01,0x02,0x04,0x04,0x04,0x04,0x09,0x11,0x0d,0xe8,0x0a,0x01,0x01,0xe5,0x3e,0x1a, 0x05,0x02,0x03,0x02,0x02,0x07,0xe5,0x01,0x01,0x08,0x05,0x02,0x14,0xe5,0x01,0x09, 0x01,0x02,0x1a,0x01,0xe5,0xe5,0x0b,0x01,0x02,0x0d,0xe5,0x01,0x0e,0x02,0x01,0x01, 
0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x02,0x04, 0x01,0x02,0x04,0x01,0x01,0x05,0xe7,0xe5,0x03,0xe7,0x05,0x01,0xe6,0xe5,0x02,0xe8, 0x06,0x01,0xe6,0x04,0x01,0x07,0x01,0xe6,0x04,0x01,0x02,0x04,0x01,0x07,0x01,0x01, 0x04,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0xe5,0xe5,0x02,0x01,0x02,0x04,0x01, 0x05,0x01,0x01,0x0a,0xe6,0xe5,0x01,0x34,0xe5,0x0d,0x04,0x03,0x05,0xe5,0xe6,0x03, 0x02,0x02,0x04,0x06,0xe5,0xe5,0x05,0x01,0xe5,0x07,0xe7,0xe5,0x0d,0xe6,0x12,0xe5, 0x0f,0xe5,0x01,0x04,0x04,0x07,0xe5,0x09,0x24,0xe5,0x0f,0x24,0xe5,0x0e,0x01,0x07, 0x02,0x04,0x02,0x03,0x06,0x07,0x02,0x04,0x02,0x01,0xe5,0xe6,0x15,0x18,0x0b,0x1a, 0x01,0x02,0x1b,0xe5,0x0a,0xe5,0xe7,0x5b,0x01,0x07,0x09,0x09,0x04,0x03,0x02,0xe5, 0xe5,0x05,0x07,0x01,0x01,0x02,0x04,0x03,0x07,0x02,0x03,0x05,0x0c,0x27,0x11,0x01, 0xe8,0x03,0xe5,0x02,0x03,0xe6,0x3a,0xe5,0x08,0x06,0x01,0x0e,0x15,0xe5,0x02,0x04, 0x02,0x01,0x09,0x01,0xe5,0xe5,0x04,0x05,0xe5,0xe5,0xe6,0x0d,0x01,0x07,0xe6,0x10, 0x01,0x01,0x05,0xe6,0x06,0xe6,0x0c,0xe7,0xe6,0x01,0x01,0x07,0x11,0x29,0x0f,0x01, 0x01,0x01,0x05,0x07,0x09,0x01,0x0b,0xe5,0x09,0x02,0x02,0x13,0x04,0xe5,0x01,0xe5, 0x0e,0x1c,0xe5,0x10,0x10,0xe5,0xe7,0x07,0x2e,0x01,0x05,0xe5,0x01,0x03,0x08,0x08, 0x08,0x0b,0x03,0x09,0x09,0x02,0xe6,0x02,0x02,0x07,0x01,0x0f,0x03,0x09,0x04,0x02, 0x09,0xe6,0x08,0x11,0x08,0x10,0x03,0xe6,0x03,0x06,0x03,0x13,0x09,0x1e,0xe5,0x08, 0x13,0x04,0x06,0x05,0x02,0x03,0x0b,0x06,0x02,0x04,0x09,0x04,0x0f,0x02,0x0a,0x01, 0x07,0xe5,0x01,0xe5,0x0a,0x05,0x17,0xe5,0xe6,0x02,0x4a,0x08,0xe5,0x12,0x03,0xe5, 0x01,0x04,0x04,0x06,0x0a,0xe5,0x01,0x01,0x03,0xe5,0x01,0x03,0x06,0xe5,0x03,0x02, 0x09,0x0a,0x01,0x07,0xe5,0xe5,0x01,0x0e,0x09,0x02,0x12,0xe5,0x01,0xe5,0xe5,0x3a, 0x04,0x02,0x01,0x09,0x09,0xe5,0xe5,0xe5,0xe5,0x01,0xe7,0x0c,0x0b,0x01,0x05,0x03, 0x01,0xe5,0x0a,0xe5,0x05,0x02,0x03,0x02,0x01,0x1a,0xe6,0x06,0x01,0x2c,0x03,0x3b, 0x13,0x1a,0x17,0x02,0x0b,0x02,0x11,0x05,0x04,0x1a,0x02,0x34,0x01,0x01,0xe7,0x3c, 
0x04,0x01,0x0a,0x08,0xe5,0xe5,0x05,0x01,0xe5,0x05,0x06,0x06,0x01,0xe5,0x01,0x08, 0x08,0x0e,0x01,0x04,0x03,0x0b,0x01,0xe5,0x11,0x03,0xe5,0x01,0x01,0x04,0x01,0x25, 0x03,0x1e,0x3e,0x01,0x0b,0x04,0x02,0x09,0x01,0x0c,0x02,0x02,0xe5,0x04,0x08,0x0e, 0x09,0x13,0x01,0x07,0x05,0x21,0x02,0x02,0x01,0xe5,0x24,0x47,0x04,0x09,0x16,0x1c, 0x0a,0x11,0xe5,0x01,0x07,0x2d,0x02,0xe6,0x43,0x01,0x03,0xe6,0x06,0xe5,0x07,0xe6, 0x06,0x02,0xe5,0x04,0x0d,0xe5,0x06,0x05,0x02,0x0b,0x01,0xe5,0x04,0x08,0x13,0x06, 0x02,0xe5,0x2e,0x02,0x01,0xe5,0xe6,0x01,0x01,0x40,0xe5,0x0b,0xe5,0x01,0x05,0xe5, 0xe5,0x0b,0xe5,0x02,0x03,0x0b,0x01,0x02,0xe5,0x11,0xe5,0x04,0xe5,0x04,0x16,0x01, 0x0a,0xe5,0xe5,0x31,0x02,0x02,0xe5,0x03,0x48,0x07,0x07,0x01,0x04,0xe5,0xe5,0xe5, 0x03,0x01,0x18,0x01,0xe5,0xe6,0x0c,0x04,0x01,0xe5,0xe5,0x01,0xe6,0x12,0xe5,0xe5, 0xe5,0x01,0x01,0x01,0xe5,0xe5,0x03,0x01,0x02,0x02,0xe6,0x01,0x01,0x24,0xe6,0x52, 0x09,0x01,0x04,0x02,0x01,0x04,0x04,0x13,0x06,0xe5,0x02,0x07,0x05,0xe5,0x01,0x01, 0x07,0x01,0x0e,0x02,0x01,0x04,0x02,0x01,0x04,0x04,0x03,0xe5,0x01,0x01,0x1c,0x02, 0x01,0x01,0xe7,0x3e,0x01,0x13,0x09,0x04,0xe6,0x07,0x04,0x06,0x06,0x01,0x04,0x02, 0x01,0x01,0x03,0x0c,0x02,0x05,0x07,0x06,0x07,0xe5,0x07,0x01,0x01,0x03,0x01,0xe6, 0x03,0x28,0xe9,0x01,0x3c,0x01,0x01,0x0f,0x03,0x05,0x03,0x09,0x05,0x02,0x08,0x01, 0x01,0x02,0x02,0x03,0x02,0x01,0xe5,0x01,0x03,0x01,0x04,0x09,0x05,0x01,0xe5,0x02, 0x01,0x02,0x01,0x01,0x01,0xe5,0x03,0x05,0x01,0x01,0x05,0x03,0x09,0x1d,0x01,0x01, 0x01,0xe5,0x01,0x39,0x01,0xe5,0xe5,0x12,0x04,0x01,0xe5,0xe5,0x02,0xe5,0x01,0x01, 0x04,0x01,0xe5,0x05,0x02,0x06,0x06,0x01,0x02,0x01,0x02,0x04,0x01,0x05,0x04,0x01, 0x0e,0x01,0xe5,0x08,0x01,0x04,0x01,0xe5,0xe5,0x06,0x01,0x04,0x02,0x01,0x21,0xe5, 0xe7,0x3e,0x01,0x11,0x09,0x01,0x03,0xe5,0x01,0x01,0x0a,0x02,0xe6,0x02,0x03,0xe6, 0x02,0x04,0x01,0x01,0x03,0x10,0x01,0x01,0xe5,0x06,0x08,0x07,0x01,0x03,0xe5,0x01, 0x01,0x07,0x01,0x01,0xe5,0xe6,0xe5,0xe5,0x20,0x02,0xe6,0x08,0x02,0x1a,0x02,0x06, 
0xe5,0x11,0x13,0x13,0xe5,0x04,0x02,0x02,0x06,0x02,0x05,0x09,0x02,0x09,0x02,0x03, 0x02,0xe5,0x04,0x02,0x02,0x10,0xe5,0x07,0xe5,0x04,0x04,0x26,0x02,0xe5,0xe5,0x25, 0xe5,0x07,0x13,0x01,0x13,0x11,0x05,0xe5,0x01,0x06,0x02,0xe5,0x05,0xe5,0x07,0xe5, 0x03,0x04,0x02,0x05,0xe5,0x01,0x05,0xe5,0x01,0x06,0x02,0xe5,0x04,0x02,0x0f,0xe5, 0x01,0x28,0x02,0xe5,0x0d,0x09,0x09,0x09,0x03,0x05,0x09,0x02,0x02,0x03,0x09,0x02, 0x01,0x01,0x02,0x09,0x05,0xe5,0x01,0xe5,0x03,0x03,0x0b,0xe6,0x06,0xe5,0xe5,0x02, 0x02,0x02,0xe5,0x04,0xe5,0x03,0xe5,0x01,0xe5,0x01,0x02,0x02,0x02,0x06,0x05,0xe5, 0x01,0x06,0x02,0xe5,0x03,0xe5,0x01,0x09,0x09,0x06,0x06,0xe6,0x01,0x02,0x45,0x13, 0x12,0xe5,0x27,0xe5,0x0f,0x0b,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x19,0x02,0x04, 0x03,0x01,0xe5,0x46,0x13,0x13,0x05,0x09,0x15,0x03,0x05,0x09,0x09,0x03,0x05,0x09, 0x03,0x07,0x07,0x03,0x1c,0x09,0xe8,0xe5,0x1e,0x01,0x07,0x01,0x04,0x0c,0x01,0x09, 0x07,0x01,0x04,0x04,0x07,0x01,0x06,0x03,0xe5,0x0b,0x07,0x01,0x07,0xe5,0x0e,0x01, 0xe7,0x06,0xe5,0x0e,0x01,0x04,0x02,0x01,0x07,0x01,0xe5,0xe5,0x05,0x22,0x02,0xe5, 0x20,0x01,0xe5,0x05,0x01,0xe5,0x0e,0xe5,0xe6,0x05,0x02,0x05,0xe5,0xe6,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x02,0xe5,0x08,0x0b,0x02,0x02,0x02,0xe5,0x01,0x06,0x05, 0x01,0xe6,0x07,0xe5,0x01,0x06,0x05,0x01,0xe6,0x03,0xe5,0xe7,0x03,0xe5,0x01,0xe5, 0x04,0x02,0x24,0xe5,0x4a,0x13,0x06,0x07,0x02,0x06,0x1a,0xe5,0x0c,0x09,0x02,0x01, 0xe5,0x0b,0xe5,0x08,0x0e,0x03,0x05,0x22,0xe5,0x01,0x15,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6,0xe5,0x02,0x01,0xe5,0x07,0xe5, 0x02,0x04,0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe5, 0x01,0x05,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x06, 0xe5,0xe6,0x20,0x01,0x07,0x01,0x0a,0xe5,0x04,0x01,0x09,0x07,0x01,0xe6,0x06,0xe6, 0xe5,0xe5,0xe5,0xe6,0x07,0xe5,0x08,0xe5,0x09,0xe5,0x04,0x02,0x06,0x02,0x06,0x01, 0xe5,0x05,0x01,0xe5,0x08,0x06,0x01,0x07,0x01,0x07,0x01,0xe5,0x07,0x22,0xe5,0xe6, 
0x20,0x01,0x07,0x01,0x11,0x01,0x07,0x09,0x01,0x07,0x01,0x07,0x01,0xe5,0x1a,0x16, 0x07,0x01,0xe5,0x19,0x01,0x07,0x01,0x07,0x02,0x06,0x24,0xe6,0xe6,0x49,0x13,0x05, 0x0e,0x08,0x16,0x09,0x13,0x09,0x1c,0x09,0x23,0xe7,0x02,0x34,0x10,0x0c,0x06,0x01, 0xe5,0x08,0x08,0x02,0x06,0x02,0x0b,0x06,0x02,0x09,0x09,0x09,0x06,0x02,0x09,0x04, 0x04,0x09,0x04,0x26,0x01,0x6c,0x04,0x01,0xe5,0x0c,0x03,0x0a,0x3b,0x01,0xe5,0x02, 0x0a,0x26,0x02,0xe5,0x01,0xe5,0x0c,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05, 0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x07,0x01,0xe6,0x04,0x01,0x01,0x05,0x01,0x01, 0x05,0x01,0x02,0x01,0x02,0x01,0x01,0x05,0x01,0x09,0x01,0x01,0x05,0x01,0xe5,0xe5, 0x03,0x01,0x01,0x05,0x01,0x02,0x04,0xe6,0x01,0x04,0x01,0x01,0x05,0xe6,0x02,0x03, 0x01,0x02,0x04,0xe6,0xe5,0x04,0x01,0x06,0xe5,0xe5,0x06,0x01,0x02,0x07,0x02,0xe5, 0x0b,0x11,0x32,0x03,0x04,0x08,0x0f,0xe5,0x13,0xe5,0x06,0xe5,0x08,0xe5,0x07,0xe5, 0x01,0x05,0x09,0xe5,0x0e,0x08,0x03,0xe5,0x2c,0xe7,0x07,0x25,0x18,0x09,0x09,0x06, 0x02,0x0a,0x01,0x06,0x0c,0x01,0x09,0x05,0x02,0x08,0x04,0x06,0x08,0x03,0xe5,0x12, 0x04,0x0c,0x1f,0xe6,0xe6,0xe5,0x52,0x0d,0x01,0x09,0x12,0x01,0x18,0x01,0x07,0x05, 0x0c,0x1d,0x0b,0x21,0x01,0x01,0xe5,0x24,0x0d,0x21,0x0f,0x09,0xe5,0x02,0x04,0xe5, 0x04,0x03,0x0a,0xe5,0xe5,0x02,0x03,0x01,0x02,0x02,0x04,0x06,0x01,0xe5,0x51,0x01, 0xe7,0x1e,0x05,0x35,0x08,0x0f,0x08,0x05,0x01,0xe5,0x05,0x06,0x09,0x0f,0xe5,0xe5, 0x19,0x06,0x27,0x0b,0x01,0xe6,0x07,0x04,0xe5,0xe5,0x10,0x09,0x09,0x2b,0x09,0x09, 0x01,0x0a,0xe5,0x04,0x05,0x09,0x10,0x01,0x02,0x0a,0x09,0x03,0x01,0x02,0x09,0x13, 0x17,0x01,0x02,0x0a,0x02,0x13,0x09,0xe7,0x05,0xe5,0xe5,0x08,0x06,0x01,0xe5,0x08, 0x03,0x0f,0x03,0x05,0x09,0xe5,0x04,0x0e,0x16,0x0d,0x01,0x0f,0x14,0xe5,0x01,0x06, 0x0d,0x0c,0x03,0x02,0x0c,0x13,0xe5,0x11,0x1a,0x1d,0x12,0x01,0x08,0xe5,0x04,0xe6, 0x19,0x1d,0x05,0x03,0x05,0x02,0x07,0x02,0x17,0x02,0xe5,0x11,0x12,0x18,0x09,0x04, 0x0c,0x06,0x14,0x0e,0x06,0x08,0x03,0x14,0x04,0x03,0x0a,0x04,0x1c,0x03,0x12,0x0a, 
0x01,0x01,0x46,0x1b,0x02,0x0f,0x03,0x06,0x03,0xe6,0x1b,0x05,0xe5,0x13,0x0c,0x3d, 0xe5,0x0f,0x02,0x04,0x02,0x01,0x02,0x01,0x04,0xe5,0x02,0x09,0x04,0x04,0x03,0x05, 0x01,0x02,0x04,0x09,0x01,0x02,0x04,0x2b,0x16,0x04,0x09,0x0d,0x01,0x03,0x04,0x02, 0x01,0x13,0x01,0x07,0x09,0x06,0x01,0x04,0x0c,0x01,0x03,0x01,0x09,0x08,0x07,0x01, 0xe5,0xe5,0xe5,0x03,0xe5,0x0c,0x05,0x09,0x08,0xe6,0xe5,0x13,0x13,0x08,0x0a,0x07, 0x06,0x02,0x07,0x01,0x09,0x09,0x07,0x01,0x09,0x02,0x06,0x02,0x0e,0xe5,0x16,0x0a, 0x0e,0x05,0x09,0x03,0x03,0x09,0x01,0x01,0xe5,0x03,0xe5,0x09,0x06,0x1f,0x1e,0xe5, 0x07,0xe5,0x11,0xe5,0x04,0x04,0x01,0x0f,0xe5,0x07,0x01,0x0b,0x03,0xe5,0xe5,0x0b, 0x02,0xe5,0x08,0x09,0x09,0x09,0x04,0xe5,0x02,0x09,0x09,0x05,0x02,0xe5,0xe5,0x01, 0xe5,0x01,0xe5,0xe5,0x03,0xe7,0x07,0xe5,0x03,0x05,0xe5,0x05,0xe7,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x08,0x04,0xe5,0x02,0x04,0x04,0x08,0xe5,0x03,0x04,0x08,0xe5,0x07, 0xe5,0x0c,0x02,0xe5,0x17,0x14,0x05,0x0a,0x09,0x01,0x0a,0x08,0x01,0x08,0xe5,0x01, 0x01,0x02,0x01,0x09,0x02,0x07,0xe5,0x06,0x01,0x09,0x08,0xe5,0x08,0x0e,0x18,0x09, 0x09,0x09,0x0e,0x02,0x03,0x0b,0x06,0x02,0x06,0x0c,0x06,0x0c,0x0e,0x01,0x02,0x02, 0x06,0x03,0x05,0x03,0x05,0x06,0x02,0x04,0x01,0x04,0x03,0x02,0x02,0x09,0x09,0x03, 0x03,0x01,0x07,0x01,0x07,0x07,0x01,0x01,0x09,0xe5,0x11,0x09,0x0f,0x02,0x08,0xe5, 0x1c,0x08,0x13,0x09,0x03,0x09,0x0d,0x0b,0x09,0x0b,0x17,0x03,0x0b,0x09,0xe5,0x07, 0x27,0x12,0x02,0xe6,0xe5,0x0e,0x01,0x0c,0x04,0x04,0x05,0x05,0x02,0x13,0x0a,0x03, 0x18,0x09,0x0b,0xe5,0x04,0x02,0x09,0x01,0x07,0x07,0x01,0x07,0x08,0xe5,0xe5,0x02, 0x01,0x01,0x01,0x04,0x0e,0x09,0x09,0x0f,0x01,0xe5,0x0b,0xe6,0x07,0xe5,0x05,0x01, 0xe5,0x02,0x04,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x07, 0xe6,0x06,0xe6,0x06,0xe5,0x07,0xe5,0x08,0xe6,0x07,0xe5,0x07,0xe5,0x06,0xe6,0x07, 0x13,0x03,0x03,0x0b,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x0e,0x01,0xe6,0x02, 0x09,0x01,0x07,0x05,0x03,0x01,0x07,0x07,0x01,0x01,0x07,0x08,0xe5,0xe5,0x06,0x09, 
0x04,0x04,0x09,0x09,0x0a,0xe5,0x02,0x05,0x09,0x05,0x03,0x12,0x0c,0x04,0x05,0x06, 0x08,0xe5,0x08,0x09,0x0b,0x06,0x01,0x0e,0xe5,0x07,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0xe5,0xe5,0x05,0xe5,0xe5,0x07,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5, 0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0x09,0x09,0x09,0xe5,0x07,0x02,0x06,0xe5, 0xe5,0x05,0xe5,0xe5,0x0b,0x02,0xe5,0x0d,0x02,0x06,0x01,0x07,0x09,0x01,0x02,0x04, 0x09,0x01,0x07,0x09,0x01,0x07,0x03,0x05,0x05,0x03,0x02,0x06,0x0b,0x04,0x04,0x09, 0x1f,0x07,0x13,0x09,0x09,0x09,0x02,0x10,0x12,0x44,0x02,0x04,0xe5,0x04,0x02,0xe5, 0x18,0x25,0x01,0x01,0x21,0x2f,0x05,0x01,0x01,0x0e,0x09,0x02,0x06,0x02,0x06,0x09, 0x02,0x01,0x02,0x01,0x02,0x01,0x04,0x09,0x09,0x09,0x01,0xe5,0x05,0x05,0x03,0x02, 0x05,0x02,0x09,0x02,0xe5,0x04,0x04,0x03,0xe5,0x07,0x03,0x05,0xe5,0x08,0x05,0x02, 0xe5,0x02,0x03,0x01,0x04,0x04,0x09,0x09,0x09,0x06,0xe5,0xe5,0x06,0x18,0x09,0x09, 0x1d,0x0f,0x13,0x03,0x0a,0x0a,0x04,0x10,0x09,0x06,0xe5,0xe5,0x4a,0xe5,0x01,0x07, 0x08,0xe5,0x07,0x09,0x09,0x03,0x05,0x09,0x09,0xe5,0x07,0x09,0x09,0x09,0x05,0x03, 0x06,0xe5,0x02,0x09,0x09,0x09,0xe5,0x07,0x07,0x01,0x01,0x07,0x01,0x07,0x05,0x03, 0x01,0x07,0x09,0x09,0x0e,0x01,0xe5,0x03,0x61,0x11,0x01,0x07,0x0b,0x09,0x09,0x03, 0x05,0x02,0xe5,0x05,0x15,0x3b,0xe5,0x14,0x13,0x13,0x09,0x14,0x09,0x0d,0x09,0x06, 0x04,0x09,0x04,0xe5,0x02,0x04,0x04,0x18,0x13,0x09,0x27,0xe5,0xe5,0x09,0x17,0x06, 0x02,0x4f,0x4f,0x09,0x2b,0x05,0xe6,0x0a,0x3b,0x27,0x5b,0x3c,0xe6,0x3c,0x5b,0x1e, 0x26,0x27,0xe5,0xe5,0x03,0x10,0x31,0x2a,0x05,0x03,0x05,0x02,0x02,0x05,0x03,0x05, 0x03,0x05,0xe5,0x01,0x05,0x14,0x13,0x29,0x06,0xe5,0xe6,0x46,0x27,0x15,0x46,0x33, 0x05,0xe5,0x01,0x04,0x57,0xa9,0x03,0x14,0x09,0x09,0x09,0x09,0x09,0xe6,0x06,0x09, 0xe6,0x06,0x09,0xe6,0x06,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0xe6,0x06,0x09, 0x09,0x09,0x09,0x0a,0x02,0x05,0x09,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x05,0x01,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x07,0xe5,0x05,0x02,0x09,0x07,0x03, 
0x09,0x09,0x09,0x09,0x04,0x03,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x07,0xe5,0x07,0xe5, 0x08,0x09,0x10,0xe6,0x15,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05, 0x03,0x05,0x03,0x09,0x05,0x03,0x04,0x09,0x05,0x05,0x09,0x09,0x09,0x14,0x03,0x09, 0x05,0x03,0x09,0x27,0x01,0x0d,0x03,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03, 0x09,0x05,0x03,0x09,0x05,0x03,0x09,0x01,0x04,0x04,0x04,0x06,0x04,0x04,0x04,0x04, 0x04,0x04,0x04,0x06,0x0b,0x03,0x09,0x09,0x09,0x09,0x03,0x05,0x03,0x0d,0x02,0x01, 0x0b,0x06,0x01,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x02, 0x01,0x01,0x05,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x02,0x01,0x02,0x01,0x01,0xe5, 0x06,0x02,0x02,0x03,0x02,0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 0x02,0x02,0x02,0x04,0x02,0x01,0x04,0x02,0x01,0x01,0xe5,0x03,0x01,0x01,0x02,0x04, 0x02,0x01,0x02,0x01,0x01,0x02,0x04,0x04,0x04,0x06,0x02,0x02,0xe6,0xe5,0xe5,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
gpl-2.0