repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
heicrd/android_kernel_motorola_omap4-common | drivers/video/aty/radeon_backlight.c | 11013 | 6262 | /*
* Backlight code for ATI Radeon based graphic cards
*
* Copyright (c) 2000 Ani Joshi <ajoshi@kernel.crashing.org>
* Copyright (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "radeonfb.h"
#include <linux/backlight.h>
#include <linux/slab.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#define MAX_RADEON_LEVEL 0xFF
/* Private data attached to the backlight class device via bl_get_data(). */
struct radeon_bl_privdata {
    struct radeonfb_info *rinfo;    /* owning radeonfb instance */
    uint8_t negative;               /* non-zero: panel wants inverted brightness polarity */
};
/*
 * Translate a backlight level index into the value programmed into the
 * LVDS brightness field, honouring inverted-polarity panels.
 */
static int radeon_bl_get_level_brightness(struct radeon_bl_privdata *pdata,
    int level)
{
    /* Scale the curve entry into the hardware's 0..MAX_RADEON_LEVEL range.
     * bl_curve is read without locking since it is a single value. */
    int hw = pdata->rinfo->info->bl_curve[level] *
        FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL;

    /* Clamp to the valid register range */
    if (hw > MAX_RADEON_LEVEL)
        hw = MAX_RADEON_LEVEL;
    else if (hw < 0)
        hw = 0;

    /* Some panels drive the backlight with inverted polarity */
    return pdata->negative ? MAX_RADEON_LEVEL - hw : hw;
}
/*
 * Apply the current brightness/power properties to the LVDS panel.
 *
 * Rather than merely dimming, a brightness of 0 powers the panel down
 * completely; bringing it back up is sequenced via rinfo->lvds_timer so
 * the panel's power-up delay (panel_info.pwr_delay) is respected.
 */
static int radeon_bl_update_status(struct backlight_device *bd)
{
    struct radeon_bl_privdata *pdata = bl_get_data(bd);
    struct radeonfb_info *rinfo = pdata->rinfo;
    u32 lvds_gen_cntl, tmpPixclksCntl;
    int level;

    /* Backlight control only applies to LVDS panels */
    if (rinfo->mon1_type != MT_LCD)
        return 0;

    /* We turn off the LCD completely instead of just dimming the
     * backlight. This provides some greater power saving and the display
     * is useless without backlight anyway.
     */
    if (bd->props.power != FB_BLANK_UNBLANK ||
        bd->props.fb_blank != FB_BLANK_UNBLANK)
        level = 0;
    else
        level = bd->props.brightness;

    /* Cancel any in-flight delayed LVDS power sequencing before we
     * reprogram the register ourselves */
    del_timer_sync(&rinfo->lvds_timer);
    radeon_engine_idle();

    lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
    if (level > 0) {
        lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
        if (!(lvds_gen_cntl & LVDS_BLON) || !(lvds_gen_cntl & LVDS_ON)) {
            /* Panel (or backlight) is currently off: power it up, program
             * the new level, then let the timer complete the sequence
             * after the panel's power delay */
            lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_DIGON);
            lvds_gen_cntl |= LVDS_BLON | LVDS_EN;
            OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
            lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
            lvds_gen_cntl |=
                (radeon_bl_get_level_brightness(pdata, level) <<
                 LVDS_BL_MOD_LEVEL_SHIFT);
            lvds_gen_cntl |= LVDS_ON;
            lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_BL_MOD_EN);
            rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
            mod_timer(&rinfo->lvds_timer,
                      jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
        } else {
            /* Already lit: just program the new brightness level */
            lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
            lvds_gen_cntl |=
                (radeon_bl_get_level_brightness(pdata, level) <<
                 LVDS_BL_MOD_LEVEL_SHIFT);
            OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
        }
        /* Record the (pending) on-state bits for restore on resume */
        rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
        rinfo->init_state.lvds_gen_cntl |= rinfo->pending_lvds_gen_cntl
            & LVDS_STATE_MASK;
    } else {
        /* Asic bug, when turning off LVDS_ON, we have to make sure
           RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off
        */
        tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
        if (rinfo->is_mobility || rinfo->is_IGP)
            OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
        lvds_gen_cntl &= ~(LVDS_BL_MOD_LEVEL_MASK | LVDS_BL_MOD_EN);
        lvds_gen_cntl |= (radeon_bl_get_level_brightness(pdata, 0) <<
                          LVDS_BL_MOD_LEVEL_SHIFT);
        lvds_gen_cntl |= LVDS_DISPLAY_DIS;
        OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
        udelay(100);
        lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN);
        OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
        /* DIGON is dropped later by the timer via pending_lvds_gen_cntl */
        lvds_gen_cntl &= ~(LVDS_DIGON);
        rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
        mod_timer(&rinfo->lvds_timer,
                  jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
        if (rinfo->is_mobility || rinfo->is_IGP)
            OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
    }
    rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
    rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);

    return 0;
}
static int radeon_bl_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static const struct backlight_ops radeon_bl_data = {
.get_brightness = radeon_bl_get_brightness,
.update_status = radeon_bl_update_status,
};
void radeonfb_bl_init(struct radeonfb_info *rinfo)
{
struct backlight_properties props;
struct backlight_device *bd;
struct radeon_bl_privdata *pdata;
char name[12];
if (rinfo->mon1_type != MT_LCD)
return;
#ifdef CONFIG_PMAC_BACKLIGHT
if (!pmac_has_backlight_type("ati") &&
!pmac_has_backlight_type("mnca"))
return;
#endif
pdata = kmalloc(sizeof(struct radeon_bl_privdata), GFP_KERNEL);
if (!pdata) {
printk("radeonfb: Memory allocation failed\n");
goto error;
}
snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, rinfo->info->dev, pdata,
&radeon_bl_data, &props);
if (IS_ERR(bd)) {
rinfo->info->bl_dev = NULL;
printk("radeonfb: Backlight registration failed\n");
goto error;
}
pdata->rinfo = rinfo;
/* Pardon me for that hack... maybe some day we can figure out in what
* direction backlight should work on a given panel?
*/
pdata->negative =
(rinfo->family != CHIP_FAMILY_RV200 &&
rinfo->family != CHIP_FAMILY_RV250 &&
rinfo->family != CHIP_FAMILY_RV280 &&
rinfo->family != CHIP_FAMILY_RV350);
#ifdef CONFIG_PMAC_BACKLIGHT
pdata->negative = pdata->negative ||
of_machine_is_compatible("PowerBook4,3") ||
of_machine_is_compatible("PowerBook6,3") ||
of_machine_is_compatible("PowerBook6,5");
#endif
rinfo->info->bl_dev = bd;
fb_bl_default_curve(rinfo->info, 0,
63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL,
217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);
bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
printk("radeonfb: Backlight initialized (%s)\n", name);
return;
error:
kfree(pdata);
return;
}
/*
 * Tear down the backlight device registered by radeonfb_bl_init().
 * Safe to call when no backlight was ever registered.
 */
void radeonfb_bl_exit(struct radeonfb_info *rinfo)
{
    struct radeon_bl_privdata *pdata;
    struct backlight_device *bd = rinfo->info->bl_dev;

    if (!bd)
        return;

    /* Fetch our private data before the class device goes away */
    pdata = bl_get_data(bd);
    backlight_device_unregister(bd);
    kfree(pdata);
    rinfo->info->bl_dev = NULL;
    printk("radeonfb: Backlight unloaded\n");
}
| gpl-2.0 |
willsowerbutts/FUZIX | Applications/games/myst05.c | 6 | 65611 | #define NUM_OBJ 91
#define WORDSIZE 4
#define GAME_MAGIC 562
#include <stdint.h>
/* One room in the game map. */
struct location {
    const uint8_t *text;    /* room description; leading '*' presumably means "print verbatim" -- confirm against engine */
    uint8_t exit[6];        /* destination room per direction, ordered as exitmsgptr[]: N, S, E, W, Up, Down; 0 = no exit */
};
/* Fixed interpreter messages printed by the game engine. */
const uint8_t toomuch[] = { "I am carrying too much. " };
const uint8_t dead[] = { "I am dead.\n" };
const uint8_t stored_msg[] = { "I have stored " };
const uint8_t stored_msg2[] = { " treasures. On a scale of 0 to 100, that rates " };
const uint8_t dotnewline[] = { ".\n" };
const uint8_t newline[] = { "\n" };
const uint8_t carrying[] = { "I am carrying:\n" };
const uint8_t dashstr[] = { " - " };
const uint8_t nothing[] = { "nothing" };
const uint8_t lightout[] = { "My light has run out." };
const uint8_t lightoutin[] = { "My light runs out in " };
const uint8_t turns[] = { "turns" };
const uint8_t turn[] = { "turn" };
const uint8_t whattodo[] = { "\nTell me what to do ? " };
const uint8_t prompt[] = { "\n> " };
const uint8_t dontknow[] = { "You use word(s) I don't know! " };
const uint8_t givedirn[] = { "Give me a direction too. " };
const uint8_t darkdanger[] = { "Dangerous to move in the dark! " };
const uint8_t brokeneck[] = { "I fell down and broke my neck. " };
const uint8_t cantgo[] = { "I can't go in that direction. " };
const uint8_t dontunderstand[] = { "I don't understand your command. " };
const uint8_t notyet[] = { "I can't do that yet. " };
const uint8_t beyondpower[] = { "It is beyond my power to do that. " };
const uint8_t okmsg[] = { "O.K. " };
const uint8_t whatstr[] = { "What ? " };
const uint8_t itsdark[] = { "I can't see. It is too dark!" };
const uint8_t youare[] = { "I am in a " };
const uint8_t nonestr[] = { "none" };
const uint8_t obexit[] = { "\nObvious exits: " };
const uint8_t canalsosee[] = { "I can also see: " };
const uint8_t playagain[] = { "Do you want to play again Y/N: " };
const uint8_t invcond[] = { "INVCOND" };
/* Direction names, indexed in the same order as struct location's exit[] */
const uint8_t *exitmsgptr[] = {
    (uint8_t *)"North",
    (uint8_t *)"South",
    (uint8_t *)"East",
    (uint8_t *)"West",
    (uint8_t *)"Up",
    (uint8_t *)"Down"
};
/*
 *
 * Game database follows below. Although linked into the same asm
 * file to make life easier, this is just "mere aggregation" for
 * convenience, due to limits in the tool chain; the game licence,
 * not the GPL, applies to the game database.
 */
/* Per-game database header constants (Scott Adams-style engine).
 * NOTE(review): exact semantics depend on the engine core, which is not
 * visible here -- the per-field notes below are the conventional meanings
 * and should be confirmed against the interpreter. */
const uint8_t startlamp = 200;      /* presumably initial lamp duration, in turns */
const uint8_t lightfill = 200;      /* presumably lamp refill value, in turns */
const uint8_t startcarried = 1;     /* number of objects carried at game start */
const uint8_t maxcar = 6;           /* inventory capacity */
const uint8_t treasure = 0;         /* this game has no treasures */
const uint8_t treasures = 0;
const uint8_t lastloc = 45;         /* highest valid index into locdata[] */
const uint8_t startloc = 1;         /* player's starting room */
/* Room description strings, stored by the database generator as raw ASCII
 * byte values with a terminating NUL.  A leading '*' (42) marks a
 * description printed verbatim instead of after the "I am in a " prefix;
 * e.g. loctxt_1 decodes to "*I'm in the freighter's social room".
 * loctxt_44 appears to be deliberately scrambled (anti-peek) data. */
const uint8_t loctxt_0[] = {
67, 111, 112, 121, 114, 105, 103, 104, 116, 32, 40, 67, 41, 32, 49, 57, 56, 50, 32, 66, 46, 72, 79, 87, 65, 82, 84, 72, 0 };
const uint8_t loctxt_1[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 102, 114, 101, 105, 103, 104, 116, 101, 114, 39, 115, 32, 115, 111, 99, 105, 97, 108, 32, 114, 111, 111, 109, 0 };
const uint8_t loctxt_2[] = {
99, 114, 101, 119, 109, 97, 110, 39, 115, 32, 99, 97, 98, 105, 110, 0 };
const uint8_t loctxt_3[] = {
110, 105, 99, 101, 32, 119, 97, 114, 109, 32, 98, 117, 110, 107, 0 };
const uint8_t loctxt_4[] = {
99, 114, 101, 119, 109, 97, 110, 39, 115, 32, 99, 97, 98, 105, 110, 0 };
const uint8_t loctxt_5[] = {
42, 73, 39, 109, 32, 111, 110, 32, 97, 32, 99, 114, 101, 119, 109, 97, 110, 39, 115, 32, 98, 117, 110, 107, 0 };
const uint8_t loctxt_6[] = {
108, 97, 114, 103, 101, 32, 97, 105, 114, 32, 118, 101, 110, 116, 0 };
const uint8_t loctxt_7[] = {
42, 73, 39, 109, 32, 97, 116, 32, 116, 104, 101, 32, 106, 117, 110, 99, 116, 105, 111, 110, 32, 111, 102, 32, 116, 104, 114, 101, 101, 32, 112, 97, 115, 115, 97, 103, 101, 115, 0 };
const uint8_t loctxt_8[] = {
42, 73, 39, 109, 32, 111, 110, 32, 116, 104, 101, 32, 115, 104, 105, 112, 39, 115, 32, 66, 114, 105, 100, 103, 101, 0 };
const uint8_t loctxt_9[] = {
42, 73, 39, 109, 32, 105, 110, 32, 97, 110, 32, 65, 105, 114, 108, 111, 99, 107, 0 };
const uint8_t loctxt_10[] = {
42, 73, 39, 109, 32, 105, 110, 115, 105, 100, 101, 32, 116, 104, 101, 32, 99, 111, 110, 116, 114, 111, 108, 32, 99, 111, 110, 115, 111, 108, 101, 0 };
const uint8_t loctxt_11[] = {
112, 97, 115, 115, 97, 103, 101, 119, 97, 121, 0 };
const uint8_t loctxt_12[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 71, 97, 108, 108, 101, 121, 0 };
const uint8_t loctxt_13[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 67, 97, 112, 116, 97, 105, 110, 39, 115, 32, 99, 97, 98, 105, 110, 0 };
const uint8_t loctxt_14[] = {
42, 73, 39, 109, 32, 111, 110, 32, 116, 104, 101, 32, 67, 97, 112, 116, 97, 105, 110, 39, 115, 32, 98, 117, 110, 107, 0 };
const uint8_t loctxt_15[] = {
108, 97, 114, 103, 101, 32, 109, 101, 116, 97, 108, 32, 112, 105, 112, 101, 0 };
const uint8_t loctxt_16[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 101, 109, 101, 114, 103, 101, 110, 99, 121, 32, 65, 105, 114, 108, 111, 99, 107, 0 };
const uint8_t loctxt_17[] = {
42, 73, 39, 109, 32, 111, 117, 116, 32, 111, 110, 32, 116, 104, 101, 32, 72, 117, 108, 108, 0 };
const uint8_t loctxt_18[] = {
83, 104, 117, 116, 116, 108, 101, 32, 67, 114, 97, 102, 116, 32, 98, 97, 121, 0 };
const uint8_t loctxt_19[] = {
83, 104, 117, 116, 116, 108, 101, 32, 67, 114, 97, 102, 116, 0 };
const uint8_t loctxt_20[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_21[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_22[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_23[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_24[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_25[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_26[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_27[] = {
109, 97, 122, 101, 32, 111, 102, 32, 118, 101, 110, 116, 105, 108, 97, 116, 105, 111, 110, 32, 100, 117, 99, 116, 115, 0 };
const uint8_t loctxt_28[] = {
119, 111, 114, 107, 115, 104, 111, 112, 0 };
const uint8_t loctxt_29[] = {
42, 73, 39, 109, 32, 117, 110, 100, 101, 114, 110, 101, 97, 116, 104, 32, 116, 104, 101, 32, 76, 97, 116, 104, 101, 0 };
const uint8_t loctxt_30[] = {
115, 116, 111, 114, 101, 114, 111, 111, 109, 0 };
const uint8_t loctxt_31[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 82, 101, 97, 99, 116, 111, 114, 32, 114, 111, 111, 109, 0 };
const uint8_t loctxt_32[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 67, 82, 69, 65, 84, 85, 82, 69, 39, 83, 32, 104, 105, 100, 101, 111, 117, 116, 0 };
const uint8_t loctxt_33[] = {
116, 111, 116, 97, 108, 108, 121, 32, 119, 114, 101, 99, 107, 101, 100, 32, 99, 97, 98, 105, 110, 0 };
const uint8_t loctxt_34[] = {
42, 73, 39, 109, 32, 111, 110, 32, 97, 32, 98, 117, 110, 107, 0 };
const uint8_t loctxt_35[] = {
99, 97, 98, 108, 101, 32, 114, 117, 110, 119, 97, 121, 0 };
const uint8_t loctxt_36[] = {
99, 97, 114, 103, 111, 32, 115, 116, 111, 114, 97, 103, 101, 32, 104, 111, 108, 100, 0 };
const uint8_t loctxt_37[] = {
108, 97, 114, 103, 101, 32, 99, 97, 103, 101, 0 };
const uint8_t loctxt_38[] = {
99, 97, 114, 103, 111, 32, 115, 116, 111, 114, 97, 103, 101, 32, 104, 111, 108, 100, 0 };
const uint8_t loctxt_39[] = {
108, 97, 114, 103, 101, 32, 99, 114, 97, 116, 101, 0 };
const uint8_t loctxt_40[] = {
76, 97, 114, 100, 101, 114, 0 };
const uint8_t loctxt_41[] = {
109, 101, 116, 97, 108, 32, 112, 97, 115, 115, 97, 103, 101, 119, 97, 121, 0 };
const uint8_t loctxt_42[] = {
42, 73, 39, 109, 32, 97, 116, 32, 97, 32, 106, 117, 110, 99, 116, 105, 111, 110, 32, 111, 102, 32, 116, 104, 114, 101, 101, 32, 112, 97, 115, 115, 97, 103, 101, 115, 0 };
const uint8_t loctxt_43[] = {
109, 101, 116, 97, 108, 32, 112, 97, 115, 115, 97, 103, 101, 119, 97, 121, 0 };
const uint8_t loctxt_44[] = {
101, 115, 114, 102, 111, 116, 100, 100, 116, 99, 111, 116, 103, 0 };
const uint8_t loctxt_45[] = {
42, 73, 39, 109, 32, 105, 110, 32, 116, 104, 101, 32, 77, 65, 78, 85, 82, 69, 32, 116, 104, 105, 115, 32, 116, 105, 109, 101, 32, 33, 33, 0 };
/* The map: one entry per room (index 0 is the unused title entry).
 * Each exit[] array gives the destination room for N, S, E, W, Up, Down
 * in that order; 0 means no exit.  Rooms 20-27 form the ventilation-duct
 * maze, hence the self-referencing exits. */
const struct location locdata[] = {
{ loctxt_0, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_1, { 0, 2, 0, 4, 0, 0 } },
{ loctxt_2, { 1, 0, 0, 0, 0, 0 } },
{ loctxt_3, { 0, 0, 0, 0, 0, 2 } },
{ loctxt_4, { 0, 0, 1, 0, 0, 0 } },
{ loctxt_5, { 0, 0, 0, 0, 0, 4 } },
{ loctxt_6, { 7, 0, 0, 0, 0, 4 } },
{ loctxt_7, { 0, 11, 8, 9, 0, 0 } },
{ loctxt_8, { 41, 7, 0, 0, 0, 0 } },
{ loctxt_9, { 0, 0, 7, 0, 0, 0 } },
{ loctxt_10, { 8, 0, 0, 0, 0, 0 } },
{ loctxt_11, { 7, 13, 0, 0, 0, 0 } },
{ loctxt_12, { 0, 1, 42, 11, 0, 0 } },
{ loctxt_13, { 11, 0, 43, 42, 0, 0 } },
{ loctxt_14, { 0, 0, 0, 0, 0, 13 } },
{ loctxt_15, { 0, 0, 0, 0, 16, 13 } },
{ loctxt_16, { 0, 0, 0, 0, 0, 15 } },
{ loctxt_17, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_18, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_19, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_20, { 20, 20, 21, 20, 8, 20 } },
{ loctxt_21, { 21, 23, 21, 21, 33, 21 } },
{ loctxt_22, { 20, 24, 22, 22, 32, 22 } },
{ loctxt_23, { 23, 23, 23, 22, 28, 23 } },
{ loctxt_24, { 24, 24, 25, 24, 24, 24 } },
{ loctxt_25, { 23, 27, 25, 25, 25, 25 } },
{ loctxt_26, { 24, 26, 26, 30, 26, 26 } },
{ loctxt_27, { 27, 27, 27, 26, 27, 31 } },
{ loctxt_28, { 0, 0, 0, 0, 0, 25 } },
{ loctxt_29, { 28, 0, 0, 0, 0, 0 } },
{ loctxt_30, { 0, 0, 26, 0, 0, 0 } },
{ loctxt_31, { 0, 0, 0, 0, 24, 0 } },
{ loctxt_32, { 0, 0, 0, 0, 0, 22 } },
{ loctxt_33, { 0, 0, 0, 0, 0, 22 } },
{ loctxt_34, { 0, 0, 0, 0, 0, 33 } },
{ loctxt_35, { 0, 0, 0, 36, 0, 33 } },
{ loctxt_36, { 38, 0, 35, 0, 0, 0 } },
{ loctxt_37, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_38, { 0, 36, 0, 0, 0, 0 } },
{ loctxt_39, { 0, 0, 0, 38, 0, 0 } },
{ loctxt_40, { 12, 0, 0, 0, 0, 0 } },
{ loctxt_41, { 42, 8, 0, 0, 0, 0 } },
{ loctxt_42, { 0, 41, 13, 12, 0, 0 } },
{ loctxt_43, { 0, 0, 28, 13, 0, 0 } },
{ loctxt_44, { 0, 0, 0, 0, 0, 0 } },
{ loctxt_45, { 0, 0, 0, 0, 0, 0 } },
};
/* Initial room number for each of the NUM_OBJ (91) objects, indexed by
 * object id (parallel to objtext[]).  NOTE(review): by Scott Adams engine
 * convention 255 means "carried by the player" (matching startcarried = 1,
 * object 0 = Watch) and 0 means "not yet in play" -- confirm against the
 * interpreter core. */
const uint8_t objinit[] = {
255,
0,
1,
1,
2,
2,
0,
0,
4,
0,
0,
0,
5,
6,
8,
0,
0,
0,
9,
10,
0,
0,
11,
0,
12,
12,
0,
0,
0,
0,
13,
0,
0,
0,
15,
16,
16,
16,
0,
0,
0,
17,
18,
18,
19,
28,
29,
31,
0,
31,
0,
0,
28,
30,
0,
0,
0,
32,
0,
0,
0,
0,
32,
0,
0,
40,
12,
0,
24,
40,
0,
33,
0,
0,
35,
0,
0,
0,
36,
37,
0,
38,
39,
1,
0,
0,
0,
0,
0,
0,
0,
};
/* Object name strings, stored as raw ASCII byte values with a terminating
 * NUL (same encoding as loctxt_*); e.g. objtxt_0 decodes to "Watch".
 * Several objects have paired open/closed or worn/unworn variants that the
 * game swaps between (e.g. objtxt_3/objtxt_5, objtxt_55/objtxt_56). */
const uint8_t objtxt_0[] = {
87, 97, 116, 99, 104, 0 };
const uint8_t objtxt_1[] = {
32, 0 };
const uint8_t objtxt_2[] = {
67, 111, 117, 99, 104, 0 };
const uint8_t objtxt_3[] = {
67, 108, 111, 115, 101, 100, 32, 115, 116, 101, 101, 108, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_4[] = {
67, 114, 101, 119, 109, 97, 110, 39, 115, 32, 66, 117, 110, 107, 0 };
const uint8_t objtxt_5[] = {
79, 112, 101, 110, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_6[] = {
67, 108, 111, 115, 101, 100, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_7[] = {
65, 117, 116, 111, 32, 100, 105, 115, 112, 101, 110, 115, 101, 32, 112, 105, 108, 108, 111, 119, 0 };
const uint8_t objtxt_8[] = {
67, 114, 101, 119, 109, 97, 110, 39, 115, 32, 66, 117, 110, 107, 0 };
const uint8_t objtxt_9[] = {
71, 108, 111, 119, 105, 110, 103, 32, 73, 108, 108, 117, 109, 105, 110, 97, 110, 116, 32, 82, 111, 100, 0 };
const uint8_t objtxt_10[] = {
68, 117, 108, 108, 32, 73, 108, 108, 117, 109, 105, 110, 97, 110, 116, 32, 82, 111, 100, 0 };
const uint8_t objtxt_11[] = {
0 };
const uint8_t objtxt_12[] = {
76, 97, 114, 103, 101, 32, 65, 105, 114, 32, 118, 101, 110, 116, 0 };
const uint8_t objtxt_13[] = {
68, 117, 115, 116, 0 };
const uint8_t objtxt_14[] = {
67, 111, 110, 116, 114, 111, 108, 32, 67, 111, 110, 115, 111, 108, 101, 0 };
const uint8_t objtxt_15[] = {
83, 109, 97, 108, 108, 32, 109, 101, 116, 97, 108, 32, 114, 105, 110, 103, 32, 105, 110, 32, 102, 108, 111, 111, 114, 0 };
const uint8_t objtxt_16[] = {
79, 112, 101, 110, 32, 72, 97, 116, 99, 104, 0 };
const uint8_t objtxt_17[] = {
77, 101, 116, 97, 108, 32, 72, 111, 111, 107, 0 };
const uint8_t objtxt_18[] = {
79, 82, 65, 78, 71, 69, 32, 66, 117, 116, 116, 111, 110, 0 };
const uint8_t objtxt_19[] = {
69, 108, 101, 99, 116, 114, 105, 99, 97, 108, 32, 101, 100, 103, 101, 32, 99, 111, 110, 110, 101, 99, 116, 111, 114, 0 };
const uint8_t objtxt_20[] = {
67, 105, 114, 99, 117, 105, 116, 32, 66, 111, 97, 114, 100, 0 };
const uint8_t objtxt_21[] = {
80, 108, 117, 103, 103, 101, 100, 32, 105, 110, 32, 67, 105, 114, 99, 117, 105, 116, 32, 66, 111, 97, 114, 100, 0 };
const uint8_t objtxt_22[] = {
67, 108, 111, 115, 101, 100, 32, 115, 116, 101, 101, 108, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_23[] = {
79, 112, 101, 110, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_24[] = {
79, 118, 101, 110, 0 };
const uint8_t objtxt_25[] = {
76, 97, 114, 100, 101, 114, 0 };
const uint8_t objtxt_26[] = {
70, 114, 117, 105, 116, 32, 99, 97, 107, 101, 0 };
const uint8_t objtxt_27[] = {
68, 114, 117, 103, 103, 101, 100, 32, 70, 114, 117, 105, 116, 32, 99, 97, 107, 101, 0 };
const uint8_t objtxt_28[] = {
66, 76, 65, 67, 75, 32, 66, 117, 116, 116, 111, 110, 0 };
const uint8_t objtxt_29[] = {
87, 72, 73, 84, 69, 32, 66, 117, 116, 116, 111, 110, 0 };
const uint8_t objtxt_30[] = {
67, 97, 112, 116, 97, 105, 110, 39, 115, 32, 66, 117, 110, 107, 0 };
const uint8_t objtxt_31[] = {
77, 101, 116, 97, 108, 32, 80, 97, 110, 101, 108, 0 };
const uint8_t objtxt_32[] = {
82, 101, 116, 97, 105, 110, 105, 110, 103, 32, 67, 108, 105, 112, 115, 0 };
const uint8_t objtxt_33[] = {
72, 111, 108, 101, 0 };
const uint8_t objtxt_34[] = {
68, 117, 115, 116, 0 };
const uint8_t objtxt_35[] = {
82, 69, 68, 32, 66, 117, 116, 116, 111, 110, 0 };
const uint8_t objtxt_36[] = {
79, 112, 101, 110, 32, 105, 110, 110, 101, 114, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_37[] = {
67, 108, 111, 115, 101, 100, 32, 111, 117, 116, 101, 114, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_38[] = {
67, 108, 111, 115, 101, 100, 32, 105, 110, 110, 101, 114, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_39[] = {
79, 112, 101, 110, 32, 111, 117, 116, 101, 114, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_40[] = {
72, 97, 116, 99, 104, 0 };
const uint8_t objtxt_41[] = {
79, 112, 101, 110, 32, 104, 97, 116, 99, 104, 0 };
const uint8_t objtxt_42[] = {
83, 104, 117, 116, 116, 108, 101, 32, 67, 114, 97, 102, 116, 0 };
const uint8_t objtxt_43[] = {
76, 101, 118, 101, 114, 0 };
const uint8_t objtxt_44[] = {
89, 69, 76, 76, 79, 87, 32, 66, 117, 116, 116, 111, 110, 0 };
const uint8_t objtxt_45[] = {
67, 101, 110, 116, 114, 101, 32, 76, 97, 116, 104, 101, 0 };
const uint8_t objtxt_46[] = {
84, 111, 111, 108, 32, 67, 111, 109, 112, 97, 114, 116, 109, 101, 110, 116, 0 };
const uint8_t objtxt_47[] = {
72, 97, 109, 109, 101, 114, 0 };
const uint8_t objtxt_48[] = {
83, 99, 114, 101, 119, 100, 114, 105, 118, 101, 114, 32, 66, 108, 97, 100, 101, 0 };
const uint8_t objtxt_49[] = {
80, 105, 101, 99, 101, 32, 111, 102, 32, 87, 111, 111, 100, 0 };
const uint8_t objtxt_50[] = {
82, 111, 117, 103, 104, 32, 77, 101, 116, 97, 108, 32, 66, 97, 114, 0 };
const uint8_t objtxt_51[] = {
83, 99, 114, 101, 119, 100, 114, 105, 118, 101, 114, 0 };
const uint8_t objtxt_52[] = {
83, 111, 99, 107, 101, 116, 0 };
const uint8_t objtxt_53[] = {
76, 111, 99, 107, 101, 114, 0 };
const uint8_t objtxt_54[] = {
83, 109, 97, 115, 104, 101, 100, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_55[] = {
83, 112, 97, 99, 101, 32, 83, 117, 105, 116, 0 };
const uint8_t objtxt_56[] = {
83, 112, 97, 99, 101, 32, 83, 117, 105, 116, 32, 40, 119, 111, 114, 110, 41, 0 };
const uint8_t objtxt_57[] = {
77, 97, 103, 110, 101, 116, 105, 99, 32, 66, 111, 111, 116, 115, 0 };
const uint8_t objtxt_58[] = {
77, 97, 103, 110, 101, 116, 105, 99, 32, 66, 111, 111, 116, 115, 32, 40, 119, 111, 114, 110, 41, 0 };
const uint8_t objtxt_59[] = {
67, 82, 69, 65, 84, 85, 82, 69, 0 };
const uint8_t objtxt_60[] = {
32, 0 };
const uint8_t objtxt_61[] = {
83, 108, 101, 101, 112, 105, 110, 103, 32, 67, 82, 69, 65, 84, 85, 82, 69, 0 };
const uint8_t objtxt_62[] = {
67, 82, 69, 65, 84, 85, 82, 69, 0 };
const uint8_t objtxt_63[] = {
87, 104, 105, 116, 101, 32, 84, 97, 98, 108, 101, 116, 115, 0 };
const uint8_t objtxt_64[] = {
67, 97, 107, 101, 32, 77, 105, 120, 0 };
const uint8_t objtxt_65[] = {
66, 97, 103, 32, 111, 102, 32, 70, 108, 111, 117, 114, 0 };
const uint8_t objtxt_66[] = {
66, 111, 116, 116, 108, 101, 32, 111, 102, 32, 87, 97, 116, 101, 114, 0 };
const uint8_t objtxt_67[] = {
80, 117, 100, 100, 108, 101, 32, 111, 102, 32, 87, 97, 116, 101, 114, 0 };
const uint8_t objtxt_68[] = {
67, 97, 107, 101, 32, 84, 105, 110, 0 };
const uint8_t objtxt_69[] = {
66, 97, 103, 32, 111, 102, 32, 82, 97, 105, 115, 105, 110, 115, 0 };
const uint8_t objtxt_70[] = {
79, 112, 101, 110, 32, 115, 116, 101, 101, 108, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_71[] = {
87, 114, 101, 99, 107, 97, 103, 101, 0 };
const uint8_t objtxt_72[] = {
66, 117, 110, 107, 0 };
const uint8_t objtxt_73[] = {
83, 109, 97, 108, 108, 32, 75, 101, 121, 0 };
const uint8_t objtxt_74[] = {
76, 101, 110, 103, 116, 104, 32, 111, 102, 32, 67, 97, 98, 108, 101, 0 };
const uint8_t objtxt_75[] = {
82, 111, 117, 110, 100, 32, 66, 108, 111, 99, 107, 0 };
const uint8_t objtxt_76[] = {
77, 101, 116, 97, 108, 32, 71, 114, 105, 108, 108, 101, 0 };
const uint8_t objtxt_77[] = {
67, 97, 98, 108, 101, 115, 0 };
const uint8_t objtxt_78[] = {
83, 112, 101, 99, 105, 109, 101, 110, 32, 67, 97, 103, 101, 0 };
const uint8_t objtxt_79[] = {
76, 111, 99, 107, 101, 100, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_80[] = {
79, 112, 101, 110, 32, 100, 111, 111, 114, 0 };
const uint8_t objtxt_81[] = {
72, 117, 103, 101, 32, 67, 114, 97, 116, 101, 0 };
const uint8_t objtxt_82[] = {
83, 113, 117, 97, 114, 101, 32, 66, 108, 111, 99, 107, 0 };
const uint8_t objtxt_83[] = {
83, 105, 103, 110, 0 };
const uint8_t objtxt_84[] = {
67, 114, 117, 109, 112, 108, 101, 100, 32, 78, 111, 116, 101, 0 };
const uint8_t objtxt_85[] = {
67, 97, 107, 101, 32, 84, 105, 110, 32, 40, 119, 105, 116, 104, 32, 119, 97, 116, 101, 114, 32, 105, 110, 41, 0 };
const uint8_t objtxt_86[] = {
72, 111, 108, 101, 0 };
const uint8_t objtxt_87[] = {
79, 118, 101, 110, 32, 40, 110, 111, 119, 32, 102, 117, 110, 99, 116, 105, 111, 110, 97, 108, 41, 0 };
const uint8_t objtxt_88[] = {
76, 97, 116, 104, 101, 32, 104, 111, 111, 107, 101, 100, 32, 117, 112, 32, 116, 111, 32, 112, 111, 119, 101, 114, 32, 115, 117, 112, 112, 108, 121, 0 };
const uint8_t objtxt_89[] = {
0 };
const uint8_t objtxt_90[] = {
0 };
/* Object-id -> name-string lookup table (NUM_OBJ entries), parallel to
 * objinit[]. */
const uint8_t *objtext[] = {
objtxt_0,
objtxt_1,
objtxt_2,
objtxt_3,
objtxt_4,
objtxt_5,
objtxt_6,
objtxt_7,
objtxt_8,
objtxt_9,
objtxt_10,
objtxt_11,
objtxt_12,
objtxt_13,
objtxt_14,
objtxt_15,
objtxt_16,
objtxt_17,
objtxt_18,
objtxt_19,
objtxt_20,
objtxt_21,
objtxt_22,
objtxt_23,
objtxt_24,
objtxt_25,
objtxt_26,
objtxt_27,
objtxt_28,
objtxt_29,
objtxt_30,
objtxt_31,
objtxt_32,
objtxt_33,
objtxt_34,
objtxt_35,
objtxt_36,
objtxt_37,
objtxt_38,
objtxt_39,
objtxt_40,
objtxt_41,
objtxt_42,
objtxt_43,
objtxt_44,
objtxt_45,
objtxt_46,
objtxt_47,
objtxt_48,
objtxt_49,
objtxt_50,
objtxt_51,
objtxt_52,
objtxt_53,
objtxt_54,
objtxt_55,
objtxt_56,
objtxt_57,
objtxt_58,
objtxt_59,
objtxt_60,
objtxt_61,
objtxt_62,
objtxt_63,
objtxt_64,
objtxt_65,
objtxt_66,
objtxt_67,
objtxt_68,
objtxt_69,
objtxt_70,
objtxt_71,
objtxt_72,
objtxt_73,
objtxt_74,
objtxt_75,
objtxt_76,
objtxt_77,
objtxt_78,
objtxt_79,
objtxt_80,
objtxt_81,
objtxt_82,
objtxt_83,
objtxt_84,
objtxt_85,
objtxt_86,
objtxt_87,
objtxt_88,
objtxt_89,
objtxt_90,
};
/* Event/action messages, stored as raw ASCII byte values with a
 * terminating NUL (same encoding as loctxt_*/objtxt_*); e.g. msgtxt_1
 * decodes to: Welcome to "ESCAPE FROM PULSAR 7". */
const uint8_t msgtxt_0[] = {
0 };
const uint8_t msgtxt_1[] = {
87, 101, 108, 99, 111, 109, 101, 32, 116, 111, 32, 34, 69, 83, 67, 65, 80, 69, 32, 70, 82, 79, 77, 32, 80, 85, 76, 83, 65, 82, 32, 55, 34, 0 };
const uint8_t msgtxt_2[] = {
98, 121, 32, 66, 114, 105, 97, 110, 32, 72, 111, 119, 97, 114, 116, 104, 32, 38, 32, 87, 104, 101, 114, 110, 101, 114, 32, 66, 97, 114, 110, 101, 115, 0 };
const uint8_t msgtxt_3[] = {
68, 101, 100, 105, 99, 97, 116, 101, 100, 32, 116, 111, 32, 87, 104, 101, 114, 110, 101, 114, 39, 115, 32, 77, 111, 116, 104, 101, 114, 32, 45, 32, 77, 73, 76, 68, 82, 69, 68, 46, 0 };
const uint8_t msgtxt_4[] = {
84, 101, 108, 108, 32, 109, 101, 32, 104, 111, 119, 32, 63, 0 };
const uint8_t msgtxt_5[] = {
84, 114, 121, 32, 69, 88, 65, 77, 73, 78, 73, 78, 71, 32, 116, 104, 105, 110, 103, 115, 0 };
const uint8_t msgtxt_6[] = {
83, 97, 121, 115, 32, 45, 0 };
const uint8_t msgtxt_7[] = {
73, 32, 104, 97, 118, 101, 32, 98, 101, 101, 110, 32, 104, 101, 114, 101, 0 };
const uint8_t msgtxt_8[] = {
109, 111, 118, 101, 115, 0 };
const uint8_t msgtxt_9[] = {
79, 46, 107, 46, 0 };
const uint8_t msgtxt_10[] = {
73, 39, 109, 32, 119, 101, 97, 114, 105, 110, 103, 32, 105, 116, 32, 33, 0 };
const uint8_t msgtxt_11[] = {
73, 32, 115, 101, 101, 32, 110, 111, 116, 104, 105, 110, 103, 32, 111, 102, 32, 105, 110, 116, 101, 114, 101, 115, 116, 0 };
const uint8_t msgtxt_12[] = {
73, 39, 118, 101, 32, 102, 111, 117, 110, 100, 32, 115, 111, 109, 101, 116, 104, 105, 110, 103, 33, 0 };
const uint8_t msgtxt_13[] = {
67, 97, 110, 39, 116, 32, 117, 115, 101, 0 };
const uint8_t msgtxt_14[] = {
73, 32, 115, 101, 101, 0 };
const uint8_t msgtxt_15[] = {
87, 104, 101, 114, 101, 32, 63, 0 };
const uint8_t msgtxt_16[] = {
83, 111, 114, 114, 121, 0 };
const uint8_t msgtxt_17[] = {
84, 105, 109, 101, 32, 112, 97, 115, 115, 101, 115, 46, 46, 46, 46, 46, 46, 0 };
const uint8_t msgtxt_18[] = {
71, 111, 32, 119, 97, 115, 104, 32, 121, 111, 117, 114, 32, 109, 111, 117, 116, 104, 32, 111, 117, 116, 33, 0 };
const uint8_t msgtxt_19[] = {
84, 104, 101, 114, 101, 39, 115, 32, 97, 0 };
const uint8_t msgtxt_20[] = {
78, 111, 116, 104, 105, 110, 103, 32, 104, 97, 112, 112, 101, 110, 115, 0 };
const uint8_t msgtxt_21[] = {
83, 111, 109, 101, 116, 104, 105, 110, 103, 32, 104, 97, 112, 112, 101, 110, 101, 100, 33, 0 };
const uint8_t msgtxt_22[] = {
84, 104, 97, 110, 107, 115, 33, 0 };
const uint8_t msgtxt_23[] = {
89, 111, 117, 32, 104, 97, 118, 101, 32, 69, 83, 67, 65, 80, 69, 68, 33, 0 };
const uint8_t msgtxt_24[] = {
34, 80, 85, 76, 83, 65, 82, 32, 55, 32, 67, 114, 101, 119, 32, 111, 110, 108, 121, 34, 0 };
const uint8_t msgtxt_25[] = {
65, 114, 116, 105, 102, 105, 99, 105, 97, 108, 32, 68, 97, 121, 108, 105, 103, 104, 116, 32, 104, 97, 115, 32, 101, 110, 100, 101, 100, 33, 0 };
const uint8_t msgtxt_26[] = {
67, 82, 69, 65, 84, 85, 82, 69, 32, 114, 105, 112, 115, 32, 109, 121, 32, 104, 101, 97, 100, 32, 111, 102, 102, 33, 0 };
const uint8_t msgtxt_27[] = {
73, 32, 116, 105, 101, 32, 116, 104, 101, 32, 72, 97, 110, 107, 105, 101, 32, 97, 99, 114, 111, 115, 115, 32, 109, 121, 32, 102, 97, 99, 101, 0 };
const uint8_t msgtxt_28[] = {
68, 117, 115, 116, 32, 114, 105, 115, 101, 115, 32, 97, 110, 100, 32, 67, 72, 79, 75, 69, 83, 32, 109, 101, 33, 0 };
const uint8_t msgtxt_29[] = {
73, 32, 99, 97, 110, 39, 116, 32, 114, 101, 97, 99, 104, 32, 105, 116, 0 };
const uint8_t msgtxt_30[] = {
105, 116, 32, 110, 101, 101, 100, 115, 32, 114, 101, 112, 97, 105, 114, 105, 110, 103, 0 };
const uint8_t msgtxt_31[] = {
105, 116, 32, 102, 105, 116, 115, 32, 110, 101, 97, 116, 108, 121, 32, 105, 110, 32, 109, 121, 32, 104, 97, 110, 100, 0 };
const uint8_t msgtxt_32[] = {
67, 108, 97, 110, 103, 45, 103, 45, 103, 45, 103, 0 };
const uint8_t msgtxt_33[] = {
67, 79, 78, 71, 82, 65, 84, 85, 76, 65, 84, 73, 79, 78, 83, 32, 33, 33, 0 };
const uint8_t msgtxt_34[] = {
78, 111, 32, 65, 73, 82, 33, 0 };
const uint8_t msgtxt_35[] = {
84, 104, 101, 32, 67, 82, 69, 65, 84, 85, 82, 69, 32, 106, 117, 115, 116, 32, 115, 108, 105, 116, 104, 101, 114, 101, 100, 32, 105, 110, 33, 0 };
const uint8_t msgtxt_36[] = {
110, 111, 32, 112, 111, 105, 110, 116, 32, 105, 110, 32, 116, 104, 97, 116, 33, 0 };
const uint8_t msgtxt_37[] = {
68, 79, 79, 82, 83, 32, 97, 114, 101, 32, 99, 108, 111, 115, 101, 100, 32, 45, 32, 73, 32, 67, 82, 65, 83, 72, 33, 0 };
const uint8_t msgtxt_38[] = {
73, 32, 99, 97, 110, 39, 116, 32, 109, 111, 118, 101, 33, 0 };
const uint8_t msgtxt_39[] = {
73, 39, 118, 101, 32, 109, 97, 100, 101, 32, 97, 32, 67, 65, 75, 69, 33, 0 };
const uint8_t msgtxt_40[] = {
72, 79, 84, 32, 105, 110, 32, 116, 104, 101, 114, 101, 33, 0 };
const uint8_t msgtxt_41[] = {
84, 104, 101, 32, 67, 82, 69, 65, 84, 85, 82, 69, 32, 97, 116, 101, 32, 116, 104, 101, 32, 108, 111, 116, 33, 0 };
const uint8_t msgtxt_42[] = {
73, 116, 115, 32, 111, 112, 101, 110, 33, 0 };
const uint8_t msgtxt_43[] = {
85, 115, 101, 32, 97, 32, 67, 111, 108, 111, 117, 114, 0 };
const uint8_t msgtxt_44[] = {
71, 114, 105, 108, 108, 32, 99, 111, 109, 101, 115, 32, 97, 119, 97, 121, 32, 119, 105, 116, 104, 32, 97, 32, 115, 116, 114, 111, 110, 103, 32, 112, 117, 108, 108, 33, 0 };
const uint8_t msgtxt_45[] = {
116, 104, 97, 116, 32, 111, 110, 101, 32, 105, 115, 32, 108, 111, 111, 115, 101, 0 };
const uint8_t msgtxt_46[] = {
87, 104, 101, 101, 101, 101, 101, 33, 0 };
const uint8_t msgtxt_47[] = {
84, 104, 117, 100, 33, 32, 32, 73, 32, 102, 97, 108, 108, 32, 98, 97, 99, 107, 32, 100, 111, 119, 110, 33, 0 };
const uint8_t msgtxt_48[] = {
105, 116, 39, 115, 32, 108, 97, 98, 101, 108, 108, 101, 100, 32, 34, 82, 65, 87, 32, 77, 65, 84, 69, 82, 73, 65, 76, 83, 34, 0 };
const uint8_t msgtxt_49[] = {
68, 111, 111, 114, 32, 111, 112, 101, 110, 115, 32, 105, 110, 32, 102, 114, 111, 110, 116, 32, 111, 102, 32, 109, 101, 0 };
const uint8_t msgtxt_50[] = {
68, 111, 111, 114, 32, 115, 108, 97, 109, 115, 32, 98, 101, 104, 105, 110, 100, 32, 109, 101, 33, 0 };
const uint8_t msgtxt_51[] = {
73, 39, 109, 32, 110, 111, 116, 32, 119, 101, 97, 114, 105, 110, 103, 32, 105, 116, 33, 0 };
const uint8_t msgtxt_52[] = {
76, 97, 116, 104, 101, 32, 119, 111, 114, 107, 115, 0 };
const uint8_t msgtxt_53[] = {
97, 32, 82, 79, 85, 78, 68, 32, 104, 111, 108, 101, 0 };
const uint8_t msgtxt_54[] = {
89, 111, 117, 32, 116, 104, 105, 110, 107, 32, 116, 104, 105, 115, 32, 105, 115, 32, 115, 111, 109, 101, 32, 115, 111, 114, 116, 32, 111, 102, 32, 103, 97, 109, 101, 63, 0 };
const uint8_t msgtxt_55[] = {
46, 46, 46, 46, 97, 115, 32, 116, 104, 101, 32, 111, 110, 108, 121, 32, 115, 117, 114, 118, 105, 118, 105, 110, 103, 32, 109, 101, 109, 98, 101, 114, 32, 111, 102, 0 };
const uint8_t msgtxt_56[] = {
116, 104, 101, 32, 80, 85, 76, 83, 65, 82, 32, 55, 32, 99, 114, 101, 119, 46, 46, 46, 46, 0 };
const uint8_t msgtxt_57[] = {
105, 116, 39, 115, 32, 105, 110, 32, 109, 121, 32, 104, 97, 110, 100, 119, 114, 105, 116, 105, 110, 103, 33, 0 };
const uint8_t msgtxt_58[] = {
90, 45, 122, 45, 122, 45, 122, 45, 122, 45, 122, 45, 122, 0 };
const uint8_t msgtxt_59[] = {
65, 114, 116, 105, 102, 105, 99, 105, 97, 108, 32, 100, 97, 121, 108, 105, 103, 104, 116, 32, 104, 97, 115, 32, 98, 101, 103, 117, 110, 33, 0 };
const uint8_t msgtxt_60[] = {
73, 32, 99, 111, 117, 108, 100, 32, 103, 101, 116, 32, 105, 110, 32, 116, 104, 101, 114, 101, 33, 0 };
const uint8_t msgtxt_61[] = {
115, 111, 109, 101, 116, 104, 105, 110, 103, 0 };
const uint8_t msgtxt_62[] = {
73, 116, 39, 115, 32, 67, 108, 111, 115, 101, 100, 33, 0 };
const uint8_t msgtxt_63[] = {
73, 116, 32, 119, 105, 108, 108, 32, 110, 111, 116, 32, 99, 108, 111, 115, 101, 0 };
const uint8_t msgtxt_64[] = {
73, 32, 102, 97, 108, 108, 32, 65, 115, 108, 101, 101, 112, 33, 33, 0 };
const uint8_t msgtxt_65[] = {
73, 39, 109, 32, 66, 108, 97, 115, 116, 101, 100, 32, 105, 110, 116, 111, 32, 100, 101, 101, 112, 32, 115, 112, 97, 99, 101, 33, 0 };
const uint8_t msgtxt_66[] = {
84, 104, 101, 32, 105, 110, 103, 114, 101, 100, 105, 101, 110, 116, 115, 32, 97, 114, 101, 32, 110, 111, 119, 32, 77, 105, 120, 101, 100, 33, 0 };
const uint8_t msgtxt_67[] = {
97, 32, 67, 97, 98, 108, 101, 32, 105, 115, 32, 109, 105, 115, 115, 105, 110, 103, 33, 0 };
const uint8_t msgtxt_68[] = {
73, 32, 83, 116, 97, 114, 118, 101, 32, 116, 111, 32, 100, 101, 97, 116, 104, 33, 0 };
const uint8_t msgtxt_69[] = {
73, 116, 32, 119, 105, 108, 108, 32, 116, 117, 114, 110, 33, 0 };
const uint8_t msgtxt_70[] = {
85, 115, 101, 32, 97, 32, 115, 104, 97, 112, 101, 0 };
const uint8_t msgtxt_71[] = {
105, 46, 101, 46, 32, 32, 34, 71, 69, 84, 32, 83, 81, 85, 65, 82, 69, 34, 0 };
const uint8_t msgtxt_72[] = {
73, 32, 119, 111, 117, 108, 100, 32, 103, 101, 116, 32, 105, 110, 100, 105, 103, 101, 115, 116, 105, 111, 110, 33, 0 };
const uint8_t msgtxt_73[] = {
83, 116, 105, 108, 108, 32, 104, 117, 110, 103, 114, 121, 44, 32, 105, 116, 32, 112, 114, 111, 99, 101, 101, 100, 115, 32, 116, 111, 32, 101, 97, 116, 32, 77, 69, 32, 33, 33, 0 };
const uint8_t msgtxt_74[] = {
84, 104, 101, 110, 32, 102, 101, 108, 108, 32, 97, 115, 108, 101, 101, 112, 33, 0 };
const uint8_t msgtxt_75[] = {
0 };
/* Message lookup table: msgptr[n] is the text of message n.
 * run_actions() prints msgptr[a] for action opcodes < 50 and
 * msgptr[a - 50] for opcodes > 102. */
const uint8_t *msgptr[] = {
msgtxt_0,
msgtxt_1,
msgtxt_2,
msgtxt_3,
msgtxt_4,
msgtxt_5,
msgtxt_6,
msgtxt_7,
msgtxt_8,
msgtxt_9,
msgtxt_10,
msgtxt_11,
msgtxt_12,
msgtxt_13,
msgtxt_14,
msgtxt_15,
msgtxt_16,
msgtxt_17,
msgtxt_18,
msgtxt_19,
msgtxt_20,
msgtxt_21,
msgtxt_22,
msgtxt_23,
msgtxt_24,
msgtxt_25,
msgtxt_26,
msgtxt_27,
msgtxt_28,
msgtxt_29,
msgtxt_30,
msgtxt_31,
msgtxt_32,
msgtxt_33,
msgtxt_34,
msgtxt_35,
msgtxt_36,
msgtxt_37,
msgtxt_38,
msgtxt_39,
msgtxt_40,
msgtxt_41,
msgtxt_42,
msgtxt_43,
msgtxt_44,
msgtxt_45,
msgtxt_46,
msgtxt_47,
msgtxt_48,
msgtxt_49,
msgtxt_50,
msgtxt_51,
msgtxt_52,
msgtxt_53,
msgtxt_54,
msgtxt_55,
msgtxt_56,
msgtxt_57,
msgtxt_58,
msgtxt_59,
msgtxt_60,
msgtxt_61,
msgtxt_62,
msgtxt_63,
msgtxt_64,
msgtxt_65,
msgtxt_66,
msgtxt_67,
msgtxt_68,
msgtxt_69,
msgtxt_70,
msgtxt_71,
msgtxt_72,
msgtxt_73,
msgtxt_74,
msgtxt_75,
};
/* Status (automatic) action table, run once per turn by
 * run_table(status) from main_loop().  Entry layout as decoded by
 * run_table()/next_line():
 *   header byte: 255 ends the table; bit 7 set = automatic entry,
 *     bit 6 set = continuation-only line, bit 5 CLEAR = a random
 *     percent-chance byte follows; bits 4-2 = condition count,
 *     bits 1-0 = action count - 1.
 *   each condition is 2 bytes: opcode in the low 5 bits with parameter
 *     high bits in the top 3 bits, then the parameter low byte
 *     (see run_conditions()); then the action opcode bytes follow. */
const uint8_t status[] = {
167,
9, 0,
1, 2, 3, 86,
175,
9, 0, 0, 0, 32, 244,
81, 79, 81, 73,
206,
0, 1, 0, 100, 0, 1,
81, 79, 81,
206,
0, 2, 0, 50, 0, 2,
81, 79, 81,
201,
0, 8, 0, 10,
58, 58,
206,
0, 5, 0, 2, 0, 5,
81, 79, 81,
207,
0, 3, 0, 0, 0, 3,
81, 79, 81, 67,
180,
4, 3, 14, 5, 8, 4, 0, 5, 0, 6,
72,
166,
0, 1,
81, 77, 73,
202,
19, 0, 0, 2,
56, 58, 25,
197,
0, 1,
81, 64,
162,
81, 77, 73,
199,
19, 0,
35, 26, 61, 63,
192,
81,
170,
4, 20, 0, 10,
60, 56, 64,
170,
4, 21, 0, 10,
60, 56, 64,
170,
4, 22, 0, 10,
60, 56, 64,
170,
4, 23, 0, 10,
60, 56, 64,
170,
4, 24, 0, 10,
60, 56, 64,
170,
4, 25, 0, 10,
60, 56, 64,
170,
4, 26, 0, 10,
60, 56, 64,
170,
4, 27, 0, 10,
60, 56, 64,
168,
4, 8, 0, 10,
58,
168,
4, 28, 0, 10,
58,
168,
4, 30, 0, 10,
58,
168,
4, 31, 0, 10,
58,
168,
4, 33, 0, 10,
58,
169,
4, 8, 9, 2,
57, 64,
169,
4, 28, 9, 2,
57, 64,
169,
4, 30, 9, 2,
57, 64,
169,
4, 31, 9, 2,
57, 64,
165,
4, 32,
57, 64,
169,
4, 33, 9, 2,
57, 64,
170,
9, 10, 0, 2,
81, 77, 73,
199,
19, 0,
35, 26, 61, 63,
196,
0, 2,
81,
170,
4, 17, 6, 58,
115, 61, 63,
174,
0, 3, 0, 1, 0, 3,
81, 82, 81,
170,
2, 79, 6, 73,
118, 61, 63,
170,
2, 62, 0, 5,
81, 77, 73,
199,
19, 0,
88, 26, 61, 63,
196,
0, 5,
81,
178,
5, 62, 0, 5, 0, 2, 0, 5,
81, 79, 81,
164,
4, 45,
63,
135, 10,
4, 6,
88, 28, 61, 63,
};
/* Verb/noun-driven action table, run by run_table(actions) from
 * run_command().  Entries whose header byte has bit 7 clear are
 * followed by a verb code and a noun code (0 = any noun); the rest of
 * each entry uses the same condition/action encoding as the status[]
 * table (see run_table(), run_conditions(), run_actions()). */
const uint8_t actions[] = {
5, 65, 15,
1, 10,
14, 119,
21, 65, 98,
2, 2, 14, 84, 14, 73, 0, 73, 0, 1,
62, 73,
201,
0, 84, 0, 1,
62, 12,
21, 115, 98,
2, 2, 14, 10, 14, 9, 0, 10, 0, 1,
62, 12,
14, 54, 8,
2, 5, 0, 5, 0, 6,
72, 9, 73,
206,
0, 7, 0, 3, 0, 4,
62, 58, 21,
4, 54, 8,
2, 23,
113,
13, 54, 8,
2, 80, 0, 79, 0, 80,
72, 9,
9, 1, 10,
2, 4, 0, 3,
54, 9,
9, 1, 10,
2, 72, 0, 34,
54, 9,
9, 1, 10,
2, 30, 0, 14,
54, 9,
9, 1, 10,
2, 8, 0, 5,
54, 9,
21, 115, 12,
2, 7, 14, 20, 14, 21, 0, 20, 0, 3,
62, 12,
4, 115, 12,
2, 7,
11,
17, 65, 10,
4, 5, 14, 17, 0, 17, 0, 5,
62, 12,
21, 65, 10,
4, 14, 14, 63, 8, 8, 0, 63, 0, 14,
62, 12,
4, 65, 10,
4, 3,
11,
4, 65, 10,
4, 34,
11,
1, 65, 87,
120, 121,
9, 1, 13,
2, 12, 0, 6,
54, 9,
1, 67, 87,
120, 121,
17, 65, 17,
2, 14, 5, 28, 0, 28, 0, 8,
62, 73,
201,
0, 29, 0, 8,
62, 12,
5, 65, 17,
2, 28,
14, 110,
9, 1, 17,
2, 14, 0, 10,
54, 9,
22, 100, 28,
1, 20, 4, 10, 0, 20, 0, 21, 0, 21,
72, 53, 9,
9, 10, 28,
2, 20, 0, 20,
52, 9,
18, 10, 28,
2, 21, 0, 20, 0, 21, 0, 20,
72, 52, 9,
13, 103, 37,
2, 29, 13, 21, 0, 5,
58, 21,
8, 103, 37,
2, 29, 14, 21,
20,
4, 103, 36,
2, 28,
20,
6, 103, 26,
2, 18,
115, 61, 63,
16, 103, 45,
2, 35, 1, 56, 8, 5, 1, 58,
73,
210,
0, 36, 0, 38, 0, 37, 0, 39,
72, 72, 9,
8, 103, 45,
2, 35, 9, 5,
20,
14, 103, 45,
2, 35, 8, 5, 6, 56,
34, 61, 63,
14, 103, 45,
2, 35, 8, 5, 6, 58,
115, 61, 63,
10, 103, 51,
2, 44, 8, 6,
23, 33, 63,
10, 103, 51,
2, 44, 9, 6,
37, 61, 63,
21, 65, 19,
4, 8, 14, 15, 14, 16, 0, 15, 0, 8,
62, 12,
4, 34, 22,
2, 15,
4,
21, 96, 24,
2, 15, 1, 17, 14, 16, 0, 15, 0, 16,
72, 12,
14, 67, 15,
1, 10, 0, 9, 0, 10,
72, 21, 64,
14, 67, 15,
1, 9, 0, 9, 0, 10,
72, 21, 64,
8, 1, 23,
2, 16, 0, 20,
54,
9, 1, 23,
2, 41, 0, 18,
54, 9,
17, 22, 81,
2, 71, 14, 72, 0, 72, 0, 33,
62, 12,
21, 65, 78,
4, 34, 14, 76, 14, 86, 0, 76, 0, 34,
62, 12,
21, 65, 78,
4, 14, 14, 31, 14, 33, 0, 31, 0, 14,
62, 12,
13, 6, 88,
2, 76, 0, 76, 0, 86,
72, 44,
9, 43, 0,
2, 86, 0, 35,
54, 9,
9, 43, 0,
2, 33, 0, 15,
54, 9,
11, 43, 0,
5, 86, 5, 33,
46, 88, 88, 47,
0, 103, 25,
43,
9, 1, 90,
2, 81, 0, 39,
54, 9,
19, 121, 52,
2, 45, 1, 74, 0, 7, 0, 74,
58, 59, 21, 73,
206,
0, 45, 0, 88, 0, 52,
72, 59, 102,
23, 67, 85,
2, 88, 1, 82, 8, 7, 0, 82, 0, 75,
72, 9, 21, 73,
196,
0, 75,
53,
5, 67, 85,
9, 7,
14, 117,
5, 65, 52,
2, 45,
14, 30,
9, 1, 52,
4, 28, 0, 29,
54, 9,
21, 65, 53,
2, 46, 14, 48, 14, 51, 0, 48, 0, 29,
62, 73,
201,
0, 50, 0, 29,
62, 12,
21, 10, 76,
4, 12, 1, 68, 2, 67, 0, 68, 0, 85,
72, 73,
197,
0, 67,
59, 9,
4, 30, 64,
2, 53,
4,
22, 96, 55,
2, 53, 1, 47, 14, 54, 0, 54, 0, 30,
62, 9, 32,
5, 96, 55,
5, 53,
13, 84,
21, 65, 64,
2, 54, 14, 55, 14, 56, 0, 55, 0, 30,
62, 12,
21, 65, 39,
2, 31, 14, 32, 14, 33, 0, 32, 0, 14,
62, 12,
22, 121, 61,
1, 48, 1, 49, 0, 48, 0, 51, 0, 49,
72, 59, 9,
23, 6, 39,
2, 31, 1, 51, 0, 33, 0, 31, 0, 32,
72, 59, 9, 12,
8, 1, 46,
2, 39, 0, 17,
54,
10, 34, 50,
2, 43, 0, 6,
58, 9, 21,
9, 1, 47,
2, 42, 0, 19,
54, 9,
13, 7, 8,
2, 22, 0, 22, 0, 23,
72, 9,
4, 7, 8,
2, 3,
16,
9, 1, 33,
2, 25, 0, 40,
54, 9,
22, 1, 8,
2, 23, 13, 3, 0, 70, 0, 3, 0, 12,
72, 54, 9,
13, 1, 8,
2, 23, 14, 3, 0, 12,
54, 9,
4, 1, 8,
2, 3,
112,
9, 1, 8,
2, 5, 0, 4,
54, 9,
9, 1, 8,
2, 36, 0, 15,
54, 9,
9, 1, 8,
2, 39, 0, 17,
54, 9,
9, 1, 8,
2, 70, 0, 12,
54, 9,
9, 1, 8,
2, 80, 0, 36,
54, 9,
22, 45, 75,
1, 66, 1, 68, 0, 68, 0, 85, 0, 66,
72, 59, 9,
22, 45, 75,
1, 66, 6, 68, 0, 66, 0, 67, 0, 67,
72, 53, 9,
18, 45, 77,
1, 85, 0, 85, 0, 67, 0, 67,
72, 53, 73,
201,
0, 67, 0, 68,
75, 9,
16, 143, 0,
12, 63, 3, 85, 3, 65, 3, 69,
73,
210,
0, 85, 0, 64, 0, 65, 0, 69,
72, 59, 59,
198,
0, 8,
58, 9, 116,
16, 143, 0,
3, 63, 3, 85, 3, 65, 3, 69,
73,
210,
0, 85, 0, 64, 0, 65, 0, 69,
72, 59, 59,
203,
0, 8, 0, 63,
60, 59, 9, 116,
22, 121, 30,
2, 24, 1, 75, 0, 24, 0, 87, 0, 75,
72, 59, 9,
22, 144, 34,
2, 87, 1, 64, 8, 8, 0, 64, 0, 26,
72, 9, 39,
22, 144, 34,
2, 87, 1, 64, 9, 8, 0, 64, 0, 27,
72, 9, 39,
10, 1, 89,
2, 78, 0, 37,
54, 9, 50,
18, 7, 8,
2, 79, 1, 73, 0, 79, 0, 80,
72, 9, 49,
13, 7, 8,
2, 6, 0, 5, 0, 6,
72, 9,
5, 60, 92,
2, 83,
6, 24,
6, 60, 93,
1, 84,
6, 105, 106,
5, 65, 93,
3, 84,
14, 107,
4, 65, 90,
2, 81,
48,
11, 65, 35,
1, 0, 0, 3,
81, 7, 78, 73,
197,
0, 3,
8, 81,
4, 5, 67,
1, 58,
10,
13, 5, 67,
1, 57, 0, 57, 0, 58,
72, 9,
4, 5, 65,
1, 56,
10,
13, 5, 65,
1, 55, 0, 55, 0, 56,
72, 9,
5, 6, 65,
1, 56,
14, 36,
13, 6, 67,
1, 58, 0, 58, 0, 57,
72, 9,
4, 6, 67,
1, 57,
51,
5, 65, 30,
2, 24,
14, 103,
7, 49, 71,
1, 63,
114, 26, 61, 63,
23, 40, 34,
2, 62, 1, 27, 0, 62, 0, 61, 0, 27,
72, 59, 41, 124,
15, 40, 34,
1, 26, 2, 62, 0, 26,
41, 123, 61, 63,
23, 16, 70,
1, 27, 2, 62, 0, 62, 0, 61, 0, 27,
72, 59, 41, 124,
11, 16, 70,
1, 26, 2, 62,
41, 123, 61, 63,
7, 49, 34,
1, 26,
22, 88, 35, 73,
195,
88, 26, 61, 63,
7, 49, 34,
1, 27,
114, 26, 61, 63,
4, 65, 30,
2, 87,
40,
12, 10, 76,
4, 12, 2, 67, 1, 68,
73,
206,
0, 68, 0, 85, 0, 67,
72, 59, 9,
19, 134, 0,
4, 3, 8, 15, 0, 1, 0, 100,
81, 79, 9, 73,
198,
0, 1,
81, 108, 88,
199,
0, 2,
60, 109, 57, 64,
3, 134, 0,
114, 26, 61, 63,
0, 4, 0,
71,
0, 3, 0,
63,
0, 91, 0,
18,
0, 9, 0,
104,
0, 2, 0,
66,
0, 132, 0,
5,
0, 115, 0,
4,
1, 54, 0,
14, 36,
0, 65, 0,
11,
0, 1, 0,
15,
0, 37, 0,
4,
0, 22, 0,
11,
1, 67, 0,
14, 36,
1, 30, 0,
14, 36,
0, 34, 0,
4,
1, 96, 0,
13, 85,
1, 87, 0,
9, 85,
3, 8, 0,
9, 17, 88, 88,
1, 10, 87,
120, 121,
5, 18, 65,
1, 56,
16, 10,
4, 18, 67,
1, 58,
16,
1, 49, 0,
16, 122,
255,
};
/* Verb vocabulary: 4-character uppercase ASCII words, space padded,
 * terminated by a 0 byte.  A first byte with bit 7 set (values >= 128)
 * marks a synonym of the preceding canonical word; whichword() returns
 * the raw table index of the canonical entry, so e.g. AUTO=0, GO=1,
 * GET=10 (VERB_GET) and DROP=18 (VERB_DROP). */
const uint8_t verbs[] = {
65, 85, 84, 79,
71, 79, 32, 32,
73, 78, 86, 69,
81, 85, 73, 84,
83, 65, 86, 69,
87, 69, 65, 82,
82, 69, 77, 79,
79, 80, 69, 78,
87, 65, 73, 84,
83, 67, 79, 82,
71, 69, 84, 32,
212, 65, 75, 69,
207, 66, 84, 65,
199, 82, 65, 66,
68, 73, 71, 32,
67, 76, 73, 77,
70, 69, 69, 68,
76, 79, 79, 75,
68, 82, 79, 80,
204, 69, 65, 86,
196, 69, 80, 79,
196, 85, 77, 80,
83, 69, 65, 82,
198, 82, 73, 83,
84, 72, 82, 79,
211, 76, 73, 78,
195, 72, 85, 67,
212, 79, 83, 83,
75, 73, 76, 76,
193, 84, 84, 65,
83, 77, 65, 83,
196, 69, 83, 84,
194, 82, 69, 65,
200, 73, 84, 32,
80, 85, 76, 76,
212, 85, 71, 32,
196, 82, 65, 71,
84, 73, 69, 32,
198, 65, 83, 84,
215, 82, 65, 80,
71, 73, 86, 69,
207, 70, 70, 69,
208, 65, 89, 32,
74, 85, 77, 80,
204, 69, 65, 80,
69, 77, 80, 84,
211, 80, 82, 73,
208, 79, 85, 82,
70, 73, 76, 76,
69, 65, 84, 32,
212, 65, 83, 84,
196, 69, 86, 79,
85, 78, 76, 79,
76, 79, 67, 75,
67, 76, 79, 83,
72, 79, 76, 68,
199, 82, 65, 83,
87, 65, 86, 69,
199, 69, 83, 84,
80, 76, 65, 89,
82, 69, 65, 68,
83, 87, 73, 77,
208, 65, 68, 68,
83, 77, 79, 75,
208, 85, 70, 70,
69, 88, 65, 77,
201, 78, 83, 80,
84, 85, 82, 78,
210, 79, 84, 65,
212, 87, 73, 83,
82, 85, 66, 32,
208, 79, 76, 73,
211, 72, 73, 78,
67, 85, 84, 32,
195, 72, 79, 80,
211, 76, 73, 67,
200, 65, 67, 75,
76, 73, 71, 72,
201, 71, 78, 73,
194, 85, 82, 78,
207, 78, 32, 32,
85, 78, 76, 73,
197, 88, 84, 73,
207, 70, 70, 32,
70, 73, 82, 69,
211, 72, 79, 79,
194, 76, 65, 83,
83, 65, 89, 32,
211, 72, 79, 85,
212, 65, 76, 75,
65, 83, 75, 32,
70, 85, 67, 75,
208, 73, 83, 83,
70, 79, 82, 67,
204, 69, 86, 69,
208, 82, 73, 83,
85, 83, 69, 32,
193, 80, 80, 76,
197, 88, 69, 82,
215, 73, 84, 72,
70, 73, 84, 32,
201, 78, 83, 69,
208, 76, 85, 71,
80, 82, 69, 83,
208, 85, 83, 72,
84, 79, 85, 67,
198, 69, 69, 76,
75, 73, 67, 75,
211, 84, 65, 77,
83, 77, 69, 76,
211, 78, 73, 70,
76, 73, 83, 84,
200, 69, 65, 82,
72, 73, 68, 69,
195, 79, 78, 67,
77, 79, 86, 69,
211, 72, 73, 70,
70, 82, 69, 69,
210, 69, 76, 69,
213, 78, 84, 73,
213, 78, 68, 79,
77, 65, 75, 69,
194, 85, 73, 76,
198, 73, 88, 32,
210, 69, 80, 65,
83, 84, 65, 66,
208, 79, 75, 69,
208, 82, 79, 68,
87, 82, 73, 84,
202, 79, 84, 32,
206, 79, 84, 69,
80, 82, 65, 89,
72, 69, 76, 80,
200, 73, 78, 84,
83, 76, 69, 69,
87, 65, 75, 69,
193, 87, 65, 75,
210, 79, 85, 83,
84, 82, 73, 77,
211, 72, 65, 86,
211, 67, 82, 65,
77, 69, 76, 84,
84, 72, 73, 78,
77, 73, 88, 32,
66, 65, 75, 69,
67, 72, 65, 82,
0,
};
/* Noun vocabulary: same encoding as verbs[] (4-char words, bit 7 on
 * the first byte = synonym of the previous entry, 0 terminated).
 * Codes 1-6 are the movement directions NORT..DOWN, which main_loop()
 * treats as implicit GO commands.  The trailing all-space rows are
 * padding — presumably reserving noun codes so later indices line up;
 * verify against the game compiler that produced this table. */
const uint8_t nouns[] = {
65, 78, 89, 32,
78, 79, 82, 84,
83, 79, 85, 84,
69, 65, 83, 84,
87, 69, 83, 84,
85, 80, 32, 32,
68, 79, 87, 78,
72, 65, 78, 75,
68, 79, 79, 82,
211, 84, 69, 69,
66, 85, 78, 75,
194, 69, 68, 32,
80, 73, 76, 76,
86, 69, 78, 84,
193, 73, 82, 32,
82, 79, 68, 32,
68, 85, 83, 84,
67, 79, 78, 84,
195, 79, 78, 83,
70, 76, 79, 79,
199, 82, 79, 85,
196, 69, 67, 75,
82, 73, 78, 71,
72, 65, 84, 67,
72, 79, 79, 75,
66, 85, 84, 84,
79, 82, 65, 78,
67, 79, 78, 78,
67, 73, 82, 67,
194, 79, 65, 82,
79, 86, 69, 78,
211, 84, 79, 86,
195, 79, 79, 75,
76, 65, 82, 68,
67, 65, 75, 69,
87, 65, 84, 67,
66, 76, 65, 67,
87, 72, 73, 84,
65, 87, 65, 89,
80, 65, 78, 69,
205, 69, 84, 65,
67, 76, 73, 80,
210, 69, 84, 65,
72, 79, 76, 69,
208, 73, 80, 69,
82, 69, 68, 32,
79, 85, 84, 32,
83, 72, 85, 84,
195, 82, 65, 70,
80, 85, 76, 83,
76, 69, 86, 69,
89, 69, 76, 76,
76, 65, 84, 72,
84, 79, 79, 76,
195, 79, 77, 80,
72, 65, 77, 77,
87, 79, 79, 68,
200, 65, 78, 68,
66, 76, 65, 68,
66, 65, 82, 32,
210, 79, 85, 71,
83, 67, 82, 69,
70, 73, 76, 69,
212, 82, 73, 65,
76, 79, 67, 75,
83, 85, 73, 84,
211, 80, 65, 67,
66, 79, 79, 84,
205, 65, 71, 78,
71, 82, 69, 69,
67, 82, 69, 65,
84, 65, 66, 76,
77, 73, 88, 32,
70, 76, 79, 85,
82, 65, 73, 83,
66, 79, 84, 84,
87, 65, 84, 69,
84, 73, 78, 32,
67, 69, 73, 76,
210, 79, 79, 70,
87, 65, 76, 76,
87, 82, 69, 67,
75, 69, 89, 32,
67, 65, 66, 76,
215, 73, 82, 69,
83, 81, 85, 65,
82, 79, 85, 78,
66, 76, 79, 67,
71, 82, 73, 76,
67, 65, 71, 69,
67, 82, 65, 84,
194, 79, 88, 32,
83, 73, 71, 78,
78, 79, 84, 69,
71, 65, 77, 69,
89, 79, 85, 32,
79, 70, 70, 32,
84, 72, 65, 84,
67, 79, 85, 67,
70, 73, 78, 71,
78, 79, 83, 69,
83, 79, 67, 75,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
32, 32, 32, 32,
0,
};
/* Automatic GET/DROP map used by autonoun(): each entry is a 4-byte
 * noun word followed by an object number (entry stride implies
 * WORDSIZE == 4); a 0 word byte ends the table.  autonoun() returns
 * the object whose word matches the typed noun AND whose current
 * location matches (room for GET, 255 = carried for DROP). */
const uint8_t automap[] = {
87, 65, 84, 67,
0,
82, 79, 68, 32,
9,
82, 79, 68, 32,
10,
72, 79, 79, 75,
17,
67, 73, 82, 67,
20,
76, 65, 82, 68,
25,
67, 65, 75, 69,
26,
67, 65, 75, 69,
27,
72, 65, 77, 77,
47,
66, 76, 65, 68,
48,
87, 79, 79, 68,
49,
66, 65, 82, 32,
50,
83, 67, 82, 69,
51,
83, 85, 73, 84,
55,
66, 79, 79, 84,
57,
84, 65, 66, 76,
63,
77, 73, 88, 32,
64,
70, 76, 79, 85,
65,
66, 79, 84, 84,
66,
84, 73, 78, 32,
68,
82, 65, 73, 83,
69,
75, 69, 89, 32,
73,
67, 65, 66, 76,
74,
82, 79, 85, 78,
75,
83, 81, 85, 65,
82,
78, 79, 84, 69,
84,
84, 73, 78, 32,
85,
0,
};
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <setjmp.h>
#ifdef __linux__
#include <stdio.h>
#endif
/* Non-local exit target: "play again" in action_quit() longjmp()s back
 * to the setjmp() in main(). */
static jmp_buf restart;
/* Complete saveable game state; written/read verbatim as a binary blob
 * by action_save()/action_restore(). */
struct savearea {
uint16_t magic;			/* GAME_MAGIC, validates save files */
uint8_t carried;		/* number of objects carried */
uint8_t lighttime;		/* lamp turns left (255 = everlasting) */
uint8_t location;		/* current room number */
uint8_t objloc[NUM_OBJ];	/* per-object room: 255 = carried, 0 = destroyed */
uint8_t roomsave[6];		/* rooms swapped with player by action 87 */
uint8_t savedroom;		/* room swapped with player by action 80 */
uint32_t bitflags;		/* game flags (DARKFLAG, LIGHTOUT, ...) */
int16_t counter;		/* current counter */
int16_t counter_array[16];	/* swappable counters (action 81) */
};
static char linebuf[81];	/* raw input line */
static char *nounbuf;		/* points at the noun text inside linebuf */
static char wordbuf[WORDSIZE + 1];	/* current word, space padded */
static uint8_t verb;		/* parsed verb code (0 = none, 255 = unknown) */
static uint8_t noun;		/* parsed noun code (0 = none, 255 = unknown) */
static const uint8_t *linestart;	/* current entry in the action table */
static uint8_t linematch;	/* a table entry matched verb/noun */
static uint8_t actmatch;	/* a table entry's conditions passed and ran */
static uint8_t continuation;	/* previous entry requested continuation */
static uint16_t *param;		/* cursor into param_buf */
static uint16_t param_buf[5];	/* parameters queued by condition opcode 0 */
static uint8_t redraw;		/* REDRAW/REDRAW_MAYBE room repaint request */
static uint8_t rows, cols;	/* display geometry */
static struct savearea game;
static void error(const char *p);
#define VERB_GO 1
#define VERB_GET 10
#define VERB_DROP 18
#define LIGHTOUT 16		/* bitflag: lamp has run out */
#define DARKFLAG 15		/* bitflag: it is dark */
#define LIGHT_SOURCE 9		/* object number of the lamp */
/* Define this because 1 << n might be 16bit */
#define ONEBIT ((uint32_t)1)
#define REDRAW 1
#define REDRAW_MAYBE 2
#ifdef CONFIG_IO_CURSES
#include <curses.h>
#define REDRAW_MASK (REDRAW|REDRAW_MAYBE)
static char wbuf[81];		/* word being assembled for wrapping */
static int wbp = 0;		/* length of the pending word */
static int xpos = 0, ypos = 0;	/* cursor in the current window */
static int bottom;		/* last row of the scrolling window */
static WINDOW *topwin, *botwin, *curwin;	/* status, scroll, active */
/* Emit the buffered word into the current window and advance the
 * cursor column accordingly. */
static void flush_word(void)
{
	int emitted = wbp;

	wbuf[emitted] = 0;
	wbp = 0;
	waddstr(curwin, wbuf);
	xpos += emitted;
}
/* Move output to the start of a fresh line: the status window simply
 * advances, the scrolling window scrolls and stays on its last row. */
static void new_line(void)
{
	xpos = 0;
	if (curwin != topwin) {
		scroll(curwin);
		ypos = bottom;
	} else {
		ypos++;
	}
	wmove(curwin, ypos, xpos);
}
/* Word-wrapping character output: letters are buffered, a space flushes
 * the pending word (wrapping first if it would not fit), and a newline
 * flushes and starts a fresh line. */
static void char_out(char c)
{
	switch (c) {
	case '\n':
		flush_word();
		new_line();
		break;
	case ' ':
		if (xpos + wbp >= cols)
			new_line();
		flush_word();
		waddch(curwin, ' ');
		xpos++;
		break;
	default:
		if (wbp < 80)
			wbuf[wbp++] = c;
		break;
	}
}
/* Print a NUL-terminated byte string through the word wrapper. */
static void strout_lower(const uint8_t *p)
{
	for (; *p; p++)
		char_out(*p);
}
/* Print a string followed by a single space (used between messages). */
static void strout_lower_spc(const uint8_t *p)
{
	strout_lower(p);
	char_out(' ');
}
/* Print an unsigned 16-bit value in decimal through the word wrapper. */
static void decout_lower(uint16_t v)
{
#ifdef __linux__
	char buf[9];
	/* Use the whole buffer: the size was hard-coded to 8, wasting
	 * the last byte of the 9-byte buffer. */
	snprintf(buf, sizeof(buf), "%d", v); /* FIXME: avoid expensive snprintf */
	strout_lower((uint8_t *)buf);
#else
	strout_lower((uint8_t *)_itoa(v));
#endif
}
/* Print into the "upper" (status) area; with curses the window switch
 * is handled by begin_upper()/end_upper(), so this is a plain print. */
static void strout_upper(const uint8_t *p)
{
	strout_lower(p);
}
/* Read one key from the scrolling window (refresh first so pending
 * output is visible before blocking). */
static char readchar(void)
{
	wrefresh(botwin);
	return wgetch(botwin);
}
/* Read an edited line into linebuf using the curses window: supports
 * backspace/delete (8/127), accepts printable ASCII, and terminates on
 * CR or LF.  Leaves linebuf NUL terminated without the newline. */
static void line_input(void)
{
	int c;
	char *p = linebuf;
	do {
		wmove(botwin, ypos, xpos);
		wrefresh(botwin);
		c = wgetch(botwin);
		if (c == 8 || c == 127) {
			/* Rub out the previous character, if any */
			if (p > linebuf) {
				xpos--;
				mvwaddch(botwin, ypos, xpos, ' ');
				p--;
			}
			continue;
		}
		if (c > 31 && c < 127) {
			/* Printable: store and echo, bounded by buffer and
			 * screen width */
			if (p < linebuf + 80 && xpos < cols - 1) {
				*p++ = c;
				mvwaddch(botwin, ypos, xpos, c);
				xpos++;
			}
			continue;
		}
	}
	while (c != 13 && c != 10);
	*p = 0;
	new_line();
}
static int saved_x;		/* bottom-window column saved across upper output */

/* Redirect output to the (cleared) status window, remembering the
 * scroll-window cursor so end_upper() can restore it. */
static void begin_upper(void)
{
	saved_x = xpos;
	werase(topwin);
	curwin = topwin;
	xpos = 0;
	ypos = 0;
}
/* Finish status-window output: flush the pending word while the top
 * window is still current, then restore the scrolling window cursor. */
static void end_upper(void)
{
	flush_word();
	xpos = saved_x;
	ypos = bottom;
	curwin = botwin;
	wrefresh(topwin);
}
/* Set up the curses display: a fixed status window on top (at most 10
 * rows, at most half the screen) and a scrolling window below.
 * Aborts via error() if the terminal is too small. */
static void display_init(void)
{
	int trow;
	initscr();
	noecho();
	cbreak();
	nonl();
	getmaxyx(stdscr, rows, cols);
	if (rows < 16)
		error("display too small");
	trow = 10;
	if (rows / 2 < 10)
		trow = rows / 2;
	bottom = rows - trow;
	topwin = newwin(trow, cols, 0, 0);
	/* bottom-- : 'bottom' becomes the index of the last usable row */
	botwin = newwin(bottom--, cols, trow, 0);
	if (!topwin || !botwin)
		error("curses");
	scrollok(botwin, TRUE);
	curwin = botwin;
	new_line();
}
/* Shut curses down so the terminal is restored on exit. */
static void display_exit(void)
{
	endwin();
}
#else
/* Plain-tty backend: no separate status window, so "maybe" redraws are
 * ignored and only forced redraws repaint the room. */
#define REDRAW_MASK REDRAW
static char wbuf[80];		/* word being assembled for wrapping */
static int wbp = 0;		/* length of the pending word */
static int xpos = 0;		/* current output column */
static void display_init(void)
{
char *c;
#ifdef TIOCGWINSZ
struct winsize w;
if (ioctl(0, TIOCGWINSZ, &w) != -1) {
rows = w.ws_row;
cols = ws.ws_col;
return;
}
#elif VTSIZE
int16_t v = ioctl(0, VTSIZE, 0);
if (v != -1) {
rows = v >> 8;
cols = v;
return;
}
#endif
c = getenv("COLS");
rows = 25;
cols = c ? atoi(c): 80;
if (cols == 0)
cols = 80;
}
/* Plain tty: nothing to restore on exit. */
static void display_exit(void)
{
}
/* Write the buffered word to stdout and advance the column. */
static void flush_word(void)
{
	int emitted = wbp;

	write(1, wbuf, emitted);
	wbp = 0;
	xpos += emitted;
}
/* Word-wrapping character output to stdout: letters are buffered, a
 * space flushes the pending word (wrapping first if it would not fit),
 * a newline flushes and resets the column. */
static void char_out(char c)
{
	switch (c) {
	case '\n':
		flush_word();
		write(1, "\n", 1);
		xpos = 0;
		break;
	case ' ':
		if (xpos + wbp >= cols) {
			xpos = 0;
			write(1,"\n", 1);
		}
		flush_word();
		write(1," ", 1);
		xpos++;
		break;
	default:
		if (wbp < 80)
			wbuf[wbp++] = c;
		break;
	}
}
/* Print a NUL-terminated byte string through the word wrapper. */
static void strout_lower(const uint8_t *p)
{
	for (; *p; p++)
		char_out(*p);
}
/* Print a string followed by a single space (used between messages). */
static void strout_lower_spc(const uint8_t *p)
{
	strout_lower(p);
	char_out(' ');
}
/* Print an unsigned 16-bit value in decimal through the word wrapper. */
static void decout_lower(uint16_t v)
{
#ifdef __linux__
	char buf[9];
	/* Use the whole buffer: the size was hard-coded to 8, wasting
	 * the last byte of the 9-byte buffer. */
	snprintf(buf, sizeof(buf), "%d", v); /* FIXME: avoid expensive snprintf */
	strout_lower((uint8_t *)buf);
#else
	strout_lower((uint8_t *)_itoa(v));
#endif
}
/* Plain tty has no separate status window; "upper" output is ordinary
 * output (begin_upper()/end_upper() add visual separation). */
static void strout_upper(const uint8_t *p)
{
	strout_lower(p);
}
/* Read one line from stdin into linebuf, stripping a trailing newline.
 * Aborts via error() on read failure. */
static void line_input(void)
{
	/* Reserve one byte for the terminator: reading sizeof(linebuf)
	 * bytes and then storing linebuf[l] = 0 would write one past the
	 * end of the 81-byte buffer. */
	int l = read(0, linebuf, sizeof(linebuf) - 1);
	if (l < 0)
		error("read");
	linebuf[l] = 0;
	if (l && linebuf[l-1] == '\n')
		linebuf[l-1] = 0;
}
/* Read a whole line and return its first character (so "yes" answers
 * work the same as single keys on a line-buffered tty). */
static char readchar(void)
{
	line_input();
	return *linebuf;
}
/* Plain tty: separate the "status" output with blank lines.
 * Cast: strout_upper() takes const uint8_t *, string literals are
 * char[] — the uncast call violated a C constraint (-Wpointer-sign). */
static void begin_upper(void)
{
	strout_upper((const uint8_t *)"\n\n\n\n");
}
/* Close the "status" output with a full-width rule of dashes. */
static void end_upper(void)
{
	uint8_t n;

	char_out('\n');
	for (n = cols; n; n--)
		char_out('-');
	char_out('\n');
}
#endif
/******************** Common code ******************/
/* Prompt-free yes/no: returns 1 on Y/y (or German J/j), 0 on N/n or
 * end of input; any other key is ignored.
 * NOTE(review): the c == -1 end-of-input check relies on char being
 * signed — confirm for the target platforms. */
static uint8_t yes_or_no(void)
{
	for (;;) {
		char c = readchar();
		if (c == 'Y' || c == 'y' || c == 'J' || c == 'j')
			return 1;
		if (c == -1 || c == 'N' || c == 'n')
			return 0;
	}
}
/* Restore the display and terminate with the given exit code. */
static void exit_game(uint8_t code)
{
	display_exit();
	exit(code);
}
/* Fatal error: restore the display, print the message to stderr and
 * exit with status 1.  Does not return. */
static void error(const char *p)
{
	display_exit();
	write(2, p, strlen(p));
	exit(1);
}
/* Return 1 with roughly v percent probability (v is 0-100).  The
 * percentage is scaled onto 0-249 and compared with a pseudo-random
 * byte taken from bits 3-10 of rand() (skipping the weak low bits). */
static uint8_t random_chance(uint8_t v)
{
	uint8_t threshold = v + v + (v >> 1); /* scale as 0-249 */

	return (((rand() >> 3) & 0xFF) <= threshold) ? 1 : 0;
}
/* Return a pointer to the first non-whitespace character of p.
 * Cast to unsigned char: passing a possibly-negative char to isspace()
 * is undefined behaviour (CERT STR37-C). */
static char *skip_spaces(char *p)
{
	while (*p && isspace((unsigned char)*p))
		p++;
	return p;
}
/* Copy the next word of p into wordbuf (space padded, at most WORDSIZE
 * characters kept, the rest of an over-long word discarded) and return
 * a pointer just past the word. */
static char *copyword(char *p)
{
	char *dst = wordbuf;

	p = skip_spaces(p);
	memset(wordbuf, ' ', WORDSIZE+1);
	/* Copy up to WORDSIZE characters of the word */
	while (*p && !isspace(*p) && dst < wordbuf + WORDSIZE)
		*dst++ = *p++;
	/* Skip the tail of an over-long word */
	while (*p && !isspace(*p))
		p++;
	return p;
}
/* Compare l bytes of a vocabulary word (bit 7 is a synonym flag and is
 * masked off) against user text, case-insensitively.  Returns 1 on
 * match, 0 otherwise. */
static int wordeq(const uint8_t *a, const char *b, uint8_t l)
{
	uint8_t i;

	for (i = 0; i < l; i++)
		if ((a[i] & 0x7F) != toupper(b[i]))
			return 0;
	return 1;
}
/* Look up wordbuf in a vocabulary table (verbs[] or nouns[]).
 * Entries with bit 7 set on the first byte are synonyms; the returned
 * code is the raw index of the last canonical (non-synonym) entry at
 * or before the match.  Returns 0 for no word, 255 for unknown. */
static uint8_t whichword(const uint8_t *p)
{
	uint8_t code = 0;
	uint8_t i = 0;
	if (*wordbuf == 0 || *wordbuf == ' ')
		return 0; /* No word */
	/* i wraps to 255 so the first i++ in the loop yields index 0 */
	i--;
	do {
		i++;
		if (!(*p & 0x80))
			code = i;	/* canonical entry: remember its index */
		if (wordeq(p, wordbuf, WORDSIZE))
			return code;
		p += WORDSIZE;
	} while(*p != 0);
	return 255;
}
/* Parse the noun starting at x: remember the raw text in nounbuf (for
 * the "print noun" actions), extract the word and look it up. */
static void scan_noun(char *x)
{
	x = skip_spaces(x);
	nounbuf = x;
	copyword(x);
	noun = whichword(nouns);
}
/* Parse linebuf as "verb [noun]", filling the verb and noun globals. */
static void scan_input(void)
{
	char *x = copyword(linebuf);
	verb = whichword(verbs);
	scan_noun(x);
}
/* Expand the classic single-letter abbreviations (N/E/S/W/U/D/I) in
 * linebuf into their full commands.  Anything longer than one word
 * character is left untouched. */
void abbrevs(void)
{
	char *x = skip_spaces(linebuf);
	const char *expansion = NULL;

	/* Only a lone letter qualifies */
	if (x[1] != 0 && x[1] != ' ')
		return;
	switch (toupper(*x)) {
	case 'N': expansion = "NORTH"; break;
	case 'E': expansion = "EAST"; break;
	case 'S': expansion = "SOUTH"; break;
	case 'W': expansion = "WEST"; break;
	case 'U': expansion = "UP"; break;
	case 'D': expansion = "DOWN"; break;
	case 'I': expansion = "INVEN"; break;
	default: break;
	}
	if (expansion)
		strcpy(linebuf, expansion);
}
/* Evaluate n two-byte condition records at p.  Returns the pointer
 * just past the conditions (i.e. the start of the actions) when all
 * conditions hold, or NULL as soon as one fails.  Condition opcode 0
 * queues its parameter into param[] for the action interpreter. */
static const uint8_t *run_conditions(const uint8_t *p, uint8_t n)
{
	uint8_t i;
	for (i = 0; i < n; i++) {
		uint8_t opc = *p++;
		/* The top three bits of the opcode byte are the parameter's
		 * high bits: par = low byte | (high bits << 8).  The previous
		 * code shifted them right by 5, OR-ing them into the LOW bits
		 * and corrupting any parameter >= 256 (the status table uses
		 * e.g. opcode 32 + byte 244 to encode the counter value 500). */
		uint16_t par = *p++ | ((opc & 0xE0) << 3);
		/* NOTE(review): objloc[] is indexed before the opcode is
		 * decoded; for non-object parameters (flags, counters) this
		 * reads past the array and the value is simply unused —
		 * matches the reference engine, but worth confirming. */
		uint8_t op = game.objloc[par];
		opc &= 0x1F;
		switch(opc) {
		case 0:			/* parameter: queue par for the actions */
			*param++ = par;
			break;
		case 1:			/* object par is carried */
			if (op != 255)
				return NULL;
			break;
		case 2:			/* object par is in this room */
			if (op != game.location)
				return NULL;
			break;
		case 3:			/* object par carried or present */
			if (op != 255 && op != game.location)
				return NULL;
			break;
		case 4:			/* player is in room par */
			if (game.location != par)
				return NULL;
			break;
		case 5:			/* object par not in this room */
			if (op == game.location)
				return NULL;
			break;
		case 6:			/* object par not carried */
			if (op == 255)
				return NULL;
			break;
		case 7:			/* player not in room par */
			if (game.location == par)
				return NULL;
			break;
		case 8:			/* bitflag par is set */
			if (!(game.bitflags & (ONEBIT << par)))
				return NULL;
			break;
		case 9:			/* bitflag par is clear */
			if (game.bitflags & (ONEBIT << par))
				return NULL;
			break;
		case 10:		/* carrying something */
			if (!game.carried)
				return NULL;
			break;
		case 11:		/* carrying nothing */
			if (game.carried)
				return NULL;
			break;
		case 12:		/* object par neither carried nor here */
			if (op == 255 || op == game.location)
				return NULL;
			break;
		case 13:		/* object par in play (not destroyed) */
			if (op == 0)
				return NULL;
			break;
		case 14:		/* object par destroyed */
			if (op != 0)
				return NULL;
			break;
		case 15:		/* counter <= par */
			if (game.counter > par)
				return NULL;
			break;
		case 16:		/* counter >= par */
			if (game.counter < par)
				return NULL;
			break;
		case 17:		/* object par in its initial location */
			if (op != objinit[par])
				return NULL;
			break;
		case 18:		/* object par moved from initial location */
			if (op == objinit[par])
				return NULL;
			break;
		case 19:		/* counter == par */
			if (game.counter != par)
				return NULL;
			break;
		default:
			error("BADCOND");
		}
	}
	return p;
}
/* Return 1 when the player can see: either it is not dark, or the
 * light source object is carried (255) or in the current room. */
uint8_t islight(void)
{
	uint8_t where = game.objloc[LIGHT_SOURCE];

	if (game.bitflags & (ONEBIT << DARKFLAG))
		return (where == 255 || where == game.location) ? 1 : 0;
	return 1;
}
/* Paint the room description into the upper/status area: location text
 * (prefixed with "you are" unless the text starts with '*'), the list
 * of obvious exits, and any objects lying in the room. */
static void action_look(void)
{
	const uint8_t *e;
	const uint8_t *p;
	uint8_t c;
	uint8_t f = 1;	/* "first item" flag for comma/dash separators */
	const uint8_t **op = objtext;
	redraw = 0;
	begin_upper();
	if (!islight()) {
		strout_upper(itsdark);
		end_upper();
		return;
	}
	p = locdata[game.location].text;
	e = locdata[game.location].exit;
	/* A leading '*' means the text is a complete sentence already */
	if (*p == '*')
		p++;
	else
		strout_upper(youare);
	strout_upper(p);
	strout_upper(newline);
	strout_upper(obexit);
	/* Six exit slots, one per direction */
	for (c = 0; c < 6; c++) {
		if (*e++) {
			if (f)
				f = 0;
			else
				strout_upper(dashstr);
			strout_upper(exitmsgptr[c]);
		}
	}
	if (f)
		strout_upper(nonestr);
	strout_upper(dotnewline);
	f = 1;
	/* List objects whose location is this room */
	e = game.objloc;
	while(e < game.objloc + NUM_OBJ) {
		if (*e++ == game.location) {
			if (f) {
				strout_upper(canalsosee);
				f = 0;
			} else
				strout_upper(dashstr);
			strout_upper(*op);
		}
		op++;
	}
	end_upper();
}
static void action_delay(void)
{
sleep(2);
}
/* Player death: announce it, lift the darkness and move the player to
 * the "limbo" room (lastloc), then repaint. */
static void action_dead(void)
{
	strout_lower(dead);
	game.bitflags &= ~(ONEBIT << DARKFLAG);
	game.location = lastloc;
	action_look();
}
/* Offer another game: longjmp back to main()'s setjmp on yes,
 * otherwise exit cleanly.  Never returns to the caller. */
static void action_quit(void)
{
	strout_lower(playagain);
	if (yes_or_no())
		longjmp(restart, 0);
	exit_game(0);
}
static void action_score(void)
{
uint8_t *p = game.objloc;
const uint8_t **m = objtext;
uint8_t t = 0, s = 0;
while(p < game.objloc + NUM_OBJ) {
if (*m[0] == '*') {
t++;
if (*p == treasure)
s++;
}
m++;
p++;
}
strout_lower(stored_msg);
decout_lower(s);
strout_lower(stored_msg2);
decout_lower((s * (uint16_t)100) / t);
strout_lower(dotnewline);
if (s == t)
action_quit();
}
/* List everything the player carries (objects whose location is 255),
 * separated by dashes, or "nothing" when empty-handed. */
static void action_inventory(void)
{
	unsigned i;
	uint8_t first = 1;

	strout_lower(carrying);
	if (game.carried == 0) {
		strout_lower(nothing);
	} else {
		for (i = 0; i < NUM_OBJ; i++) {
			if (game.objloc[i] != 255)
				continue;	/* 255 = carried */
			if (first)
				first = 0;
			else
				strout_lower(dashstr);
			strout_lower(objtext[i]);
		}
	}
	strout_lower(dotnewline);
}
/* Prompt for and return a save-file name (pointer into linebuf, may be
 * the empty string if the user just pressed return).
 * Cast added: strout_lower() takes const uint8_t *, the literal is
 * char[] — the uncast call violated a C constraint. */
static char *filename(void)
{
	strout_lower((const uint8_t *)"File name ? ");
	line_input();
	return skip_spaces(linebuf);
}
/* Save the game state verbatim to a user-named file.  An empty name
 * cancels.  Fixes from the original: no close(-1) when open fails and
 * no double close() on the success path. */
static void action_save(void)
{
	int fd;
	char *p = filename();

	if (*p == 0)
		return;
	game.magic = GAME_MAGIC;
	fd = open(p, O_WRONLY|O_CREAT|O_TRUNC, 0600);
	if (fd == -1) {
		strout_lower((const uint8_t *)"Save failed.\n");
		return;
	}
	if (write(fd, &game, sizeof(game)) != sizeof(game)) {
		strout_lower((const uint8_t *)"Save failed.\n");
		close(fd);
		return;
	}
	if (close(fd) == -1)
		strout_lower((const uint8_t *)"Save failed.\n");
}
/* Repeatedly prompt for a save file until one loads and validates
 * (magic matches) or the user enters an empty name.  Returns 1 on a
 * successful load, 0 on cancel.  Fixes from the original: the fd is
 * closed exactly once (no close(-1) on open failure, no double close
 * after a magic mismatch); a close() failure on a read-only fd is
 * deliberately ignored. */
static int action_restore(void)
{
	for (;;) {
		char *p = filename();
		int fd;

		if (*p == 0)
			return 0;
		fd = open(p, O_RDONLY, 0600);
		if (fd != -1) {
			int ok = read(fd, &game, sizeof(game)) == sizeof(game);
			close(fd);
			if (ok && game.magic == GAME_MAGIC)
				return 1;
		}
		strout_lower((const uint8_t *)"Load failed.\n");
	}
}
/* Move object i to location l, requesting a room repaint when the set
 * of visible objects changed (something left or entered the room).
 * NOTE(review): game.carried is not adjusted here even though GET/DROP
 * route through this function — confirm where the carry count is
 * meant to be maintained. */
static void moveitem(uint8_t i, uint8_t l)
{
	uint8_t old = game.objloc[i];

	if (old == game.location)
		redraw |= REDRAW_MAYBE;	/* something vanished from view */
	if (l == game.location)
		redraw |= REDRAW;	/* something appeared */
	game.objloc[i] = l;
}
/* Execute n action opcodes from p.  Opcodes < 50 print message [a],
 * opcodes > 102 print message [a - 50]; 51..89 are engine operations
 * whose operands are consumed in order from the param[] queue filled
 * by run_conditions().  Fix: case 66 (inventory) was missing its
 * break and fell through into case 67, wrongly setting bit flag 0
 * after every inventory listing. */
static void run_actions(const uint8_t *p, uint8_t n)
{
	uint8_t i;
	for (i = 0; i < n; i++) {
		uint8_t a = *p++;
		uint8_t tmp;
		uint16_t tmp16;
		if (a < 50) {
			strout_lower_spc(msgptr[a]);
			continue;
		}
		if (a > 102 ) {
			strout_lower_spc(msgptr[a - 50]);
			continue;
		}
		switch(a) {
		case 51: /* nop - check */
			break;
		case 52: /* Get */
			if (game.carried >= maxcar)
				/* NOTE(review): the parameter is not consumed on
				 * failure, which would desync any later
				 * parameterised action on the same line —
				 * matches the original, confirm intent. */
				strout_lower(toomuch);
			else
				moveitem(*param++, 255);
			break;
		case 53: /* Drop */
			moveitem(*param++, game.location);
			break;
		case 54: /* Go */
			game.location = *param++;
			redraw = REDRAW;
			break;
		case 55: /* Destroy */
		case 59: /* ?? */
			moveitem(*param++, 0);
			break;
		case 56: /* Set dark flag */
			game.bitflags |= (ONEBIT << DARKFLAG);
			break;
		case 57: /* Clear dark flag */
			game.bitflags &= ~(ONEBIT << DARKFLAG);
			break;
		case 58: /* Set bit */
			game.bitflags |= (ONEBIT << *param++);
			break;
		/* 59 see 55 */
		case 60: /* Clear bit */
			game.bitflags &= ~(ONEBIT << *param++);
			break;
		case 61: /* Dead */
			action_dead();
			break;
		case 64: /* Look */
		case 76: /* Also Look ?? */
			action_look();
			break;
		case 62: /* Place obj, loc */
			tmp = *param++;
			moveitem(tmp, *param++);
			break;
		case 63: /* Game over */
			action_quit();
			/* fall through: action_quit() does not return */
		case 65: /* Score */
			action_score();
			break;
		case 66: /* Inventory */
			action_inventory();
			break;	/* was missing: fell through into "set bit 0" */
		case 67: /* Set bit 0 */
			game.bitflags |= (ONEBIT << 0);
			break;
		case 68: /* Clear bit 0 */
			game.bitflags &= ~(ONEBIT << 0);
			break;
		case 69: /* Refill lamp */
			game.lighttime = lightfill;
			game.bitflags &= ~(ONEBIT << LIGHTOUT);
			moveitem(LIGHT_SOURCE, 255);
			break;
		case 70: /* Wipe lower */
			/* TODO */
			break;
		case 71: /* Save */
			action_save();
			break;
		case 72: /* Swap two objects */
			tmp = game.objloc[*param];
			moveitem(*param, game.objloc[param[1]]);
			moveitem(param[1], tmp);
			param += 2;
			break;
		case 73: /* Continue with the next table line */
			continuation = 1;
			break;
		case 74: /* Get without weight rule */
			moveitem(*param++, 255);
			break;
		case 75: /* Put one item by another */
			moveitem(*param, game.objloc[param[1]]);
			param += 2;
			break;
		case 77: /* Decrement counter */
			if (game.counter >= 0)
				game.counter--;
			break;
		case 78: /* Display counter */
			decout_lower(game.counter);
			break;
		case 79: /* Set counter */
			game.counter = *param++;
			break;
		case 80: /* Swap player and saved room */
			tmp = game.savedroom;
			game.savedroom = game.location;
			game.location = tmp;
			redraw = REDRAW;
			break;
		case 81: /* Swap counter and counter n */
			tmp16 = game.counter;
			game.counter = game.counter_array[*param];
			game.counter_array[*param++] = tmp16;
			break;
		case 82: /* Add to counter */
			game.counter += *param++;
			break;
		case 83: /* Subtract from counter */
			game.counter -= *param++;
			if (game.counter < 0)
				game.counter = -1;
			break;
		case 84: /* Print noun, newline */
			strout_lower((uint8_t *)nounbuf);
			/* Fall through */
		case 86: /* Print newline */
			strout_lower(newline);
			break;
		case 85: /* Print noun */
			strout_lower((uint8_t *)nounbuf);
			break;
		case 87: /* Swap player and saveroom array entry */
			tmp16 = *param++;
			tmp = game.roomsave[tmp16];
			game.roomsave[tmp16] = game.location;
			if (tmp != game.location) {
				game.location = tmp;
				redraw = REDRAW;
			}
			break;
		case 88: /* Pause */
			action_delay();
			break;
		case 89:
			param++; /* SAGA etc specials */
			break;
		default:
			error("BADACT");
		}
	}
}
/* Advance linestart past the current action-table entry by decoding
 * its header byte: bit 7 clear = a verb/noun pair follows; bits 7 set
 * with 6 and 5 clear = a random-chance byte follows; bits 1-0 encode
 * action count - 1 and bits 4-2 the condition count (2 bytes each). */
void next_line(void)
{
	uint8_t c = *linestart++;
	if (!(c & 0x80))
		linestart += 2; /* Skip verb/noun */
	else if (!(c & 0x60))
		linestart++; /* Skip random value */
	linestart += (c & 3) + 1; /* Actions 1 - 4 */
	c >>= 1;
	c &= 0x0E; /* 2 x conditions */
	linestart += c;
}
/* Run one table entry: evaluate its c conditions and, when they all
 * pass, replay the queued parameters and execute its a actions.
 * Always advances linestart to the next entry. */
void run_line(const uint8_t *ptr, uint8_t c, uint8_t a)
{
	memset(param_buf, 0, sizeof(param_buf));
	param = param_buf;
	if (c)
		ptr = run_conditions(ptr, c);
	if (!ptr) {		/* a condition failed */
		next_line();
		return;
	}
	actmatch = 1;
	param = param_buf;	/* rewind the queue for the action decoder */
	run_actions(ptr, a);
	next_line();
}
/* Walk an action table (status[] or actions[]) from the top.
 * Header byte: 255 ends the table.  Bit 7 set = automatic entry:
 * bit 6 = only runs as a continuation of the previous entry, bit 5
 * clear = gated by a random-chance byte.  Bit 7 clear = verb/noun
 * entry (noun 0 matches any noun); scanning stops at the first such
 * entry once an earlier one has already run (actmatch). */
void run_table(const uint8_t *tp)
{
	continuation = 0;
	linestart = tp;
	while(1) {
		uint8_t hdr;
		uint8_t c, a;
		tp = linestart;
		hdr = *tp++;
		c = (hdr >> 2) & 0x07;	/* condition count */
		a = (hdr & 3) + 1;	/* action count */
		/* printf("H%02X c = %d a = %d\n", hdr, c, a); */
		if (hdr == 255)
			return; /* End of table */
		if (hdr & 0x80) {
			if (hdr & 0x40) { /* Auto 0 */
				/* Continuation-only entry */
				if (continuation)
					run_line(tp, c, a);
				else
					next_line();
				continue;
			}
			continuation = 0;
			if (!(hdr & 0x20)) { /* Auto number */
				/* Chance byte gates this entry */
				if (random_chance(*tp++))
					run_line(tp, c, a);
				else
					next_line();
				continue;
			}
			run_line(tp, c, a);
		} else {
			if (actmatch)
				return;
			/* printf("VN %d %d\n", *tp, tp[1]); */
			linematch = 1;
			continuation = 0;
			if (*tp++ == verb && (*tp == noun || *tp == 0))
				run_line(tp+1, c, a);
			else
				next_line();
		}
	}
}
/* Map the typed noun to an object number via the automap table,
 * accepting only objects currently at loc (the room for GET, 255 =
 * carried for DROP).  Returns 255 when nothing matches. */
uint8_t autonoun(uint8_t loc)
{
	const uint8_t *entry;

	if (*wordbuf == ' ' || *wordbuf == 0)
		return 255;
	for (entry = automap; *entry; entry += WORDSIZE + 1) {
		if (strncasecmp((const char *)entry, wordbuf, WORDSIZE) == 0 &&
		    game.objloc[entry[WORDSIZE]] == loc)
			return entry[WORDSIZE];
	}
	return 255;
}
/* Dispatch the parsed verb/noun: first through the game's action
 * table, then fall back to the engine's built-in automatic GET and
 * DROP handling via the automap table. */
void run_command(void)
{
	uint8_t obj;

	run_table(actions);
	if (actmatch)
		return;
	if (verb != VERB_GET && verb != VERB_DROP)
		return;

	/* Built-in GET/DROP always counts as handled */
	actmatch = 1;
	if (noun == 0) {
		strout_lower(whatstr);
		return;
	}
	if (verb == VERB_GET) {
		if (game.carried >= maxcar) {
			strout_lower(toomuch);
			return;
		}
		obj = autonoun(game.location);
		if (obj == 255)
			strout_lower(beyondpower);
		else
			moveitem(obj, 255);
	} else {	/* VERB_DROP */
		obj = autonoun(255);
		if (obj == 255)
			strout_lower(beyondpower);
		else
			moveitem(obj, game.location);
	}
}
/*
 * Tick the light-source timer once per game turn.
 *
 * Does nothing when the light source object is nowhere (objloc == 0)
 * or when its remaining life is 255, which appears to mean "never
 * runs out" (the counter is not decremented in that case).
 */
void process_light(void)
{
	uint8_t l;
	if ((l = game.objloc[LIGHT_SOURCE]) == 0)
		return;
	if (game.lighttime == 255)
		return;
	if (!--game.lighttime) {
		/* Lamp just died.  Original author's note was "Check
		 * clear !" - whether clearing (rather than setting) the
		 * LIGHTOUT flag is correct is worth confirming. */
		game.bitflags &= ~(ONEBIT << LIGHTOUT);
		/* 255 == carried by the player, so the player can see it */
		if (l == 255 || l == game.location) {
			strout_lower(lightout);
			redraw = REDRAW_MAYBE;
			return;
		}
	}
	/* Warn the player once fewer than 26 turns remain */
	if (game.lighttime > 25)
		return;
	strout_lower(lightoutin);
	decout_lower(game.lighttime);
	strout_lower(game.lighttime == 1 ? turn : turns);
}
/*
 * Top-level game loop: tick the light timer, run the automatic status
 * table, redraw the room if required, then read, parse and dispatch
 * one player command.
 *
 * Never returns; restarting the game is done with longjmp() from an
 * action handler back into main().
 */
void main_loop(void)
{
	uint8_t first = 1;
	char *p;
	action_look();
	while (1) {
		/* The light does not burn down before the first command */
		if (!first)
			process_light();
		else
			first = 0;
		/* Run the automatic actions with no player verb/noun */
		verb = 0;
		noun = 0;
		run_table(status);
		if (redraw & REDRAW_MASK)
			action_look();
		strout_lower(whattodo);
		/* Input loop: repeat until we have a recognised verb */
		do {
			do {
				strout_lower(prompt);
				line_input();
				abbrevs();
				p = skip_spaces(linebuf);
			}
			while(*p == 0);	/* ignore blank lines */
			scan_noun(p);
			/* A bare direction word (nouns 1-6) is shorthand
			 * for GO <direction> */
			if (noun && noun <= 6) {
				verb = VERB_GO;
				break;
			}
			scan_input();
			if (verb == 255)
				strout_lower(dontknow);
		} while (verb == 255);
		/* Built-in movement handling */
		if (verb == VERB_GO) {
			if (!noun) {
				strout_lower(givedirn);
				continue;
			}
			if (noun <= 6) {
				uint8_t light = islight();
				uint8_t dir;
				if (!light)
					strout_lower(darkdanger);
				dir = locdata[game.location].exit[noun - 1];
				if (!dir) {
					/* Walking into a wall in the dark
					 * is fatal */
					if (!light) {
						strout_lower(brokeneck);
						action_delay();
						action_dead();
						continue;
					}
					strout_lower(cantgo);
					continue;
				}
				game.location = dir;
				redraw = REDRAW;
				continue;
			}
		}
		/* Everything else goes through the action table */
		linematch = 0;
		actmatch = 0;
		run_command();
		if (actmatch)
			continue;
		/* A verb/noun line was seen but its conditions failed */
		if (linematch) {
			strout_lower(notyet);
			continue;
		}
		strout_lower(dontunderstand);
	}
}
/*
 * Reset the whole game state to the values baked into the database:
 * object positions, flags, counters, saved rooms, starting location,
 * lamp life and initial inventory count.
 */
void start_game(void)
{
	/* Object placement comes from the game database */
	memcpy(game.objloc, objinit, sizeof(game.objloc));

	/* Clear all per-game scratch state */
	memset(game.counter_array, 0, sizeof(game.counter_array));
	memset(game.roomsave, 0, sizeof(game.roomsave));
	game.bitflags = 0;
	game.counter = 0;
	game.savedroom = 0;

	/* Starting conditions */
	game.location = startloc;
	game.lighttime = startlamp;
	game.carried = startcarried;
}
/*
 * Program entry point: initialise the display, offer to restore a
 * saved game, otherwise start fresh, then enter the game loop.
 *
 * setjmp(restart) is the target that an in-game RESTART action
 * longjmp()s back to; execution then re-runs the restore prompt.
 */
int main(int argc, char *argv[])
{
	/* Command line arguments are currently unused */
	(void)argc;
	(void)argv;

	display_init();
	setjmp(restart);
	strout_lower("Restore a saved game ? ");
	if (!yes_or_no() || !action_restore())
		start_game();
	main_loop();
	return 0;	/* not reached: main_loop() never returns */
}
| gpl-2.0 |
defconoi/Unleashed-Flo-Kernel | kernel/sysctl.c | 6 | 58869 | /*
* sysctl.c: General linux system control interface
*
* Begun 24 March 1995, Stephen Tweedie
* Added /proc support, Dec 1995
* Added bdflush entry and intvec min/max checking, 2/23/96, Tom Dyas.
* Added hooks for /proc/sys/net (minor, minor patch), 96/4/1, Mike Shaver.
* Added kernel/java-{interpreter,appletviewer}, 96/5/10, Mike Shaver.
* Dynamic registration fixes, Stephen Tweedie.
* Added kswapd-interval, ctrl-alt-del, printk stuff, 1/8/97, Chris Horn.
* Made sysctl support optional via CONFIG_SYSCTL, 1/10/97, Chris
* Horn.
* Added proc_doulongvec_ms_jiffies_minmax, 09/08/99, Carlos H. Bauer.
* Added proc_doulongvec_minmax, 09/08/99, Carlos H. Bauer.
* Changed linked lists to use list.h instead of lists.h, 02/24/00, Bill
* Wendling.
* The list_for_each() macro wasn't appropriate for the sysctl loop.
* Removed it and replaced it with older style, 03/23/00, Bill Wendling
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/bitmap.h>
#include <linux/signal.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/kmemcheck.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/net.h>
#include <linux/sysrq.h>
#include <linux/highuid.h>
#include <linux/writeback.h>
#include <linux/ratelimit.h>
#include <linux/compaction.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/limits.h>
#include <linux/dcache.h>
#include <linux/dnotify.h>
#include <linux/syscalls.h>
#include <linux/vmstat.h>
#include <linux/nfs_fs.h>
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/kmod.h>
#include <linux/capability.h>
#include <linux/binfmts.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#ifdef CONFIG_X86
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/io.h>
#endif
#ifdef CONFIG_SPARC
#include <asm/setup.h>
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
#include <linux/acct.h>
#endif
#ifdef CONFIG_RT_MUTEXES
#include <linux/rtmutex.h>
#endif
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT)
#include <linux/lockdep.h>
#endif
#ifdef CONFIG_CHR_DEV_SG
#include <scsi/sg.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
#include <linux/nmi.h>
#endif
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
extern char core_pattern[];
extern unsigned int core_pipe_limit;
extern int pid_max;
extern int min_free_kbytes;
extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
extern int compat_log;
extern int latencytop_enabled;
extern int sysctl_nr_open_min, sysctl_nr_open_max;
#ifndef CONFIG_MMU
extern int sysctl_nr_trim_pages;
#endif
#ifdef CONFIG_BLOCK
extern int blk_iopoll_enabled;
#endif
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
static int sixty = 60;
static int neg_one = -1;
#endif
static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused three = 3;
static unsigned long one_ul = 1;
static int one_hundred = 100;
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
static const int cap_last_cap = CAP_LAST_CAP;
/*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
#ifdef CONFIG_DETECT_HUNG_TASK
static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
#endif
#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
#endif
#ifdef CONFIG_SPARC
#endif
#ifdef CONFIG_SPARC64
extern int sysctl_tsb_ratio;
#endif
#ifdef __hppa__
extern int pwrsw_enabled;
extern int unaligned_enabled;
#endif
#ifdef CONFIG_IA64
extern int no_unaligned_warning;
extern int unaligned_dump_stack;
#endif
#ifdef CONFIG_PROC_SYSCTL
static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
#ifdef CONFIG_PRINTK
static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */
static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
/*
 * proc handler for /proc/sys/kernel/sysrq: behaves like
 * proc_dointvec() but, on a successful write, pushes the new value
 * into the sysrq core via sysrq_toggle_support().
 */
static int sysrq_sysctl_handler(ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int rc = proc_dointvec(table, write, buffer, lenp, ppos);

	if (rc)
		return rc;

	if (write)
		sysrq_toggle_support(__sysrq_enabled);

	return 0;
}
#endif
static struct ctl_table kern_table[];
static struct ctl_table vm_table[];
static struct ctl_table fs_table[];
static struct ctl_table debug_table[];
static struct ctl_table dev_table[];
extern struct ctl_table random_table[];
#ifdef CONFIG_EPOLL
extern struct ctl_table epoll_table[];
#endif
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
int sysctl_legacy_va_layout;
#endif
/* The default sysctl tables: */
/*
 * The default sysctl tables: the root of the /proc/sys tree.  Each
 * entry here is a directory (mode 0555) whose contents come from the
 * per-subsystem child table.
 */
static struct ctl_table sysctl_base_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = kern_table,
	},
	{
		.procname = "vm",
		.mode = 0555,
		.child = vm_table,
	},
	{
		.procname = "fs",
		.mode = 0555,
		.child = fs_table,
	},
	{
		.procname = "debug",
		.mode = 0555,
		.child = debug_table,
	},
	{
		.procname = "dev",
		.mode = 0555,
		.child = dev_table,
	},
	{ }	/* sentinel */
};
#ifdef CONFIG_SCHED_DEBUG
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
#endif
#ifdef CONFIG_COMPACTION
static int min_extfrag_threshold;
static int max_extfrag_threshold = 1000;
#endif
/*
 * /proc/sys/kernel/...: core kernel tunables.  Entries wrapped in
 * #ifdef exist only when the corresponding subsystem is configured.
 * The table is terminated by an empty sentinel entry.
 */
static struct ctl_table kern_table[] = {
	{
		.procname = "sched_child_runs_first",
		.data = &sysctl_sched_child_runs_first,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_SCHED_DEBUG
	{
		.procname = "sched_min_granularity_ns",
		.data = &sysctl_sched_min_granularity,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_proc_update_handler,
		.extra1 = &min_sched_granularity_ns,
		.extra2 = &max_sched_granularity_ns,
	},
	{
		.procname = "sched_latency_ns",
		.data = &sysctl_sched_latency,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_proc_update_handler,
		.extra1 = &min_sched_granularity_ns,
		.extra2 = &max_sched_granularity_ns,
	},
	{
		.procname = "sched_wakeup_granularity_ns",
		.data = &sysctl_sched_wakeup_granularity,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_proc_update_handler,
		.extra1 = &min_wakeup_granularity_ns,
		.extra2 = &max_wakeup_granularity_ns,
	},
	{
		.procname = "sched_tunable_scaling",
		.data = &sysctl_sched_tunable_scaling,
		.maxlen = sizeof(enum sched_tunable_scaling),
		.mode = 0644,
		.proc_handler = sched_proc_update_handler,
		.extra1 = &min_sched_tunable_scaling,
		.extra2 = &max_sched_tunable_scaling,
	},
	{
		.procname = "sched_migration_cost",
		.data = &sysctl_sched_migration_cost,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "sched_nr_migrate",
		.data = &sysctl_sched_nr_migrate,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "sched_time_avg",
		.data = &sysctl_sched_time_avg,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "sched_shares_window",
		.data = &sysctl_sched_shares_window,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "timer_migration",
		.data = &sysctl_timer_migration,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
#endif
	{
		.procname = "sched_rt_period_us",
		.data = &sysctl_sched_rt_period,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_rt_handler,
	},
	{
		.procname = "sched_rt_runtime_us",
		.data = &sysctl_sched_rt_runtime,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sched_rt_handler,
	},
#ifdef CONFIG_SCHED_AUTOGROUP
	{
		.procname = "sched_autogroup_enabled",
		.data = &sysctl_sched_autogroup_enabled,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.procname = "sched_cfs_bandwidth_slice_us",
		.data = &sysctl_sched_cfs_bandwidth_slice,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &one,
	},
#endif
#ifdef CONFIG_PROVE_LOCKING
	{
		.procname = "prove_locking",
		.data = &prove_locking,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_LOCK_STAT
	{
		.procname = "lock_stat",
		.data = &lock_stat,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "panic",
		.data = &panic_timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "core_uses_pid",
		.data = &core_uses_pid,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "core_pattern",
		.data = core_pattern,
		.maxlen = CORENAME_MAX_SIZE,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
	{
		.procname = "core_pipe_limit",
		.data = &core_pipe_limit,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_PROC_SYSCTL
	{
		.procname = "tainted",
		.maxlen = sizeof(long),
		.mode = 0644,
		.proc_handler = proc_taint,
	},
#endif
#ifdef CONFIG_LATENCYTOP
	{
		.procname = "latencytop",
		.data = &latencytop_enabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	{
		.procname = "real-root-dev",
		.data = &real_root_dev,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "print-fatal-signals",
		.data = &print_fatal_signals,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_SPARC
	{
		.procname = "reboot-cmd",
		.data = reboot_command,
		.maxlen = 256,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
	{
		.procname = "stop-a",
		.data = &stop_a_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "scons-poweroff",
		.data = &scons_pwroff,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_SPARC64
	{
		.procname = "tsb-ratio",
		.data = &sysctl_tsb_ratio,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef __hppa__
	{
		.procname = "soft-power",
		.data = &pwrsw_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "unaligned-trap",
		.data = &unaligned_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "ctrl-alt-del",
		.data = &C_A_D,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_FUNCTION_TRACER
	{
		.procname = "ftrace_enabled",
		.data = &ftrace_enabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = ftrace_enable_sysctl,
	},
#endif
#ifdef CONFIG_STACK_TRACER
	{
		.procname = "stack_tracer_enabled",
		.data = &stack_tracer_enabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = stack_trace_sysctl,
	},
#endif
#ifdef CONFIG_TRACING
	{
		.procname = "ftrace_dump_on_oops",
		.data = &ftrace_dump_on_oops,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_MODULES
	{
		.procname = "modprobe",
		.data = &modprobe_path,
		.maxlen = KMOD_PATH_LEN,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
	{
		.procname = "modules_disabled",
		.data = &modules_disabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &one,
		.extra2 = &one,
	},
#endif
#ifdef CONFIG_HOTPLUG
	{
		.procname = "hotplug",
		.data = &uevent_helper,
		.maxlen = UEVENT_HELPER_PATH_LEN,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
#endif
#ifdef CONFIG_CHR_DEV_SG
	{
		.procname = "sg-big-buff",
		.data = &sg_big_buff,
		.maxlen = sizeof (int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
	{
		.procname = "acct",
		.data = &acct_parm,
		.maxlen = 3*sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	{
		.procname = "sysrq",
		.data = &__sysrq_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = sysrq_sysctl_handler,
	},
#endif
#ifdef CONFIG_PROC_SYSCTL
	{
		.procname = "cad_pid",
		.data = NULL,
		.maxlen = sizeof (int),
		.mode = 0600,
		.proc_handler = proc_do_cad_pid,
	},
#endif
	{
		.procname = "threads-max",
		.data = &max_threads,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "random",
		.mode = 0555,
		.child = random_table,
	},
	{
		.procname = "usermodehelper",
		.mode = 0555,
		.child = usermodehelper_table,
	},
	{
		.procname = "overflowuid",
		.data = &overflowuid,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &minolduid,
		.extra2 = &maxolduid,
	},
	{
		.procname = "overflowgid",
		.data = &overflowgid,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &minolduid,
		.extra2 = &maxolduid,
	},
#ifdef CONFIG_S390
#ifdef CONFIG_MATHEMU
	{
		.procname = "ieee_emulation_warnings",
		.data = &sysctl_ieee_emulation_warnings,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "userprocess_debug",
		.data = &show_unhandled_signals,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "pid_max",
		.data = &pid_max,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &pid_max_min,
		.extra2 = &pid_max_max,
	},
	{
		.procname = "panic_on_oops",
		.data = &panic_on_oops,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#if defined CONFIG_PRINTK
	{
		.procname = "printk",
		.data = &console_loglevel,
		.maxlen = 4*sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "printk_ratelimit",
		.data = &printk_ratelimit_state.interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "printk_ratelimit_burst",
		.data = &printk_ratelimit_state.burst,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "printk_delay",
		.data = &printk_delay_msec,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &ten_thousand,
	},
	{
		.procname = "dmesg_restrict",
		.data = &dmesg_restrict,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax_sysadmin,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "kptr_restrict",
		.data = &kptr_restrict,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax_sysadmin,
		.extra1 = &zero,
		.extra2 = &two,
	},
#endif
	{
		.procname = "ngroups_max",
		.data = &ngroups_max,
		.maxlen = sizeof (int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "cap_last_cap",
		.data = (void *)&cap_last_cap,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
#if defined(CONFIG_LOCKUP_DETECTOR)
	{
		.procname = "watchdog",
		.data = &watchdog_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dowatchdog,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "watchdog_thresh",
		.data = &watchdog_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dowatchdog,
		.extra1 = &neg_one,
		.extra2 = &sixty,
	},
	{
		.procname = "softlockup_panic",
		.data = &softlockup_panic,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		/* Historic alias: shares its state with "watchdog" above */
		.procname = "nmi_watchdog",
		.data = &watchdog_enabled,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dowatchdog,
		.extra1 = &zero,
		.extra2 = &one,
	},
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
	{
		.procname = "unknown_nmi_panic",
		.data = &unknown_nmi_panic,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#if defined(CONFIG_X86)
	{
		.procname = "panic_on_unrecovered_nmi",
		.data = &panic_on_unrecovered_nmi,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "panic_on_io_nmi",
		.data = &panic_on_io_nmi,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	{
		.procname = "panic_on_stackoverflow",
		.data = &sysctl_panic_on_stackoverflow,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "bootloader_type",
		.data = &bootloader_type,
		.maxlen = sizeof (int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "bootloader_version",
		.data = &bootloader_version,
		.maxlen = sizeof (int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "kstack_depth_to_print",
		.data = &kstack_depth_to_print,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "io_delay_type",
		.data = &io_delay_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#if defined(CONFIG_MMU)
	{
		.procname = "randomize_va_space",
		.data = &randomize_va_space,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
	{
		.procname = "spin_retry",
		.data = &spin_retry,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
	{
		.procname = "acpi_video_flags",
		.data = &acpi_realmode_flags,
		.maxlen = sizeof (unsigned long),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
	},
#endif
#ifdef CONFIG_IA64
	{
		.procname = "ignore-unaligned-usertrap",
		.data = &no_unaligned_warning,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "unaligned-dump-stack",
		.data = &unaligned_dump_stack,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	{
		.procname = "hung_task_panic",
		.data = &sysctl_hung_task_panic,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "hung_task_check_count",
		.data = &sysctl_hung_task_check_count,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
	},
	{
		.procname = "hung_task_timeout_secs",
		.data = &sysctl_hung_task_timeout_secs,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = proc_dohung_task_timeout_secs,
		.extra2 = &hung_task_timeout_max,
	},
	{
		.procname = "hung_task_warnings",
		.data = &sysctl_hung_task_warnings,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
	},
#endif
#ifdef CONFIG_COMPAT
	{
		.procname = "compat-log",
		.data = &compat_log,
		.maxlen = sizeof (int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_RT_MUTEXES
	{
		.procname = "max_lock_depth",
		.data = &max_lock_depth,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "poweroff_cmd",
		.data = &poweroff_cmd,
		.maxlen = POWEROFF_CMD_PATH_LEN,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
#ifdef CONFIG_KEYS
	{
		.procname = "keys",
		.mode = 0555,
		.child = key_sysctls,
	},
#endif
#ifdef CONFIG_RCU_TORTURE_TEST
	{
		.procname = "rcutorture_runnable",
		.data = &rcutorture_runnable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_PERF_EVENTS
	/*
	 * User-space scripts rely on the existence of this file
	 * as a feature check for perf_events being enabled.
	 *
	 * So it's an ABI, do not remove!
	 */
	{
		.procname = "perf_event_paranoid",
		.data = &sysctl_perf_event_paranoid,
		.maxlen = sizeof(sysctl_perf_event_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "perf_event_mlock_kb",
		.data = &sysctl_perf_event_mlock,
		.maxlen = sizeof(sysctl_perf_event_mlock),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "perf_event_max_sample_rate",
		.data = &sysctl_perf_event_sample_rate,
		.maxlen = sizeof(sysctl_perf_event_sample_rate),
		.mode = 0644,
		.proc_handler = perf_proc_update_handler,
	},
#endif
#ifdef CONFIG_KMEMCHECK
	{
		.procname = "kmemcheck",
		.data = &kmemcheck_enabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_BLOCK
	{
		.procname = "blk_iopoll",
		.data = &blk_iopoll_enabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
#ifdef CONFIG_ARM
	{
		.procname = "boot_reason",
		.data = &boot_reason,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
#endif
	/*
	 * NOTE: do not add new entries to this table unless you have read
	 * Documentation/sysctl/ctl_unnumbered.txt
	 */
	{ }
};
/*
 * /proc/sys/vm/...: virtual memory tunables (overcommit, dirty
 * writeback, OOM behaviour, hugepages, reclaim, compaction, ...).
 * Terminated by an empty sentinel entry.
 */
static struct ctl_table vm_table[] = {
	{
		.procname = "overcommit_memory",
		.data = &sysctl_overcommit_memory,
		.maxlen = sizeof(sysctl_overcommit_memory),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &two,
	},
	{
		.procname = "panic_on_oom",
		.data = &sysctl_panic_on_oom,
		.maxlen = sizeof(sysctl_panic_on_oom),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &two,
	},
	{
		.procname = "oom_kill_allocating_task",
		.data = &sysctl_oom_kill_allocating_task,
		.maxlen = sizeof(sysctl_oom_kill_allocating_task),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "oom_dump_tasks",
		.data = &sysctl_oom_dump_tasks,
		.maxlen = sizeof(sysctl_oom_dump_tasks),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "overcommit_ratio",
		.data = &sysctl_overcommit_ratio,
		.maxlen = sizeof(sysctl_overcommit_ratio),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "page-cluster",
		.data = &page_cluster,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
	{
		.procname = "dirty_background_ratio",
		.data = &dirty_background_ratio,
		.maxlen = sizeof(dirty_background_ratio),
		.mode = 0644,
		.proc_handler = dirty_background_ratio_handler,
		.extra1 = &zero,
		.extra2 = &one_hundred,
	},
	{
		.procname = "dirty_background_bytes",
		.data = &dirty_background_bytes,
		.maxlen = sizeof(dirty_background_bytes),
		.mode = 0644,
		.proc_handler = dirty_background_bytes_handler,
		.extra1 = &one_ul,
	},
	{
		.procname = "dirty_ratio",
		.data = &vm_dirty_ratio,
		.maxlen = sizeof(vm_dirty_ratio),
		.mode = 0644,
		.proc_handler = dirty_ratio_handler,
		.extra1 = &zero,
		.extra2 = &one_hundred,
	},
	{
		.procname = "dirty_bytes",
		.data = &vm_dirty_bytes,
		.maxlen = sizeof(vm_dirty_bytes),
		.mode = 0644,
		.proc_handler = dirty_bytes_handler,
		.extra1 = &dirty_bytes_min,
	},
	{
		.procname = "dirty_writeback_centisecs",
		.data = &dirty_writeback_interval,
		.maxlen = sizeof(dirty_writeback_interval),
		.mode = 0644,
		.proc_handler = dirty_writeback_centisecs_handler,
	},
	{
		.procname = "dirty_expire_centisecs",
		.data = &dirty_expire_interval,
		.maxlen = sizeof(dirty_expire_interval),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
	{
		.procname = "nr_pdflush_threads",
		.data = &nr_pdflush_threads,
		.maxlen = sizeof nr_pdflush_threads,
		.mode = 0444 /* read-only*/,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "swappiness",
		.data = &vm_swappiness,
		.maxlen = sizeof(vm_swappiness),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one_hundred,
	},
#ifdef CONFIG_HUGETLB_PAGE
	{
		.procname = "nr_hugepages",
		.data = NULL,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = hugetlb_sysctl_handler,
		.extra1 = (void *)&hugetlb_zero,
		.extra2 = (void *)&hugetlb_infinity,
	},
#ifdef CONFIG_NUMA
	{
		.procname = "nr_hugepages_mempolicy",
		.data = NULL,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &hugetlb_mempolicy_sysctl_handler,
		.extra1 = (void *)&hugetlb_zero,
		.extra2 = (void *)&hugetlb_infinity,
	},
#endif
	{
		.procname = "hugetlb_shm_group",
		.data = &sysctl_hugetlb_shm_group,
		.maxlen = sizeof(gid_t),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "hugepages_treat_as_movable",
		.data = &hugepages_treat_as_movable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = hugetlb_treat_movable_handler,
	},
	{
		.procname = "nr_overcommit_hugepages",
		.data = NULL,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = hugetlb_overcommit_handler,
		.extra1 = (void *)&hugetlb_zero,
		.extra2 = (void *)&hugetlb_infinity,
	},
#endif
	{
		.procname = "lowmem_reserve_ratio",
		.data = &sysctl_lowmem_reserve_ratio,
		.maxlen = sizeof(sysctl_lowmem_reserve_ratio),
		.mode = 0644,
		.proc_handler = lowmem_reserve_ratio_sysctl_handler,
	},
	{
		.procname = "drop_caches",
		.data = &sysctl_drop_caches,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = drop_caches_sysctl_handler,
		.extra1 = &one,
		.extra2 = &three,
	},
#ifdef CONFIG_COMPACTION
	{
		.procname = "compact_memory",
		.data = &sysctl_compact_memory,
		.maxlen = sizeof(int),
		.mode = 0200,
		.proc_handler = sysctl_compaction_handler,
	},
	{
		.procname = "extfrag_threshold",
		.data = &sysctl_extfrag_threshold,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sysctl_extfrag_handler,
		.extra1 = &min_extfrag_threshold,
		.extra2 = &max_extfrag_threshold,
	},
#endif /* CONFIG_COMPACTION */
	{
		.procname = "min_free_kbytes",
		.data = &min_free_kbytes,
		.maxlen = sizeof(min_free_kbytes),
		.mode = 0644,
		.proc_handler = min_free_kbytes_sysctl_handler,
		.extra1 = &zero,
	},
	{
		.procname = "min_free_order_shift",
		.data = &min_free_order_shift,
		.maxlen = sizeof(min_free_order_shift),
		.mode = 0644,
		.proc_handler = &proc_dointvec
	},
	{
		.procname = "percpu_pagelist_fraction",
		.data = &percpu_pagelist_fraction,
		.maxlen = sizeof(percpu_pagelist_fraction),
		.mode = 0644,
		.proc_handler = percpu_pagelist_fraction_sysctl_handler,
		.extra1 = &min_percpu_pagelist_fract,
	},
#ifdef CONFIG_MMU
	{
		.procname = "max_map_count",
		.data = &sysctl_max_map_count,
		.maxlen = sizeof(sysctl_max_map_count),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
#else
	{
		.procname = "nr_trim_pages",
		.data = &sysctl_nr_trim_pages,
		.maxlen = sizeof(sysctl_nr_trim_pages),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
#endif
	{
		.procname = "laptop_mode",
		.data = &laptop_mode,
		.maxlen = sizeof(laptop_mode),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "block_dump",
		.data = &block_dump,
		.maxlen = sizeof(block_dump),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &zero,
	},
	{
		.procname = "vfs_cache_pressure",
		.data = &sysctl_vfs_cache_pressure,
		.maxlen = sizeof(sysctl_vfs_cache_pressure),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &zero,
	},
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
	{
		.procname = "legacy_va_layout",
		.data = &sysctl_legacy_va_layout,
		.maxlen = sizeof(sysctl_legacy_va_layout),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &zero,
	},
#endif
#ifdef CONFIG_NUMA
	{
		.procname = "zone_reclaim_mode",
		.data = &zone_reclaim_mode,
		.maxlen = sizeof(zone_reclaim_mode),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &zero,
	},
	{
		.procname = "min_unmapped_ratio",
		.data = &sysctl_min_unmapped_ratio,
		.maxlen = sizeof(sysctl_min_unmapped_ratio),
		.mode = 0644,
		.proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
		.extra1 = &zero,
		.extra2 = &one_hundred,
	},
	{
		.procname = "min_slab_ratio",
		.data = &sysctl_min_slab_ratio,
		.maxlen = sizeof(sysctl_min_slab_ratio),
		.mode = 0644,
		.proc_handler = sysctl_min_slab_ratio_sysctl_handler,
		.extra1 = &zero,
		.extra2 = &one_hundred,
	},
#endif
#ifdef CONFIG_SMP
	{
		.procname = "stat_interval",
		.data = &sysctl_stat_interval,
		.maxlen = sizeof(sysctl_stat_interval),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
#endif
#ifdef CONFIG_MMU
	{
		.procname = "mmap_min_addr",
		.data = &dac_mmap_min_addr,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = mmap_min_addr_handler,
	},
#endif
#ifdef CONFIG_NUMA
	{
		.procname = "numa_zonelist_order",
		.data = &numa_zonelist_order,
		.maxlen = NUMA_ZONELIST_ORDER_LEN,
		.mode = 0644,
		.proc_handler = numa_zonelist_order_handler,
	},
#endif
#if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \
   (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
	{
		.procname = "vdso_enabled",
		.data = &vdso_enabled,
		.maxlen = sizeof(vdso_enabled),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.extra1 = &zero,
	},
#endif
#ifdef CONFIG_HIGHMEM
	{
		.procname = "highmem_is_dirtyable",
		.data = &vm_highmem_is_dirtyable,
		.maxlen = sizeof(vm_highmem_is_dirtyable),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
#endif
	{
		.procname = "scan_unevictable_pages",
		.data = &scan_unevictable_pages,
		.maxlen = sizeof(scan_unevictable_pages),
		.mode = 0644,
		.proc_handler = scan_unevictable_handler,
	},
#ifdef CONFIG_MEMORY_FAILURE
	{
		.procname = "memory_failure_early_kill",
		.data = &sysctl_memory_failure_early_kill,
		.maxlen = sizeof(sysctl_memory_failure_early_kill),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "memory_failure_recovery",
		.data = &sysctl_memory_failure_recovery,
		.maxlen = sizeof(sysctl_memory_failure_recovery),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
#endif
	{ }	/* sentinel */
};
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
/*
 * Placeholder for /proc/sys/fs/binfmt_misc: kept empty here, the
 * binfmt_misc entries are registered elsewhere at runtime.
 */
static struct ctl_table binfmt_misc_table[] = {
	{ }	/* sentinel */
};
#endif
/* /proc/sys/fs/ entries.  Read-only stat tables use mode 0444; tunables
 * use 0644.  Entries guarded by #ifdef only exist when the corresponding
 * subsystem is configured in.  The list is terminated by the empty entry. */
static struct ctl_table fs_table[] = {
	{
		.procname	= "inode-nr",
		.data		= &inodes_stat,
		.maxlen		= 2*sizeof(int),	/* only nr_inodes + nr_unused */
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "inode-state",
		.data		= &inodes_stat,
		.maxlen		= 7*sizeof(int),	/* full inodes_stat_t */
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "file-nr",
		.data		= &files_stat,
		.maxlen		= sizeof(files_stat),
		.mode		= 0444,
		.proc_handler	= proc_nr_files,
	},
	{
		.procname	= "file-max",
		.data		= &files_stat.max_files,
		.maxlen		= sizeof(files_stat.max_files),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nr_open",
		.data		= &sysctl_nr_open,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &sysctl_nr_open_min,
		.extra2		= &sysctl_nr_open_max,
	},
	{
		.procname	= "dentry-state",
		.data		= &dentry_stat,
		.maxlen		= 6*sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_nr_dentry,
	},
	{
		.procname	= "overflowuid",
		.data		= &fs_overflowuid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &minolduid,
		.extra2		= &maxolduid,
	},
	{
		.procname	= "overflowgid",
		.data		= &fs_overflowgid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &minolduid,
		.extra2		= &maxolduid,
	},
#ifdef CONFIG_FILE_LOCKING
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#ifdef CONFIG_DNOTIFY
	{
		.procname	= "dir-notify-enable",
		.data		= &dir_notify_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#ifdef CONFIG_MMU
#ifdef CONFIG_FILE_LOCKING
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#ifdef CONFIG_AIO
	{
		.procname	= "aio-nr",
		.data		= &aio_nr,
		.maxlen		= sizeof(aio_nr),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,
		.maxlen		= sizeof(aio_max_nr),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif /* CONFIG_AIO */
#ifdef CONFIG_INOTIFY_USER
	{
		.procname	= "inotify",
		.mode		= 0555,		/* directory node */
		.child		= inotify_table,
	},
#endif
#ifdef CONFIG_EPOLL
	{
		.procname	= "epoll",
		.mode		= 0555,		/* directory node */
		.child		= epoll_table,
	},
#endif
#endif
	{
		.procname	= "suid_dumpable",
		.data		= &suid_dumpable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &two,
	},
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
	{
		.procname	= "binfmt_misc",
		.mode		= 0555,		/* directory node */
		.child		= binfmt_misc_table,
	},
#endif
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &pipe_proc_fn,
		.extra1		= &pipe_min_size,
	},
	{ }
};
/* /proc/sys/debug/ entries; all are arch- or feature-conditional. */
static struct ctl_table debug_table[] = {
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
    defined(CONFIG_S390) || defined(CONFIG_TILE)
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
#if defined(CONFIG_OPTPROBES)
	{
		.procname	= "kprobes-optimization",
		.data		= &sysctl_kprobes_optimization,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_kprobes_optimization_handler,
		.extra1		= &zero,
		.extra2		= &one,
	},
#endif
	{ }
};
/* /proc/sys/dev/ root; device drivers attach their own subtables here. */
static struct ctl_table dev_table[] = {
	{ }
};
/* Register the top-level sysctl tree with the core; called once at boot. */
int __init sysctl_init(void)
{
	register_sysctl_table(sysctl_base_table);
	return 0;
}
#endif /* CONFIG_SYSCTL */
/*
* /proc/sys support
*/
#ifdef CONFIG_PROC_SYSCTL
/*
 * Shared worker for proc_dostring(): moves a C string between the kernel
 * buffer @data (capacity @maxlen bytes) and the user buffer.
 *
 * Write: consumes up to the first NUL or newline, truncates to
 * @maxlen - 1 and NUL-terminates @data; *ppos advances by the whole
 * user length so repeated writes don't splice.
 * Read: copies the string starting at *ppos, appends '\n' if it fits,
 * and updates *lenp/*ppos with the amount actually copied.
 */
static int _proc_do_string(void* data, int maxlen, int write,
			   void __user *buffer,
			   size_t *lenp, loff_t *ppos)
{
	size_t len;
	char __user *p;
	char c;

	if (!data || !maxlen || !*lenp) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = 0;
		p = buffer;
		/* find the end of the incoming string (NUL or newline) */
		while (len < *lenp) {
			if (get_user(c, p++))
				return -EFAULT;
			if (c == 0 || c == '\n')
				break;
			len++;
		}
		if (len >= maxlen)
			len = maxlen-1;		/* leave room for the NUL */
		if(copy_from_user(data, buffer, len))
			return -EFAULT;
		((char *) data)[len] = 0;
		*ppos += *lenp;
	} else {
		len = strlen(data);
		if (len > maxlen)
			len = maxlen;

		/* reading past the end of the string: EOF */
		if (*ppos > len) {
			*lenp = 0;
			return 0;
		}

		data += *ppos;
		len -= *ppos;

		if (len > *lenp)
			len = *lenp;
		if (len)
			if(copy_to_user(buffer, data, len))
				return -EFAULT;
		if (len < *lenp) {
			/* there is room left: append a newline */
			if(put_user('\n', ((char __user *) buffer) + len))
				return -EFAULT;
			len++;
		}
		*lenp = len;
		*ppos += len;
	}
	return 0;
}
/**
 * proc_dostring - read a string sysctl
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes a string from/to the user buffer. If the kernel
 * buffer provided is not large enough to hold the string, the
 * string is truncated. The copied string is %NULL-terminated.
 * If the string is being read by the user process, it is copied
 * and a newline '\n' is added. It is truncated if the buffer is
 * not large enough.
 *
 * Returns 0 on success.
 */
int proc_dostring(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* thin wrapper: all work happens in _proc_do_string() */
	return _proc_do_string(table->data, table->maxlen, write,
			       buffer, lenp, ppos);
}
/*
 * Advance *buf past leading whitespace and return how many
 * characters were skipped.
 */
static size_t proc_skip_spaces(char **buf)
{
	char *start = *buf;

	*buf = skip_spaces(start);
	return *buf - start;
}
/*
 * Consume a leading run of the character @v from the buffer,
 * advancing *buf and shrinking *size accordingly.
 */
static void proc_skip_char(char **buf, size_t *size, const char v)
{
	while (*size && **buf == v) {
		(*buf)++;
		(*size)--;
	}
}
/* 22 bytes: enough for a 64-bit value in decimal, a sign, and the NUL */
#define TMPBUFLEN 22
/**
 * proc_get_long - reads an ASCII formatted integer from a user buffer
 *
 * @buf: a kernel buffer
 * @size: size of the kernel buffer
 * @val: this is where the number will be stored
 * @neg: set to %TRUE if number is negative
 * @perm_tr: a vector which contains the allowed trailers
 * @perm_tr_len: size of the perm_tr vector
 * @tr: pointer to store the trailer character
 *
 * In case of success %0 is returned and @buf and @size are updated with
 * the amount of bytes read. If @tr is non-NULL and a trailing
 * character exists (size is non-zero after returning from this
 * function), @tr is updated with the trailing character.
 */
static int proc_get_long(char **buf, size_t *size,
			  unsigned long *val, bool *neg,
			  const char *perm_tr, unsigned perm_tr_len, char *tr)
{
	int len;
	char *p, tmp[TMPBUFLEN];

	if (!*size)
		return -EINVAL;

	/* work on a bounded, NUL-terminated copy of the input */
	len = *size;
	if (len > TMPBUFLEN - 1)
		len = TMPBUFLEN - 1;

	memcpy(tmp, *buf, len);

	tmp[len] = 0;
	p = tmp;
	/* a lone '-' is not a number, hence the *size > 1 check */
	if (*p == '-' && *size > 1) {
		*neg = true;
		p++;
	} else
		*neg = false;
	if (!isdigit(*p))
		return -EINVAL;

	*val = simple_strtoul(p, &p, 0);

	len = p - tmp;

	/* We don't know if the next char is whitespace thus we may accept
	 * invalid integers (e.g. 1234...a) or two integers instead of one
	 * (e.g. 123...1). So lets not allow such large numbers. */
	if (len == TMPBUFLEN - 1)
		return -EINVAL;

	/* the digit run must be followed by an allowed trailer, if any */
	if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len))
		return -EINVAL;

	if (tr && (len < *size))
		*tr = *p;

	*buf += len;
	*size -= len;

	return 0;
}
/**
 * proc_put_long - converts an integer to a decimal ASCII formatted string
 *
 * @buf: the user buffer
 * @size: the size of the user buffer
 * @val: the integer to be converted
 * @neg: sign of the number, %TRUE for negative
 *
 * In case of success %0 is returned and @buf and @size are updated with
 * the amount of bytes written.
 */
static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
			 bool neg)
{
	char tmp[TMPBUFLEN];
	int len;

	/* format into a local buffer first; TMPBUFLEN always suffices */
	sprintf(tmp, "%s%lu", neg ? "-" : "", val);
	len = strlen(tmp);
	if (len > *size)
		len = *size;	/* truncate to what the user buffer can hold */
	if (copy_to_user(*buf, tmp, len))
		return -EFAULT;
	*size -= len;
	*buf += len;
	return 0;
}
#undef TMPBUFLEN
/*
 * Write a single character to the user buffer, if there is room,
 * advancing the buffer pointer and decrementing the remaining size.
 */
static int proc_put_char(void __user **buf, size_t *size, char c)
{
	if (*size) {
		char __user *p = *buf;

		if (put_user(c, p))
			return -EFAULT;
		*size -= 1;
		*buf = p + 1;
	}
	return 0;
}
/*
 * Default conversion between the parsed (sign, unsigned magnitude) pair
 * and an int sysctl slot.
 *
 * Write: store the signed value into *valp.
 * Read: split *valp into sign and magnitude for printing.
 *
 * Fix: the read path used (unsigned long)-val, which negates in signed
 * arithmetic first — undefined behavior when val == INT_MIN.  Convert to
 * unsigned long *before* negating so the magnitude is computed safely.
 */
static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
				 int *valp,
				 int write, void *data)
{
	if (write) {
		*valp = *negp ? -*lvalp : *lvalp;
	} else {
		int val = *valp;
		if (val < 0) {
			*negp = true;
			*lvalp = -(unsigned long)val;	/* safe for INT_MIN */
		} else {
			*negp = false;
			*lvalp = (unsigned long)val;
		}
	}
	return 0;
}
/* whitespace characters accepted as separators between parsed values */
static const char proc_wspace_sep[] = { ' ', '\t', '\n' };

/*
 * Core worker for the proc_dointvec() family.  Parses (write) or prints
 * (read) up to table->maxlen / sizeof(int) integers, using @conv to
 * translate between the textual (sign, magnitude) form and the stored
 * int; do_proc_dointvec_conv() is the identity conversion default.
 */
static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
		  int write, void __user *buffer,
		  size_t *lenp, loff_t *ppos,
		  int (*conv)(bool *negp, unsigned long *lvalp, int *valp,
			      int write, void *data),
		  void *data)
{
	int *i, vleft, first = 1, err = 0;
	unsigned long page = 0;
	size_t left;
	char *kbuf;

	/* nothing to do, or a read continuation past the first call */
	if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	i = (int *) tbl_data;
	vleft = table->maxlen / sizeof(*i);
	left = *lenp;

	if (!conv)
		conv = do_proc_dointvec_conv;

	if (write) {
		/* bounce the user data through one kernel page */
		if (left > PAGE_SIZE - 1)
			left = PAGE_SIZE - 1;
		page = __get_free_page(GFP_TEMPORARY);
		kbuf = (char *) page;
		if (!kbuf)
			return -ENOMEM;
		if (copy_from_user(kbuf, buffer, left)) {
			err = -EFAULT;
			goto free;
		}
		kbuf[left] = 0;
	}

	for (; left && vleft--; i++, first=0) {
		unsigned long lval;
		bool neg;

		if (write) {
			left -= proc_skip_spaces(&kbuf);

			if (!left)
				break;	/* trailing whitespace only */
			err = proc_get_long(&kbuf, &left, &lval, &neg,
					     proc_wspace_sep,
					     sizeof(proc_wspace_sep), NULL);
			if (err)
				break;
			if (conv(&neg, &lval, i, 1, data)) {
				err = -EINVAL;
				break;
			}
		} else {
			if (conv(&neg, &lval, i, 0, data)) {
				err = -EINVAL;
				break;
			}
			if (!first)
				err = proc_put_char(&buffer, &left, '\t');
			if (err)
				break;
			err = proc_put_long(&buffer, &left, lval, neg);
			if (err)
				break;
		}
	}

	if (!write && !first && left && !err)
		err = proc_put_char(&buffer, &left, '\n');
	if (write && !err && left)
		left -= proc_skip_spaces(&kbuf);
free:
	if (write) {
		free_page(page);
		/* a write that produced no value at all is an error */
		if (first)
			return err ? : -EINVAL;
	}
	*lenp -= left;
	*ppos += *lenp;
	return err;
}
/* Convenience wrapper: feed table->data to the core int-vector worker. */
static int do_proc_dointvec(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos,
		  int (*conv)(bool *negp, unsigned long *lvalp, int *valp,
			      int write, void *data),
		  void *data)
{
	return __do_proc_dointvec(table->data, table, write,
			buffer, lenp, ppos, conv, data);
}
/**
 * proc_dointvec - read a vector of integers
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
 * values from/to the user buffer, treated as an ASCII string.
 *
 * Returns 0 on success.
 */
int proc_dointvec(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* NULL conv selects the default 1:1 int conversion */
    return do_proc_dointvec(table,write,buffer,lenp,ppos,
		    	    NULL,NULL);
}
/*
 * Taint values can only be increased
 * This means we can safely use a temporary.
 */
static int proc_taint(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long tmptaint = get_taint();
	int err;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* run the generic handler against a local copy of the taint mask */
	t = *table;
	t.data = &tmptaint;
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	if (write) {
		/*
		 * Poor man's atomic or. Not worth adding a primitive
		 * to everyone's atomic.h for this
		 */
		int i;
		for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
			if ((tmptaint >> i) & 1)
				add_taint(i);	/* set each requested taint bit */
		}
	}

	return err;
}
#ifdef CONFIG_PRINTK
/* Like proc_dointvec_minmax(), but writes require CAP_SYS_ADMIN. */
static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/* Optional bounds passed through to do_proc_dointvec_minmax_conv(). */
struct do_proc_dointvec_minmax_conv_param {
	int *min;	/* inclusive lower bound, or NULL for none */
	int *max;	/* inclusive upper bound, or NULL for none */
};

/*
 * Conversion used by proc_dointvec_minmax(): identical to the default
 * int conversion, but rejects written values outside [*min, *max].
 *
 * Fix: the read path negated the int before widening —
 * (unsigned long)-val — which is undefined behavior for INT_MIN.
 * Widen to unsigned long first, then negate.
 */
static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
					int *valp,
					int write, void *data)
{
	struct do_proc_dointvec_minmax_conv_param *param = data;
	if (write) {
		int val = *negp ? -*lvalp : *lvalp;
		if ((param->min && *param->min > val) ||
		    (param->max && *param->max < val))
			return -EINVAL;	/* out of the configured range */
		*valp = val;
	} else {
		int val = *valp;
		if (val < 0) {
			*negp = true;
			*lvalp = -(unsigned long)val;	/* safe for INT_MIN */
		} else {
			*negp = false;
			*lvalp = (unsigned long)val;
		}
	}
	return 0;
}
/**
 * proc_dointvec_minmax - read a vector of integers with min/max values
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
 * values from/to the user buffer, treated as an ASCII string.
 *
 * This routine will ensure the values are within the range specified by
 * table->extra1 (min) and table->extra2 (max).
 *
 * Returns 0 on success.
 */
int proc_dointvec_minmax(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct do_proc_dointvec_minmax_conv_param param = {
		.min = (int *) table->extra1,
		.max = (int *) table->extra2,
	};
	/* fix: "&param" had been corrupted to the mojibake "¶m" */
	return do_proc_dointvec(table, write, buffer, lenp, ppos,
				do_proc_dointvec_minmax_conv, &param);
}
/*
 * Core worker for the proc_doulongvec_minmax() family.  Parses (write)
 * or prints (read) up to table->maxlen / sizeof(unsigned long) values,
 * scaling by convdiv/convmul (used for ms <-> jiffies conversion).
 * Written values outside [*min, *max] or negative values are silently
 * skipped (the slot keeps its previous contents).
 *
 * Fixes relative to the previous version:
 *  - read path: the error from proc_put_char() was overwritten by the
 *    following proc_put_long() without being checked;
 *  - write path: missing the "!left" guard after skipping spaces (which
 *    __do_proc_dointvec has), so trailing whitespace on a multi-value
 *    table made proc_get_long() fail with a spurious -EINVAL.
 */
static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
				       void __user *buffer,
				       size_t *lenp, loff_t *ppos,
				       unsigned long convmul,
				       unsigned long convdiv)
{
	unsigned long *i, *min, *max;
	int vleft, first = 1, err = 0;
	unsigned long page = 0;
	size_t left;
	char *kbuf;

	/* nothing to do, or a read continuation past the first call */
	if (!data || !table->maxlen || !*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	i = (unsigned long *) data;
	min = (unsigned long *) table->extra1;
	max = (unsigned long *) table->extra2;
	vleft = table->maxlen / sizeof(unsigned long);
	left = *lenp;

	if (write) {
		/* bounce the user data through one kernel page */
		if (left > PAGE_SIZE - 1)
			left = PAGE_SIZE - 1;
		page = __get_free_page(GFP_TEMPORARY);
		kbuf = (char *) page;
		if (!kbuf)
			return -ENOMEM;
		if (copy_from_user(kbuf, buffer, left)) {
			err = -EFAULT;
			goto free;
		}
		kbuf[left] = 0;
	}

	for (; left && vleft--; i++, first = 0) {
		unsigned long val;

		if (write) {
			bool neg;

			left -= proc_skip_spaces(&kbuf);
			if (!left)
				break;	/* only trailing whitespace remained */

			err = proc_get_long(&kbuf, &left, &val, &neg,
					     proc_wspace_sep,
					     sizeof(proc_wspace_sep), NULL);
			if (err)
				break;
			if (neg)
				continue;	/* negative: skip this slot */
			if ((min && val < *min) || (max && val > *max))
				continue;	/* out of range: skip this slot */
			*i = val;
		} else {
			val = convdiv * (*i) / convmul;
			if (!first) {
				err = proc_put_char(&buffer, &left, '\t');
				if (err)
					break;	/* was previously ignored */
			}
			err = proc_put_long(&buffer, &left, val, false);
			if (err)
				break;
		}
	}

	if (!write && !first && left && !err)
		err = proc_put_char(&buffer, &left, '\n');
	if (write && !err)
		left -= proc_skip_spaces(&kbuf);
free:
	if (write) {
		free_page(page);
		/* a write that stored no value at all is an error */
		if (first)
			return err ? : -EINVAL;
	}
	*lenp -= left;
	*ppos += *lenp;
	return err;
}
/* Convenience wrapper: feed table->data to the unsigned-long worker. */
static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos,
				     unsigned long convmul,
				     unsigned long convdiv)
{
	return __do_proc_doulongvec_minmax(table->data, table, write,
			buffer, lenp, ppos, convmul, convdiv);
}
/**
 * proc_doulongvec_minmax - read a vector of long integers with min/max values
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long
 * values from/to the user buffer, treated as an ASCII string.
 *
 * This routine will ensure the values are within the range specified by
 * table->extra1 (min) and table->extra2 (max).
 *
 * Returns 0 on success.
 */
int proc_doulongvec_minmax(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
    /* conversion factors of 1/1: values are stored verbatim */
    return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l);
}
/**
 * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long
 * values from/to the user buffer, treated as an ASCII string. The values
 * are treated as milliseconds, and converted to jiffies when they are stored.
 *
 * This routine will ensure the values are within the range specified by
 * table->extra1 (min) and table->extra2 (max).
 *
 * Returns 0 on success.
 */
int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
				      void __user *buffer,
				      size_t *lenp, loff_t *ppos)
{
    /* scale by HZ/1000: user-visible milliseconds, stored jiffies */
    return do_proc_doulongvec_minmax(table, write, buffer,
				     lenp, ppos, HZ, 1000l);
}
/*
 * Conversion for proc_dointvec_jiffies(): user-visible seconds,
 * stored jiffies.  Returning non-zero from the write path makes the
 * caller reject the value with -EINVAL.
 *
 * Fix: the read path computed (unsigned long)-val, which negates in
 * signed arithmetic first — undefined behavior for INT_MIN.  Widen to
 * unsigned long before negating.
 */
static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,
					 int *valp,
					 int write, void *data)
{
	if (write) {
		if (*lvalp > LONG_MAX / HZ)
			return 1;	/* would overflow when scaled */
		*valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ);
	} else {
		int val = *valp;
		unsigned long lval;
		if (val < 0) {
			*negp = true;
			lval = -(unsigned long)val;	/* safe for INT_MIN */
		} else {
			*negp = false;
			lval = (unsigned long)val;
		}
		*lvalp = lval / HZ;
	}
	return 0;
}
/*
 * Conversion for proc_dointvec_userhz_jiffies(): user-visible
 * 1/USER_HZ ticks, stored jiffies.
 *
 * Fix: the read path computed (unsigned long)-val — undefined behavior
 * for INT_MIN.  Widen to unsigned long before negating.
 */
static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp,
						int *valp,
						int write, void *data)
{
	if (write) {
		if (USER_HZ < HZ && *lvalp > (LONG_MAX / HZ) * USER_HZ)
			return 1;	/* would overflow when scaled */
		*valp = clock_t_to_jiffies(*negp ? -*lvalp : *lvalp);
	} else {
		int val = *valp;
		unsigned long lval;
		if (val < 0) {
			*negp = true;
			lval = -(unsigned long)val;	/* safe for INT_MIN */
		} else {
			*negp = false;
			lval = (unsigned long)val;
		}
		*lvalp = jiffies_to_clock_t(lval);
	}
	return 0;
}
/*
 * Conversion for proc_dointvec_ms_jiffies(): user-visible milliseconds,
 * stored jiffies.
 *
 * Fix: the read path computed (unsigned long)-val — undefined behavior
 * for INT_MIN.  Widen to unsigned long before negating.
 */
static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
					    int *valp,
					    int write, void *data)
{
	if (write) {
		*valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
	} else {
		int val = *valp;
		unsigned long lval;
		if (val < 0) {
			*negp = true;
			lval = -(unsigned long)val;	/* safe for INT_MIN */
		} else {
			*negp = false;
			lval = (unsigned long)val;
		}
		*lvalp = jiffies_to_msecs(lval);
	}
	return 0;
}
/**
 * proc_dointvec_jiffies - read a vector of integers as seconds
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
 * values from/to the user buffer, treated as an ASCII string.
 * The values read are assumed to be in seconds, and are converted into
 * jiffies.
 *
 * Returns 0 on success.
 */
int proc_dointvec_jiffies(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
    return do_proc_dointvec(table,write,buffer,lenp,ppos,
		    	    do_proc_dointvec_jiffies_conv,NULL);
}
/**
 * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: pointer to the file position
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
 * values from/to the user buffer, treated as an ASCII string.
 * The values read are assumed to be in 1/USER_HZ seconds, and
 * are converted into jiffies.
 *
 * Returns 0 on success.
 */
int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
    return do_proc_dointvec(table,write,buffer,lenp,ppos,
		    	    do_proc_dointvec_userhz_jiffies_conv,NULL);
}
/**
 * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: the current position in the file
 *
 * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
 * values from/to the user buffer, treated as an ASCII string.
 * The values read are assumed to be in 1/1000 seconds, and
 * are converted into jiffies.
 *
 * Returns 0 on success.
 */
int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_dointvec(table, write, buffer, lenp, ppos,
				do_proc_dointvec_ms_jiffies_conv, NULL);
}
/*
 * Handler for /proc/sys/kernel/cad_pid: exposes the Ctrl-Alt-Del target
 * process as a pid number in the caller's namespace.  On write, looks up
 * the new pid and atomically swaps it into cad_pid, dropping the
 * reference on the old one.
 */
static int proc_do_cad_pid(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid *new_pid;
	pid_t tmp;
	int r;

	tmp = pid_vnr(cad_pid);

	/* parse/print the pid via the generic int-vector worker */
	r = __do_proc_dointvec(&tmp, table, write, buffer,
			       lenp, ppos, NULL, NULL);
	if (r || !write)
		return r;

	new_pid = find_get_pid(tmp);
	if (!new_pid)
		return -ESRCH;	/* no such process */

	put_pid(xchg(&cad_pid, new_pid));
	return 0;
}
/**
 * proc_do_large_bitmap - read/write from/to a large bitmap
 * @table: the sysctl table
 * @write: %TRUE if this is a write to the sysctl file
 * @buffer: the user buffer
 * @lenp: the size of the user buffer
 * @ppos: file position
 *
 * The bitmap is stored at table->data and the bitmap length (in bits)
 * in table->maxlen.
 *
 * We use a range comma separated format (e.g. 1,3-4,10-10) so that
 * large bitmaps may be represented in a compact manner. Writing into
 * the file will clear the bitmap then update it with the given input.
 *
 * Returns 0 on success.
 */
int proc_do_large_bitmap(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err = 0;
	bool first = 1;
	size_t left = *lenp;
	unsigned long bitmap_len = table->maxlen;
	unsigned long *bitmap = (unsigned long *) table->data;
	unsigned long *tmp_bitmap = NULL;
	/* tr_a: allowed after a range start; tr_b: allowed after a range end */
	char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;

	if (!bitmap_len || !left || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		unsigned long page = 0;
		char *kbuf;

		/* bounce the user data through one kernel page */
		if (left > PAGE_SIZE - 1)
			left = PAGE_SIZE - 1;

		page = __get_free_page(GFP_TEMPORARY);
		kbuf = (char *) page;
		if (!kbuf)
			return -ENOMEM;
		if (copy_from_user(kbuf, buffer, left)) {
			free_page(page);
			return -EFAULT;
		}
		kbuf[left] = 0;

		/* build the new state in a scratch bitmap first */
		tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long),
				     GFP_KERNEL);
		if (!tmp_bitmap) {
			free_page(page);
			return -ENOMEM;
		}
		proc_skip_char(&kbuf, &left, '\n');
		while (!err && left) {
			unsigned long val_a, val_b;
			bool neg;

			/* parse "A" or the "A" part of "A-B" */
			err = proc_get_long(&kbuf, &left, &val_a, &neg, tr_a,
					     sizeof(tr_a), &c);
			if (err)
				break;
			if (val_a >= bitmap_len || neg) {
				err = -EINVAL;
				break;
			}

			val_b = val_a;
			if (left) {
				kbuf++;		/* consume the trailer char */
				left--;
			}

			if (c == '-') {
				/* parse the "B" part of a range */
				err = proc_get_long(&kbuf, &left, &val_b,
						     &neg, tr_b, sizeof(tr_b),
						     &c);
				if (err)
					break;
				if (val_b >= bitmap_len || neg ||
				    val_a > val_b) {
					err = -EINVAL;
					break;
				}
				if (left) {
					kbuf++;
					left--;
				}
			}

			bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1);
			first = 0;
			proc_skip_char(&kbuf, &left, '\n');
		}
		free_page(page);
	} else {
		/* print runs of set bits as "A" or "A-B", comma separated */
		unsigned long bit_a, bit_b = 0;

		while (left) {
			bit_a = find_next_bit(bitmap, bitmap_len, bit_b);
			if (bit_a >= bitmap_len)
				break;
			bit_b = find_next_zero_bit(bitmap, bitmap_len,
						   bit_a + 1) - 1;

			if (!first) {
				err = proc_put_char(&buffer, &left, ',');
				if (err)
					break;
			}
			err = proc_put_long(&buffer, &left, bit_a, false);
			if (err)
				break;
			if (bit_a != bit_b) {
				err = proc_put_char(&buffer, &left, '-');
				if (err)
					break;
				err = proc_put_long(&buffer, &left, bit_b, false);
				if (err)
					break;
			}

			first = 0; bit_b++;
		}
		if (!err)
			err = proc_put_char(&buffer, &left, '\n');
	}

	if (!err) {
		if (write) {
			/* later writes within the same open append (OR) */
			if (*ppos)
				bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len);
			else
				bitmap_copy(bitmap, tmp_bitmap, bitmap_len);
		}
		kfree(tmp_bitmap);
		*lenp -= left;
		*ppos += *lenp;
		return 0;
	} else {
		kfree(tmp_bitmap);
		return err;
	}
}
#else /* CONFIG_PROC_SYSCTL */
/*
 * CONFIG_PROC_SYSCTL is disabled: every proc handler becomes a stub
 * returning -ENOSYS so code referencing them still links.
 */
int proc_dostring(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec_minmax(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec_jiffies(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_doulongvec_minmax(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
				      void __user *buffer,
				      size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif /* CONFIG_PROC_SYSCTL */
/*
 * No sense putting this after each symbol definition, twice,
 * exception granted :-)
 */
/* Export the proc handlers (real or stub) for module use. */
EXPORT_SYMBOL(proc_dointvec);
EXPORT_SYMBOL(proc_dointvec_jiffies);
EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
EXPORT_SYMBOL(proc_doulongvec_minmax);
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
| gpl-2.0 |
giridhar123/GiriProject | src/server/scripts/Northrend/Ulduar/Ulduar/instance_ulduar.cpp | 6 | 40340 | /*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "InstanceScript.h"
#include "ulduar.h"
#include "Player.h"
#include "WorldPacket.h"
// Doors managed by the generic InstanceScript door machinery: each entry
// binds a gameobject to a boss encounter and a door behavior type.
// The zeroed entry terminates the list.
static DoorData const doorData[] =
{
    {GO_LEVIATHAN_DOOR,                BOSS_LEVIATHAN, DOOR_TYPE_ROOM,       BOUNDARY_S   },
    {GO_XT_002_DOOR,                   BOSS_XT002,     DOOR_TYPE_ROOM,       BOUNDARY_S   },
    {GO_DOODAD_UL_SIGILDOOR_03,        BOSS_ALGALON,   DOOR_TYPE_ROOM,       BOUNDARY_W   },
    {GO_DOODAD_UL_UNIVERSEFLOOR_01,    BOSS_ALGALON,   DOOR_TYPE_ROOM,       BOUNDARY_NONE},
    {GO_DOODAD_UL_UNIVERSEFLOOR_02,    BOSS_ALGALON,   DOOR_TYPE_SPAWN_HOLE, BOUNDARY_NONE},
    {GO_DOODAD_UL_UNIVERSEGLOBE01,     BOSS_ALGALON,   DOOR_TYPE_SPAWN_HOLE, BOUNDARY_NONE},
    {GO_DOODAD_UL_ULDUAR_TRAPDOOR_03,  BOSS_ALGALON,   DOOR_TYPE_SPAWN_HOLE, BOUNDARY_NONE},
    {0,                                0,              DOOR_TYPE_ROOM,       BOUNDARY_NONE},
};
// The three Iron Council members are tracked as minions of the single
// Assembly of Iron encounter.  The zeroed entry terminates the list.
MinionData const minionData[] =
{
    { NPC_STEELBREAKER, BOSS_ASSEMBLY_OF_IRON },
    { NPC_MOLGEIM,      BOSS_ASSEMBLY_OF_IRON },
    { NPC_BRUNDIR,      BOSS_ASSEMBLY_OF_IRON },
    { 0,                0,                     }
};
class instance_ulduar : public InstanceMapScript
{
public:
instance_ulduar() : InstanceMapScript("instance_ulduar", 603) { }
struct instance_ulduar_InstanceMapScript : public InstanceScript
{
instance_ulduar_InstanceMapScript(InstanceMap* map) : InstanceScript(map) { }
// Creatures
uint64 LeviathanGUID;
uint64 IgnisGUID;
uint64 RazorscaleGUID;
uint64 RazorscaleController;
uint64 RazorHarpoonGUIDs[4];
uint64 ExpeditionCommanderGUID;
uint64 XT002GUID;
uint64 XTToyPileGUIDs[4];
uint64 AssemblyGUIDs[3];
uint64 KologarnGUID;
uint64 AuriayaGUID;
uint64 MimironGUID;
uint64 HodirGUID;
uint64 ThorimGUID;
uint64 FreyaGUID;
uint64 KeeperGUIDs[3];
uint64 VezaxGUID;
uint64 YoggSaronGUID;
uint64 AlgalonGUID;
uint64 LeviathanGateGUID;
uint64 VezaxDoorGUID;
// GameObjects
uint64 KologarnChestGUID;
uint64 KologarnBridgeGUID;
uint64 KologarnDoorGUID;
uint64 ThorimChestGUID;
uint64 HodirRareCacheGUID;
uint64 HodirChestGUID;
uint64 HodirDoorGUID;
uint64 HodirIceDoorGUID;
uint64 ArchivumDoorGUID;
uint64 AlgalonSigilDoorGUID[3];
uint64 AlgalonFloorGUID[2];
uint64 AlgalonUniverseGUID;
uint64 AlgalonTrapdoorGUID;
uint64 BrannBronzebeardAlgGUID;
uint64 GiftOfTheObserverGUID;
// Miscellaneous
uint32 TeamInInstance;
uint32 HodirRareCacheData;
uint32 ColossusData;
uint8 elderCount;
bool conSpeedAtory;
bool Unbroken;
std::set<uint64> mRubbleSpawns;
void Initialize()
{
SetBossNumber(MAX_ENCOUNTER);
LoadDoorData(doorData);
LoadMinionData(minionData);
IgnisGUID = 0;
RazorscaleGUID = 0;
RazorscaleController = 0;
ExpeditionCommanderGUID = 0;
XT002GUID = 0;
KologarnGUID = 0;
AuriayaGUID = 0;
MimironGUID = 0;
HodirGUID = 0;
ThorimGUID = 0;
FreyaGUID = 0;
VezaxGUID = 0;
YoggSaronGUID = 0;
AlgalonGUID = 0;
KologarnChestGUID = 0;
KologarnBridgeGUID = 0;
ThorimChestGUID = 0;
HodirRareCacheGUID = 0;
HodirChestGUID = 0;
LeviathanGateGUID = 0;
VezaxDoorGUID = 0;
HodirDoorGUID = 0;
HodirIceDoorGUID = 0;
ArchivumDoorGUID = 0;
AlgalonUniverseGUID = 0;
AlgalonTrapdoorGUID = 0;
BrannBronzebeardAlgGUID = 0;
GiftOfTheObserverGUID = 0;
_algalonTimer = 61;
_maxArmorItemLevel = 0;
_maxWeaponItemLevel = 0;
TeamInInstance = 0;
HodirRareCacheData = 0;
ColossusData = 0;
elderCount = 0;
conSpeedAtory = false;
Unbroken = true;
_algalonSummoned = false;
_summonAlgalon = false;
memset(AlgalonSigilDoorGUID, 0, sizeof(AlgalonSigilDoorGUID));
memset(AlgalonFloorGUID, 0, sizeof(AlgalonFloorGUID));
memset(XTToyPileGUIDs, 0, sizeof(XTToyPileGUIDs));
memset(AssemblyGUIDs, 0, sizeof(AssemblyGUIDs));
memset(RazorHarpoonGUIDs, 0, sizeof(RazorHarpoonGUIDs));
memset(KeeperGUIDs, 0, sizeof(KeeperGUIDs));
}
// Push the Algalon despawn-timer world states to players entering the map.
// The timer UI is shown only while the one-hour window is running
// (_algalonTimer in 1..60); the displayed value is clamped to 60 minutes.
void FillInitialWorldStates(WorldPacket& packet)
{
    packet << uint32(WORLD_STATE_ALGALON_TIMER_ENABLED) << uint32(_algalonTimer && _algalonTimer <= 60);
    packet << uint32(WORLD_STATE_ALGALON_DESPAWN_TIMER) << uint32(std::min<uint32>(_algalonTimer, 60));
}
// Record the faction of the first player in and, if a saved instance
// requested it, summon Algalon on demand.
// Fix: the SummonCreature() result was dereferenced unconditionally;
// it can return nullptr, which crashed the server.
void OnPlayerEnter(Player* player)
{
    if (!TeamInInstance)
        TeamInInstance = player->GetTeam();

    if (_summonAlgalon)
    {
        _summonAlgalon = false;
        if (TempSummon* algalon = instance->SummonCreature(NPC_ALGALON, AlgalonLandPos))
        {
            if (_algalonTimer && _algalonTimer <= 60)
                algalon->AI()->DoAction(ACTION_INIT_ALGALON);
            else
                algalon->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC);
        }
    }
}
// Cache creature GUIDs as they spawn, register encounter minions, swap
// Alliance helper NPCs for their Horde equivalents, and hand
// externally-summoned adds over to Algalon's summon list.
void OnCreatureCreate(Creature* creature)
{
    // Fallback faction detection if no player has entered yet.
    if (!TeamInInstance)
    {
        Map::PlayerList const& Players = instance->GetPlayers();
        if (!Players.isEmpty())
            if (Player* player = Players.begin()->getSource())
                TeamInInstance = player->GetTeam();
    }

    switch (creature->GetEntry())
    {
        case NPC_LEVIATHAN:
            LeviathanGUID = creature->GetGUID();
            break;
        case NPC_IGNIS:
            IgnisGUID = creature->GetGUID();
            break;
        case NPC_RAZORSCALE:
            RazorscaleGUID = creature->GetGUID();
            break;
        case NPC_RAZORSCALE_CONTROLLER:
            RazorscaleController = creature->GetGUID();
            break;
        case NPC_EXPEDITION_COMMANDER:
            ExpeditionCommanderGUID = creature->GetGUID();
            break;
        case NPC_XT002:
            XT002GUID = creature->GetGUID();
            break;
        case NPC_XT_TOY_PILE:
            // Store each of the four toy piles in the first free slot.
            for (uint8 i = 0; i < 4; ++i)
                if (!XTToyPileGUIDs[i])
                {
                    XTToyPileGUIDs[i] = creature->GetGUID();
                    break;
                }
            break;

        // Assembly of Iron
        case NPC_STEELBREAKER:
            AssemblyGUIDs[0] = creature->GetGUID();
            AddMinion(creature, true);
            break;
        case NPC_MOLGEIM:
            AssemblyGUIDs[1] = creature->GetGUID();
            AddMinion(creature, true);
            break;
        case NPC_BRUNDIR:
            AssemblyGUIDs[2] = creature->GetGUID();
            AddMinion(creature, true);
            break;

        // Freya's Keeper - despawned once Freya is done
        case NPC_IRONBRANCH:
            KeeperGUIDs[0] = creature->GetGUID();
            if (GetBossState(BOSS_FREYA) == DONE)
                creature->DespawnOrUnsummon();
            break;
        case NPC_BRIGHTLEAF:
            KeeperGUIDs[1] = creature->GetGUID();
            if (GetBossState(BOSS_FREYA) == DONE)
                creature->DespawnOrUnsummon();
            break;
        case NPC_STONEBARK:
            KeeperGUIDs[2] = creature->GetGUID();
            if (GetBossState(BOSS_FREYA) == DONE)
                creature->DespawnOrUnsummon();
            break;

        // Kologarn
        case NPC_KOLOGARN:
            KologarnGUID = creature->GetGUID();
            break;
        case NPC_AURIAYA:
            AuriayaGUID = creature->GetGUID();
            break;
        case NPC_MIMIRON:
            MimironGUID = creature->GetGUID();
            break;
        case NPC_HODIR:
            HodirGUID = creature->GetGUID();
            break;
        case NPC_THORIM:
            ThorimGUID = creature->GetGUID();
            break;
        case NPC_FREYA:
            FreyaGUID = creature->GetGUID();
            break;
        case NPC_VEZAX:
            VezaxGUID = creature->GetGUID();
            break;
        case NPC_YOGGSARON:
            YoggSaronGUID = creature->GetGUID();
            break;
        case NPC_ALGALON:
            AlgalonGUID = creature->GetGUID();
            break;

        // Hodir's Helper NPCs - replace Alliance spawns with Horde versions
        case NPC_EIVI_NIGHTFEATHER:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_TOR_GREYCLOUD, HORDE);
            break;
        case NPC_ELLIE_NIGHTFEATHER:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_KAR_GREYCLOUD, HORDE);
            break;
        case NPC_ELEMENTALIST_MAHFUUN:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_SPIRITWALKER_TARA, HORDE);
            break;
        case NPC_ELEMENTALIST_AVUUN:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_SPIRITWALKER_YONA, HORDE);
            break;
        case NPC_MISSY_FLAMECUFFS:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_AMIRA_BLAZEWEAVER, HORDE);
            break;
        case NPC_SISSY_FLAMECUFFS:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_VEESHA_BLAZEWEAVER, HORDE);
            break;
        case NPC_FIELD_MEDIC_PENNY:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_BATTLE_PRIEST_ELIZA, HORDE);
            break;
        case NPC_FIELD_MEDIC_JESSI:
            if (TeamInInstance == HORDE)
                creature->UpdateEntry(NPC_BATTLE_PRIEST_GINA, HORDE);
            break;
        case NPC_BRANN_BRONZBEARD_ALG:
            BrannBronzebeardAlgGUID = creature->GetGUID();
            break;
        //! These creatures are summoned by something else than Algalon
        //! but need to be controlled/despawned by him - so they need to be
        //! registered in his summon list
        case NPC_ALGALON_VOID_ZONE_VISUAL_STALKER:
        case NPC_ALGALON_STALKER_ASTEROID_TARGET_01:
        case NPC_ALGALON_STALKER_ASTEROID_TARGET_02:
        case NPC_UNLEASHED_DARK_MATTER:
            if (Creature* algalon = instance->GetCreature(AlgalonGUID))
                algalon->AI()->JustSummoned(creature);
            break;
    }
}
// Clears cached GUIDs / minion registrations when a tracked creature
// is removed from the map.
void OnCreatureRemove(Creature* creature)
{
    uint32 const entry = creature->GetEntry();
    if (entry == NPC_XT_TOY_PILE)
    {
        // Forget whichever toy-pile slot this creature occupied.
        for (uint8 slot = 0; slot < 4; ++slot)
        {
            if (XTToyPileGUIDs[slot] == creature->GetGUID())
            {
                XTToyPileGUIDs[slot] = 0;
                break;
            }
        }
    }
    else if (entry == NPC_STEELBREAKER || entry == NPC_MOLGEIM || entry == NPC_BRUNDIR)
    {
        // Assembly of Iron members are tracked as minions of the encounter.
        AddMinion(creature, false);
    }
    else if (entry == NPC_BRANN_BRONZBEARD_ALG)
    {
        if (BrannBronzebeardAlgGUID == creature->GetGUID())
            BrannBronzebeardAlgGUID = 0;
    }
}
// Caches GameObject GUIDs and applies state that depends on already
// completed encounters (opened doors, lowered bridge, planetarium access).
void OnGameObjectCreate(GameObject* gameObject)
{
    switch (gameObject->GetEntry())
    {
        case GO_KOLOGARN_CHEST_HERO:
        case GO_KOLOGARN_CHEST:
            KologarnChestGUID = gameObject->GetGUID();
            break;
        case GO_KOLOGARN_BRIDGE:
            KologarnBridgeGUID = gameObject->GetGUID();
            // Bridge stays lowered once Kologarn has been defeated.
            if (GetBossState(BOSS_KOLOGARN) == DONE)
                HandleGameObject(0, false, gameObject);
            break;
        case GO_KOLOGARN_DOOR:
            KologarnDoorGUID = gameObject->GetGUID();
            break;
        case GO_THORIM_CHEST_HERO:
        case GO_THORIM_CHEST:
            ThorimChestGUID = gameObject->GetGUID();
            break;
        case GO_HODIR_RARE_CACHE_OF_WINTER_HERO:
        case GO_HODIR_RARE_CACHE_OF_WINTER:
            HodirRareCacheGUID = gameObject->GetGUID();
            break;
        case GO_HODIR_CHEST_HERO:
        case GO_HODIR_CHEST:
            HodirChestGUID = gameObject->GetGUID();
            break;
        case GO_LEVIATHAN_DOOR:
            AddDoor(gameObject, true);
            break;
        case GO_LEVIATHAN_GATE:
            LeviathanGateGUID = gameObject->GetGUID();
            if (GetBossState(BOSS_LEVIATHAN) == DONE)
                gameObject->SetGoState(GO_STATE_ACTIVE_ALTERNATIVE);
            break;
        case GO_XT_002_DOOR:
            AddDoor(gameObject, true);
            break;
        case GO_VEZAX_DOOR:
            VezaxDoorGUID = gameObject->GetGUID();
            // Kept closed; opened from SetBossState when Vezax dies.
            HandleGameObject(0, false, gameObject);
            break;
        case GO_RAZOR_HARPOON_1:
            RazorHarpoonGUIDs[0] = gameObject->GetGUID();
            break;
        case GO_RAZOR_HARPOON_2:
            RazorHarpoonGUIDs[1] = gameObject->GetGUID();
            break;
        case GO_RAZOR_HARPOON_3:
            RazorHarpoonGUIDs[2] = gameObject->GetGUID();
            break;
        case GO_RAZOR_HARPOON_4:
            RazorHarpoonGUIDs[3] = gameObject->GetGUID();
            break;
        case GO_MOLE_MACHINE:
            if (GetBossState(BOSS_RAZORSCALE) == IN_PROGRESS)
                gameObject->SetGoState(GO_STATE_ACTIVE);
            // BUGFIX: a missing break here used to fall through into
            // GO_HODIR_DOOR, overwriting HodirDoorGUID with a mole
            // machine's GUID.
            break;
        case GO_HODIR_DOOR:
            HodirDoorGUID = gameObject->GetGUID();
            break;
        case GO_HODIR_ICE_DOOR:
            HodirIceDoorGUID = gameObject->GetGUID();
            break;
        case GO_ARCHIVUM_DOOR:
            ArchivumDoorGUID = gameObject->GetGUID();
            // Door stays shut until the Assembly of Iron is done.
            if (GetBossState(BOSS_ASSEMBLY_OF_IRON) != DONE)
                HandleGameObject(ArchivumDoorGUID, false);
            break;
        case GO_CELESTIAL_PLANETARIUM_ACCESS_10:
        case GO_CELESTIAL_PLANETARIUM_ACCESS_25:
            // Algalon may only be summoned once per instance lifetime.
            if (_algalonSummoned)
                gameObject->SetFlag(GAMEOBJECT_FLAGS, GO_FLAG_IN_USE);
            break;
        case GO_DOODAD_UL_SIGILDOOR_01:
            AlgalonSigilDoorGUID[0] = gameObject->GetGUID();
            if (_algalonSummoned)
                gameObject->SetGoState(GO_STATE_ACTIVE);
            break;
        case GO_DOODAD_UL_SIGILDOOR_02:
            AlgalonSigilDoorGUID[1] = gameObject->GetGUID();
            if (_algalonSummoned)
                gameObject->SetGoState(GO_STATE_ACTIVE);
            break;
        case GO_DOODAD_UL_SIGILDOOR_03:
            AlgalonSigilDoorGUID[2] = gameObject->GetGUID();
            AddDoor(gameObject, true);
            break;
        case GO_DOODAD_UL_UNIVERSEFLOOR_01:
            AlgalonFloorGUID[0] = gameObject->GetGUID();
            AddDoor(gameObject, true);
            break;
        case GO_DOODAD_UL_UNIVERSEFLOOR_02:
            AlgalonFloorGUID[1] = gameObject->GetGUID();
            AddDoor(gameObject, true);
            break;
        case GO_DOODAD_UL_UNIVERSEGLOBE01:
            AlgalonUniverseGUID = gameObject->GetGUID();
            AddDoor(gameObject, true);
            break;
        case GO_DOODAD_UL_ULDUAR_TRAPDOOR_03:
            AlgalonTrapdoorGUID = gameObject->GetGUID();
            AddDoor(gameObject, true);
            break;
        case GO_GIFT_OF_THE_OBSERVER_10:
        case GO_GIFT_OF_THE_OBSERVER_25:
            GiftOfTheObserverGUID = gameObject->GetGUID();
            break;
    }
}
// Unregisters door objects from the generic door handler when they
// are removed from the map.
void OnGameObjectRemove(GameObject* gameObject)
{
    uint32 const entry = gameObject->GetEntry();
    bool const isManagedDoor =
        entry == GO_LEVIATHAN_DOOR ||
        entry == GO_XT_002_DOOR ||
        entry == GO_DOODAD_UL_SIGILDOOR_03 ||
        entry == GO_DOODAD_UL_UNIVERSEFLOOR_01 ||
        entry == GO_DOODAD_UL_UNIVERSEFLOOR_02 ||
        entry == GO_DOODAD_UL_UNIVERSEGLOBE01 ||
        entry == GO_DOODAD_UL_ULDUAR_TRAPDOOR_03;
    if (isManagedDoor)
        AddDoor(gameObject, false);
}
// Starts the CRITERIA_CON_SPEED_ATORY timed achievement the first time
// one of the tracked creatures dies.
void OnUnitDeath(Unit* unit)
{
    // Only creature deaths are relevant here.
    Creature* creature = unit->ToCreature();
    if (!creature)
        return;
    uint32 const entry = creature->GetEntry();
    bool const startsTimedCriteria =
        entry == NPC_CORRUPTED_SERVITOR || entry == NPC_MISGUIDED_NYMPH ||
        entry == NPC_GUARDIAN_LASHER || entry == NPC_FOREST_SWARMER ||
        entry == NPC_MANGROVE_ENT || entry == NPC_IRONROOT_LASHER ||
        entry == NPC_NATURES_BLADE || entry == NPC_GUARDIAN_OF_LIFE;
    if (startsTimedCriteria && !conSpeedAtory)
    {
        DoStartTimedAchievement(ACHIEVEMENT_TIMED_TYPE_EVENT, CRITERIA_CON_SPEED_ATORY);
        conSpeedAtory = true;   // only trigger once per instance
    }
}
// Forwards tower-destruction map events to Flame Leviathan's AI.
void ProcessEvent(WorldObject* /*gameObject*/, uint32 eventId)
{
    // No leviathan, no event triggering ;)
    Creature* flameLeviathan = instance->GetCreature(LeviathanGUID);
    if (!flameLeviathan || !flameLeviathan->isAlive())
        return;
    switch (eventId)
    {
        case EVENT_TOWER_OF_STORM_DESTROYED:
            flameLeviathan->AI()->DoAction(ACTION_TOWER_OF_STORM_DESTROYED);
            break;
        case EVENT_TOWER_OF_FROST_DESTROYED:
            flameLeviathan->AI()->DoAction(ACTION_TOWER_OF_FROST_DESTROYED);
            break;
        case EVENT_TOWER_OF_FLAMES_DESTROYED:
            flameLeviathan->AI()->DoAction(ACTION_TOWER_OF_FLAMES_DESTROYED);
            break;
        case EVENT_TOWER_OF_LIFE_DESTROYED:
            flameLeviathan->AI()->DoAction(ACTION_TOWER_OF_LIFE_DESTROYED);
            break;
        default:
            break;
    }
}
// Stores an encounter-state change and applies its side effects:
// opening/closing doors, making loot chests lootable and, for Algalon,
// stopping the despawn timer and snapshotting equipped item levels for
// the Herald of the Titans criteria check.
bool SetBossState(uint32 type, EncounterState state)
{
    // Let the base implementation validate and persist the state first.
    if (!InstanceScript::SetBossState(type, state))
        return false;
    switch (type)
    {
        case BOSS_LEVIATHAN:
        case BOSS_IGNIS:
        case BOSS_RAZORSCALE:
        case BOSS_XT002:
        case BOSS_AURIAYA:
        case BOSS_MIMIRON:
        case BOSS_FREYA:
            // No additional handling for these encounters.
            break;
        case BOSS_ASSEMBLY_OF_IRON:
            // Open the Archivum door once the council is defeated.
            if (state == DONE)
                HandleGameObject(ArchivumDoorGUID, true);
            break;
        case BOSS_VEZAX:
            if (state == DONE)
                HandleGameObject(VezaxDoorGUID, true);
            break;
        case BOSS_YOGGSARON:
            break;
        case BOSS_KOLOGARN:
            if (state == DONE)
            {
                // Respawn the loot chest and make it selectable.
                if (GameObject* gameObject = instance->GetGameObject(KologarnChestGUID))
                {
                    gameObject->SetRespawnTime(gameObject->GetRespawnDelay());
                    gameObject->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
                }
                HandleGameObject(KologarnBridgeGUID, false);
            }
            // Entrance door is shut only while the fight is running.
            if (state == IN_PROGRESS)
                HandleGameObject(KologarnDoorGUID, false);
            else
                HandleGameObject(KologarnDoorGUID, true);
            break;
        case BOSS_HODIR:
            if (state == DONE)
            {
                // Rare cache only becomes lootable if its condition
                // (DATA_HODIR_RARE_CACHE) still holds.
                if (GameObject* HodirRareCache = instance->GetGameObject(HodirRareCacheGUID))
                    if (GetData(DATA_HODIR_RARE_CACHE))
                        HodirRareCache->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
                if (GameObject* HodirChest = instance->GetGameObject(HodirChestGUID))
                    HodirChest->SetRespawnTime(HodirChest->GetRespawnDelay());
                HandleGameObject(HodirDoorGUID, true);
                HandleGameObject(HodirIceDoorGUID, true);
            }
            break;
        case BOSS_THORIM:
            if (state == DONE)
                if (GameObject* gameObject = instance->GetGameObject(ThorimChestGUID))
                    gameObject->SetRespawnTime(gameObject->GetRespawnDelay());
            break;
        case BOSS_ALGALON:
            if (state == DONE)
            {
                // Defeat stops the one-hour despawn countdown for good.
                _events.CancelEvent(EVENT_UPDATE_ALGALON_TIMER);
                _events.CancelEvent(EVENT_DESPAWN_ALGALON);
                DoUpdateWorldState(WORLD_STATE_ALGALON_TIMER_ENABLED, 0);
                // Values above 60 are treated as "timer not running" on
                // reload (see Load()).
                _algalonTimer = 61;
                if (GameObject* gameObject = instance->GetGameObject(GiftOfTheObserverGUID))
                    gameObject->SetRespawnTime(gameObject->GetRespawnDelay());
                // get item level (recheck weapons)
                Map::PlayerList const& players = instance->GetPlayers();
                for (Map::PlayerList::const_iterator itr = players.begin(); itr != players.end(); ++itr)
                    if (Player* player = itr->getSource())
                        for (uint8 slot = EQUIPMENT_SLOT_MAINHAND; slot <= EQUIPMENT_SLOT_RANGED; ++slot)
                            if (Item* item = player->GetItemByPos(INVENTORY_SLOT_BAG_0, slot))
                                if (item->GetTemplate()->ItemLevel > _maxWeaponItemLevel)
                                    _maxWeaponItemLevel = item->GetTemplate()->ItemLevel;
            }
            else if (state == IN_PROGRESS)
            {
                // get item level (armor cannot be swapped in combat)
                Map::PlayerList const& players = instance->GetPlayers();
                for (Map::PlayerList::const_iterator itr = players.begin(); itr != players.end(); ++itr)
                {
                    if (Player* player = itr->getSource())
                    {
                        for (uint8 slot = EQUIPMENT_SLOT_START; slot < EQUIPMENT_SLOT_END; ++slot)
                        {
                            // Tabard and shirt do not count toward the check.
                            if (slot == EQUIPMENT_SLOT_TABARD || slot == EQUIPMENT_SLOT_BODY)
                                continue;
                            if (Item* item = player->GetItemByPos(INVENTORY_SLOT_BAG_0, slot))
                            {
                                // Track weapon and armor maxima separately.
                                if (slot >= EQUIPMENT_SLOT_MAINHAND && slot <= EQUIPMENT_SLOT_RANGED)
                                {
                                    if (item->GetTemplate()->ItemLevel > _maxWeaponItemLevel)
                                        _maxWeaponItemLevel = item->GetTemplate()->ItemLevel;
                                }
                                else if (item->GetTemplate()->ItemLevel > _maxArmorItemLevel)
                                    _maxArmorItemLevel = item->GetTemplate()->ItemLevel;
                            }
                        }
                    }
                }
            }
            break;
    }
    return true;
}
// Generic data setter for non-boss instance state (colossus progress,
// Hodir rare-cache condition, Unbroken criteria, Algalon despawn timer).
void SetData(uint32 type, uint32 data)
{
    switch (type)
    {
        case DATA_COLOSSUS:
            ColossusData = data;
            // Both colossi down (data == 2): start the Leviathan event.
            if (data == 2)
            {
                if (Creature* Leviathan = instance->GetCreature(LeviathanGUID))
                    Leviathan->AI()->DoAction(ACTION_MOVE_TO_CENTER_POSITION);
                if (GameObject* gameObject = instance->GetGameObject(LeviathanGateGUID))
                    gameObject->SetGoState(GO_STATE_ACTIVE_ALTERNATIVE);
                SaveToDB();
            }
            break;
        case DATA_HODIR_RARE_CACHE:
            HodirRareCacheData = data;
            // Condition failed: remove the rare cache from the map.
            if (!HodirRareCacheData)
            {
                if (Creature* Hodir = instance->GetCreature(HodirGUID))
                    if (GameObject* gameObject = instance->GetGameObject(HodirRareCacheGUID))
                        Hodir->RemoveGameObject(gameObject, false);
            }
            break;
        case DATA_UNBROKEN:
            Unbroken = bool(data);
            break;
        case EVENT_DESPAWN_ALGALON:
            // Start the one-hour (60 x 1 min) despawn countdown and
            // show it via world states.
            DoUpdateWorldState(WORLD_STATE_ALGALON_TIMER_ENABLED, 1);
            DoUpdateWorldState(WORLD_STATE_ALGALON_DESPAWN_TIMER, 60);
            _algalonTimer = 60;
            _events.ScheduleEvent(EVENT_DESPAWN_ALGALON, 3600000);
            _events.ScheduleEvent(EVENT_UPDATE_ALGALON_TIMER, 60000);
            break;
        case DATA_ALGALON_SUMMON_STATE:
            _algalonSummoned = true;
            break;
        default:
            break;
    }
}
// No 64-bit data is stored through this interface for Ulduar.
void SetData64(uint32 /*type*/, uint64 /*data*/)
{
}
// Returns cached GUIDs (bosses, helper NPCs, doors, harpoons) keyed by
// boss/data identifiers; returns 0 for unknown identifiers.
uint64 GetData64(uint32 data) const
{
    switch (data)
    {
        case BOSS_LEVIATHAN:
            return LeviathanGUID;
        case BOSS_IGNIS:
            return IgnisGUID;
        case BOSS_RAZORSCALE:
            return RazorscaleGUID;
        case DATA_RAZORSCALE_CONTROL:
            return RazorscaleController;
        case BOSS_XT002:
            return XT002GUID;
        case DATA_TOY_PILE_0:
        case DATA_TOY_PILE_1:
        case DATA_TOY_PILE_2:
        case DATA_TOY_PILE_3:
            return XTToyPileGUIDs[data - DATA_TOY_PILE_0];
        case BOSS_KOLOGARN:
            return KologarnGUID;
        case BOSS_AURIAYA:
            return AuriayaGUID;
        case BOSS_MIMIRON:
            return MimironGUID;
        case BOSS_HODIR:
            return HodirGUID;
        case BOSS_THORIM:
            return ThorimGUID;
        case BOSS_FREYA:
            return FreyaGUID;
        case BOSS_VEZAX:
            return VezaxGUID;
        case BOSS_YOGGSARON:
            return YoggSaronGUID;
        case BOSS_ALGALON:
            return AlgalonGUID;
        // Razorscale expedition commander
        case DATA_EXPEDITION_COMMANDER:
            return ExpeditionCommanderGUID;
        case GO_RAZOR_HARPOON_1:
            return RazorHarpoonGUIDs[0];
        case GO_RAZOR_HARPOON_2:
            return RazorHarpoonGUIDs[1];
        case GO_RAZOR_HARPOON_3:
            return RazorHarpoonGUIDs[2];
        case GO_RAZOR_HARPOON_4:
            return RazorHarpoonGUIDs[3];
        // Assembly of Iron
        case DATA_STEELBREAKER:
            return AssemblyGUIDs[0];
        case DATA_MOLGEIM:
            return AssemblyGUIDs[1];
        case DATA_BRUNDIR:
            return AssemblyGUIDs[2];
        // Freya's Keepers.
        // BUGFIX: indices must match OnCreatureCreate, which stores
        // Ironbranch in slot 0 and Brightleaf in slot 1 — the previous
        // mapping returned the two keepers swapped.
        case BOSS_BRIGHTLEAF:
            return KeeperGUIDs[1];
        case BOSS_IRONBRANCH:
            return KeeperGUIDs[0];
        case BOSS_STONEBARK:
            return KeeperGUIDs[2];
        case DATA_SIGILDOOR_01:
            return AlgalonSigilDoorGUID[0];
        case DATA_SIGILDOOR_02:
            return AlgalonSigilDoorGUID[1];
        case DATA_SIGILDOOR_03:
            return AlgalonSigilDoorGUID[2];
        case DATA_UNIVERSE_FLOOR_01:
            return AlgalonFloorGUID[0];
        case DATA_UNIVERSE_FLOOR_02:
            return AlgalonFloorGUID[1];
        case DATA_UNIVERSE_GLOBE:
            return AlgalonUniverseGUID;
        case DATA_ALGALON_TRAPDOOR:
            return AlgalonTrapdoorGUID;
        case DATA_BRANN_BRONZEBEARD_ALG:
            return BrannBronzebeardAlgGUID;
    }
    return 0;
}
// Generic data getter for non-boss instance state; 0 for unknown keys.
uint32 GetData(uint32 type) const
{
    if (type == DATA_COLOSSUS)
        return ColossusData;
    if (type == DATA_HODIR_RARE_CACHE)
        return HodirRareCacheData;
    if (type == DATA_UNBROKEN)
        return Unbroken ? 1 : 0;
    return 0;
}
// Herald of the Titans: every equipped weapon/armor piece seen so far
// must be at or below the respective item-level cap.
bool CheckAchievementCriteriaMeet(uint32 criteriaId, Player const* , Unit const* /* = NULL */, uint32 /* = 0 */)
{
    if (criteriaId != CRITERIA_HERALD_OF_TITANS)
        return false;
    bool const armorOk = _maxArmorItemLevel <= MAX_HERALD_ARMOR_ITEMLEVEL;
    bool const weaponOk = _maxWeaponItemLevel <= MAX_HERALD_WEAPON_ITEMLEVEL;
    return armorOk && weaponOk;
}
// Serializes instance state. Format (space separated):
//   "U U" <boss states> <colossus data> <algalon timer> <algalon summoned 0|1>
// Must stay in sync with Load() below.
std::string GetSaveData()
{
    OUT_SAVE_INST_DATA;
    std::ostringstream saveStream;
    saveStream << "U U " << GetBossSaveData() << GetData(DATA_COLOSSUS) << ' ' << _algalonTimer << ' ' << (_algalonSummoned ? 1 : 0);
    OUT_SAVE_INST_DATA_COMPLETE;
    return saveStream.str();
}
// Restores instance state from the string produced by GetSaveData().
// Resumes the Algalon despawn countdown when it was still running.
void Load(char const* strIn)
{
    if (!strIn)
    {
        OUT_LOAD_INST_DATA_FAIL;
        return;
    }
    OUT_LOAD_INST_DATA(strIn);
    char dataHead1, dataHead2;
    std::istringstream loadStream(strIn);
    loadStream >> dataHead1 >> dataHead2;
    // "U U" identifies an Ulduar save blob; anything else is ignored.
    if (dataHead1 == 'U' && dataHead2 == 'U')
    {
        for (uint8 i = 0; i < MAX_ENCOUNTER; ++i)
        {
            uint32 tmpState;
            loadStream >> tmpState;
            // In-progress or invalid states reset to NOT_STARTED.
            if (tmpState == IN_PROGRESS || tmpState > SPECIAL)
                tmpState = NOT_STARTED;
            SetBossState(i, EncounterState(tmpState));
        }
        uint32 tempState;
        loadStream >> tempState;
        if (tempState == IN_PROGRESS || tempState > SPECIAL)
            tempState = NOT_STARTED;
        SetData(DATA_COLOSSUS, tempState);
        loadStream >> _algalonTimer;
        loadStream >> tempState;
        _algalonSummoned = tempState != 0;
        if (_algalonSummoned && GetBossState(BOSS_ALGALON) != DONE)
        {
            _summonAlgalon = true;
            // Timer values 1..60 mean the countdown was active; restart
            // the per-minute tick and re-show the world-state timer.
            if (_algalonTimer && _algalonTimer <= 60)
            {
                _events.ScheduleEvent(EVENT_UPDATE_ALGALON_TIMER, 60000);
                DoUpdateWorldState(WORLD_STATE_ALGALON_TIMER_ENABLED, 1);
                DoUpdateWorldState(WORLD_STATE_ALGALON_DESPAWN_TIMER, _algalonTimer);
            }
        }
    }
    OUT_LOAD_INST_DATA_COMPLETE;
}
// Drives the per-minute Algalon despawn countdown; when it reaches
// zero the timer world state is hidden and Algalon is told to despawn.
void Update(uint32 diff)
{
    if (_events.Empty())
        return;
    _events.Update(diff);
    while (uint32 eventId = _events.ExecuteEvent())
    {
        switch (eventId)
        {
            case EVENT_UPDATE_ALGALON_TIMER:
                // Persist each tick so a crash/reset resumes correctly.
                SaveToDB();
                DoUpdateWorldState(WORLD_STATE_ALGALON_DESPAWN_TIMER, --_algalonTimer);
                if (_algalonTimer)
                    _events.ScheduleEvent(EVENT_UPDATE_ALGALON_TIMER, 60000);
                else
                {
                    // Countdown expired: hide the timer and despawn him.
                    DoUpdateWorldState(WORLD_STATE_ALGALON_TIMER_ENABLED, 0);
                    _events.CancelEvent(EVENT_UPDATE_ALGALON_TIMER);
                    if (Creature* algalon = instance->GetCreature(AlgalonGUID))
                        algalon->AI()->DoAction(EVENT_DESPAWN_ALGALON);
                }
                break;
        }
    }
}
private:
    // Instance-level timed events; drives the Algalon despawn countdown.
    EventMap _events;
    // Minutes left on the Algalon despawn countdown; set to 61 once he
    // is defeated (values > 60 are treated as inactive on reload).
    uint32 _algalonTimer;
    // Set during Load() when Algalon must be re-summoned after a reset.
    bool _summonAlgalon;
    // Whether Algalon has been summoned in this instance's lifetime.
    bool _algalonSummoned;
    // Highest equipped item levels observed; used by the
    // Herald of the Titans criteria check.
    uint32 _maxArmorItemLevel;
    uint32 _maxWeaponItemLevel;
};
// Factory hook: creates the per-map instance script for Ulduar.
InstanceScript* GetInstanceScript(InstanceMap* map) const
{
    return new instance_ulduar_InstanceMapScript(map);
}
};
// Script-loader entry point: registers the Ulduar instance script
// (registration happens in the script's constructor).
void AddSC_instance_ulduar()
{
    new instance_ulduar();
}
| gpl-2.0 |
dominik-th/xbmc | xbmc/cores/dvdplayer/DVDCodecs/Video/DVDVideoCodec.cpp | 6 | 2609 | /*
* Copyright (C) 2010-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "DVDVideoCodec.h"
#include "windowing/WindowingFactory.h"
#include "settings/Settings.h"
#include "settings/lib/Setting.h"
// Decides whether a hardware-acceleration codec setting should be shown,
// based on the GPU vendor reported by the windowing system.
// Returns false for a null setting or empty value.
bool CDVDVideoCodec::IsSettingVisible(const std::string &condition, const std::string &value, const CSetting *setting, void *data)
{
  if (setting == NULL || value.empty())
    return false;
  const std::string &settingId = setting->GetId();
  // check if we are running on nvidia hardware
  std::string gpuvendor = g_Windowing.GetRenderVendor();
  // Lower-case the vendor string. Cast through unsigned char before
  // calling tolower(): passing a negative plain char is undefined
  // behavior (previous code fed raw chars straight to ::tolower via
  // std::transform).
  for (size_t i = 0; i < gpuvendor.size(); ++i)
    gpuvendor[i] = static_cast<char>(::tolower(static_cast<unsigned char>(gpuvendor[i])));
  bool isNvidia = (gpuvendor.compare(0, 6, "nvidia") == 0);
  bool isIntel = (gpuvendor.compare(0, 5, "intel") == 0);
  // nvidia does only need mpeg-4 setting
  if (isNvidia)
  {
    if (settingId == "videoplayer.usevdpaumpeg4")
      return true;
    return false; //will also hide intel settings on nvidia hardware
  }
  else if (isIntel) // intel needs vc1, mpeg-2 and mpeg4 setting
  {
    if (settingId == "videoplayer.usevaapimpeg4")
      return true;
    if (settingId == "videoplayer.usevaapivc1")
      return true;
    if (settingId == "videoplayer.usevaapimpeg2")
      return true;
    return false; //this will also hide nvidia settings on intel hardware
  }
  // if we don't know the hardware we are running on e.g. amd oss vdpau
  // or fglrx with xvba-driver we show everything
  return true;
}
// Checks whether codec @id is disabled: a codec listed in the
// availability table is disabled when its user setting is off or when
// that setting is hidden on this hardware. Unknown codecs are never
// disabled ("don't disable what we don't have").
bool CDVDVideoCodec::IsCodecDisabled(DVDCodecAvailableType* map, unsigned int size, AVCodecID id)
{
  for (unsigned int i = 0; i < size; ++i)
  {
    if (map[i].codec != id)
      continue;
    if (!CSettings::Get().GetBool(map[i].setting))
      return true;
    return !CDVDVideoCodec::IsSettingVisible("unused", "unused", CSettings::Get().GetSetting(map[i].setting), NULL);
  }
  return false;
}
| gpl-2.0 |
kamarush/android_kernel_sony_yuga_lp | kernel/sched/core.c | 6 | 212589 | /*
* kernel/sched/core.c
*
* Kernel scheduler and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
* 2003-09-03 Interactivity tuning by Con Kolivas.
* 2004-04-02 Scheduler domains code by Nick Piggin
* 2007-04-15 Work begun on replacing all interactivity tuning with a
* fair scheduling design by Con Kolivas.
* 2007-05-05 Load balancing (smp-nice) and other improvements
* by Peter Williams
* 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
* Thomas Gleixner, Mike Kravetz
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/cpufreq.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
/* Notifier chain invoked around task migration between CPUs. */
ATOMIC_NOTIFIER_HEAD(migration_notifier_head);

/*
 * Arm @period_timer to fire every @period. The loop retries until the
 * timer is observed active (re-arming can race with the timer running).
 */
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
        unsigned long delta;
        ktime_t soft, hard, now;
        for (;;) {
                if (hrtimer_active(period_timer))
                        break;
                now = hrtimer_cb_get_time(period_timer);
                hrtimer_forward(period_timer, now, period);
                /* Preserve the timer's soft/hard expiry slack when re-arming. */
                soft = hrtimer_get_softexpires(period_timer);
                hard = hrtimer_get_expires(period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

/*
 * Advance rq->clock to the current sched_clock time, unless updates are
 * temporarily suppressed via rq->skip_clock_update.
 */
void update_rq_clock(struct rq *rq)
{
        s64 delta;
        if (rq->skip_clock_update > 0)
                return;
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */
/* Expands features.h into an OR of the enabled feature bits. */
#define SCHED_FEAT(name, enabled) \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
        0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
/* Expands features.h into an array of feature-name strings. */
#define SCHED_FEAT(name, enabled) \
        #name ,

static __read_mostly char *sched_feat_names[] = {
#include "features.h"
        NULL
};

#undef SCHED_FEAT
/* Print all feature flags, prefixing disabled ones with "NO_". */
static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;
        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");
        return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled) \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

/* Patch out the static branch when feature @i gets disabled. */
static void sched_feat_disable(int i)
{
        if (static_key_enabled(&sched_feat_keys[i]))
                static_key_slow_dec(&sched_feat_keys[i]);
}

/* Patch in the static branch when feature @i gets enabled. */
static void sched_feat_enable(int i)
{
        if (!static_key_enabled(&sched_feat_keys[i]))
                static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

/*
 * Parse writes to the sched_features debugfs file: a feature name
 * enables the feature, a "NO_"-prefixed name disables it.
 * Returns -EINVAL when no feature matches.
 */
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int neg = 0;
        int i;
        /* Bound the copy so the buffer always has room for the NUL. */
        if (cnt > 63)
                cnt = 63;
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = 0;
        cmp = strstrip(buf);
        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }
        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg) {
                                sysctl_sched_features &= ~(1UL << i);
                                sched_feat_disable(i);
                        } else {
                                sysctl_sched_features |= (1UL << i);
                                sched_feat_enable(i);
                        }
                        break;
                }
        }
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
        *ppos += cnt;
        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open = sched_feat_open,
        .write = sched_feat_write,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* Expose the feature flags under debugfs once debugfs is up. */
static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);
        return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics could be scaled in reference to it.
 */
static unsigned int max_possible_freq = 1;
/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;
        lockdep_assert_held(&p->pi_lock);
        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                /*
                 * Re-check after locking: the task may have migrated to
                 * another rq between task_rq() and taking the lock.
                 */
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}
/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;
        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                /* Same migration re-check as __task_rq_lock() above. */
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
        }
}

/* Drop only the rq lock taken by __task_rq_lock(). */
static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

/* Drop both locks taken by task_rq_lock(), in reverse order. */
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;
        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);
        return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * Its all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */
/* Cancel a pending hrtick timer, if any. */
static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}
/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        /* queued == 1: this is a timer-driven tick. */
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);
        return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;
        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
        hrtimer_set_expires(timer, time);
        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                /* Remote rq: arm the timer on its CPU via IPI. */
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}
/* Hotplug callback: cancel the hrtick when a CPU goes away. */
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;
        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }
        return NOTIFY_DONE;
}
static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */
/* Per-rq hrtick setup: csd for remote arming plus the timer itself. */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;
        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif
        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}
static inline void init_rq_hrtick(struct rq *rq)
{
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */
/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP
#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
void resched_task(struct task_struct *p)
{
        int cpu;
        assert_raw_spin_locked(&task_rq(p)->lock);
        /* Already flagged: nothing more to do. */
        if (test_tsk_need_resched(p))
                return;
        set_tsk_need_resched(p);
        cpu = task_cpu(p);
        /* Local CPU will notice the flag on its own. */
        if (cpu == smp_processor_id())
                return;
        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        /* A polling idle loop will see the flag without an IPI. */
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

/* Best-effort reschedule of whatever currently runs on @cpu. */
void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
                        if (!idle_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
unlock:
        rcu_read_unlock();
        return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        if (cpu == smp_processor_id())
                return;
        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;
        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);
        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}

/* True when this CPU was kicked to run the idle load balancer. */
static inline bool got_nohz_idle_kick(void)
{
        int cpu = smp_processor_id();
        if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
                return false;
        if (idle_cpu(cpu) && !need_resched())
                return true;
        /*
         * We can't run Idle Load Balance on this CPU for this time so we
         * cancel it and clear NOHZ_BALANCE_KICK
         */
        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
        return false;
}
#else /* CONFIG_NO_HZ */
static inline bool got_nohz_idle_kick(void)
{
        return false;
}
#endif /* CONFIG_NO_HZ */

/* Age the rq's RT-time average, halving it every sched_avg_period(). */
void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();
        while ((s64)(rq->clock - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}
#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
* Iterate task_group tree rooted at *from, calling @down when first entering a
* node and @up when leaving it for the final time.
*
* Caller must hold rcu_lock or sufficient equivalent.
*/
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 *
 * Iteration stops early and returns the first non-zero value produced
 * by a visitor.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;
down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	/* Depth-first: descend into each child before moving to siblings. */
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

		/* Jumped back to from below to resume the sibling walk. */
up:
		continue;
	}
	/* All children visited: run the @up visitor for this node. */
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	/* Pop one level and continue iterating the parent's siblings. */
	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}
/* No-op tg_visitor for walks that only need one of the down/up callbacks. */
int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
/*
 * Derive @p's load weight (and its precomputed inverse) from its
 * static priority; SCHED_IDLE tasks always get the minimal weight.
 */
static void set_load_weight(struct task_struct *p)
{
	struct load_weight *load = &p->se.load;
	int idx = p->static_prio - MAX_RT_PRIO;

	if (p->policy == SCHED_IDLE) {
		/* SCHED_IDLE tasks get minimal weight. */
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
	} else {
		load->weight = scale_load(prio_to_weight[idx]);
		load->inv_weight = prio_to_wmult[idx];
	}
}
/*
 * Queue @p on @rq via its scheduling class and update the associated
 * bookkeeping (sched_info stats, trace event, cumulative runnable avg).
 */
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, flags);
	trace_sched_enq_deq_task(p, 1);
	inc_cumulative_runnable_avg(rq, p);
}
/*
 * Remove @p from @rq via its scheduling class; mirror image of
 * enqueue_task() including the bookkeeping updates.
 */
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, flags);
	trace_sched_enq_deq_task(p, 0);
	dec_cumulative_runnable_avg(rq, p);
}
/* Make @p runnable on @rq, adjusting the uninterruptible-task count. */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}
/* Take @p off @rq, adjusting the uninterruptible-task count. */
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* There are no locks covering percpu hardirq/softirq time.
* They are only modified in account_system_vtime, on corresponding CPU
* with interrupts disabled. So, writes are safe.
* They are read and saved off onto struct rq in update_rq_clock().
* This may result in other CPU reading this CPU's irq time and can
* race with irq/account_system_vtime on this CPU. We would either get old
* or new value with a side effect of accounting a slice of irq time to wrong
* task when irq is in progress while we read rq->clock. That is a worthy
* compromise in place of having locks on each irq in account_system_time.
*/
/* Per-cpu irq time; written only locally with irqs off (see comment above). */
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);

/* sched_clock timestamp of the last irq-time accounting on this CPU. */
static DEFINE_PER_CPU(u64, irq_start_time);
/* Non-zero when irq time accounting is enabled. */
static int sched_clock_irqtime;
/* Turn fine-grained irq time accounting on (requires a usable sched_clock). */
void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

/* Turn fine-grained irq time accounting off. */
void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}
#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
/*
 * On 32-bit, readers need a seqcount to get a consistent snapshot of the
 * 64-bit irq time counters.  The write side is open-coded because the
 * writer only ever runs on the local CPU with interrupts disabled.
 */
static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	/* Odd sequence must be visible before the counters are updated. */
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	/* Counter updates must be visible before the sequence goes even. */
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}
/*
 * Consistently snapshot @cpu's combined hard+soft irq time, retrying
 * if a writer was active during the read (32-bit seqcount protocol).
 */
static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
/* On 64-bit, u64 loads are atomic: no seqcount needed. */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
/*
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
/*
 * Charge the time since the last snapshot to either the hardirq or the
 * softirq per-cpu counter, depending on what context we are entering or
 * leaving.  Runs with irqs disabled on the local CPU.
 */
void account_system_vtime(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	/* Time elapsed since the previous accounting point on this CPU. */
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse scheduler with a special task
	 * that do not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_PARAVIRT
/*
 * Convert @steal nanoseconds into ticks.  For the common small case the
 * iterative divide helper is cheaper; for backlogs over a second a plain
 * 64-bit division wins.
 */
static inline u64 steal_ticks(u64 steal)
{
	if (likely(steal <= NSEC_PER_SEC))
		return __iter_div_u64_rem(steal, TICK_NSEC, &steal);

	return div_u64(steal, TICK_NSEC);
}
#endif
/*
 * Advance rq->clock_task by @delta, after subtracting time consumed by
 * hard/soft irqs (CONFIG_IRQ_TIME_ACCOUNTING) and/or stolen by the
 * hypervisor (CONFIG_PARAVIRT_TIME_ACCOUNTING), so clock_task reflects
 * only time the running task could actually use.
 *
 * Fix: "(&paravirt_steal_rq_enabled)" had been mangled into the
 * character sequence "¶virt_steal_rq_enabled" (HTML-entity corruption
 * of "&para"), which does not compile.
 */
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compile should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		/* Round stolen time down to whole ticks. */
		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Does the per-cpu hardirq time exceed what has already been folded into
 * cpustat[CPUTIME_IRQ]?  Returns 1 if there is new time to account.
 */
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
/*
 * Does the per-cpu softirq time exceed what has already been folded into
 * cpustat[CPUTIME_SOFTIRQ]?  Returns 1 if there is new time to account.
 */
static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
#define sched_clock_irqtime (0)
#endif
/*
 * Install (or clear, when @stop is NULL) the per-cpu stop task, giving
 * it stop_sched_class so it outranks everything; the previous stop task
 * is demoted back to the RT class so it can exit normally.
 *
 * Fix: "&param" had been mangled into "¶m" (HTML-entity corruption
 * of "&para"), which does not compile.
 */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, its something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}
/*
* __normal_prio - return the priority that is based on the static prio
*/
static inline int __normal_prio(struct task_struct *p)
{
	/* A non-RT task's base priority is simply its static (nice) priority. */
	return p->static_prio;
}
/*
* Calculate the expected normal priority: i.e. priority
* without taking RT-inheritance into account. Might be
* boosted by interactivity modifiers. Changes upon fork,
* setprio syscalls, and whenever the interactivity
* estimator recalculates.
*/
/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account.  RT tasks map their
 * rt_priority into [0, MAX_RT_PRIO-1]; everything else derives its
 * priority from static_prio.
 */
static inline int normal_prio(struct task_struct *p)
{
	if (!task_has_rt_policy(p))
		return __normal_prio(p);

	return MAX_RT_PRIO-1 - p->rt_priority;
}
/*
* Calculate the current priority, i.e. the priority
* taken into account by the scheduler. This value might
* be boosted by RT tasks, or might be boosted by
* interactivity modifiers. Will be RT if the task got
* RT-boosted. If not then it returns p->normal_prio.
*/
/*
 * Calculate the current priority as seen by the scheduler: refresh
 * p->normal_prio, then keep p->prio if the task is (or was boosted to)
 * RT priority, otherwise return the recomputed normal priority.
 */
static int effective_prio(struct task_struct *p)
{
	int normal = normal_prio(p);

	p->normal_prio = normal;

	/* RT (or RT-boosted) tasks keep their current priority. */
	return rt_prio(p->prio) ? p->prio : normal;
}
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}
/*
 * Fire the appropriate class-transition callbacks after @p changed
 * scheduling class or priority: switched_from/switched_to on a class
 * change, prio_changed when only the priority moved.
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio)
		p->sched_class->prio_changed(rq, p, oldprio);
}
/*
 * Check whether the just-woken/queued @p should preempt rq->curr and,
 * if so, mark curr for rescheduling.
 */
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		/* Same class: let the class decide on preemption. */
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		/*
		 * NOTE(review): relies on for_each_class() walking classes in
		 * priority order, so hitting p's class before curr's means p
		 * outranks curr — confirm against the class list definition.
		 */
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}
/* Notifier chain invoked (atomically) when a task migrates between CPUs. */
static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);

/* Subscribe @n to task migration events (see set_task_cpu()). */
void register_task_migration_notifier(struct notifier_block *n)
{
	atomic_notifier_chain_register(&task_migration_notifier, n);
}
#ifdef CONFIG_SMP
/*
 * Move @p's CPU assignment to @new_cpu, performing the per-class
 * migration hook, perf/trace accounting and migration-notifier call
 * when the CPU actually changes.
 */
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		struct task_migration_notifier tmn;

		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);

		tmn.task = p;
		tmn.from_cpu = task_cpu(p);
		tmn.to_cpu = new_cpu;

		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
	}

	__set_task_cpu(p, new_cpu);
}
/* Argument block for migration_cpu_stop(): migrate @task to @dest_cpu. */
struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);
/*
* wait_task_inactive - wait for a thread to unschedule.
*
* If @match_state is nonzero, it's the @p->state value just checked and
* not expected to change. If it changes, i.e. @p might have woken up,
* then return zero. When we succeed in waiting for @p to be off its CPU,
* we return a positive number (its total switch count). If a second call
* a short while later returns the same number, the caller can be sure that
* @p has remained unscheduled the whole time.
*
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
* be called with interrupts off, or it may introduce deadlock with
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			/* Sleep ~1ms before re-checking instead of spinning. */
			ktime_t to = ktime_set(0, NSEC_PER_MSEC);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
/***
* kick_process - kick a running thread to enter/exit the kernel
* @p: the to-be-kicked thread
*
* Cause a process which is running on another CPU to enter
* kernel-mode, without any delay. (to get signals handled.)
*
* NOTE: this function doesn't have to take the runqueue lock,
* because all it wants to ensure is that the remote task enters
* the kernel. If the IPI races and the task has been migrated
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
/*
 * Force @p, if it is currently running on another CPU, to trap into the
 * kernel via a reschedule IPI (e.g. so pending signals get handled).
 * Preemption is disabled around the check so smp_processor_id() and the
 * IPI target stay coherent; see the comment above for why no runqueue
 * lock is required.
 */
void kick_process(struct task_struct *p)
{
	int target;

	preempt_disable();
	target = task_cpu(p);
	if (task_curr(p) && target != smp_processor_id())
		smp_send_reschedule(target);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by both rq->lock and p->pi_lock
*/
/*
 * Pick a usable CPU for @p when its preferred CPU is gone: first try
 * allowed+online CPUs on the same node, then any allowed CPU, then
 * progressively widen the allowed mask (cpuset fallback, then all
 * possible CPUs) until something sticks.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* Nothing found: widen the allowed mask and retry. */
		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_sched("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}
/*
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
*/
/*
 * Ask @p's scheduling class for a target runqueue CPU, falling back to
 * select_fallback_rq() if the answer is offline or not allowed.
 * Caller (fork, wakeup) owns p->pi_lock so ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}
/*
 * Fold @sample into *@avg as an exponential moving average with a
 * 1/8 weight on the new sample.
 */
static void update_avg(u64 *avg, u64 sample)
{
	*avg += (s64)(sample - *avg) >> 3;
}
#endif
/*
 * Update schedstats counters for a wakeup of @p that targeted @cpu
 * (local vs. remote, migrated, sync).  No-op without CONFIG_SCHEDSTATS.
 */
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		/* Find the smallest domain spanning both CPUs. */
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}
/* Enqueue the woken task and mark it on the runqueue. */
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}
/* Window size (in ns) for window-based task load tracking */
__read_mostly unsigned int sched_ravg_window = 10000000;

/* Min window size (in ns) = 10ms; smaller settings disable tracking */
__read_mostly unsigned int min_sched_ravg_window = 10000000;

/* Max window size (in ns) = 1s */
__read_mostly unsigned int max_sched_ravg_window = 1000000000;
/*
* Called when new window is starting for a task, to record cpu usage over
* recently concluded window(s). Normally 'samples' should be 1. It can be > 1
* when, say, a real-time task runs without preemption for several windows at a
* stretch.
*/
/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 *
 * Also refreshes p->ravg.demand (max of the history average and the
 * latest window) and, for queued tasks, keeps the runqueue's
 * cumulative_runnable_avg consistent with the demand change.
 */
static inline void
update_history(struct rq *rq, struct task_struct *p, u32 runtime, int samples)
{
	u32 *hist = &p->ravg.sum_history[0];
	int ridx, widx;
	u32 sum = 0, avg;

	/* Ignore windows where task had no activity */
	if (!runtime)
		return;

	/* Push new 'runtime' value onto stack */
	widx = RAVG_HIST_SIZE - 1;
	ridx = widx - samples;
	for (; ridx >= 0; --widx, --ridx) {
		hist[widx] = hist[ridx];
		sum += hist[widx];
	}

	/* Fill the newest slot(s) with the just-finished window's runtime. */
	for (widx = 0; widx < samples && widx < RAVG_HIST_SIZE; widx++) {
		hist[widx] = runtime;
		sum += hist[widx];
	}

	p->ravg.sum = 0;
	if (p->on_rq) {
		/* Back out the old demand before recomputing it below. */
		rq->cumulative_runnable_avg -= p->ravg.demand;
		BUG_ON((s64)rq->cumulative_runnable_avg < 0);
	}

	avg = sum / RAVG_HIST_SIZE;

	p->ravg.demand = max(avg, runtime);

	if (p->on_rq)
		rq->cumulative_runnable_avg += p->ravg.demand;
}
/* Parse the "sched_ravg_window=" boot parameter (window size in ns). */
static int __init set_sched_ravg_window(char *str)
{
	get_option(&str, &sched_ravg_window);
	return 0;
}

early_param("sched_ravg_window", set_sched_ravg_window);
/*
 * Advance @p's window-based run average to the current time.  Closes out
 * any window(s) that have elapsed since mark_start (recording them via
 * update_history()) and, when @update_sum is set, charges the running
 * time since mark_start — scaled by the CPU's current frequency relative
 * to max_possible_freq — into the current window's sum.
 */
void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
{
	u32 window_size = sched_ravg_window;
	int new_window;
	u64 wallclock = sched_clock();

	/* Tracking is off for idle tasks and sub-minimum window sizes. */
	if (is_idle_task(p) || (sched_ravg_window < min_sched_ravg_window))
		return;

	do {
		s64 delta = 0;
		int n;
		u64 now = wallclock;

		new_window = 0;
		delta = now - p->ravg.window_start;
		BUG_ON(delta < 0);
		if (delta > window_size) {
			/* Clamp accounting to the end of the current window. */
			p->ravg.window_start += window_size;
			now = p->ravg.window_start;
			new_window = 1;
		}

		if (update_sum) {
			unsigned int cur_freq = rq->cur_freq;

			delta = now - p->ravg.mark_start;
			BUG_ON(delta < 0);

			if (unlikely(cur_freq > max_possible_freq))
				cur_freq = max_possible_freq;

			/* Scale by current frequency: busy time at low freq counts less. */
			delta = div64_u64(delta * cur_freq,
					  max_possible_freq);
			p->ravg.sum += delta;
			WARN_ON(p->ravg.sum > window_size);
		}

		if (!new_window)
			break;

		/* Window rolled over: record the finished window's sum. */
		update_history(rq, p, p->ravg.sum, 1);

		delta = wallclock - p->ravg.window_start;
		BUG_ON(delta < 0);
		n = div64_u64(delta, window_size);
		if (n) {
			/* Skip over whole windows that elapsed in one go. */
			if (!update_sum)
				p->ravg.window_start = wallclock;
			else
				p->ravg.window_start += (u64)n *
							(u64)window_size;
			BUG_ON(p->ravg.window_start > wallclock);
			if (update_sum)
				update_history(rq, p, window_size, n);
		}
		p->ravg.mark_start = p->ravg.window_start;
	} while (new_window);

	p->ravg.mark_start = wallclock;
}
/*
* Mark the task runnable and perform wakeup-preemption.
*/
/*
 * Mark the task runnable and perform wakeup-preemption; also updates the
 * task's window-based run average and the rq's avg_idle estimate.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	trace_sched_wakeup(p, true);
	check_preempt_curr(rq, p, wake_flags);
	update_task_ravg(p, rq, 0);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	if (rq->idle_stamp) {
		/* Fold the just-ended idle period into rq->avg_idle (capped). */
		u64 delta = rq->clock - rq->idle_stamp;
		u64 max = 2*sysctl_sched_migration_cost;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
#endif
}
/* Activate a woken task on @rq and complete the wakeup. */
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	/* Undo the load contribution recorded when the task blocked. */
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}
/*
* Called in case the task @p isn't fully descheduled from its runqueue,
* in this case we must do a remote wakeup. Its a 'light' wakeup though,
* since all we need to do is flip p->state to TASK_RUNNING, since
* the task is still ->on_rq.
*/
/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 *
 * Returns 1 if the light wakeup was done, 0 if the task had already left
 * the runqueue.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}
#ifdef CONFIG_SMP
/*
 * Drain this CPU's lockless wake_list and activate each queued task;
 * runs from the scheduler IPI with the local rq lock held.
 */
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}
/*
 * Reschedule-IPI entry: process queued remote wakeups and, if kicked,
 * trigger the nohz idle load balance softirq.
 */
void scheduler_ipi(void)
{
	/* Fast path: a bare resched IPI with no extra work to do. */
	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}
/*
 * Queue @p on @cpu's lockless wake_list; only the first enqueue onto an
 * empty list (llist_add() returning true) needs to send the IPI.
 */
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}
/* Do the two CPUs share a last-level-cache domain? */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */
/*
 * Place the woken @p onto @cpu's runqueue: remotely via the wake_list/IPI
 * path when TTWU_QUEUE is enabled and the CPUs do not share cache,
 * otherwise directly under the rq lock.
 */
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}
/*
 * Wakeup load above which the migration notifier also fires for
 * non-migrating wakeups; see try_to_wake_up().
 */
__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
* re-schedule is in progress), and as such you're allowed to do
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
* Returns %true if @p was woken up, %false if it was already running
* or @state didn't match @p's state.
*/
/*
 * Core wakeup path (see kernel-doc above).  Returns 1 if @p's state
 * matched @state and it was made runnable, 0 otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, src_cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	src_cpu = task_cpu(p);
	cpu = src_cpu;

	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */

	/* Task still on its runqueue: a 'light' remote wakeup suffices. */
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	if (src_cpu != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	if (task_notify_on_migrate(p)) {
		struct migration_notify_data mnd;

		mnd.src_cpu = src_cpu;
		mnd.dest_cpu = cpu;
		mnd.load = pct_task_load(p);

		/*
		 * Call the migration notifier with mnd for foreground task
		 * migrations as well as for wakeups if their load is above
		 * sysctl_sched_wakeup_load_threshold. This would prompt the
		 * cpu-boost to boost the CPU frequency on wake up of a heavy
		 * weight foreground task
		 */
		if ((src_cpu != cpu) || (mnd.load >
					sysctl_sched_wakeup_load_threshold))
			atomic_notifier_call_chain(&migration_notifier_head,
						   0, (void *)&mnd);
	}
	return success;
}
/**
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
*
* Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not
* the current task.
*/
/*
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON(rq != this_rq()) ||
	    WARN_ON(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		/*
		 * Lock ordering is pi_lock before rq->lock: drop and retake
		 * rq->lock to acquire both without risking deadlock.
		 */
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	if (!p->on_rq)
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes. Returns 1 if the process was woken up, 0 if it was already
* running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
/* See kernel-doc above: wake @p from any TASK_NORMAL sleep state. */
int wake_up_process(struct task_struct *p)
{
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
/* Wake @p only if its current state matches the @state mask. */
int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
* __sched_fork() is basic setup used by init_idle() too:
*/
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->on_rq			= 0;

	p->se.on_rq			= 0;
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;
	init_new_task_load(p);
	INIT_LIST_HEAD(&p->se.group_node);

/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	p->se.avg.runnable_avg_period = 0;
	p->se.avg.runnable_avg_sum = 0;
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}
/*
* fork()/clone()-time setup:
*/
/*
 * fork()/clone()-time setup: initialize the child's scheduler state,
 * priority/policy (undoing PI boost and, if requested, any non-default
 * settings), and its initial CPU assignment.
 */
void sched_fork(struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child due to cgroup_fork()
	 * is ran before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	set_task_cpu(p, cpu);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT_COUNT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif

	put_cpu();
}
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
/**
* preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
{
hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);
/**
* preempt_notifier_unregister - no longer interested in preemption notifications
* @notifier: notifier struct to unregister
*
* This is safe to call from within a preemption notifier.
*/
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(¬ifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
/* Invoke every registered sched_in hook for @curr (it just got the CPU). */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
/* Invoke every registered sched_out hook for @curr before switching to @next. */
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}
#else /* !CONFIG_PREEMPT_NOTIFIERS */
/* No-op when CONFIG_PREEMPT_NOTIFIERS is not enabled. */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}
/* No-op when CONFIG_PREEMPT_NOTIFIERS is not enabled. */
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}
#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
* @prev: the current task that is being switched out
* @next: the task we are going to switch to.
*
* This is called with the rq lock held and interrupts off. It must
* be paired with a subsequent finish_task_switch after the context
* switch.
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	/* order matters: stats/perf/notifiers first, then lock + arch hooks */
	sched_info_switch(prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);
}
/**
* finish_task_switch - clean up after a task-switch
* @rq: runqueue associated with task-switch
* @prev: the thread we just switched away from.
*
* finish_task_switch must be called after the context switch, paired
* with a prepare_task_switch call before the context switch.
* finish_task_switch will reconcile locking set up by prepare_task_switch,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
	perf_event_task_sched_in(prev, current);
	/* reconciles locking done by prepare_lock_switch() */
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	/* drop the mm reference deferred by context_switch() */
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);

		/* final reference drop; may free prev's task struct */
		put_task_struct(prev);
	}
}
#ifdef CONFIG_SMP
/* assumes rq->lock is held */
/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	/* give prev's scheduling class a chance to act before the switch */
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}
/* rq->lock is NOT held, but preemption is disabled */
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	/* rq->post_schedule is set by classes needing post-switch work */
	if (rq->post_schedule) {
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}
#else
/* No-op on !SMP: there is no cross-cpu balancing work to prepare. */
static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}
/* No-op on !SMP: no post-switch balancing work. */
static inline void post_schedule(struct rq *rq)
{
}
#endif
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	/* complete the switch started on behalf of us in context_switch() */
	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	/* report our pid to the parent's clone(CLONE_CHILD_SETTID) address */
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
* context_switch - switch to the new MM and the new
* thread's register state.
*/
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	/* kernel threads (mm == NULL) borrow the previous task's mm */
	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	/* prev was a kernel thread: defer dropping its borrowed mm */
	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
/*
* nr_running, nr_uninterruptible and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, current number of uninterruptible-sleeping threads, total
* number of context switches performed since bootup.
*/
unsigned long nr_running(void)
{
unsigned long i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->nr_running;
return sum;
}
unsigned long nr_uninterruptible(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
/*
* Since we read the counters lockless, it might be slightly
* inaccurate. Do not allow it to go below zero though:
*/
if (unlikely((long)sum < 0))
sum = 0;
return sum;
}
unsigned long long nr_context_switches(void)
{
int i;
unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
}
unsigned long nr_iowait(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
}
unsigned long nr_iowait_cpu(int cpu)
{
struct rq *this = cpu_rq(cpu);
return atomic_read(&this->nr_iowait);
}
unsigned long this_cpu_load(void)
{
struct rq *this = this_rq();
return this->cpu_load[0];
}
/*
* Global load-average calculations
*
* We take a distributed and async approach to calculating the global load-avg
* in order to minimize overhead.
*
* The global load average is an exponentially decaying average of nr_running +
* nr_uninterruptible.
*
* Once every LOAD_FREQ:
*
* nr_active = 0;
* for_each_possible_cpu(cpu)
* nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
*
* avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
*
* Due to a number of reasons the above turns in the mess below:
*
* - for_each_possible_cpu() is prohibitively expensive on machines with
* serious number of cpus, therefore we need to take a distributed approach
* to calculating nr_active.
*
* \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
* = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
*
* So assuming nr_active := 0 when we start out -- true per definition, we
* can simply take per-cpu deltas and fold those into a global accumulate
* to obtain the same result. See calc_load_fold_active().
*
* Furthermore, in order to avoid synchronizing all per-cpu delta folding
* across the machine, we assume 10 ticks is sufficient time for every
* cpu to have completed this task.
*
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, just wrecks the sample.
*
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
* this would add another cross-cpu cacheline miss and atomic operation
* to the wakeup path. Instead we increment on whatever cpu the task ran
* when it went into uninterruptible state and decrement on whatever cpu
* did the wakeup. This means that only the sum of nr_uninterruptible over
* all cpus yields the correct result.
*
* This covers the NO_HZ=n code, for extra head-aches, see the comment below.
*/
/* Variables and functions for calc_load */
/* global accumulator of per-cpu active-count deltas */
static atomic_long_t calc_load_tasks;
/* jiffies timestamp of the next sample window */
static unsigned long calc_load_update;
/* the 1/5/15-minute load averages, in FIXED_1 fixed point */
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
* @offset: offset to add
* @shift: shift count to shift the result left
*
* These values are estimates at best, so no need for locking.
*/
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	int i;

	/* copy out all three averages with the caller's bias and scaling */
	for (i = 0; i < 3; i++)
		loads[i] = (avenrun[i] + offset) << shift;
}
/*
 * Compute this rq's contribution delta (running + uninterruptible) since
 * the last fold, and remember the new value for next time.
 */
static long calc_load_fold_active(struct rq *this_rq)
{
	long nr_active, delta;

	nr_active = this_rq->nr_running;
	nr_active += (long) this_rq->nr_uninterruptible;

	if (nr_active == this_rq->calc_load_active)
		return 0;

	delta = nr_active - this_rq->calc_load_active;
	this_rq->calc_load_active = nr_active;
	return delta;
}
/*
* a1 = a0 * e + a * (1 - e)
*/
/*
 * One step of the exponential moving average, in FIXED_1 fixed point:
 *
 *   a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	/* round to nearest before dropping the fixed-point fraction */
	newload += 1UL << (FSHIFT - 1);

	return newload >> FSHIFT;
}
#ifdef CONFIG_NO_HZ
/*
* Handle NO_HZ for the global load-average.
*
* Since the above described distributed algorithm to compute the global
* load-average relies on per-cpu sampling from the tick, it is affected by
* NO_HZ.
*
* The basic idea is to fold the nr_active delta into a global idle-delta upon
* entering NO_HZ state such that we can include this as an 'extra' cpu delta
* when we read the global state.
*
* Obviously reality has to ruin such a delightfully simple scheme:
*
* - When we go NO_HZ idle during the window, we can negate our sample
* contribution, causing under-accounting.
*
* We avoid this by keeping two idle-delta counters and flipping them
* when the window starts, thus separating old and new NO_HZ load.
*
* The only trick is the slight shift in index flip for read vs write.
*
* 0s 5s 10s 15s
* +10 +10 +10 +10
* |-|-----------|-|-----------|-|-----------|-|
* r:0 0 1 1 0 0 1 1 0
* w:0 1 1 0 0 1 1 0 0
*
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
*
* - When we wake up from NO_HZ idle during the window, we push up our
* contribution, since we effectively move our sample point to a known
* busy state.
*
* This is solved by pushing the window forward, and thus skipping the
* sample, for this cpu (effectively using the idle-delta for this cpu which
* was in effect at the time the window opened). This also solves the issue
* of having to deal with a cpu having been in NOHZ idle for multiple
* LOAD_FREQ intervals.
*
* When making the ILB scale, we should try to pull this in as well.
*/
/* double-buffered idle deltas; see the flip scheme described above */
static atomic_long_t calc_load_idle[2];
static int calc_load_idx;
/* Pick which idle-delta slot a cpu entering NOHZ should write into. */
static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, calc_load_update))
		idx++;

	return idx & 1;
}
/* Slot the global load computation reads (one behind the write side). */
static inline int calc_load_read_idx(void)
{
	return calc_load_idx & 1;
}
void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NOHZ mode, if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq);
	if (delta) {
		int idx = calc_load_write_idx();

		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}
void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the sample window, we're done.
	 */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window, this means we're already
	 * accounted through the nohz accounting, so skip the entire deal and
	 * sync up for the next window.
	 */
	this_rq->calc_load_update = calc_load_update;
	/* inside the +10 tick fold window: skip ahead one whole period */
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}
/* Drain the read-side idle-delta slot; the pre-read avoids a needless xchg. */
static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_idle[idx]))
		delta = atomic_long_xchg(&calc_load_idle[idx], 0);

	return delta;
}
/**
* fixed_power_int - compute: x^n, in O(log n) time
*
* @x: base of the power
* @frac_bits: fractional bits of @x
* @n: power to raise @x to.
*
* By exploiting the relation between the definition of the natural power
* function: x^n := x*x*...*x (x multiplied by itself for n times), and
* the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
* (where: n_i \elem {0, 1}, the binary vector representing n),
* we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
* of course trivially computable in O(log_2 n), the length of our binary
* vector.
*/
/*
 * fixed_power_int - compute x^n in fixed point, O(log n) multiplies.
 *
 * @x:         base, in @frac_bits fixed point
 * @frac_bits: number of fractional bits of @x (must be >= 1)
 * @n:         integer exponent
 *
 * Classic binary exponentiation: walk the bits of @n, squaring @x at
 * each step and multiplying it into the accumulator whenever the
 * current bit is set. Each fixed-point multiply is rounded to nearest.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long acc = 1UL << frac_bits;	/* fixed-point 1.0 */

	while (n) {
		if (n & 1) {
			acc *= x;
			acc += 1UL << (frac_bits - 1);	/* round */
			acc >>= frac_bits;
		}
		n >>= 1;
		if (n) {
			/* square the base for the next bit position */
			x *= x;
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}
	}

	return acc;
}
/*
* a1 = a0 * e + a * (1 - e)
*
* a2 = a1 * e + a * (1 - e)
* = (a0 * e + a * (1 - e)) * e + a * (1 - e)
* = a0 * e^2 + a * (1 - e) * (1 + e)
*
* a3 = a2 * e + a * (1 - e)
* = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
* = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
*
* ...
*
* an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
* = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
* = a0 * e^n + a * (1 - e^n)
*
* [1] application of the geometric series:
*
* n 1 - x^(n+1)
* S_n := \Sum x^i = -------------
* i=0 1 - x
*/
/* Apply @n decay steps at once: an = a0 * e^n + a * (1 - e^n), see above. */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_load_account_active(), but since a NO_HZ CPU folds its delta into
 * calc_load_idle[] via calc_load_enter_idle(), all we need to do is fold
 * in the pending idle delta if our idle period crossed a load cycle boundary.
*
* Once we've updated the global active value, we need to apply the exponential
* weights adjusted to the number of cycles missed.
*/
static void calc_global_nohz(void)
{
	long delta, active, n;

	if (!time_before(jiffies, calc_load_update + 10)) {
		/*
		 * Catch-up, fold however many we are behind still
		 */
		delta = jiffies - calc_load_update - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		/* decay all three averages by n missed periods in one go */
		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		calc_load_update += n * LOAD_FREQ;
	}

	/*
	 * Flip the idle index...
	 *
	 * Make sure we first write the new time then flip the index, so that
	 * calc_load_write_idx() will see the new time when it reads the new
	 * index, this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}
#else /* !CONFIG_NO_HZ */
/* Without NO_HZ there is never a pending idle delta or catch-up to do. */
static inline long calc_load_fold_idle(void) { return 0; }
static inline void calc_global_nohz(void) { }
#endif /* CONFIG_NO_HZ */
/*
* calc_load - update the avenrun load estimates 10 ticks after the
* CPUs have updated calc_load_tasks.
*/
void calc_global_load(unsigned long ticks)
{
	long active, delta;

	/* wait 10 ticks past the window so every cpu has folded its delta */
	if (time_before(jiffies, calc_load_update + 10))
		return;

	/*
	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
	 */
	delta = calc_load_fold_idle();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;

	/*
	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
	 */
	calc_global_nohz();
}
/*
* Called from update_cpu_load() to periodically update this CPU's
* active count.
*/
static void calc_load_account_active(struct rq *this_rq)
{
	long delta;

	/* only fold once per LOAD_FREQ window */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta  = calc_load_fold_active(this_rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}
/*
* End of global load-average stuff
*/
/*
* The exact cpuload at various idx values, calculated at every tick would be
* load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
*
* If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
* on nth tick when cpu may be busy, then we have:
* load = ((2^idx - 1) / 2^idx)^(n-1) * load
* load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
*
* decay_load_missed() below does efficient calculation of
* load = ((2^idx - 1) / 2^idx)^(n-1) * load
* avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
*
* The calculation is approximated on a 128 point scale.
* degrade_zero_ticks is the number of ticks after which load at any
* particular idx is approximated to be zero.
* degrade_factor is a precomputed table, a row for each load idx.
* Each column corresponds to degradation factor for a power of two ticks,
* based on 128 point scale.
* Example:
* row 2, col 3 (=12) says that the degradation at load idx 2 after
* 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
*
* With this power of 2 load factors, we can degrade the load n times
* by looking at 1 bits in n and doing as many mult/shift instead of
* n mult/shifts needed by the exact degradation.
*/
#define DEGRADE_SHIFT		7
/* ticks after which the decayed load at each idx is treated as zero */
static const unsigned char
		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
/*
 * degrade_factor[idx][j]: 128-point factor for 2^j missed ticks at load
 * index idx; trailing entries are implicitly zero-initialized.
 */
static const unsigned char
		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
					{0, 0, 0, 0, 0, 0, 0, 0},
					{64, 32, 8, 0, 0, 0, 0, 0},
					{96, 72, 40, 12, 1, 0, 0},
					{112, 98, 75, 43, 15, 1, 0},
					{120, 112, 98, 76, 45, 16, 2} };
/*
* Update cpu_load for any missed ticks, due to tickless idle. The backlog
* would be when CPU is idle and so we just decay the old load without
* adding any new load.
*/
/*
 * Decay @load as if @missed_updates idle ticks had each applied the
 * per-idx degradation factor, using the precomputed power-of-two table
 * so only popcount(missed_updates) multiplies are needed.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
	int col;

	if (!missed_updates)
		return load;

	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;

	/* idx 1 decays by exactly 1/2 per tick: a plain shift suffices */
	if (idx == 1)
		return load >> missed_updates;

	/* one table multiply per set bit of missed_updates */
	for (col = 0; missed_updates; missed_updates >>= 1, col++) {
		if (missed_updates & 1)
			load = (load * degrade_factor[idx][col]) >> DEGRADE_SHIFT;
	}

	return load;
}
/*
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC). With tickless idle this will not be called
* every tick. We fix it up based on jiffies.
*/
static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
			      unsigned long pending_updates)
{
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		/* catch up on ticks missed while tickless-idle */
		old_load = decay_load_missed(old_load, pending_updates - 1, i);
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}

	sched_avg_update(this_rq);
}
#ifdef CONFIG_NO_HZ
/*
* There is no sane way to deal with nohz on smp when using jiffies because the
* cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
* causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
*
* Therefore we cannot use the delta approach from the regular tick since that
* would seriously skew the load calculation. However we'll make do for those
* updates happening while idle (nohz_idle_balance) or coming out of idle
* (tick_nohz_idle_exit).
*
* This means we might still be one tick off for nohz periods.
*/
/*
* Called from nohz_idle_balance() to update the load ratings before doing the
* idle balance.
*/
void update_idle_cpu_load(struct rq *this_rq)
{
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long load = this_rq->load.weight;
	unsigned long pending_updates;

	/*
	 * bail if there's load or we're actually up-to-date.
	 */
	if (load || curr_jiffies == this_rq->last_load_update_tick)
		return;

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	this_rq->last_load_update_tick = curr_jiffies;

	__update_cpu_load(this_rq, load, pending_updates);
}
/*
* Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
*/
void update_cpu_load_nohz(void)
{
	struct rq *this_rq = this_rq();
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long pending_updates;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;

	/* rq->lock protects cpu_load[] against concurrent tick updates */
	raw_spin_lock(&this_rq->lock);
	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/*
		 * We were idle, this means load 0, the current load might be
		 * !0 due to remote wakeups and the sort.
		 */
		__update_cpu_load(this_rq, 0, pending_updates);
	}
	raw_spin_unlock(&this_rq->lock);
}
#endif /* CONFIG_NO_HZ */
/*
* Called from scheduler_tick()
*/
static void update_cpu_load_active(struct rq *this_rq)
{
	/*
	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
	 */
	this_rq->last_load_update_tick = jiffies;
	/* exactly one pending tick in the regular-tick path */
	__update_cpu_load(this_rq, this_rq->load.weight, 1);

	calc_load_account_active(this_rq);
}
#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		/* drop the lock before blocking in the stop-machine callback */
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
#endif
/* per-cpu kernel statistics, exported for use by other subsystems */
DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
/*
* Return any ns on the sched_clock that have not yet been accounted in
* @p in case that task is currently running.
*
* Called with task_rq_lock() held on @rq.
*/
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	/* only a currently-running task has unaccounted runtime */
	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq->clock_task - p->se.exec_start;
		/* guard against clock warps producing a negative delta */
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}
/*
 * task_delta_exec - ns of runtime @p has accrued but not yet had
 * accounted, taken under the task's runqueue lock.
 */
unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 delta;

	rq = task_rq_lock(p, &flags);
	delta = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return delta;
}
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus current's
* pending runtime that have not been accounted yet.
*/
/*
 * task_sched_runtime - total accounted runtime of @p, plus any pending
 * not-yet-accounted delta if the task is running right now.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 runtime;

	rq = task_rq_lock(p, &flags);
	runtime = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return runtime;
}
#ifdef CONFIG_CGROUP_CPUACCT
/* root of the cpuacct cgroup hierarchy; defined here, used below */
struct cgroup_subsys cpuacct_subsys;
struct cpuacct root_cpuacct;
#endif
/*
 * Charge @tmp to cpustat slot @index for @p's cpu, and walk @p's cpuacct
 * ancestry (excluding root, already covered) doing the same.
 */
static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
#ifdef CONFIG_CGROUP_CPUACCT
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;
#endif
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 *
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

#ifdef CONFIG_CGROUP_CPUACCT
	if (unlikely(!cpuacct_subsys.active))
		return;

	/* RCU protects the cgroup ancestry walk */
	rcu_read_lock();
	ca = task_ca(p);
	while (ca && (ca != &root_cpuacct)) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += tmp;
		ca = parent_ca(ca);
	}
	rcu_read_unlock();
#endif
}
/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	/* niced tasks are charged to CPUTIME_NICE instead of CPUTIME_USER */
	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_update_integrals(p);
}
/*
* Account guest cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	/* guest time is double-booked: user/nice plus the guest counter */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
/*
* Account system cpu time to a process and desired cpustat field
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
* @target_cputime64: pointer to cpustat field that has to be updated
*/
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_update_integrals(p);
}
/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	/* vcpu tasks outside irq context are charged as guest time instead */
	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	/* pick the cpustat bucket by the context we interrupted */
	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}
/*
* Account for involuntary wait time.
* @cputime: the cpu time spent in involuntary wait
*/
/* Charge @cputime to this cpu's steal-time counter (time taken by the host). */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}
/*
* Account for idle time.
* @cputime: the cpu time spent in idle wait
*/
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	/* idle time with tasks blocked on I/O counts as iowait instead */
	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
/*
 * Account any host-stolen time since the last check.
 *
 * Returns true when at least one whole stolen tick was accounted, in
 * which case the caller should skip regular tick accounting.
 *
 * Fix: "&paravirt_steal_enabled" had been corrupted into the mis-encoded
 * "¶virt_steal_enabled" (HTML entity for '&para'), which does not compile.
 */
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		/* only the delta since the last accounting pass */
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		/* remember what we accounted; sub-tick residue carries over */
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* Account a tick to a process and cpustat
* @p: the process that the cpu time gets accounted to
* @user_tick: is the tick from userspace
* @rq: the pointer to rq
*
* Tick demultiplexing follows the order
* - pending hardirq update
* - pending softirq update
* - user_time
* - idle_time
* - system time
* - check for guest_time
* - else account as system_time
*
* Check for hardirq is done both for system and user time as there is
* no timer going off while we are on hardirq and hence we may never get an
* opportunity to update it solely in system time.
* p->stime and friends are only updated on system time and not on irq
* softirq as those do not count in task exec_runtime any more.
*/
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* a fully stolen tick needs no further demultiplexing */
	if (steal_account_process_tick())
		return;

	/* demultiplex in the priority order documented above */
	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time do not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}
/* Account @ticks idle ticks one at a time through the irqtime demux path. */
static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	while (ticks-- > 0)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
/* No-ops when CONFIG_IRQ_TIME_ACCOUNTING is not enabled. */
static void irqtime_account_idle_ticks(int ticks) {}
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
* Account a single tick of cpu time.
* @p: the process that the cpu time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	/* with irqtime accounting, delegate to the finer-grained demux */
	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	/* a fully stolen tick needs no further accounting */
	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
/*
* Account multiple ticks of steal time.
* @p: the process from which the cpu time has been stolen
* @ticks: number of stolen ticks
*/
void account_steal_ticks(unsigned long ticks)
{
account_steal_time(jiffies_to_cputime(ticks));
}
/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks to account
 */
void account_idle_ticks(unsigned long ticks)
{
        /* IRQ-time accounting wants every tick run through its own path. */
        if (sched_clock_irqtime)
                irqtime_account_idle_ticks(ticks);
        else
                account_idle_time(jiffies_to_cputime(ticks));
}
#endif
/*
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * With precise platform accounting (CONFIG_VIRT_CPU_ACCOUNTING) the
 * per-task utime/stime counters are already exact; report them directly.
 */
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}
/*
 * Precise-accounting variant: sum utime/stime over the whole thread
 * group and report them unmodified.
 */
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else
#ifndef nsecs_to_cputime
# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
#endif
/*
 * Split a task's precise CFS runtime into user/system portions in the
 * same ratio as the (tick-sampled) utime/stime, and clamp the results
 * so they never go backwards between calls.
 */
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, utime = p->utime, total = utime + p->stime;

        /*
         * Use CFS's precise accounting:
         */
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);

        if (total) {
                /*
                 * Scale rtime by utime/total in 64 bits to avoid overflow;
                 * do_div() requires a 32-bit divisor.
                 */
                u64 temp = (__force u64) rtime;

                temp *= (__force u64) utime;
                do_div(temp, (__force u32) total);
                utime = (__force cputime_t) temp;
        } else
                utime = rtime;

        /*
         * Compare with previous values, to keep monotonicity:
         */
        p->prev_utime = max(p->prev_utime, utime);
        p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);

        *ut = p->prev_utime;
        *st = p->prev_stime;
}
/*
 * Thread-group analogue of task_times(): scale the group's precise
 * runtime by its sampled utime/stime ratio, keeping the reported
 * values monotonic via sig->prev_{u,s}time.
 *
 * Must be called with siglock held.
 */
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct signal_struct *sig = p->signal;
        struct task_cputime cputime;
        cputime_t rtime, utime, total;

        thread_group_cputime(p, &cputime);

        total = cputime.utime + cputime.stime;
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);

        if (total) {
                /* 64-bit scaling; do_div() needs a 32-bit divisor. */
                u64 temp = (__force u64) rtime;

                temp *= (__force u64) cputime.utime;
                do_div(temp, (__force u32) total);
                utime = (__force cputime_t) temp;
        } else
                utime = rtime;

        /* Keep the reported values monotonic across calls. */
        sig->prev_utime = max(sig->prev_utime, utime);
        sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);

        *ut = sig->prev_utime;
        *st = sig->prev_stime;
}
#endif
/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;

        sched_clock_tick();

        /* Clock and load must be refreshed under rq->lock before the
         * per-class tick handler runs. */
        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        update_cpu_load_active(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);

        perf_event_task_tick();

#ifdef CONFIG_SMP
        rq->idle_balance = idle_cpu(cpu);
        trigger_load_balance(rq, cpu);
#endif
}
/*
 * Walk up to two return-address levels past the locking primitives so
 * tracing reports the real caller rather than a spin_lock wrapper.
 */
notrace unsigned long get_parent_ip(unsigned long addr)
{
        if (!in_lock_functions(addr))
                return addr;

        addr = CALLER_ADDR2;
        if (!in_lock_functions(addr))
                return addr;

        return CALLER_ADDR3;
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
/*
 * Raise preempt_count by @val, with debug sanity checks and a
 * preempt-off trace event when the count transitions from zero.
 */
void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
#endif
        preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
         */
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
#endif
        /* Count just went 0 -> val: preemption was switched off here. */
        if (preempt_count() == val)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);
/*
 * Lower preempt_count by @val, tracing the point where preemption is
 * re-enabled (count about to reach zero) before the decrement.
 */
void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
                return;
        /*
         * Is the spinlock portion underflowing?
         */
        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
                        !(preempt_count() & PREEMPT_MASK)))
                return;
#endif

        /* Trace before the decrement: count == val means we reach zero. */
        if (preempt_count() == val)
                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);
#endif
/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
        /* Don't pile diagnostics on top of an oops already in progress. */
        if (oops_in_progress)
                return;

        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
                prev->comm, prev->pid, preempt_count());

        debug_show_held_locks(prev);
        print_modules();
        if (irqs_disabled())
                print_irqtrace_events(prev);
        dump_stack();
}
/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
        /*
         * Test if we are atomic. Since do_exit() needs to call into
         * schedule() atomically, we ignore that path for now.
         * Otherwise, whine if we are scheduling when we should not be.
         */
        if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
                __schedule_bug(prev);
        /* Complain about illegal RCU usage across the context switch. */
        rcu_sleep_check();

        profile_hit(SCHED_PROFILING, __builtin_return_address(0));

        schedstat_inc(this_rq(), sched_count);
}
/*
 * Hand @prev back to its scheduling class before picking the next task.
 * The rq clock is refreshed first when the task was still runnable or a
 * clock update was explicitly forced (skip_clock_update < 0).
 */
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
        update_task_ravg(prev, rq, 1);
        if (prev->on_rq || rq->skip_clock_update < 0)
                update_rq_clock(rq);
        prev->sched_class->put_prev_task(rq, prev);
}
/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
        const struct sched_class *class;
        struct task_struct *p;

        /*
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
        if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq);
                if (likely(p)) {
                        update_task_ravg(p, rq, 1);
                        return p;
                }
        }

        /* Otherwise walk the classes in priority order. */
        for_each_class(class) {
                p = class->pick_next_task(rq);
                if (p) {
                        update_task_ravg(p, rq, 1);
                        return p;
                }
        }

        BUG(); /* the idle class will always have a runnable task */
}
/*
 * __schedule() is the main scheduler function.
 */
static void __sched __schedule(void)
{
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;

need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_note_context_switch(cpu);
        prev = rq->curr;

        schedule_debug(prev);

        if (sched_feat(HRTICK))
                hrtick_clear(rq);

        /*
         * Make sure that signal_pending_state()->signal_pending() below
         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
         * done by the caller to avoid the race with signal_wake_up().
         */
        smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);

        /* Default to counting an involuntary switch; fixed up below. */
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        /* A pending signal cancels the sleep. */
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;

                        /*
                         * If a worker went to sleep, notify and ask workqueue
                         * whether it wants to wake up a task to maintain
                         * concurrency.
                         */
                        if (prev->flags & PF_WQ_WORKER) {
                                struct task_struct *to_wakeup;

                                to_wakeup = wq_worker_sleeping(prev, cpu);
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
                }
                /* Task blocked voluntarily: count a voluntary switch. */
                switch_count = &prev->nvcsw;
        }

        pre_schedule(rq, prev);

        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);

        put_prev_task(rq, prev);
        next = pick_next_task(rq);
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;

        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;

                context_switch(rq, prev, next); /* unlocks the rq */
                /*
                 * The context switch have flipped the stack from under us
                 * and restored the local variables which were saved when
                 * this task called schedule() in the past. prev == current
                 * is still correct, but it can be moved to another cpu/rq.
                 */
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
                raw_spin_unlock_irq(&rq->lock);

        post_schedule(rq);

        sched_preempt_enable_no_resched();
        if (need_resched())
                goto need_resched;
}
/*
 * Flush plugged block IO before @tsk potentially sleeps, to avoid
 * deadlocks on IO it queued itself.  Runnable tasks have nothing to
 * flush, and PI-blocked tasks must not call into the block layer here.
 */
static inline void sched_submit_work(struct task_struct *tsk)
{
        if (tsk->state && !tsk_is_pi_blocked(tsk) &&
            blk_needs_flush_plug(tsk))
                blk_schedule_flush_plug(tsk);
}
/*
 * Public entry point: submit any pending plugged IO for the current
 * task, then run the core scheduler.
 */
asmlinkage void __sched schedule(void)
{
        sched_submit_work(current);
        __schedule();
}
EXPORT_SYMBOL(schedule);
/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
        /* Drop the caller's preempt_count to 0, schedule, then restore it. */
        sched_preempt_enable_no_resched();
        schedule();
        preempt_disable();
}
#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
        struct thread_info *ti = current_thread_info();

        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task. Just return..
         */
        if (likely(ti->preempt_count || irqs_disabled()))
                return;

        do {
                /*
                 * PREEMPT_ACTIVE tells __schedule() not to deactivate the
                 * task even if its state is non-running.
                 */
                add_preempt_count_notrace(PREEMPT_ACTIVE);
                __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);

                /*
                 * Check again in case we missed a preemption opportunity
                 * between schedule and now.
                 */
                barrier();
        } while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
        struct thread_info *ti = current_thread_info();

        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());

        do {
                add_preempt_count(PREEMPT_ACTIVE);
                /* __schedule() must run with IRQs enabled. */
                local_irq_enable();
                __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);

                /*
                 * Check again in case we missed a preemption opportunity
                 * between schedule and now.
                 */
                barrier();
        } while (need_resched());
}
#endif /* CONFIG_PREEMPT */
/*
 * Default wait-queue callback: wake the task stored in the wait-queue
 * entry's private field.
 */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                          void *key)
{
        struct task_struct *task = curr->private;

        return try_to_wake_up(task, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        wait_queue_t *curr, *next;

        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                /* The callback may free curr; sample its flags first. */
                unsigned flags = curr->flags;
                int woken = curr->func(curr, mode, wake_flags, key);

                /* Stop once enough exclusive waiters have been woken. */
                if (woken && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long irqflags;

        spin_lock_irqsave(&q->lock, irqflags);
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, irqflags);
}
EXPORT_SYMBOL(__wake_up);
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
        __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
/* Locked wake-one variant that passes @key through to the wake callbacks. */
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
        __wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;
        int wake_flags;

        if (unlikely(!q))
                return;

        /* A non-exclusive "sync" wakeup degenerates to a plain wakeup. */
        wake_flags = nr_exclusive ? WF_SYNC : 0;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
/**
 * complete: - signals a single thread waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
        unsigned long flags;

        /* done++ and the wakeup must be atomic w.r.t. waiters. */
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
/**
 * complete_all: - signals all threads waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->wait.lock, flags);
        /* Effectively "infinite" done count so all current and future
         * waiters proceed without blocking. */
        x->done += UINT_MAX/2;
        __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
/*
 * Core wait loop shared by all wait_for_completion*() variants.
 * Called with x->wait.lock held; the lock is dropped and reacquired
 * around each sleep.  Returns the remaining timeout, -ERESTARTSYS if
 * interrupted, or (timeout ?: 1) on completion.  Consumes one "done"
 * count on success.
 */
static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state, int iowait)
{
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);

                /* Exclusive waiter: complete() wakes us one at a time. */
                __add_wait_queue_tail_exclusive(&x->wait, &wait);
                do {
                        if (signal_pending_state(state, current)) {
                                timeout = -ERESTARTSYS;
                                break;
                        }
                        __set_current_state(state);
                        spin_unlock_irq(&x->wait.lock);
                        if (iowait)
                                timeout = io_schedule_timeout(timeout);
                        else
                                timeout = schedule_timeout(timeout);
                        spin_lock_irq(&x->wait.lock);
                } while (!x->done && timeout);
                __remove_wait_queue(&x->wait, &wait);
                if (!x->done)
                        return timeout;
        }
        x->done--;
        return timeout ?: 1;
}
/*
 * Lock-acquiring wrapper around do_wait_for_common(); may sleep.
 */
static long __sched
wait_for_common(struct completion *x, long timeout, int state, int iowait)
{
        long left;

        might_sleep();

        spin_lock_irq(&x->wait.lock);
        left = do_wait_for_common(x, timeout, state, iowait);
        spin_unlock_irq(&x->wait.lock);
        return left;
}
/**
 * wait_for_completion: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
        wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion);
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. Treats any
 * sleeping as waiting for IO for the purposes of process accounting.
 */
void __sched wait_for_completion_io(struct completion *x)
{
        /* iowait=1: sleep time is charged as IO wait. */
        wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(wait_for_completion_io);
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
        return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT,
                                 TASK_INTERRUPTIBLE, 0);

        /* Any non-interrupted result collapses to "completed". */
        return t == -ERESTARTSYS ? t : 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
                                          unsigned long timeout)
{
        return wait_for_common(x, timeout, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE, 0);

        /* Any non-interrupted result collapses to "completed". */
        return t == -ERESTARTSYS ? t : 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
                                     unsigned long timeout)
{
        return wait_for_common(x, timeout, TASK_KILLABLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
 * Returns: 0 if a decrement cannot be done without blocking
 *          1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->wait.lock, flags);
        ret = x->done != 0;
        if (ret)
                x->done--;
        spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Returns: 0 if there are waiters (wait_for_completion() in progress)
 *          1 if there are no waiters.
 *
 */
bool completion_done(struct completion *x)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&x->wait.lock, flags);
        ret = x->done != 0;
        spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
}
EXPORT_SYMBOL(completion_done);
/*
 * Common body of the legacy sleep_on() family: enqueue on @q, sleep in
 * @state for up to @timeout jiffies, then dequeue.
 *
 * Note the asymmetric locking: the irq state saved by spin_lock_irqsave()
 * is carried across the sleep and restored only by the final
 * spin_unlock_irqrestore() after re-taking the lock with spin_lock_irq().
 */
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
        unsigned long flags;
        wait_queue_t wait;

        init_waitqueue_entry(&wait, current);

        /* State must be set before we are visible on the queue. */
        __set_current_state(state);

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, &wait);
        spin_unlock(&q->lock);
        timeout = schedule_timeout(timeout);
        spin_lock_irq(&q->lock);
        __remove_wait_queue(q, &wait);
        spin_unlock_irqrestore(&q->lock, flags);

        return timeout;
}
/* Legacy interface: interruptible sleep on @q with no timeout. */
void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
        sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);
/* Legacy interface: interruptible sleep on @q, bounded by @timeout jiffies. */
long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
/* Legacy interface: uninterruptible sleep on @q with no timeout. */
void __sched sleep_on(wait_queue_head_t *q)
{
        sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);
/* Legacy interface: uninterruptible sleep on @q, bounded by @timeout jiffies. */
long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
#ifdef CONFIG_RT_MUTEXES
/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
        int oldprio, on_rq, running;
        struct rq *rq;
        const struct sched_class *prev_class;

        BUG_ON(prio < 0 || prio > MAX_PRIO);

        rq = __task_rq_lock(p);

        /*
         * Idle task boosting is a nono in general. There is one
         * exception, when PREEMPT_RT and NOHZ is active:
         *
         * The idle task calls get_next_timer_interrupt() and holds
         * the timer wheel base->lock on the CPU and another CPU wants
         * to access the timer (probably to cancel it). We can safely
         * ignore the boosting request, as the idle CPU runs this code
         * with interrupts disabled and will complete the lock
         * protected section without being interrupted. So there is no
         * real need to boost.
         */
        if (unlikely(p == rq->idle)) {
                WARN_ON(p != rq->curr);
                WARN_ON(p->pi_blocked_on);
                goto out_unlock;
        }

        trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
        on_rq = p->on_rq;
        running = task_current(rq, p);
        /* Take the task out of the runqueue/CPU before switching class. */
        if (on_rq)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);

        if (rt_prio(prio))
                p->sched_class = &rt_sched_class;
        else
                p->sched_class = &fair_sched_class;

        p->prio = prio;

        /* Re-insert with the new class/priority, head-queued on a boost. */
        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq)
                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);

        check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
        __task_rq_unlock(rq);
}
#endif
/*
 * Set @p's nice value, requeueing it with the new load weight and
 * rescheduling its CPU when the effective priority change warrants it.
 */
void set_user_nice(struct task_struct *p, long nice)
{
        int old_prio, delta, on_rq;
        unsigned long flags;
        struct rq *rq;

        if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
                return;
        /*
         * We have to be careful, if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
        rq = task_rq_lock(p, &flags);
        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
         * it wont have any effect on scheduling until the task is
         * SCHED_FIFO/SCHED_RR:
         */
        if (task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
        /* Dequeue before changing the weight so the rq stays consistent. */
        on_rq = p->on_rq;
        if (on_rq)
                dequeue_task(rq, p, 0);

        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;

        if (on_rq) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
                        resched_task(rq->curr);
        }
out_unlock:
        task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
        /* Convert nice value [19,-20] to rlimit style value [1,40]. */
        int nice_rlim = 20 - nice;

        if (nice_rlim <= task_rlimit(p, RLIMIT_NICE))
                return 1;

        /* Beyond the rlimit, CAP_SYS_NICE is required. */
        return capable(CAP_SYS_NICE);
}
#ifdef __ARCH_WANT_SYS_NICE
/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
        long newnice, err;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
        if (increment < -40)
                increment = -40;
        else if (increment > 40)
                increment = 40;

        newnice = TASK_NICE(current) + increment;
        if (newnice < -20)
                newnice = -20;
        else if (newnice > 19)
                newnice = 19;

        /* Lowering the nice value (raising priority) needs permission. */
        if (increment < 0 && !can_nice(current, newnice))
                return -EPERM;

        err = security_task_setnice(current, newnice);
        if (err)
                return err;

        set_user_nice(current, newnice);
        return 0;
}
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
        return p->prio - MAX_RT_PRIO;
}
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
        return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);
/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /* The idle task must be running with an empty runqueue ... */
        if (rq->curr != rq->idle || rq->nr_running)
                return 0;

#ifdef CONFIG_SMP
        /* ... and no remote wakeups may be queued against this CPU. */
        if (!llist_empty(&rq->wake_list))
                return 0;
#endif

        return 1;
}
/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
        return cpu_rq(cpu)->idle;
}
/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
        /* pid 0 conventionally means "the calling task". */
        if (!pid)
                return current;
        return find_task_by_vpid(pid);
}
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
        /* Policy and rt_priority feed into the priority computations below. */
        p->policy = policy;
        p->rt_priority = prio;
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
        p->sched_class = rt_prio(p->prio) ? &rt_sched_class
                                          : &fair_sched_class;
        set_load_weight(p);
}
/*
* check the target process has a UID that matches the current process's
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
if (cred->user->user_ns == pcred->user->user_ns)
match = (cred->euid == pcred->euid ||
cred->euid == pcred->uid);
else
match = false;
rcu_read_unlock();
return match;
}
/*
 * Change @p's scheduling policy and/or RT priority.  @user selects
 * whether permission and security checks apply (false for in-kernel
 * callers such as stop_machine).  Returns 0 or a negative errno.
 */
static int __sched_setscheduler(struct task_struct *p, int policy,
                                const struct sched_param *param, bool user)
{
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
        int reset_on_fork;

        /* may grab non-irq protected spin_locks */
        BUG_ON(in_interrupt());
recheck:
        /* double check policy once rq lock held */
        if (policy < 0) {
                /* policy < 0 means "keep the current policy". */
                reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
        } else {
                reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
                policy &= ~SCHED_RESET_ON_FORK;

                if (policy != SCHED_FIFO && policy != SCHED_RR &&
                                policy != SCHED_NORMAL && policy != SCHED_BATCH &&
                                policy != SCHED_IDLE)
                        return -EINVAL;
        }

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
         * SCHED_BATCH and SCHED_IDLE is 0.
         */
        if (param->sched_priority < 0 ||
            (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
        if (rt_policy(policy) != (param->sched_priority != 0))
                return -EINVAL;

        /*
         * Allow unprivileged RT tasks to decrease priority:
         */
        if (user && !capable(CAP_SYS_NICE)) {
                if (rt_policy(policy)) {
                        unsigned long rlim_rtprio =
                                        task_rlimit(p, RLIMIT_RTPRIO);

                        /* can't set/change the rt policy */
                        if (policy != p->policy && !rlim_rtprio)
                                return -EPERM;

                        /* can't increase priority */
                        if (param->sched_priority > p->rt_priority &&
                            param->sched_priority > rlim_rtprio)
                                return -EPERM;
                }

                /*
                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
                        if (!can_nice(p, TASK_NICE(p)))
                                return -EPERM;
                }

                /* can't change other user's priorities */
                if (!check_same_owner(p))
                        return -EPERM;

                /* Normal users shall not reset the sched_reset_on_fork flag */
                if (p->sched_reset_on_fork && !reset_on_fork)
                        return -EPERM;
        }

        if (user) {
                retval = security_task_setscheduler(p);
                if (retval)
                        return retval;
        }

        /*
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         *
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
        rq = task_rq_lock(p, &flags);

        /*
         * Changing the policy of the stop threads its a very bad idea
         */
        if (p == rq->stop) {
                task_rq_unlock(rq, p, &flags);
                return -EINVAL;
        }

        /*
         * If not changing anything there's no need to proceed further:
         */
        if (unlikely(policy == p->policy && (!rt_policy(policy) ||
                        param->sched_priority == p->rt_priority))) {

                __task_rq_unlock(rq);
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                return 0;
        }

#ifdef CONFIG_RT_GROUP_SCHED
        if (user) {
                /*
                 * Do not allow realtime tasks into groups that have no runtime
                 * assigned.
                 */
                if (rt_bandwidth_enabled() && rt_policy(policy) &&
                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
                                !task_group_is_autogroup(task_group(p))) {
                        task_rq_unlock(rq, p, &flags);
                        return -EPERM;
                }
        }
#endif

        /* recheck policy now with rq lock held */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                task_rq_unlock(rq, p, &flags);
                goto recheck;
        }
        /* Dequeue/put before the class change, re-add afterwards. */
        on_rq = p->on_rq;
        running = task_current(rq, p);
        if (on_rq)
                dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);

        p->sched_reset_on_fork = reset_on_fork;

        oldprio = p->prio;
        prev_class = p->sched_class;
        __setscheduler(rq, p, policy, param->sched_priority);

        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq)
                enqueue_task(rq, p, 0);

        check_class_changed(rq, p, prev_class, oldprio);
        task_rq_unlock(rq, p, &flags);

        /* Re-evaluate any priority-inheritance chain @p is part of. */
        rt_mutex_adjust_pi(p);

        return 0;
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
                       const struct sched_param *param)
{
        /* user=true: apply permission and security checks. */
        return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
{
        /* user=false: skip permission and security checks. */
        return __sched_setscheduler(p, policy, param, false);
}
/*
 * Copy the sched_param from userspace and apply it to the task named by
 * @pid (0 means the calling task), under RCU to keep the task alive
 * across the lookup.
 */
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
        struct sched_param lparam;
        struct task_struct *p;
        int retval;

        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;

        rcu_read_lock();
        retval = -ESRCH;
        p = find_process_by_pid(pid);
        if (p != NULL)
                retval = sched_setscheduler(p, policy, &lparam);
        rcu_read_unlock();

        return retval;
}
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success, negative errno on failure.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success, negative errno on failure.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	/* policy == -1 means: keep the current policy, change only the prio. */
	return do_sched_setscheduler(pid, -1, param);
}
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: the task's policy, with SCHED_RESET_ON_FORK OR-ed in when the
 * reset-on-fork flag is set; negative errno on failure.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}
/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: 0 on success, negative errno on failure.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * copy_to_user() may fault and sleep, so it is done only after
	 * dropping the RCU read lock.
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
/*
 * Set the CPU affinity of the task identified by @pid to @in_mask,
 * restricted to the task's cpuset. Performs ownership/capability and
 * security checks; retries if it races with a concurrent cpuset update.
 *
 * Returns 0 on success, negative errno on failure.
 */
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Never allow an affinity wider than the task's cpuset. */
	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	return retval;
}
/*
 * Copy a userspace cpu bitmask of @len bytes into @new_mask. A short
 * buffer is accepted (the tail of the mask is zeroed); a long one is
 * truncated to the kernel's cpumask size.
 */
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 *
 * Return: 0 on success, negative errno on failure.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}
/*
 * Read the CPU affinity of the task identified by @pid into @mask,
 * intersected with the currently active CPUs. The mask is sampled
 * under the task's pi_lock for a consistent snapshot.
 *
 * Returns 0 on success, negative errno on failure.
 */
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}
/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 *
 * Return: number of bytes copied to userspace on success (not 0!),
 * negative errno on failure.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	/* The user buffer must cover all possible CPU ids ... */
	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	/* ... and be a whole number of longs. */
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 * release the rq lock by hand (sparse annotation, lockdep
	 * release, raw unlock) instead of a full spin_unlock().
	 */
	__release(rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&rq->lock);
	sched_preempt_enable_no_resched();

	schedule();

	return 0;
}
/*
 * True when a reschedule is pending and we are not already inside a
 * preemption (PREEMPT_ACTIVE) section.
 */
static inline int should_resched(void)
{
	if (!need_resched())
		return 0;

	return !(preempt_count() & PREEMPT_ACTIVE);
}
/*
 * Perform the actual reschedule for cond_resched(): mark ourselves
 * PREEMPT_ACTIVE around __schedule() so the scheduler treats this as
 * an involuntary preemption (the task stays runnable).
 */
static void __cond_resched(void)
{
	add_preempt_count(PREEMPT_ACTIVE);
	__schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}
/*
 * Voluntarily give up the CPU if a reschedule is pending.
 * Returns 1 if we rescheduled, 0 otherwise.
 */
int __sched _cond_resched(void)
{
	if (!should_resched())
		return 0;

	__cond_resched();
	return 1;
}
EXPORT_SYMBOL(_cond_resched);
/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 *
 * Returns 1 if the lock was dropped (and reacquired), 0 otherwise.
 */
int __cond_resched_lock(spinlock_t *lock)
{
	/* Sample once: the decision must match what we do after unlock. */
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			cpu_relax();	/* lock contention only: just breathe */
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
/*
 * Like _cond_resched(), but callable from softirq-disabled context:
 * softirqs are re-enabled around the reschedule.
 * Returns 1 if we rescheduled, 0 otherwise.
 */
int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, its already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Returns true if we indeed boosted the target task.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	/* Use the bool literal, not 0, for a bool variable. */
	bool yielded = false;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	double_rq_lock(rq, p_rq);
	/* @p may have migrated while we took the locks; retry if so. */
	while (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out;

	if (curr->sched_class != p->sched_class)
		goto out;

	/* Can't boost a task that is already running or not runnable. */
	if (task_running(p_rq, p) || p->state)
		goto out;

	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
	if (yielded) {
		schedstat_inc(rq, yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
	} else {
		/*
		 * We might have set it in task_yield_fair(), but are
		 * not going to schedule(), so don't want to skip
		 * the next update.
		 */
		rq->skip_clock_update = 0;
	}

out:
	double_rq_unlock(rq, p_rq);
	local_irq_restore(flags);

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	/* Flush any plugged block requests before we sleep on them. */
	blk_flush_plug(current);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
/*
 * Same as io_schedule(), but sleep at most @timeout jiffies.
 * Returns the remaining jiffies as reported by schedule_timeout().
 */
long __sched io_schedule_timeout(long timeout)
{
	struct rq *rq = raw_rq();
	long ret;

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
	return ret;
}
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 *
 * Return: the priority, or -EINVAL for an unknown policy.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		/* Non-RT classes have no rt_priority range. */
		ret = 0;
		break;
	}
	return ret;
}
/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 *
 * Return: the priority, or -EINVAL for an unknown policy.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		/* Non-RT classes have no rt_priority range. */
		ret = 0;
		/*
		 * Terminate the case like sched_get_priority_max() does;
		 * keeps the switch fallthrough-safe if cases are added.
		 */
		break;
	}
	return ret;
}
/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: 0 on success, negative errno on failure.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct timespec __user *, interval)
{
	struct task_struct *p;
	unsigned int time_slice;
	unsigned long flags;
	struct rq *rq;
	int retval;
	struct timespec t;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* The class callback needs a stable rq; take the task's rq lock. */
	rq = task_rq_lock(p, &flags);
	time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &flags);

	rcu_read_unlock();
	jiffies_to_timespec(time_slice, &t);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
/* One-letter codes for task states, indexed by __ffs(state) + 1. */
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

/*
 * Print a one-line summary of @p (comm, state letter, saved PC, free
 * stack, pids) followed by its kernel stack trace. Used by sysrq-t
 * and the hung-task/sched debug paths.
 */
void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	unsigned state;

	state = p->state ? __ffs(p->state) + 1 : 0;
	printk(KERN_INFO "%-15.15s %c", p->comm,
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running  ");
	else
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
	if (state == TASK_RUNNING)
		printk(KERN_CONT "  running task    ");
	else
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
		task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
		(unsigned long)task_thread_info(p)->flags);

	show_stack(p, NULL);
}
/*
 * Dump a summary line (and stack) for every thread whose state matches
 * @state_filter; a filter of 0 dumps all threads and also all held locks.
 */
void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
#endif
	rcu_read_lock();
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
	} while_each_thread(g, p);

	touch_all_softlockup_watchdogs();

#ifdef CONFIG_SYSRQ_SCHED_DEBUG
	sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}
/* Switch the boot idle task over to the idle scheduling class. */
void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
	idle->sched_class = &idle_sched_class;
}
/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	__sched_fork(idle);
	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();

	/* The idle task is bound to its own CPU. */
	do_set_cpus_allowed(idle, cpumask_of(cpu));
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	task_thread_info(idle)->preempt_count = 0;

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
#if defined(CONFIG_SMP)
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}
#ifdef CONFIG_SMP
/*
 * Low-level affinity update: let the task's scheduling class react
 * first (if it has a hook), then record the new mask and its weight.
 * Callers must hold the appropriate rq/pi locks.
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/* Nothing to do if the mask is unchanged. */
	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	/* Per-CPU kernel threads must not be moved by other tasks. */
	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (p->on_rq) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq_dest, *rq_src;
	bool moved = false;
	int ret = 0;

	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

	/* pi_lock + both rq locks: full exclusion against wakeups. */
	raw_spin_lock(&p->pi_lock);
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (p->on_rq) {
		dequeue_task(rq_src, p, 0);
		set_task_cpu(p, dest_cpu);
		enqueue_task(rq_dest, p, 0);
		check_preempt_curr(rq_dest, p, 0);
		moved = true;
	}
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	raw_spin_unlock(&p->pi_lock);

	/* Notify interested parties (e.g. cpufreq) about the migration. */
	if (moved && task_notify_on_migrate(p)) {
		struct migration_notify_data mnd;

		mnd.src_cpu = src_cpu;
		mnd.dest_cpu = dest_cpu;
		mnd.load = pct_task_load(p);
		atomic_notifier_call_chain(&migration_notifier_head,
					   0, (void *)&mnd);
	}
	return ret;
}
/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}
/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}
/*
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we're in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
 */
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
	rq->stop = NULL;

	for ( ; ; ) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		/* __migrate_task() takes both rq locks itself: drop ours. */
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
	}

	rq->stop = stop;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

/* /proc/sys/kernel/sched_domain directory; children filled at runtime. */
static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

/* Root anchoring sd_ctl_dir under /proc/sys/kernel. */
static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};
/*
 * Allocate a zeroed array of @n ctl_table slots; the zeroed trailing
 * slot serves as the table terminator. Returns NULL on allocation
 * failure.
 */
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	return kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
}
/*
 * Recursively free a ctl_table tree built by the sd_alloc_ctl_*()
 * helpers, including the kstrdup()'d procnames of intermediate
 * directory entries.
 */
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}
/* Fill in one leaf sysctl entry pointing at a sched_domain field. */
static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}
/*
 * Build the sysctl table exposing the tunables of one sched_domain
 * (min/max interval, the various balance indices, flags, name).
 * Returns NULL on allocation failure.
 */
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	/* 12 tunables + 1 zeroed terminator slot. */
	struct ctl_table *table = sd_alloc_ctl_entry(13);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[11], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[12] is terminator */

	return table;
}
/*
 * Build the per-CPU sysctl directory: one "domainN" subdirectory per
 * sched_domain of @cpu. Returns NULL on allocation failure.
 *
 * Use 'struct ctl_table' like the rest of this file rather than the
 * bare 'ctl_table' typedef.
 */
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	/* First pass: count the domains so we can size the table. */
	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
/* Handle returned by register_sysctl_table(); used for unregistering. */
static struct ctl_table_header *sd_sysctl_header;

/*
 * Build and register the whole /proc/sys/kernel/sched_domain tree:
 * one "cpuN" directory per possible CPU.
 */
static void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
/* No sched-domain sysctl support: empty stubs. */
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif
/*
 * Mark @rq online in its root domain and notify every scheduling
 * class that has an rq_online hook. Caller holds rq->lock.
 */
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}
/*
 * Counterpart of set_rq_online(): notify the classes first, then clear
 * the online bit in the root domain. Caller holds rq->lock.
 */
static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}
/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);

	/* Mask off CPU_TASKS_FROZEN so suspend/resume takes the same paths. */
	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		rq->calc_load_update = calc_load_update;
		break;

	case CPU_ONLINE:
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

			set_rq_online(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
		sched_ttwu_pending();
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		migrate_tasks(cpu);
		BUG_ON(rq->nr_running != 1); /* the migration thread */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

	case CPU_DEAD:
		calc_load_migrate(rq);
		break;
#endif
	}

	update_max_interval();

	return NOTIFY_OK;
}
/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
static struct notifier_block __cpuinitdata migration_notifier = {
	.notifier_call = migration_call,
	.priority = CPU_PRI_MIGRATION,
};
/*
 * Re-mark a CPU active if a planned CPU-down was aborted; all other
 * events are passed through untouched.
 */
static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
/*
 * Mark a CPU inactive as soon as its shutdown is being prepared, so no
 * new tasks are placed on it; all other events are passed through.
 */
static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		set_cpu_active((long)hcpu, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
/*
 * Early initcall: run the hotplug callbacks by hand for the boot CPU
 * (it came up before the notifier existed), then register the
 * migration and cpu-active/inactive notifiers.
 */
static int __init migration_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	/* Initialize migration for the boot CPU */
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);

	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

	return 0;
}
early_initcall(migration_init);
#endif
#endif
#ifdef CONFIG_SMP

/* Scratch cpumask for domain code; protected by sched_domains_mutex. */
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_domain_debug_enabled;

/* "sched_debug" boot parameter: enable sched-domain debug output. */
static int __init sched_domain_debug_setup(char *str)
{
	sched_domain_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_domain_debug_setup);
/*
 * Print one level of the sched-domain hierarchy for @cpu and sanity
 * check its groups (span coverage, no repeats, cpu_power set).
 * Returns -1 when this domain doesn't load-balance (stop walking up),
 * 0 otherwise. @groupmask is caller-provided scratch space.
 */
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}

	printk(KERN_CONT "span %s level %s\n", str, sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	/* Walk the circular group list once, validating each group. */
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!group->sgp->power) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

		printk(KERN_CONT " %s", str);
		if (group->sgp->power != SCHED_POWER_SCALE) {
			printk(KERN_CONT " (cpu_power = %d)",
				group->sgp->power);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}
/*
 * Walk @cpu's sched-domain hierarchy bottom-up and print/verify each
 * level. No-op unless the "sched_debug" boot parameter was given.
 */
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_domain_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif /* CONFIG_SCHED_DEBUG */

/*
 * A sched domain is "degenerate" (and can be removed) when it spans a
 * single CPU or has no flag that requires more than one group.
 * Returns 1 when the domain is degenerate, 0 otherwise.
 */
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUPOWER |
			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}
/*
 * Decide whether @parent adds anything over its child @sd: same span
 * and no extra effective flags means the parent can be collapsed.
 * Returns 1 when the parent is redundant, 0 otherwise.
 */
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_SHARE_CPUPOWER |
				SD_SHARE_PKG_RESOURCES);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	/* Parent is redundant unless it has a flag the child lacks. */
	if (~cflags & pflags)
		return 0;

	return 1;
}
/* RCU callback: release a root_domain after the grace period. */
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}
/*
 * Detach @rq from its current root domain (if any) and attach it to
 * @rd, updating refcounts and online/span masks. The old root domain
 * is freed via RCU once its last reference is dropped.
 */
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we dont want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
/*
 * Initialise @rd from scratch.  Returns 0 on success or -ENOMEM; on
 * failure the goto chain unwinds any partially allocated cpumasks.
 */
static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_online;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

/*
 * Set up def_root_domain at boot.  The initial reference is never
 * dropped, so the default root domain is never freed.
 */
static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}
/*
 * Allocate and initialise a fresh root_domain.  Returns NULL when either
 * the allocation or init_rootdomain() fails.
 */
static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd = kmalloc(sizeof(*rd), GFP_KERNEL);

	if (rd && init_rootdomain(rd) != 0) {
		kfree(rd);
		rd = NULL;
	}

	return rd;
}
/*
 * Free a circular list of sched_groups.  When @free_sgp is set, the
 * attached sched_group_power is freed too once its refcount hits zero.
 */
static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *first = sg;

	if (!first)
		return;

	do {
		struct sched_group *next = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = next;
	} while (sg != first);
}
/*
 * RCU callback that frees a sched_domain together with its groups,
 * scheduled from destroy_sched_domain().
 */
static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If its an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		/* Shared group: free only when the last domain lets go. */
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}
/* Defer freeing of @sd (and its groups) until after an RCU grace period. */
static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}
/* Tear down an entire domain hierarchy, from @sd up to the root. */
static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	while (sd) {
		struct sched_domain *parent = sd->parent;

		destroy_sched_domain(sd, cpu);
		sd = parent;
	}
}
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache Domain) for a cpu;
 * this allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain); this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_id);

/*
 * Re-cache @cpu's highest SD_SHARE_PKG_RESOURCES domain and the ID
 * (first cpu of that domain's span) identifying its cache domain.
 */
static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	int id = cpu;

	/* No such domain: the cpu is its own cache domain. */
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_id, cpu) = id;
}
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;
	unsigned long next_balance = rq->next_balance;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			/* Splice the degenerate parent out of the chain. */
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		/* The base level itself is useless: start at its parent. */
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	/* Seed rq->next_balance with the earliest balance time in the tree. */
	for (tmp = sd; tmp; ) {
		unsigned long interval;

		interval = msecs_to_jiffies(tmp->balance_interval);
		if (time_after(next_balance, tmp->last_balance + interval))
			next_balance = tmp->last_balance + interval;
		tmp = tmp->parent;
	}
	rq->next_balance = next_balance;

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	/* The previous hierarchy is freed after an RCU grace period. */
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}
/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/*
 * Parse the "isolcpus=" boot parameter into cpu_isolated_map; these
 * cpus are excluded from the sched domains built by init_sched_domains().
 */
static int __init isolated_cpu_setup(char *str)
{
	/* Early boot: sched_init() skips its own allocation if we ran. */
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);
#ifdef CONFIG_NUMA
/**
 * find_next_best_node - find the next node to include in a sched_domain
 * @node: node whose sched_domain we're building
 * @used_nodes: nodes already in the sched_domain
 *
 * Find the next node to include in a given scheduling domain. Simply
 * finds the closest node not already in the @used_nodes map.
 *
 * Returns the chosen node (also marked in @used_nodes), or -1 when every
 * populated node is already used.
 */
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
	int best_node = -1, min_val = INT_MAX;
	int i;

	for (i = 0; i < nr_node_ids; i++) {
		/* Scan circularly, starting at @node. */
		int n = (node + i) % nr_node_ids;
		int val;

		/* Skip nodes without cpus. */
		if (!nr_cpus_node(n))
			continue;

		/* Skip already used nodes */
		if (node_isset(n, *used_nodes))
			continue;

		/* Simple min distance search */
		val = node_distance(node, n);
		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node != -1)
		node_set(best_node, *used_nodes);

	return best_node;
}
/**
 * sched_domain_node_span - get a cpumask for a node's sched_domain
 * @node: node whose cpumask we're constructing
 * @span: resulting cpumask
 *
 * Given a node, construct a good cpumask for its sched_domain to span. It
 * should be one that prevents unnecessary balancing, but also spreads tasks
 * out optimally.
 */
static void sched_domain_node_span(int node, struct cpumask *span)
{
	nodemask_t used_nodes;
	int i;

	nodes_clear(used_nodes);

	/* Start with @node's own cpus. */
	cpumask_copy(span, cpumask_of_node(node));
	node_set(node, used_nodes);

	/* Grow the span with up to SD_NODES_PER_DOMAIN-1 nearest neighbours. */
	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
		int next_node = find_next_best_node(node, &used_nodes);

		if (next_node < 0)
			break;
		cpumask_or(span, span, cpumask_of_node(next_node));
	}
}
/*
 * Topology mask for the NUMA node level: @cpu's node plus its nearest
 * neighbours.  Reuses the shared sched_domains_tmpmask scratch mask,
 * hence the lockdep assertion.
 */
static const struct cpumask *cpu_node_mask(int cpu)
{
	lockdep_assert_held(&sched_domains_mutex);

	sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);

	return sched_domains_tmpmask;
}
/* Topology mask for the top (all-nodes) level: every possible cpu. */
static const struct cpumask *cpu_allnodes_mask(int cpu)
{
	return cpu_possible_mask;
}
#endif /* CONFIG_NUMA */
/* Topology mask for the per-package/CPU level: all cpus on @cpu's node. */
static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}
/* Per-topology-level percpu storage for domains, groups and group power. */
struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

/* Scratch state threaded through build_sched_domains(). */
struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

/*
 * Allocation progress markers for __free_domain_allocs(): each value
 * frees everything acquired up to and including that stage.
 */
enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

/* Initialise one level's sched_domain for @cpu. */
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
/* The span a level's domain should cover for @cpu. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

/* Level uses private, possibly-overlapping groups (SD_OVERLAP). */
#define SDTL_OVERLAP	0x01

/* One entry in the bottom-up topology table (see default_topology[]). */
struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	struct sd_data      data;
};
/*
 * Build the group list of an SD_OVERLAP domain.  Each group covers the
 * span of one member cpu's child domain, so groups may overlap; they are
 * private to this domain and freed via free_sched_groups().  Returns 0
 * on success, -ENOMEM if a group allocation fails.
 */
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		/* Already represented by an earlier group. */
		if (cpumask_test_cpu(i, covered))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);

		/* The group spans @i's child domain, or just @i itself. */
		child = *per_cpu_ptr(sdd->sd, i);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
		atomic_inc(&sg->sgp->ref);

		/* The group containing @cpu becomes the list head. */
		if (cpumask_test_cpu(cpu, sg_span))
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		/* Keep the list circular after every insertion. */
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
/*
 * Map @cpu to its group number at @sdd's level: the first cpu of its
 * child domain's span (or @cpu itself at the bottom level).  When @sg is
 * non-NULL, also hand back that representative cpu's group with its
 * power structure attached; the ref is set to 1 for claim_allocations().
 */
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
	}

	return cpu;
}
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	/* Only the first cpu of the span builds the shared group list. */
	if (cpu != cpumask_first(sched_domain_span(sd)))
		return 0;

	/* sched_domains_tmpmask is a shared scratch mask. */
	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group = get_group(i, sdd, &sg);
		int j;

		if (cpumask_test_cpu(i, covered))
			continue;

		cpumask_clear(sched_group_cpus(sg));
		sg->sgp->power = 0;

		/* Collect every cpu represented by the same group. */
		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}
/*
 * Initialize sched groups cpu_power.
 *
 * cpu_power indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_power for all the groups in a sched domain will be same unless
 * there are asymmetries in the topology. If there are asymmetries, group
 * having more cpu_power will pickup more load compared to the group having
 * less cpu_power.
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg;

	/*
	 * Validate before touching anything: the old code did
	 * WARN_ON(!sd || !sg) only AFTER reading sd->groups, so a NULL
	 * @sd would oops before the warning could fire.
	 */
	if (WARN_ON(!sd || !sd->groups))
		return;

	sg = sd->groups;

	do {
		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
		sg = sg->next;
	} while (sg != sd->groups);

	/* Only the group's first cpu computes the group power. */
	if (cpu != group_first_cpu(sg))
		return;

	update_group_power(sd, cpu);
	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
}
/*
 * Weak default: no asymmetric SMT packing.  Architectures (e.g. powerpc)
 * may override this to return SD_ASYM_PACKING.
 */
int __weak arch_sd_sibling_asym_packing(void)
{
	return 0*SD_ASYM_PACKING;
}
/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

/* Record the level's name for /proc/sys/kernel/sched_domain when debugging. */
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)		sd->name = #type
#else
# define SD_INIT_NAME(sd, type)		do { } while (0)
#endif

/*
 * Expand to sd_init_<type>(): fetch this level's percpu sched_domain,
 * fill it from the matching SD_<type>_INIT template and link it back to
 * the topology level's sd_data.
 */
#define SD_INIT_FUNC(type)						\
static noinline struct sched_domain *					\
sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
{									\
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
	*sd = SD_##type##_INIT;						\
	SD_INIT_NAME(sd, type);						\
	sd->private = &tl->data;					\
	return sd;							\
}

SD_INIT_FUNC(CPU)
#ifdef CONFIG_NUMA
 SD_INIT_FUNC(ALLNODES)
 SD_INIT_FUNC(NODE)
#endif
#ifdef CONFIG_SCHED_SMT
 SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
 SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
 SD_INIT_FUNC(BOOK)
#endif
static int default_relax_domain_level = -1;
int sched_domain_level_max;
/*
 * Parse the "relax_domain_level=" boot parameter.
 *
 * The old code only accepted values below sched_domain_level_max, but
 * that maximum is computed later, while the sched domains are built (see
 * build_sched_domain()), and is still 0 during early parameter parsing —
 * making the check always false and the parameter a silent no-op.  Just
 * record the requested level; set_domain_attribute() compares it against
 * each domain's level when it is applied.
 */
static int __init setup_relax_domain_level(char *str)
{
	unsigned long val;

	val = simple_strtoul(str, NULL, 0);
	default_relax_domain_level = val;

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
/*
 * Apply the requested relax_domain_level to @sd: idle/newly-idle
 * balancing stays enabled only for domains at or below the requested
 * level.  A NULL @attr (or a negative per-attr level) falls back to the
 * boot-time default; if that is unset too, the domain is left alone.
 */
static void set_domain_attribute(struct sched_domain *sd,
				struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

/*
 * Unwind the allocations of __visit_domain_allocation_hell().  @what is
 * the last stage successfully reached; each case deliberately falls
 * through to release the earlier stages as well.
 */
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		/* Only free the root domain if nothing attached to it. */
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu); /* fall through */
	case sa_sd:
		free_percpu(d->sd); /* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map); /* fall through */
	case sa_none:
		break;
	}
}
/*
 * Allocate everything build_sched_domains() needs, in stages.  Returns
 * the last stage reached; anything short of sa_rootdomain is a failure
 * and must be handed to __free_domain_allocs() for unwinding.
 */
static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}
/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	/* A nonzero ref means the group/power struct is now in use. */
	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
/* Topology mask for the SMT level: hardware threads sharing @cpu's core. */
static const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_thread_cpumask(cpu);
}
#endif
/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
	{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
	{ sd_init_BOOK, cpu_book_mask, },
#endif
	{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
	/* Node-level groups may overlap between cpus, hence SDTL_OVERLAP. */
	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
	{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
	{ NULL, },	/* sentinel: tl->init == NULL terminates iteration */
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;
/*
 * Allocate the percpu sched_domain / sched_group / sched_group_power
 * storage for every topology level and every cpu in @cpu_map.  On
 * failure returns -ENOMEM; partial allocations are left in place for
 * the caller to release via __sdt_free().
 */
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgp = alloc_percpu(struct sched_group_power *);
		if (!sdd->sgp)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_group *sg;
			struct sched_group_power *sgp;

			/* Trailing storage holds the domain's span cpumask. */
			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			/* Start as a self-referencing circular list. */
			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgp = kzalloc_node(sizeof(struct sched_group_power),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgp)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgp, j) = sgp;
		}
	}

	return 0;
}
/*
 * Release everything __sdt_alloc() set up for @cpu_map.  Entries NULLed
 * by claim_allocations() are skipped (kfree(NULL) is a no-op), so only
 * unclaimed storage is freed.  Safe on partially allocated state.
 */
static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				/* Overlap domains own their group lists. */
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgp)
				kfree(*per_cpu_ptr(sdd->sgp, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgp);
		sdd->sgp = NULL;
	}
}
/*
 * Construct one topology level's sched_domain for @cpu, link it above
 * @child, and return it (or @child unchanged if the level declined to
 * produce a domain).  Also tracks the global sched_domain_level_max.
 */
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		struct s_data *d, const struct cpumask *cpu_map,
		struct sched_domain_attr *attr, struct sched_domain *child,
		int cpu)
{
	struct sched_domain *sd = tl->init(tl, cpu);
	if (!sd)
		return child;

	set_domain_attribute(sd, attr);
	/* The domain spans this level's topology mask, clipped to cpu_map. */
	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;
	}
	sd->child = child;

	return sd;
}
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int build_sched_domains(const struct cpumask *cpu_map,
			       struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for cpus specified by the cpu_map. */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for (tl = sched_domain_topology; tl->init; tl++) {
			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
			/* Higher levels can't span more than all of cpu_map. */
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}

		/* Remember the bottom-most level for this cpu. */
		while (sd->child)
			sd = sd->child;

		*per_cpu_ptr(d.sd, i) = sd;
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU power for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_power(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		sd = *per_cpu_ptr(d.sd, i);
		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	ret = 0;
error:
	/* Frees everything not claimed; on success only scaffolding is left. */
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}
static cpumask_var_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
/* attributes of custom domains in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
* cpumask) fails, then fallback to a single sched domain,
* as determined by the single cpumask fallback_doms.
*/
static cpumask_var_t fallback_doms;
/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	/* Weak default: topology never changes. */
	return 0;
}
/*
 * Allocate an array of @ndoms cpumask_var_t's for use as sched-domain
 * partitions.  Returns the array, or NULL on allocation failure (any
 * partially allocated masks are released first).  Free the result with
 * free_sched_domains().
 */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	/*
	 * kcalloc() checks the ndoms * sizeof(*doms) product for overflow,
	 * unlike the open-coded multiply inside a plain kmalloc().
	 */
	doms = kcalloc(ndoms, sizeof(*doms), GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}
/* Release the first @ndoms cpumasks of @doms, then the array itself. */
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i = 0;

	while (i < ndoms)
		free_cpumask_var(doms[i++]);

	kfree(doms);
}
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	/* OOM fallback: use the static single-partition mask. */
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	dattr_cur = NULL;
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}
/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
struct sched_domain_attr *new, int idx_new)
{
struct sched_domain_attr tmp;
/* fast path */
if (!new && !cur)
return 1;
tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr));
}
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		/* A topology change invalidates all matches. */
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	if (doms_new == NULL) {
		/* Caller's alloc failed: force a rebuild of one partition. */
		ndoms_cur = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < ndoms_cur && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* no match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);
	kfree(dattr_cur);	/* kfree(NULL) is safe */
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
			partition_sched_domains(1, NULL, NULL);
			break;
		}

		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */

	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
/*
 * Hotplug-down counterpart of cpuset_cpu_active(): during a suspend
 * sequence (FROZEN), count the cpu and collapse to a single domain
 * instead of disturbing the cpuset configuration.
 */
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
			       void *hcpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		cpuset_update_active_cpus();
		break;
	case CPU_DOWN_PREPARE_FROZEN:
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
/*
 * SMP boot: build the initial sched domains, hook the cpuset hotplug
 * notifiers, and move init off any isolated cpus.
 */
void __init sched_init_smp(void)
{
	cpumask_var_t non_isolated_cpus;

	/*
	 * NOTE(review): these allocation results are unchecked; with
	 * CONFIG_CPUMASK_OFFSTACK an OOM here would fault below — confirm
	 * this early-boot path is considered infallible.
	 */
	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	mutex_lock(&sched_domains_mutex);
	init_sched_domains(cpu_active_mask);
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
	mutex_unlock(&sched_domains_mutex);

	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);

	init_hrtick();

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	sched_init_granularity();
	free_cpumask_var(non_isolated_cpus);

	init_sched_rt_class();
}
#else
/* UP build: no domains to construct, just tune the granularity knobs. */
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */
/*
 * cpufreq policy notifier: mirror the policy's min/max frequency into
 * every related cpu's runqueue and track the largest max frequency seen
 * system-wide (max_possible_freq).
 */
static int cpufreq_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
	int i;

	/* Only interested in the final-notify stage of a policy update. */
	if (val != CPUFREQ_NOTIFY)
		return 0;

	for_each_cpu(i, policy->related_cpus) {
		cpu_rq(i)->min_freq = policy->min;
		cpu_rq(i)->max_freq = policy->max;
	}

	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);

	return 0;
}
/*
 * cpufreq transition notifier: record the cpu's new running frequency
 * in its runqueue once the frequency change has completed.
 */
static int cpufreq_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
	unsigned int cpu = freq->cpu, new_freq = freq->new;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	BUG_ON(!new_freq);
	cpu_rq(cpu)->cur_freq = new_freq;

	return 0;
}
/* Registered for CPUFREQ_POLICY_NOTIFIER in register_sched_callback(). */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
};

/* Registered for CPUFREQ_TRANSITION_NOTIFIER in register_sched_callback(). */
static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_notifier_trans
};
static int register_sched_callback(void)
{
int ret;
ret = cpufreq_register_notifier(¬ifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
if (!ret)
ret = cpufreq_register_notifier(¬ifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
return 0;
}
core_initcall(register_sched_callback);
/* Sysctl knob: allow timers to migrate to busy cpus (default on). */
const_debug unsigned int sysctl_timer_migration = 1;

/*
 * Return true if @addr lies within the scheduler's text section or the
 * lock functions.
 */
int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}
#ifdef CONFIG_CGROUP_SCHED
/* Default task group: tasks not in any other cgroup belong here. */
struct task_group root_task_group;
/* List of all task groups in the system. */
LIST_HEAD(task_groups);
#endif

/* Per-cpu scratch cpumask for the load balancer (allocated in sched_init). */
DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
/*
 * Main scheduler boot-time initialisation: carve out group-scheduling
 * arrays, set up bandwidth controls and the default root domain, then
 * initialise every cpu's runqueue and turn the boot thread into the
 * boot cpu's idle task.
 */
void __init sched_init(void)
{
	int i, j;
	unsigned long alloc_size = 0, ptr;

	/* One combined allocation for all group-sched percpu pointer arrays. */
#ifdef CONFIG_FAIR_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	alloc_size += num_possible_cpus() * cpumask_size();
#endif
	if (alloc_size) {
		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
			per_cpu(load_balance_mask, i) = (void *)ptr;
			ptr += cpumask_size();
		}
#endif /* CONFIG_CPUMASK_OFFSTACK */
	}

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT
	root_cpuacct.cpustat = &kernel_cpustat;
	root_cpuacct.cpuusage = alloc_percpu(u64);
	/* Too early, not expected to fail */
	BUG_ON(!root_cpuacct.cpuusage);
#endif
	/* Initialise every possible cpu's runqueue. */
	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much cpu bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed thr' the cgroup filesystem, it
		 * gets 100% of the cpu resources in the system. This overall
		 * system cpu resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
		 */
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif

		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;

		rq->last_load_update_tick = jiffies;

#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_power = SCHED_POWER_SCALE;
		rq->post_schedule = 0;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->cur_freq = 0;
		rq->max_freq = 0;
		rq->min_freq = 0;
		rq->cumulative_runnable_avg = 0;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ
		rq->nohz_flags = 0;
#endif
#endif
		init_rq_hrtick(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

	set_load_weight(&init_task);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&init_task.pi_waiters);
#endif

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;

#ifdef CONFIG_SMP
	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
	/* May be allocated at isolcpus cmdline parse time */
	if (cpu_isolated_map == NULL)
		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
	idle_thread_set_boot_cpu();
#endif
	init_sched_fair_class();

	scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
 * Check whether the current preemption depth — ignoring the PREEMPT_ACTIVE
 * flag and counting RCU read-side critical sections as preempt-disabled
 * regions — matches the offset the caller considers legitimate.
 */
static inline int preempt_count_equals(int preempt_offset)
{
	int depth = preempt_count() & ~PREEMPT_ACTIVE;

	depth += rcu_preempt_depth();

	return depth == preempt_offset;
}
/*
 * Set once early initcalls have run; before that, __might_sleep() stays
 * quiet while system_state == SYSTEM_BOOTING (see __might_sleep() below).
 */
static int __might_sleep_init_called;
int __init __might_sleep_init(void)
{
	__might_sleep_init_called = 1;
	return 0;
}
early_initcall(__might_sleep_init);
/*
 * __might_sleep - report a sleep attempt from invalid (atomic) context
 * @file: source file of the caller
 * @line: line number of the caller
 * @preempt_offset: preempt depth the caller considers legitimate
 *
 * Prints a rate-limited (one per second) diagnostic with held locks and a
 * stack dump.  Stays silent while oopsing, and during early boot before
 * the early initcalls have run.
 */
void __might_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;	/* ratelimiting */

	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
	    oops_in_progress)
		return;
	if (system_state != SYSTEM_RUNNING &&
	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
		return;
	/* At most one report per second */
	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
		file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
		in_atomic(), irqs_disabled(),
		current->pid, current->comm);

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
	dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
/*
 * Drop @p back to SCHED_NORMAL.  The task is dequeued first (if queued) so
 * __setscheduler() operates on a settled state, then requeued, and the
 * current task on @rq is asked to reschedule so the change takes effect.
 * Called with @rq's lock and p->pi_lock held (see normalize_rt_tasks()).
 */
static void normalize_task(struct rq *rq, struct task_struct *p)
{
	const struct sched_class *prev_class = p->sched_class;
	int old_prio = p->prio;
	int on_rq;

	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);
	__setscheduler(rq, p, SCHED_NORMAL, 0);
	if (on_rq) {
		enqueue_task(rq, p, 0);
		resched_task(rq->curr);
	}

	check_class_changed(rq, p, prev_class, old_prio);
}
/*
 * Reset every user task back to SCHED_NORMAL (and nice 0).  Invoked from
 * the magic-SysRq handler as an emergency un-wedge when runaway RT tasks
 * starve the machine.  Kernel threads (p->mm == NULL) are left alone.
 */
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

		p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
		p->se.statistics.wait_start = 0;
		p->se.statistics.sleep_start = 0;
		p->se.statistics.block_start = 0;
#endif

		if (!rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0.  The !p->mm check above already
			 * skipped kernel threads, so no need to re-test
			 * p->mm here.
			 */
			if (TASK_NICE(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		raw_spin_lock(&p->pi_lock);
		rq = __task_rq_lock(p);

		normalize_task(rq, p);

		__task_rq_unlock(rq);
		raw_spin_unlock(&p->pi_lock);
	} while_each_thread(g, p);
	read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
* These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
* activity can take place. Using them for anything else would
* be a serious bug, and as a result, they aren't even visible
* under any other configuration.
*/
/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * Returns the task currently running on @cpu's runqueue.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPU's synchronized, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

/*
 * Release all per-class state of @tg and the group itself.  Safe to call
 * on a partially-constructed group (see the error path in
 * sched_create_group()).
 */
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kfree(tg);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
struct task_group *tg;
unsigned long flags;
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return ERR_PTR(-ENOMEM);
if (!alloc_fair_sched_group(tg, parent))
goto err;
if (!alloc_rt_sched_group(tg, parent))
goto err;
spin_lock_irqsave(&task_group_lock, flags);
list_add_rcu(&tg->list, &task_groups);
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
INIT_LIST_HEAD(&tg->children);
list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
err:
free_sched_group(tg);
return ERR_PTR(-ENOMEM);
}
/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
free_sched_group(container_of(rhp, struct task_group, rcu));
}
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
unsigned long flags;
int i;
/* end participation in shares distribution */
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs complete */
call_rcu(&tg->rcu, free_sched_group_rcu);
}
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
* reflect its new group.
*/
/*
 * Change task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 *
 * The task is dequeued (and, if running, put as prev) before the group
 * switch, then restored afterwards, so the per-class bookkeeping always
 * refers to a consistent group.
 */
void sched_move_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	running = task_current(rq, tsk);
	on_rq = tsk->on_rq;

	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);

	/* Look up the destination group from the cgroup subsystem state. */
	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
				lockdep_is_held(&tsk->sighand->siglock)),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk, on_rq);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
/*
 * Express @runtime as a fraction of @period in 20-bit fixed point
 * (1 << 20 == 100%).  An infinite runtime maps to exactly 100%.
 */
static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime != RUNTIME_INF)
		return div64_u64(runtime << 20, period);

	return 1ULL << 20;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/*
 * Return 1 if any thread in the system is an RT task belonging to @tg.
 * Must be called with tasklist_lock held.
 */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && task_rq(p)->rt.tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}
/*
 * Proposed rt_period/rt_runtime for @tg, carried through walk_tg_tree()
 * so each visited group is validated against the pending change.
 */
struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

/*
 * Validate one group during the tree walk: for @d->tg the proposed values
 * are used in place of the current ones.  Returns 0 if schedulable,
 * -EINVAL/-EBUSY otherwise.
 */
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}
/*
 * Walk the whole task-group tree checking that @tg could switch to
 * @period/@runtime without violating any group's RT constraints.
 * Returns 0 when the change is admissible.
 */
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
/*
 * Apply a new rt_period/rt_runtime (ns) to @tg after validating the
 * whole hierarchy, then propagate the runtime to every cpu's rt_rq.
 * Returns 0 on success or a negative errno from the feasibility check.
 */
static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	/* Push the new runtime down to each per-cpu rt runqueue. */
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}
/*
 * Set @tg's RT runtime from a microsecond value; any negative value
 * means "unlimited" (RUNTIME_INF).
 */
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

/* Report @tg's RT runtime in microseconds, or -1 for unlimited. */
long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

/* Set @tg's RT period from a microsecond value; zero is rejected. */
int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

/* Report @tg's RT period in microseconds. */
long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
/*
 * Validate the global sysctl RT settings against the whole group tree.
 * Returns 0 when the current sysctl_sched_rt_{period,runtime} values are
 * admissible.
 */
static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

/*
 * Return 0 (refuse) when @tsk is an RT task and @tg has no RT runtime,
 * 1 otherwise.
 */
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
/*
 * !CONFIG_RT_GROUP_SCHED variant: no group tree to validate, so just
 * sanity-check the sysctls and push the global runtime to every cpu.
 */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There's always some RT tasks in the root group
	 * -- migration, kstopmachine etc..
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
/*
 * sysctl handler for sched_rt_period_us / sched_rt_runtime_us.  On write,
 * the new values are validated; on failure the old values are restored,
 * on success the default RT bandwidth is updated to match.
 */
int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	/* Snapshot so a rejected write can be rolled back */
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}
#ifdef CONFIG_CGROUP_SCHED
/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}
/*
 * cgroup "create" callback: allocate a task group under the parent
 * cgroup's group.  The top cgroup maps to the statically-allocated
 * root_task_group.
 */
static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

/* cgroup "destroy" callback: tear down the associated task group. */
static void cpu_cgroup_destroy(struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}
/*
 * Permission check for moving tasks into this cgroup: allowed for the
 * task itself, for CAP_SYS_NICE holders, or when the caller's euid
 * matches the target's uid or suid.  Returns 0 or -EACCES.
 */
static int
cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
		tcred = __task_cred(task);

		if ((current != task) && !capable(CAP_SYS_NICE) &&
		    cred->euid != tcred->uid && cred->euid != tcred->suid)
			return -EACCES;
	}

	return 0;
}
/*
 * Check every task in @tset for admissibility into @cgrp's group: with
 * RT group scheduling the group must have RT runtime for RT tasks;
 * without it, only fair-class tasks may be grouped.
 */
static int cpu_cgroup_can_attach(struct cgroup *cgrp,
				 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
			return -EINVAL;
#else
		/* We don't support RT-tasks being in separate groups */
		if (task->sched_class != &fair_sched_class)
			return -EINVAL;
#endif
	}
	return 0;
}

/* Move every task in @tset onto its new group's runqueues. */
static void cpu_cgroup_attach(struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		sched_move_task(task);
}
/*
 * cgroup "exit" callback: re-home the exiting task's scheduler state
 * into @old_cgrp's group.
 */
static void
cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
		struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't ran yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	sched_move_task(task);
}
/* cgroup file read: current notify_on_migrate flag (0 or 1). */
static u64 cpu_notify_on_migrate_read_u64(struct cgroup *cgrp,
					  struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return tg->notify_on_migrate;
}

/* cgroup file write: any non-zero value enables the flag. */
static int cpu_notify_on_migrate_write_u64(struct cgroup *cgrp,
					   struct cftype *cft, u64 notify)
{
	struct task_group *tg = cgroup_tg(cgrp);

	tg->notify_on_migrate = (notify > 0);

	return 0;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/* cgroup "cpu.shares" write: set the group's CFS weight. */
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}

/* cgroup "cpu.shares" read: report the group's CFS weight. */
static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) scale_load_down(tg->shares);
}
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

/*
 * Apply a CFS bandwidth limit of @quota per @period (both ns) to @tg,
 * after validating the hierarchy, then propagate runtime state to every
 * cpu's cfs_rq.  The root group cannot be limited.  Returns 0 or a
 * negative errno.
 */
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at some amount of bandwidth every period. This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the otherside by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;

	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled && cfs_b->timer_active) {
		/* force a reprogram */
		cfs_b->timer_active = 0;
		__start_cfs_bandwidth(cfs_b);
	}
	raw_spin_unlock_irq(&cfs_b->lock);

	/* Push the new enable state down to each per-cpu cfs_rq. */
	for_each_possible_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);

	return ret;
}
/*
 * Set @tg's CFS quota from a microsecond value; negative values mean
 * "unlimited" (RUNTIME_INF).  The period is left unchanged.
 */
int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

/* Report @tg's CFS quota in microseconds, or -1 for unlimited. */
long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

/* Set @tg's CFS period from a microsecond value; quota is unchanged. */
int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

/* Report @tg's CFS period in microseconds. */
long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}
/* cgroup "cpu.cfs_quota_us" read/write wrappers. */
static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
{
	return tg_get_cfs_quota(cgroup_tg(cgrp));
}

static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
				   s64 cfs_quota_us)
{
	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
}

/* cgroup "cpu.cfs_period_us" read/write wrappers. */
static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	return tg_get_cfs_period(cgroup_tg(cgrp));
}

static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				    u64 cfs_period_us)
{
	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}
/* Proposed period/quota for @tg, passed through walk_tg_tree(). */
struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}
/*
 * Tree-walk callback: verify each group's (normalized) quota does not
 * exceed its parent's, inheriting the parent's quota where none is set.
 * Stores the effective quota in cfs_b->hierarchal_quota (sic — field is
 * spelled that way in struct cfs_bandwidth) for the children's checks.
 */
static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchal_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchal_quota = quota;

	return 0;
}
/*
 * Validate the proposed @period/@quota (ns) for @tg against the entire
 * group hierarchy.  Values are converted to usecs first (unless quota is
 * infinite) to match normalize_cfs_quota().  Returns 0 when admissible.
 */
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
/* cgroup "cpu.stat" read: emit CFS bandwidth throttling counters. */
static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
		struct cgroup_map_cb *cb)
{
	struct task_group *tg = cgroup_tg(cgrp);
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
	cb->fill(cb, "throttled_time", cfs_b->throttled_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
/* cgroup "cpu.rt_runtime_us" read/write wrappers. */
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

/* cgroup "cpu.rt_period_us" read/write wrappers. */
static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
				    u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */
/* Control files exposed by the "cpu" cgroup subsystem. */
static struct cftype cpu_files[] = {
	{
		.name = "notify_on_migrate",
		.read_u64 = cpu_notify_on_migrate_read_u64,
		.write_u64 = cpu_notify_on_migrate_write_u64,
	},
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.read_map = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};
/* Register the cpu_files above when a "cpu" cgroup is populated. */
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

/* The "cpu" cgroup subsystem: group scheduling control. */
struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.create		= cpu_cgroup_create,
	.destroy	= cpu_cgroup_destroy,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.allow_attach	= cpu_cgroup_allow_attach,
	.exit		= cpu_cgroup_exit,
	.populate	= cpu_cgroup_populate,
	.subsys_id	= cpu_cgroup_subsys_id,
	.early_init	= 1,
};
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT
/*
* CPU accounting code for task groups.
*
* Based on the work by Paul Menage (menage@google.com) and Balbir Singh
* (balbir@in.ibm.com).
*/
/*
 * create a new cpu accounting group: allocates the group plus its per-cpu
 * usage and cpustat arrays, unwinding partial allocations via gotos on
 * failure.  The top cgroup maps to the static root_cpuacct.
 */
static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
{
	struct cpuacct *ca;

	if (!cgrp->parent)
		return &root_cpuacct.css;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	ca->cpustat = alloc_percpu(struct kernel_cpustat);
	if (!ca->cpustat)
		goto out_free_cpuusage;

	return &ca->css;

out_free_cpuusage:
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_destroy(struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);

	free_percpu(ca->cpustat);
	free_percpu(ca->cpuusage);
	kfree(ca);
}
/* Read one cpu's usage counter (torn-read-safe on 32-bit). */
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

/* Write one cpu's usage counter (torn-write-safe on 32-bit). */
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

/*
 * cgroup "usage" write: only the value 0 is accepted, and it resets
 * every cpu's usage counter; any other value yields -EINVAL.
 */
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}
/* cgroup "usage_percpu" read: one space-separated counter per cpu. */
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

/* Labels for the two aggregate buckets reported by "cpuacct.stat". */
static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};
/*
 * cgroup "cpuacct.stat" read: sum per-cpu cpustat buckets into "user"
 * (user + nice) and "system" (system + irq + softirq), converted from
 * cputime to clock ticks.
 */
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int cpu;
	s64 val = 0;

	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_USER];
		val += kcpustat->cpustat[CPUTIME_NICE];
	}
	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

	val = 0;
	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_SYSTEM];
		val += kcpustat->cpustat[CPUTIME_IRQ];
		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
	}

	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

	return 0;
}
/* Control files exposed by the cpuacct subsystem. */
static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

/* Register the files above when a cpuacct cgroup is populated. */
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}
/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.  Walks from the task's group up to the root,
 * adding @cputime to each ancestor's per-cpu usage counter.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = parent_ca(ca)) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}

/* The "cpuacct" cgroup subsystem: per-group CPU time accounting. */
struct cgroup_subsys cpuacct_subsys = {
	.name		= "cpuacct",
	.create		= cpuacct_create,
	.destroy	= cpuacct_destroy,
	.populate	= cpuacct_populate,
	.subsys_id	= cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */
| gpl-2.0 |
MeditProfi/mplayer | libmpcodecs/img_format.c | 6 | 9646 | /*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include "img_format.h"
#include "stdio.h"
#include "libavutil/bswap.h"
/*
 * Return a human-readable name for an IMGFMT_* pixel format code.
 *
 * For unknown codes a description is formatted into a static buffer, so
 * the function is not reentrant/thread-safe in that case and the returned
 * pointer is only valid until the next call with an unknown format.
 */
const char *vo_format_name(int format)
{
    static char unknown_format[20];
    switch(format)
    {
	case IMGFMT_RGB1: return "RGB 1-bit";
	case IMGFMT_RGB4: return "RGB 4-bit";
	case IMGFMT_RG4B: return "RGB 4-bit per byte";
	case IMGFMT_RGB8: return "RGB 8-bit";
	case IMGFMT_RGB12: return "RGB 12-bit";
	case IMGFMT_RGB15: return "RGB 15-bit";
	case IMGFMT_RGB16: return "RGB 16-bit";
	case IMGFMT_RGB24: return "RGB 24-bit";
//	case IMGFMT_RGB32: return "RGB 32-bit";
	case IMGFMT_RGB48LE: return "RGB 48-bit LE";
	case IMGFMT_RGB48BE: return "RGB 48-bit BE";
	case IMGFMT_RGB64LE: return "RGB 64-bit LE";
	case IMGFMT_RGB64BE: return "RGB 64-bit BE";
	case IMGFMT_BGR1: return "BGR 1-bit";
	case IMGFMT_BGR4: return "BGR 4-bit";
	case IMGFMT_BG4B: return "BGR 4-bit per byte";
	case IMGFMT_BGR8: return "BGR 8-bit";
	case IMGFMT_BGR12: return "BGR 12-bit";
	case IMGFMT_BGR15: return "BGR 15-bit";
	case IMGFMT_BGR16: return "BGR 16-bit";
	case IMGFMT_BGR24: return "BGR 24-bit";
//	case IMGFMT_BGR32: return "BGR 32-bit";
	case IMGFMT_BGR48LE: return "BGR 48-bit LE";
	case IMGFMT_BGR48BE: return "BGR 48-bit BE";
	case IMGFMT_ABGR: return "ABGR";
	case IMGFMT_BGRA: return "BGRA";
	case IMGFMT_ARGB: return "ARGB";
	case IMGFMT_RGBA: return "RGBA";
	case IMGFMT_XYZ12LE: return "XYZ 36-bit LE";
	case IMGFMT_XYZ12BE: return "XYZ 36-bit BE";
	case IMGFMT_GBR24P: return "Planar GBR 24-bit";
	case IMGFMT_GBR12P: return "Planar GBR 36-bit";
	case IMGFMT_GBR14P: return "Planar GBR 42-bit";
	case IMGFMT_YVU9: return "Planar YVU9";
	case IMGFMT_IF09: return "Planar IF09";
	case IMGFMT_YV12: return "Planar YV12";
	case IMGFMT_I420: return "Planar I420";
	case IMGFMT_IYUV: return "Planar IYUV";
	case IMGFMT_CLPL: return "Planar CLPL";
	case IMGFMT_Y800: return "Planar Y800";
	case IMGFMT_Y8: return "Planar Y8";
	case IMGFMT_Y8A: return "Planar Y8 with alpha";
	case IMGFMT_Y16_LE: return "Planar Y16 little-endian";
	case IMGFMT_Y16_BE: return "Planar Y16 big-endian";
	case IMGFMT_420P16_LE: return "Planar 420P 16-bit little-endian";
	case IMGFMT_420P16_BE: return "Planar 420P 16-bit big-endian";
	case IMGFMT_420P14_LE: return "Planar 420P 14-bit little-endian";
	case IMGFMT_420P14_BE: return "Planar 420P 14-bit big-endian";
	case IMGFMT_420P12_LE: return "Planar 420P 12-bit little-endian";
	case IMGFMT_420P12_BE: return "Planar 420P 12-bit big-endian";
	case IMGFMT_420P10_LE: return "Planar 420P 10-bit little-endian";
	case IMGFMT_420P10_BE: return "Planar 420P 10-bit big-endian";
	case IMGFMT_420P9_LE: return "Planar 420P 9-bit little-endian";
	case IMGFMT_420P9_BE: return "Planar 420P 9-bit big-endian";
	case IMGFMT_422P16_LE: return "Planar 422P 16-bit little-endian";
	case IMGFMT_422P16_BE: return "Planar 422P 16-bit big-endian";
	case IMGFMT_422P14_LE: return "Planar 422P 14-bit little-endian";
	case IMGFMT_422P14_BE: return "Planar 422P 14-bit big-endian";
	case IMGFMT_422P12_LE: return "Planar 422P 12-bit little-endian";
	case IMGFMT_422P12_BE: return "Planar 422P 12-bit big-endian";
	case IMGFMT_422P10_LE: return "Planar 422P 10-bit little-endian";
	case IMGFMT_422P10_BE: return "Planar 422P 10-bit big-endian";
	case IMGFMT_422P9_LE: return "Planar 422P 9-bit little-endian";
	case IMGFMT_422P9_BE: return "Planar 422P 9-bit big-endian";
	case IMGFMT_444P16_LE: return "Planar 444P 16-bit little-endian";
	case IMGFMT_444P16_BE: return "Planar 444P 16-bit big-endian";
	case IMGFMT_444P14_LE: return "Planar 444P 14-bit little-endian";
	case IMGFMT_444P14_BE: return "Planar 444P 14-bit big-endian";
	case IMGFMT_444P12_LE: return "Planar 444P 12-bit little-endian";
	case IMGFMT_444P12_BE: return "Planar 444P 12-bit big-endian";
	case IMGFMT_444P10_LE: return "Planar 444P 10-bit little-endian";
	case IMGFMT_444P10_BE: return "Planar 444P 10-bit big-endian";
	case IMGFMT_444P9_LE: return "Planar 444P 9-bit little-endian";
	case IMGFMT_444P9_BE: return "Planar 444P 9-bit big-endian";
	case IMGFMT_420A: return "Planar 420P with alpha";
	case IMGFMT_444P: return "Planar 444P";
	case IMGFMT_444A: return "Planar 444P with alpha";
	case IMGFMT_422P: return "Planar 422P";
	case IMGFMT_422A: return "Planar 422P with alpha";
	case IMGFMT_411P: return "Planar 411P";
	case IMGFMT_440P: return "Planar 440P";
	case IMGFMT_NV12: return "Planar NV12";
	case IMGFMT_NV21: return "Planar NV21";
	case IMGFMT_HM12: return "Planar NV12 Macroblock";
	case IMGFMT_IUYV: return "Packed IUYV";
	case IMGFMT_IY41: return "Packed IY41";
	case IMGFMT_IYU1: return "Packed IYU1";
	case IMGFMT_IYU2: return "Packed IYU2";
	case IMGFMT_UYVY: return "Packed UYVY";
	case IMGFMT_UYNV: return "Packed UYNV";
	case IMGFMT_cyuv: return "Packed CYUV";
	case IMGFMT_Y422: return "Packed Y422";
	case IMGFMT_YUY2: return "Packed YUY2";
	case IMGFMT_YUNV: return "Packed YUNV";
	case IMGFMT_YVYU: return "Packed YVYU";
	case IMGFMT_Y41P: return "Packed Y41P";
	case IMGFMT_Y211: return "Packed Y211";
	case IMGFMT_Y41T: return "Packed Y41T";
	case IMGFMT_Y42T: return "Packed Y42T";
	case IMGFMT_V422: return "Packed V422";
	case IMGFMT_V655: return "Packed V655";
	case IMGFMT_CLJR: return "Packed CLJR";
	case IMGFMT_YUVP: return "Packed YUVP";
	case IMGFMT_UYVP: return "Packed UYVP";
	case IMGFMT_MPEGPES: return "Mpeg PES";
	case IMGFMT_ZRMJPEGNI: return "Zoran MJPEG non-interlaced";
	case IMGFMT_ZRMJPEGIT: return "Zoran MJPEG top field first";
	case IMGFMT_ZRMJPEGIB: return "Zoran MJPEG bottom field first";
	case IMGFMT_XVMC_MOCO_MPEG2: return "MPEG1/2 Motion Compensation";
	case IMGFMT_XVMC_IDCT_MPEG2: return "MPEG1/2 Motion Compensation and IDCT";
	case IMGFMT_VDPAU_MPEG1: return "MPEG1 VDPAU acceleration";
	case IMGFMT_VDPAU_MPEG2: return "MPEG2 VDPAU acceleration";
	case IMGFMT_VDPAU_H264: return "H.264 VDPAU acceleration";
	case IMGFMT_VDPAU_MPEG4: return "MPEG-4 Part 2 VDPAU acceleration";
	case IMGFMT_VDPAU_WMV3: return "WMV3 VDPAU acceleration";
	case IMGFMT_VDPAU_VC1: return "VC1 VDPAU acceleration";
	case IMGFMT_VDPAU_HEVC: return "HEVC VDPAU acceleration";
    }
    /* use sizeof rather than repeating the buffer size as a magic number */
    snprintf(unknown_format, sizeof(unknown_format), "Unknown 0x%04x", format);
    return unknown_format;
}
int mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits)
{
    /* Report the chroma subsampling shifts and per-component bit depth of
     * a planar YUV format, and return its average bits per pixel
     * (0 when the format is unknown or not a supported planar layout). */
    int shift_x = 0;
    int shift_y = 0;
    int depth = 8;       /* bits per component */
    int unsupported = 0;
    int avg_bpp;

    /* Normalize byte-swapped ("P..4") planar fourccs to the
     * ("4..P") byte order handled below. */
    if ((format & 0xff0000f0) == 0x34000050)
        format = av_bswap32(format);

    if ((format & 0xf00000ff) == 0x50000034) {
        /* Generic planar-YUV fourcc: the high byte selects the depth... */
        switch (format >> 24) {
        case 0x50:
            break;
        case 0x51:
            depth = 16;
            break;
        case 0x52:
            depth = 10;
            break;
        case 0x53:
            depth = 9;
            break;
        default:
            unsupported = 1;
            break;
        }
        /* ...and the low three bytes encode the subsampling pattern. */
        switch (format & 0x00ffffff) {
        case 0x00343434: // 444
            break;
        case 0x00323234: // 422
            shift_x = 1;
            break;
        case 0x00303234: // 420
            shift_x = 1;
            shift_y = 1;
            break;
        case 0x00313134: // 411
            shift_x = 2;
            break;
        case 0x00303434: // 440
            shift_y = 1;
            break;
        default:
            unsupported = 1;
            break;
        }
    } else {
        switch (format) {
        case IMGFMT_444A:
            break;
        case IMGFMT_422A:
            shift_x = 1;
            break;
        case IMGFMT_420A:
        case IMGFMT_I420:
        case IMGFMT_IYUV:
        case IMGFMT_YV12:
            shift_x = 1;
            shift_y = 1;
            break;
        case IMGFMT_IF09:
        case IMGFMT_YVU9:
            shift_x = 2;
            shift_y = 2;
            break;
        case IMGFMT_Y8:
        case IMGFMT_Y800:
            /* Luma-only: a shift of 31 makes the chroma share vanish in
             * the bpp computation below. */
            shift_x = 31;
            shift_y = 31;
            break;
        case IMGFMT_NV12:
        case IMGFMT_NV21:
            shift_x = 1;
            shift_y = 1;
            // TODO: allowing this though currently breaks
            // things all over the place.
            unsupported = 1;
            break;
        default:
            unsupported = 1;
            break;
        }
    }

    if (x_shift)
        *x_shift = shift_x;
    if (y_shift)
        *y_shift = shift_y;
    if (component_bits)
        *component_bits = depth;

    /* 8 bits of luma plus the (subsampled) chroma contribution. */
    avg_bpp = 8 + ((16 >> shift_x) >> shift_y);
    if (format == IMGFMT_420A || format == IMGFMT_422A || format == IMGFMT_444A)
        avg_bpp += 8; /* extra full-resolution alpha plane */
    avg_bpp *= (depth + 7) >> 3; /* scale by bytes per component */
    return unsupported ? 0 : avg_bpp;
}
| gpl-2.0 |
chenleo/gromacs453pf | src/gmxlib/nonbonded/nb_kernel_bluegene/nb_kernel113_bluegene.c | 6 | 2199 | /*
* Copyright (c) 2006, International Business Machines (IBM) Inc.
*
* Author: Mathias Puetz
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of IBM nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY IBM AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include<config.h>
#endif
#ifdef DEBUG
#include <stdio.h>
#endif
#include <math.h>
#define COULOMB_NONE 0
#define COULOMB_CUTOFF 1
#define REACTION_FIELD 2
#define COULOMB_TAB 3
#define GENERALIZED_BORN 4
#define VDW_NONE 0
#define LENNARD_JONES 1
#define BUCKINGHAM 2
#define VDW_TAB 3
#define COULOMB COULOMB_CUTOFF
#define VDW LENNARD_JONES
#include "interaction.h"
#undef NO_FORCE
#undef NB_KERNEL
#define NB_KERNEL nb_kernel113_bluegene
#include "nb_kernel_w4_bluegene.h"
#define NO_FORCE 1
#undef NB_KERNEL
#define NB_KERNEL nb_kernel113nf_bluegene
#include "nb_kernel_w4_bluegene.h"
| gpl-2.0 |
sameed7/sesc-drowsy | src/libpower/wattch/wattch_time.c | 6 | 36708 |
/*------------------------------------------------------------
* Copyright 1994 Digital Equipment Corporation and Steve Wilton
* All Rights Reserved
*
* Permission to use, copy, and modify this software and its documentation is
* hereby granted only under the following terms and conditions. Both the
* above copyright notice and this permission notice must appear in all copies
* of the software, derivative works or modified versions, and any portions
* thereof, and both notices must appear in supporting documentation.
*
* Users of this software agree to the terms and conditions set forth herein,
* and hereby grant back to Digital a non-exclusive, unrestricted, royalty-
* free right and license under any changes, enhancements or extensions
* made to the core functions of the software, including but not limited to
* those affording compatibility with other hardware or software
* environments, but excluding applications which incorporate this software.
* Users further agree to use their best efforts to return to Digital any
* such changes, enhancements or extensions that they make and inform Digital
* of noteworthy uses of this software. Correspondence should be provided
* to Digital at:
*
* Director of Licensing
* Western Research Laboratory
* Digital Equipment Corporation
* 100 Hamilton Avenue
* Palo Alto, California 94301
*
* This software may be distributed (but not offered for sale or transferred
* for compensation) to third parties, provided such third parties agree to
* abide by the terms and conditions of this notice.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DIGITAL EQUIPMENT
* CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*------------------------------------------------------------*/
#include <math.h>
#include <stdio.h>
#include "def.h"
/*----------------------------------------------------------------------*/
double logtwo(x)
double x;
{
if (x<=0) printf("%e\n",x);
return( (double) (log(x)/log(2.0)) );
}
/*----------------------------------------------------------------------*/
/* Gate capacitance in Farads for a device of `width` um (channel length
 * Leff) with `wirelength` lambda of poly wire routed to the gate. */
double gatecap(double width, double wirelength)
{
    return width * Leff * Cgate + wirelength * Cpolywire * Leff;
}
/* Same as gatecap() but for a pass transistor (uses Cgatepass).
 * Returns the gate capacitance in Farads. */
double gatecappass(double width, double wirelength)
{
    return width * Leff * Cgatepass + wirelength * Cpolywire * Leff;
}
/*----------------------------------------------------------------------*/
/* Routine for calculating drain capacitances. The draincap routine
* folds transistors larger than 10um */
double draincap(width,nchannel,stack) /* returns drain cap in Farads */
double width; /* um */
int nchannel; /* whether n or p-channel (boolean) */
int stack; /* number of transistors in series that are on */
{
double Cdiffside,Cdiffarea,Coverlap,cap;
Cdiffside = (nchannel) ? Cndiffside : Cpdiffside;
Cdiffarea = (nchannel) ? Cndiffarea : Cpdiffarea;
Coverlap = (nchannel) ? (Cndiffovlp+Cnoxideovlp) :
(Cpdiffovlp+Cpoxideovlp);
/* calculate directly-connected (non-stacked) capacitance */
/* then add in capacitance due to stacking */
if (width >= 10) {
cap = 3.0*Leff*width/2.0*Cdiffarea + 6.0*Leff*Cdiffside +
width*Coverlap;
cap += (double)(stack-1)*(Leff*width*Cdiffarea +
4.0*Leff*Cdiffside + 2.0*width*Coverlap);
} else {
cap = 3.0*Leff*width*Cdiffarea + (6.0*Leff+width)*Cdiffside +
width*Coverlap;
cap += (double)(stack-1)*(Leff*width*Cdiffarea +
2.0*Leff*Cdiffside + 2.0*width*Coverlap);
}
return(cap);
}
/*----------------------------------------------------------------------*/
/* The following routines estimate the effective resistance of an
on transistor as described in the tech report. The first routine
gives the "switching" resistance, and the second gives the
"full-on" resistance */
/* "Switching" effective resistance (ohms) of an on transistor of
 * `width` um; `stack` is the number of series devices.  All but the
 * switching transistor are on throughout the transition and are
 * therefore counted at 0.8x the static resistance. */
double transresswitch(double width, int nchannel, int stack)
{
    double rstatic = nchannel ? Rnchannelstatic : Rpchannelstatic;

    return (1.0 + ((stack - 1.0) * 0.8)) * rstatic / width;
}
/*----------------------------------------------------------------------*/
/* "Full-on" effective resistance (ohms) of an on transistor of
 * `width` um.  Unlike transresswitch(), each of the `stack` series
 * devices contributes its full resistance. */
double transreson(double width, int nchannel, int stack)
{
    double ron = nchannel ? Rnchannelon : Rpchannelon;

    return stack * ron / width;
}
/*----------------------------------------------------------------------*/
/* This routine operates in reverse: given a resistance, it finds
* the transistor width that would have this R. It is used in the
* data wordline to estimate the wordline driver size. */
/* Inverse of transreson() for a single device: given a target on
 * resistance in ohms, return the transistor width (um) that yields it.
 * Used to size the data wordline driver. */
double restowidth(double res, int nchannel)
{
    double ron = nchannel ? Rnchannelon : Rpchannelon;

    return ron / res;
}
/*----------------------------------------------------------------------*/
/* Horowitz delay approximation for a gate with time constant `tf`
 * driven by an input of rise time `inputramptime`, switching between
 * normalized thresholds vs1 and vs2.  `rise` tells whether the INPUT
 * rises (RISE) or falls. */
double horowitz(double inputramptime, double tf, double vs1, double vs2,
                int rise)
{
    double ratio = inputramptime / tf;
    double slope, delay;

    if (rise == RISE) {
        slope = 0.5;
        delay = tf * sqrt(log(vs1) * log(vs1) + 2 * ratio * slope * (1.0 - vs1)) +
                tf * (log(vs1) - log(vs2));
    } else {
        slope = 0.4;
        delay = tf * sqrt(log(1.0 - vs1) * log(1.0 - vs1) + 2 * ratio * slope * (vs1)) +
                tf * (log(1.0 - vs1) - log(1.0 - vs2));
    }
    return delay;
}
/*======================================================================*/
/*
* This part of the code contains routines for each section as
* described in the tech report. See the tech report for more details
* and explanations */
/*----------------------------------------------------------------------*/
/* Decoder delay: (see section 6.1 of tech report) */
/* Data-array decoder delay (tech report section 6.1).
 *
 * Models an address driver, a 3-to-8 NAND predecode stage and a NOR
 * row-select stage feeding the wordline driver.  C = cache size,
 * B = block size, A = associativity; Ndwl/Ndbl/Nspd partition the data
 * array (the Nt* tag parameters are accepted but unused here).  The
 * per-stage delays are stored through Tdecdrive/Tdecoder1/Tdecoder2,
 * the output slope through *outrisetime; returns the total delay. */
double decoder_delay(C, B, A, Ndwl, Ndbl, Nspd, Ntwl, Ntbl, Ntspd, Tdecdrive,
Tdecoder1, Tdecoder2,outrisetime)
int C,B,A,Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd;
double *Tdecdrive,*Tdecoder1,*Tdecoder2,*outrisetime;
{
/* tstep, m, a, b, c are declared but never used in this function */
double Ceq,Req,Rwire,rows,tf,nextinputtime,vth=0.0,tstep,m,a,b,c;
int numstack;
/* Calculate rise time. Consider two inverters */
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
gatecap(Wdecdrivep+Wdecdriven,0.0);
tf = Ceq*transreson(Wdecdriven,NCH,1);
nextinputtime = horowitz(0.0,tf,VTHINV100x60,VTHINV100x60,FALL)/
(VTHINV100x60);
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
gatecap(Wdecdrivep+Wdecdriven,0.0);
tf = Ceq*transreson(Wdecdriven,NCH,1);
nextinputtime = horowitz(nextinputtime,tf,VTHINV100x60,VTHINV100x60,
RISE)/
(1.0-VTHINV100x60);
/* First stage: driving the decoders */
rows = C/(8*B*A*Ndbl*Nspd);
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
4*gatecap(Wdec3to8n+Wdec3to8p,10.0)*(Ndwl*Ndbl)+
Cwordmetal*0.25*8*B*A*Ndbl*Nspd;
Rwire = Rwordmetal*0.125*8*B*A*Ndbl*Nspd;
tf = (Rwire + transreson(Wdecdrivep,PCH,1))*Ceq;
*Tdecdrive = horowitz(nextinputtime,tf,VTHINV100x60,VTHNAND60x90,
FALL);
nextinputtime = *Tdecdrive/VTHNAND60x90;
/* second stage: driving a bunch of nor gates with a nand */
/* numstack = number of series inputs on the row-select NOR,
   clamped to the 1..5 range covered by the vth table below */
numstack =
ceil((1.0/3.0)*logtwo( (double)((double)C/(double)(B*A*Ndbl*Nspd))));
if (numstack==0) numstack = 1;
if (numstack>5) numstack = 5;
Ceq = 3*draincap(Wdec3to8p,PCH,1) +draincap(Wdec3to8n,NCH,3) +
gatecap(WdecNORn+WdecNORp,((numstack*40)+20.0))*rows +
Cbitmetal*rows*8;
Rwire = Rbitmetal*rows*8/2;
tf = Ceq*(Rwire+transreson(Wdec3to8n,NCH,3));
/* we only want to charge the output to the threshold of the
nor gate. But the threshold depends on the number of inputs
to the nor. */
switch(numstack) {
case 1: vth = VTHNOR12x4x1; break;
case 2: vth = VTHNOR12x4x2; break;
case 3: vth = VTHNOR12x4x3; break;
case 4: vth = VTHNOR12x4x4; break;
case 5: vth = VTHNOR12x4x4; break; /* 5-input NOR reuses the 4-input threshold */
default: printf("error:numstack=%d C[%d] B[%d] A[%d] ndbl[%d] nspd[%d]\n",numstack, C, B, A, Ndbl, Nspd);
}
*Tdecoder1 = horowitz(nextinputtime,tf,VTHNAND60x90,vth,RISE);
nextinputtime = *Tdecoder1/(1.0-vth);
/* Final stage: driving an inverter with the nor */
Req = transreson(WdecNORp,PCH,numstack);
Ceq = (gatecap(Wdecinvn+Wdecinvp,20.0)+
numstack*draincap(WdecNORn,NCH,1)+
draincap(WdecNORp,PCH,numstack));
tf = Req*Ceq;
*Tdecoder2 = horowitz(nextinputtime,tf,vth,VSINV,FALL);
*outrisetime = *Tdecoder2/(VSINV);
return(*Tdecdrive+*Tdecoder1+*Tdecoder2);
}
/*----------------------------------------------------------------------*/
/* Decoder delay in the tag array (see section 6.1 of tech report) */
/* Tag-array decoder delay (tech report section 6.1).
 *
 * Mirror image of decoder_delay() with the tag-array partitioning
 * parameters Ntwl/Ntbl/Ntspd substituted for the data-array ones
 * (Ndwl/Ndbl/Nspd are accepted but unused here).  Per-stage delays go
 * through Tdecdrive/Tdecoder1/Tdecoder2, the output slope through
 * *outrisetime; returns the total delay. */
double decoder_tag_delay(C, B, A, Ndwl, Ndbl, Nspd, Ntwl, Ntbl, Ntspd,
Tdecdrive, Tdecoder1, Tdecoder2,outrisetime)
int C,B,A,Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd;
double *Tdecdrive,*Tdecoder1,*Tdecoder2,*outrisetime;
{
/* tstep, m, a, b, c are declared but never used in this function */
double Ceq,Req,Rwire,rows,tf,nextinputtime,vth=0.0,tstep,m,a,b,c;
int numstack;
/* Calculate rise time. Consider two inverters */
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
gatecap(Wdecdrivep+Wdecdriven,0.0);
tf = Ceq*transreson(Wdecdriven,NCH,1);
nextinputtime = horowitz(0.0,tf,VTHINV100x60,VTHINV100x60,FALL)/
(VTHINV100x60);
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
gatecap(Wdecdrivep+Wdecdriven,0.0);
tf = Ceq*transreson(Wdecdriven,NCH,1);
nextinputtime = horowitz(nextinputtime,tf,VTHINV100x60,VTHINV100x60,
RISE)/
(1.0-VTHINV100x60);
/* First stage: driving the decoders */
rows = C/(8*B*A*Ntbl*Ntspd);
Ceq = draincap(Wdecdrivep,PCH,1)+draincap(Wdecdriven,NCH,1) +
4*gatecap(Wdec3to8n+Wdec3to8p,10.0)*(Ntwl*Ntbl)+
Cwordmetal*0.25*8*B*A*Ntbl*Ntspd;
Rwire = Rwordmetal*0.125*8*B*A*Ntbl*Ntspd;
tf = (Rwire + transreson(Wdecdrivep,PCH,1))*Ceq;
*Tdecdrive = horowitz(nextinputtime,tf,VTHINV100x60,VTHNAND60x90,
FALL);
nextinputtime = *Tdecdrive/VTHNAND60x90;
/* second stage: driving a bunch of nor gates with a nand */
/* numstack = number of series inputs on the row-select NOR,
   clamped to at most 5 */
numstack =
ceil((1.0/3.0)*logtwo( (double)((double)C/(double)(B*A*Ntbl*Ntspd))));
if (numstack==0) numstack = 1;
if (numstack>5) numstack = 5;
Ceq = 3*draincap(Wdec3to8p,PCH,1) +draincap(Wdec3to8n,NCH,3) +
gatecap(WdecNORn+WdecNORp,((numstack*40)+20.0))*rows +
Cbitmetal*rows*8;
Rwire = Rbitmetal*rows*8/2;
tf = Ceq*(Rwire+transreson(Wdec3to8n,NCH,3));
/* we only want to charge the output to the threshold of the
nor gate. But the threshold depends on the number of inputs
to the nor. */
switch(numstack) {
case 1: vth = VTHNOR12x4x1; break;
case 2: vth = VTHNOR12x4x2; break;
case 3: vth = VTHNOR12x4x3; break;
case 4: vth = VTHNOR12x4x4; break;
case 5: vth = VTHNOR12x4x4; break;
case 6: vth = VTHNOR12x4x4; break; /* unreachable: numstack is clamped to 5 above */
default: printf("error:numstack=%d\n",numstack);
}
*Tdecoder1 = horowitz(nextinputtime,tf,VTHNAND60x90,vth,RISE);
nextinputtime = *Tdecoder1/(1.0-vth);
/* Final stage: driving an inverter with the nor */
Req = transreson(WdecNORp,PCH,numstack);
Ceq = (gatecap(Wdecinvn+Wdecinvp,20.0)+
numstack*draincap(WdecNORn,NCH,1)+
draincap(WdecNORp,PCH,numstack));
tf = Req*Ceq;
*Tdecoder2 = horowitz(nextinputtime,tf,vth,VSINV,FALL);
*outrisetime = *Tdecoder2/(VSINV);
return(*Tdecdrive+*Tdecoder1+*Tdecoder2);
}
/*----------------------------------------------------------------------*/
/* Data array wordline delay (see section 6.2 of tech report) */
/* Data-array wordline delay (tech report section 6.2): sizes a driver
 * for the desired rise time, then models the driver stage followed by
 * the wordline charge stage.  B = block size, A = associativity;
 * Ndwl/Nspd partition the array.  Output slope goes through
 * *outrisetime; returns the two-stage wordline delay. */
double wordline_delay(B,A,Ndwl,Nspd,inrisetime,outrisetime)
int B,A,Ndwl,Nspd;
double inrisetime,*outrisetime;
{
double Rpdrive,nextrisetime;
double desiredrisetime,psize,nsize;
double tf,nextinputtime,Ceq,Req,Rline,Cline;
int cols;
double Tworddrivedel,Twordchargedel;
cols = 8*B*A*Nspd/Ndwl;
/* Choose a transistor size that makes sense */
/* Use a first-order approx */
desiredrisetime = krise*log((double)(cols))/2.0;
Cline = (gatecappass(Wmemcella,0.0)+
gatecappass(Wmemcella,0.0)+
Cwordmetal)*cols;
Rpdrive = desiredrisetime/(Cline*log(VSINV)*-1.0);
/* convert the target resistance into a PMOS driver width,
   capped at the maximum allowed driver size */
psize = restowidth(Rpdrive,PCH);
if (psize > Wworddrivemax) {
psize = Wworddrivemax;
}
/* Now that we have a reasonable psize, do the rest as before */
/* If we keep the ratio the same as the tag wordline driver,
the threshold voltage will be close to VSINV */
nsize = psize * Wdecinvn/Wdecinvp;
/* stage 1: the driver itself */
Ceq = draincap(Wdecinvn,NCH,1) + draincap(Wdecinvp,PCH,1) +
gatecap(nsize+psize,20.0);
tf = transreson(Wdecinvn,NCH,1)*Ceq;
Tworddrivedel = horowitz(inrisetime,tf,VSINV,VSINV,RISE);
nextinputtime = Tworddrivedel/(1.0-VSINV);
/* stage 2: charging the wordline (pass-gate caps of every cell
   in the row plus the metal wire) */
Cline = (gatecappass(Wmemcella,(BitWidth-2*Wmemcella)/2.0)+
gatecappass(Wmemcella,(BitWidth-2*Wmemcella)/2.0)+
Cwordmetal)*cols+
draincap(nsize,NCH,1) + draincap(psize,PCH,1);
Rline = Rwordmetal*cols/2;
tf = (transreson(psize,PCH,1)+Rline)*Cline;
Twordchargedel = horowitz(nextinputtime,tf,VSINV,VSINV,FALL);
*outrisetime = Twordchargedel/VSINV;
return(Tworddrivedel+Twordchargedel);
}
/*----------------------------------------------------------------------*/
/* Tag array wordline delay (see section 6.3 of tech report) */
/* Tag-array wordline delay (tech report section 6.3): a fixed-size
 * driver stage followed by the tag wordline charge stage.  C = cache
 * size, A = associativity; Ntspd/Ntwl partition the tag array.
 * Output slope goes through *outrisetime; returns the total delay. */
double wordline_tag_delay(C,A,Ntspd,Ntwl,inrisetime,outrisetime)
int C,A,Ntspd,Ntwl;
double *outrisetime;
double inrisetime;
{
/* m, a, b, c are declared but never used in this function */
double tf,m,a,b,c;
double Cline,Rline,Ceq,nextinputtime;
int tagbits;
double Tworddrivedel,Twordchargedel;
/* number of tag bits */
tagbits = ADDRESS_BITS+2-(int)logtwo((double)C)+(int)logtwo((double)A);
/* first stage */
Ceq = draincap(Wdecinvn,NCH,1) + draincap(Wdecinvp,PCH,1) +
gatecap(Wdecinvn+Wdecinvp,20.0);
tf = transreson(Wdecinvn,NCH,1)*Ceq;
Tworddrivedel = horowitz(inrisetime,tf,VSINV,VSINV,RISE);
nextinputtime = Tworddrivedel/(1.0-VSINV);
/* second stage: charge the tag wordline (pass-gate caps of each
   tag cell in the row plus the metal wire) */
Cline = (gatecappass(Wmemcella,(BitWidth-2*Wmemcella)/2.0)+
gatecappass(Wmemcella,(BitWidth-2*Wmemcella)/2.0)+
Cwordmetal)*tagbits*A*Ntspd/Ntwl+
draincap(Wdecinvn,NCH,1) + draincap(Wdecinvp,PCH,1);
Rline = Rwordmetal*tagbits*A*Ntspd/(2*Ntwl);
tf = (transreson(Wdecinvp,PCH,1)+Rline)*Cline;
Twordchargedel = horowitz(nextinputtime,tf,VSINV,VSINV,FALL);
*outrisetime = Twordchargedel/VSINV;
return(Tworddrivedel+Twordchargedel);
}
/*----------------------------------------------------------------------*/
/* Data array bitline: (see section 6.4 in tech report) */
/* Data-array bitline delay (tech report section 6.4): time for the
 * bitline to discharge by Vbitsense, with or without a column mux,
 * corrected for a finite input (wordline) rise time.  Output slope
 * goes through *outrisetime; returns the bitline delay. */
double bitline_delay(C,A,B,Ndwl,Ndbl,Nspd,inrisetime,outrisetime)
int C,A,B,Ndwl,Ndbl,Nspd;
double inrisetime,*outrisetime;
{
double Tbit,Cline,Ccolmux,Rlineb,r1,r2,c1,c2,a,b,c;
double m,tstep;
double Cbitrow; /* bitline capacitance due to access transistor */
int rows,cols;
Cbitrow = draincap(Wmemcella,NCH,1)/2.0; /* due to shared contact */
rows = C/(B*A*Ndbl*Nspd);
cols = 8*B*A*Nspd/Ndwl;
/* Ndbl*Nspd == 1 means the bitline drives the sense amp directly;
   otherwise a column-mux transistor sits in the path */
if (Ndbl*Nspd == 1) {
Cline = rows*(Cbitrow+Cbitmetal)+2*draincap(Wbitpreequ,PCH,1);
Ccolmux = 2*gatecap(WsenseQ1to4,10.0);
Rlineb = Rbitmetal*rows/2.0;
r1 = Rlineb;
} else {
Cline = rows*(Cbitrow+Cbitmetal) + 2*draincap(Wbitpreequ,PCH,1) +
draincap(Wbitmuxn,NCH,1);
Ccolmux = Nspd*Ndbl*(draincap(Wbitmuxn,NCH,1))+2*gatecap(WsenseQ1to4,10.0);
Rlineb = Rbitmetal*rows/2.0;
r1 = Rlineb +
transreson(Wbitmuxn,NCH,1);
}
/* pull-down path through the cell's access + storage transistors */
r2 = transreson(Wmemcella,NCH,1) +
transreson(Wmemcella*Wmemcellbscale,NCH,1);
c1 = Ccolmux;
c2 = Cline;
/* RC time to develop a Vbitsense differential */
tstep = (r2*c2+(r1+r2)*c1)*log((Vbitpre)/(Vbitpre-Vbitsense));
/* take input rise time into account */
m = Vdd/inrisetime;
if (tstep <= (0.5*(Vdd-Vt)/m)) {
/* input still ramping: solve the quadratic for the delay */
a = m;
b = 2*((Vdd*0.5)-Vt);
c = -2*tstep*(Vdd-Vt)+1/m*((Vdd*0.5)-Vt)*
((Vdd*0.5)-Vt);
Tbit = (-b+sqrt(b*b-4*a*c))/(2*a);
} else {
Tbit = tstep + (Vdd+Vt)/(2*m) - (Vdd*0.5)/m;
}
*outrisetime = Tbit/(log((Vbitpre-Vbitsense)/Vdd));
return(Tbit);
}
/*----------------------------------------------------------------------*/
/* Tag array bitline: (see section 6.4 in tech report) */
/* Tag-array bitline delay (tech report section 6.4): identical model
 * to bitline_delay() with the tag partitioning parameters Ntwl/Ntbl/
 * Ntspd.  Output slope goes through *outrisetime; returns the delay. */
double bitline_tag_delay(C,A,B,Ntwl,Ntbl,Ntspd,inrisetime,outrisetime)
int C,A,B,Ntwl,Ntbl,Ntspd;
double inrisetime,*outrisetime;
{
double Tbit,Cline,Ccolmux,Rlineb,r1,r2,c1,c2,a,b,c;
double m,tstep;
double Cbitrow; /* bitline capacitance due to access transistor */
int rows,cols;
Cbitrow = draincap(Wmemcella,NCH,1)/2.0; /* due to shared contact */
rows = C/(B*A*Ntbl*Ntspd);
cols = 8*B*A*Ntspd/Ntwl;
/* Ntbl*Ntspd == 1 means no column mux in the path */
if (Ntbl*Ntspd == 1) {
Cline = rows*(Cbitrow+Cbitmetal)+2*draincap(Wbitpreequ,PCH,1);
Ccolmux = 2*gatecap(WsenseQ1to4,10.0);
Rlineb = Rbitmetal*rows/2.0;
r1 = Rlineb;
} else {
Cline = rows*(Cbitrow+Cbitmetal) + 2*draincap(Wbitpreequ,PCH,1) +
draincap(Wbitmuxn,NCH,1);
Ccolmux = Ntspd*Ntbl*(draincap(Wbitmuxn,NCH,1))+2*gatecap(WsenseQ1to4,10.0);
Rlineb = Rbitmetal*rows/2.0;
r1 = Rlineb +
transreson(Wbitmuxn,NCH,1);
}
/* pull-down path through the cell's access + storage transistors */
r2 = transreson(Wmemcella,NCH,1) +
transreson(Wmemcella*Wmemcellbscale,NCH,1);
c1 = Ccolmux;
c2 = Cline;
/* RC time to develop a Vbitsense differential */
tstep = (r2*c2+(r1+r2)*c1)*log((Vbitpre)/(Vbitpre-Vbitsense));
/* take into account input rise time */
m = Vdd/inrisetime;
if (tstep <= (0.5*(Vdd-Vt)/m)) {
/* input still ramping: solve the quadratic for the delay */
a = m;
b = 2*((Vdd*0.5)-Vt);
c = -2*tstep*(Vdd-Vt)+1/m*((Vdd*0.5)-Vt)*
((Vdd*0.5)-Vt);
Tbit = (-b+sqrt(b*b-4*a*c))/(2*a);
} else {
Tbit = tstep + (Vdd+Vt)/(2*m) - (Vdd*0.5)/m;
}
*outrisetime = Tbit/(log((Vbitpre-Vbitsense)/Vdd));
return(Tbit);
}
/*----------------------------------------------------------------------*/
/* It is assumed the sense amps have a constant delay
(see section 6.5) */
/* Data sense amplifier, modeled as a constant delay (tech report
 * section 6.5); the input slope is ignored. */
double sense_amp_delay(double inrisetime, double *outrisetime)
{
    *outrisetime = tfalldata;
    return tsensedata;
}
/*--------------------------------------------------------------*/
/* Tag sense amplifier, modeled as a constant delay (tech report
 * section 6.5); the input slope is ignored. */
double sense_amp_tag_delay(double inrisetime, double *outrisetime)
{
    *outrisetime = tfalltag;
    return tsensetag;
}
/*----------------------------------------------------------------------*/
/* Comparator Delay (see section 6.6) */
/* Tag comparator delay (tech report section 6.6): three inverter
 * stages followed by the virtual-ground driver discharging the
 * dynamic compare structure.  C = cache size, A = associativity;
 * Ntbl/Ntspd partition the tag array.  Output slope goes through
 * *outputtime; returns the total comparator delay. */
double compare_time(C,A,Ntbl,Ntspd,inputtime,outputtime)
int C,A,Ntbl,Ntspd;
double inputtime,*outputtime;
{
double Req,Ceq,tf,st1del,st2del,st3del,nextinputtime,m;
double c1,c2,r1,r2,tstep,a,b,c;
double Tcomparatorni;
int cols,tagbits;
/* First Inverter */
Ceq = gatecap(Wcompinvn2+Wcompinvp2,10.0) +
draincap(Wcompinvp1,PCH,1) + draincap(Wcompinvn1,NCH,1);
Req = transreson(Wcompinvp1,PCH,1);
tf = Req*Ceq;
st1del = horowitz(inputtime,tf,VTHCOMPINV,VTHCOMPINV,FALL);
nextinputtime = st1del/VTHCOMPINV;
/* Second Inverter */
Ceq = gatecap(Wcompinvn3+Wcompinvp3,10.0) +
draincap(Wcompinvp2,PCH,1) + draincap(Wcompinvn2,NCH,1);
Req = transreson(Wcompinvn2,NCH,1);
tf = Req*Ceq;
/* NOTE(review): passes inputtime rather than the nextinputtime
   computed from stage 1 -- looks like a copy-paste slip, but confirm
   against the original CACTI source before changing it */
st2del = horowitz(inputtime,tf,VTHCOMPINV,VTHCOMPINV,RISE);
/* NOTE(review): uses st1del where st2del would seem intended */
nextinputtime = st1del/(1.0-VTHCOMPINV);
/* Third Inverter */
Ceq = gatecap(Wevalinvn+Wevalinvp,10.0) +
draincap(Wcompinvp3,PCH,1) + draincap(Wcompinvn3,NCH,1);
Req = transreson(Wcompinvp3,PCH,1);
tf = Req*Ceq;
st3del = horowitz(nextinputtime,tf,VTHCOMPINV,VTHEVALINV,FALL);
/* NOTE(review): uses st1del where st3del would seem intended */
nextinputtime = st1del/(VTHEVALINV);
/* Final Inverter (virtual ground driver) discharging compare part */
tagbits = ADDRESS_BITS - (int)logtwo((double)C) + (int)logtwo((double)A);
cols = tagbits*Ntbl*Ntspd;
r1 = transreson(Wcompn,NCH,2);
r2 = transresswitch(Wevalinvn,NCH,1);
c2 = (tagbits)*(draincap(Wcompn,NCH,1)+draincap(Wcompn,NCH,2))+
draincap(Wevalinvp,PCH,1) + draincap(Wevalinvn,NCH,1);
c1 = (tagbits)*(draincap(Wcompn,NCH,1)+draincap(Wcompn,NCH,2))
+draincap(Wcompp,PCH,1) + gatecap(Wmuxdrv12n+Wmuxdrv12p,20.0) +
cols*Cwordmetal;
/* time to go to threshold of mux driver */
tstep = (r2*c2+(r1+r2)*c1)*log(1.0/VTHMUXDRV1);
/* take into account non-zero input rise time */
m = Vdd/nextinputtime;
if ((tstep) <= (0.5*(Vdd-Vt)/m)) {
/* input still ramping: solve the quadratic for the delay */
a = m;
b = 2*((Vdd*VTHEVALINV)-Vt);
c = -2*(tstep)*(Vdd-Vt)+1/m*((Vdd*VTHEVALINV)-Vt)*((Vdd*VTHEVALINV)-Vt);
Tcomparatorni = (-b+sqrt(b*b-4*a*c))/(2*a);
} else {
Tcomparatorni = (tstep) + (Vdd+Vt)/(2*m) - (Vdd*VTHEVALINV)/m;
}
*outputtime = Tcomparatorni/(1.0-VTHMUXDRV1);
return(Tcomparatorni+st1del+st2del+st3del);
}
/*----------------------------------------------------------------------*/
/* Delay of the multiplexor Driver (see section 6.7) */
/* Output multiplexor driver delay (tech report section 6.7): three
 * driver stages from the comparator "match" signal to the output
 * driver's select lines.  Output slope goes through *outputtime;
 * returns the three-stage delay.  C, Ntbl and Ntspd are accepted but
 * unused here. */
double mux_driver_delay(C,B,A,Ndbl,Nspd,Ndwl,Ntbl,Ntspd,inputtime,outputtime)
int C,B,A,Ndbl,Nspd,Ndwl,Ntbl,Ntspd;
double inputtime,*outputtime;
{
double Ceq,Req,tf,nextinputtime;
double Tst1,Tst2,Tst3;
/* first driver stage - Invert "match" to produce "matchb" */
/* the critical path is the DESELECTED case, so consider what
happens when the address bit is true, but match goes low */
Ceq = gatecap(WmuxdrvNORn+WmuxdrvNORp,15.0)*(8*B/BITOUT) +
draincap(Wmuxdrv12n,NCH,1) + draincap(Wmuxdrv12p,PCH,1);
Req = transreson(Wmuxdrv12p,PCH,1);
tf = Ceq*Req;
Tst1 = horowitz(inputtime,tf,VTHMUXDRV1,VTHMUXDRV2,FALL);
nextinputtime = Tst1/VTHMUXDRV2;
/* second driver stage - NOR "matchb" with address bits to produce sel */
Ceq = gatecap(Wmuxdrv3n+Wmuxdrv3p,15.0) + 2*draincap(WmuxdrvNORn,NCH,1) +
draincap(WmuxdrvNORp,PCH,2);
Req = transreson(WmuxdrvNORn,NCH,1);
tf = Ceq*Req;
Tst2 = horowitz(nextinputtime,tf,VTHMUXDRV2,VTHMUXDRV3,RISE);
nextinputtime = Tst2/(1-VTHMUXDRV3);
/* third driver stage - invert "select" to produce "select bar" */
Ceq = BITOUT*gatecap(Woutdrvseln+Woutdrvselp+Woutdrvnorn+Woutdrvnorp,20.0)+
draincap(Wmuxdrv3p,PCH,1) + draincap(Wmuxdrv3n,NCH,1) +
Cwordmetal*8*B*A*Nspd*Ndbl/2.0;
Req = (Rwordmetal*8*B*A*Nspd*Ndbl/2)/2 + transreson(Wmuxdrv3p,PCH,1);
tf = Ceq*Req;
Tst3 = horowitz(nextinputtime,tf,VTHMUXDRV3,VTHOUTDRINV,FALL);
*outputtime = Tst3/(VTHOUTDRINV);
return(Tst1 + Tst2 + Tst3);
}
/*----------------------------------------------------------------------*/
/* Valid driver (see section 6.9 of tech report)
Note that this will only be called for a direct mapped cache */
/* Valid-bit output driver delay (tech report section 6.9); only called
 * for a direct-mapped cache.  C, A, Ntbl and Ntspd are accepted but
 * unused.  Returns the driver delay. */
double valid_driver_delay(int C, int A, int Ntbl, int Ntspd, double inputtime)
{
    double cload, tf;

    /* Load: the driver's own drain caps plus the external output cap. */
    cload = draincap(Wmuxdrv12n, NCH, 1) + draincap(Wmuxdrv12p, PCH, 1) + Cout;
    tf = cload * transreson(Wmuxdrv12p, PCH, 1);
    return horowitz(inputtime, tf, VTHMUXDRV1, 0.5, FALL);
}
/*----------------------------------------------------------------------*/
/* Data output delay (data side) -- see section 6.8
This is the time through the NAND/NOR gate and the final inverter
assuming sel is already present */
/* Data output driver delay (tech report section 6.8): the NOR/NAND
 * select gate followed by the final output driver, assuming the sel
 * signal is already present.  Output slope goes through *outrisetime;
 * returns the combined delay. */
double dataoutput_delay(C,B,A,Ndbl,Nspd,Ndwl,
inrisetime,outrisetime)
int C,B,A,Ndbl,Nspd,Ndwl;
double *outrisetime,inrisetime;
{
double Ceq,Rwire,Rline;
double aspectRatio; /* as height over width */
double ramBlocks; /* number of RAM blocks */
double tf;
double nordel,outdel,nextinputtime;
double hstack,vstack;
/* calculate some layout info: estimate how the RAM blocks tile
   horizontally (hstack) and vertically (vstack) */
aspectRatio = (2.0*C)/(8.0*B*B*A*A*Ndbl*Ndbl*Nspd*Nspd);
hstack = (aspectRatio > 1.0) ? aspectRatio : 1.0/aspectRatio;
ramBlocks = Ndwl*Ndbl;
hstack = hstack * sqrt(ramBlocks/ hstack);
vstack = ramBlocks/ hstack;
/* Delay of NOR gate */
Ceq = 2*draincap(Woutdrvnorn,NCH,1)+draincap(Woutdrvnorp,PCH,2)+
gatecap(Woutdrivern,10.0);
tf = Ceq*transreson(Woutdrvnorp,PCH,2);
nordel = horowitz(inrisetime,tf,VTHOUTDRNOR,VTHOUTDRIVE,FALL);
nextinputtime = nordel/(VTHOUTDRIVE);
/* Delay of final output driver: drives the shared output bus plus
   the external load Cout */
Ceq = (draincap(Woutdrivern,NCH,1)+draincap(Woutdriverp,PCH,1))*
((8*B*A)/BITOUT) +
Cwordmetal*(8*B*A*Nspd* (vstack)) + Cout;
Rwire = Rwordmetal*(8*B*A*Nspd* (vstack))/2;
tf = Ceq*(transreson(Woutdriverp,PCH,1)+Rwire);
outdel = horowitz(nextinputtime,tf,VTHOUTDRIVE,0.5,RISE);
*outrisetime = outdel/0.5;
return(outdel+nordel);
}
/*----------------------------------------------------------------------*/
/* Sel inverter delay (part of the output driver) see section 6.8 */
/* Delay of the sel inverter in the output-driver path (tech report
 * section 6.8).  Output slope goes through *outrisetime. */
double selb_delay_tag_path(double inrisetime, double *outrisetime)
{
    double cload, tf, delay;

    cload = draincap(Woutdrvseln, NCH, 1) + draincap(Woutdrvselp, PCH, 1) +
            gatecap(Woutdrvnandn + Woutdrvnandp, 10.0);
    tf = cload * transreson(Woutdrvseln, NCH, 1);
    delay = horowitz(inrisetime, tf, VTHOUTDRINV, VTHOUTDRNAND, RISE);
    *outrisetime = delay / (1.0 - VTHOUTDRNAND);
    return delay;
}
/*----------------------------------------------------------------------*/
/* This routine calculates the extra time required after an access before
* the next access can occur [ie. it returns (cycle time-access time)].
*/
/* Extra time needed after an access before the next one can start,
 * i.e. cycle time minus access time.  Modeled (per the tech report) as
 * four inverter delays, each with a fanout of four, plus the wordline
 * delay `worddata`. */
double precharge_delay(double worddata)
{
    double cload, tf;

    cload = draincap(Wdecinvn, NCH, 1) + draincap(Wdecinvp, PCH, 1) +
            4 * gatecap(Wdecinvn + Wdecinvp, 0.0);
    tf = cload * transreson(Wdecinvn, NCH, 1);
    return 4 * horowitz(0.0, tf, 0.5, 0.5, RISE) + worddata;
}
/*======================================================================*/
/* returns TRUE if the parameters make up a valid organization */
/* Layout concerns drive any restrictions you might add here */
/* TRUE iff the given array organization is acceptable; layout concerns
 * drive any restrictions added here.  rows, cols, Nspd and Ntspd are
 * currently unused. */
int organizational_parameters_valid(int rows, int cols, int Ndwl, int Ndbl,
                                    int Nspd, int Ntwl, int Ntbl, int Ntspd)
{
    /* don't want more than MAXSUBARRAYS subarrays for each of data/tag */
    if (Ndwl * Ndbl > MAXSUBARRAYS || Ntwl * Ntbl > MAXSUBARRAYS)
        return FALSE;
    /* add more constraints here as necessary */
    return TRUE;
}
/*----------------------------------------------------------------------*/
/*
 * Exhaustively sweep the legal cache organizations (Ndwl/Ndbl/Nspd for
 * the data array, Ntwl/Ntbl/Ntspd for the tag array), compute each
 * component delay, and record in *result the organization with the
 * smallest scaled cycle time.  `parameters` supplies cache size, block
 * size, associativity and number of sets.
 */
void wattch_calculate_time(result,parameters)
result_type *result;
parameter_type *parameters;
{
   /* NOTE(review): tag_driver_size1/2 and driver_cap appear unused in
      this routine -- confirm against the rest of the file. */
   int Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd,rows,columns,tag_driver_size1,tag_driver_size2;
   double access_time;
   double before_mux,after_mux;
   double decoder_data_driver,decoder_data_3to8,decoder_data_inv;
   double decoder_data,decoder_tag,wordline_data,wordline_tag;
   double decoder_tag_driver,decoder_tag_3to8,decoder_tag_inv;
   double bitline_data,bitline_tag,sense_amp_data,sense_amp_tag;
   double compare_tag,mux_driver,data_output,selb=0.0;
   double time_till_compare,time_till_select,driver_cap,valid_driver;
   double cycle_time, precharge_del;
   double outrisetime,inrisetime;

   rows = parameters->number_of_sets;
   /* Bits per physical row: 8 bits/byte * block size * associativity. */
   columns = 8*parameters->block_size*parameters->associativity;

   /* go through possible Ndbl,Ndwl and find the smallest */
   /* Because of area considerations, I don't think it makes sense
      to break either dimension up larger than MAXN */

   /* BIGNUM sentinels guarantee the first valid organization wins. */
   result->cycle_time = BIGNUM;
   result->access_time = BIGNUM;
   for (Nspd=1;Nspd<=MAXSPD;Nspd=Nspd*2) {
    for (Ndwl=1;Ndwl<=MAXN;Ndwl=Ndwl*2) {
     for (Ndbl=1;Ndbl<=MAXN;Ndbl=Ndbl*2) {
      for (Ntspd=1;Ntspd<=MAXSPD;Ntspd=Ntspd*2) {
       /* NOTE(review): the Ntwl loop bound is 1, so Ntwl is effectively
          fixed at 1 -- presumably intentional; confirm against the
          original CACTI sources. */
       for (Ntwl=1;Ntwl<=1;Ntwl=Ntwl*2) {
        for (Ntbl=1;Ntbl<=MAXN;Ntbl=Ntbl*2) {
         if (organizational_parameters_valid
             (rows,columns,Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd)) {

            /* Calculate data side of cache.  Each stage's output rise
               time feeds the next stage's input rise time. */
            decoder_data = decoder_delay(parameters->cache_size,parameters->block_size,
                           parameters->associativity,Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd,
                           &decoder_data_driver,&decoder_data_3to8,
                           &decoder_data_inv,&outrisetime);
            inrisetime = outrisetime;
            wordline_data = wordline_delay(parameters->block_size,
                            parameters->associativity,Ndwl,Nspd,
                            inrisetime,&outrisetime);
            inrisetime = outrisetime;
            bitline_data = bitline_delay(parameters->cache_size,parameters->associativity,
                           parameters->block_size,Ndwl,Ndbl,Nspd,
                           inrisetime,&outrisetime);
            inrisetime = outrisetime;
            sense_amp_data = sense_amp_delay(inrisetime,&outrisetime);
            inrisetime = outrisetime;
            data_output = dataoutput_delay(parameters->cache_size,parameters->block_size,
                          parameters->associativity,Ndbl,Nspd,Ndwl,
                          inrisetime,&outrisetime);
            inrisetime = outrisetime;

            /* if the associativity is 1, the data output can come right
               after the sense amp.   Otherwise, it has to wait until
               the data access has been done. */
            if (parameters->associativity==1) {
               before_mux = decoder_data + wordline_data + bitline_data +
                            sense_amp_data + data_output;
               after_mux = 0;
            } else {
               before_mux = decoder_data + wordline_data + bitline_data +
                            sense_amp_data;
               after_mux = data_output;
            }

            /*
             * Now worry about the tag side.
             */
            decoder_tag = decoder_tag_delay(parameters->cache_size,
                          parameters->block_size,parameters->associativity,
                          Ndwl,Ndbl,Nspd,Ntwl,Ntbl,Ntspd,
                          &decoder_tag_driver,&decoder_tag_3to8,
                          &decoder_tag_inv,&outrisetime);
            inrisetime = outrisetime;
            wordline_tag = wordline_tag_delay(parameters->cache_size,
                           parameters->associativity,Ntspd,Ntwl,
                           inrisetime,&outrisetime);
            inrisetime = outrisetime;
            bitline_tag = bitline_tag_delay(parameters->cache_size,parameters->associativity,
                          parameters->block_size,Ntwl,Ntbl,Ntspd,
                          inrisetime,&outrisetime);
            inrisetime = outrisetime;
            sense_amp_tag = sense_amp_tag_delay(inrisetime,&outrisetime);
            inrisetime = outrisetime;
            compare_tag = compare_time(parameters->cache_size,parameters->associativity,
                          Ntbl,Ntspd,
                          inrisetime,&outrisetime);
            inrisetime = outrisetime;

            if (parameters->associativity == 1) {
               /* Direct-mapped: no mux driver, only a valid-bit driver;
                  the critical path is the slower of data and tag. */
               mux_driver = 0;
               valid_driver = valid_driver_delay(parameters->cache_size,
                              parameters->associativity,Ntbl,Ntspd,inrisetime);
               time_till_compare = decoder_tag + wordline_tag + bitline_tag +
                                   sense_amp_tag;
               time_till_select = time_till_compare+ compare_tag + valid_driver;
               /*
                * From the above info, calculate the total access time
                */
               access_time = MAX(before_mux+after_mux,time_till_select);
            } else {
               /* Set-associative: the tag compare drives the output mux,
                  so the data output waits for the select signal. */
               mux_driver = mux_driver_delay(parameters->cache_size,parameters->block_size,
                            parameters->associativity,Ndbl,Nspd,Ndwl,Ntbl,Ntspd,
                            inrisetime,&outrisetime);
               selb = selb_delay_tag_path(inrisetime,&outrisetime);
               valid_driver = 0;
               time_till_compare = decoder_tag + wordline_tag + bitline_tag +
                                   sense_amp_tag;
               time_till_select = time_till_compare+ compare_tag + mux_driver
                                  + selb;
               access_time = MAX(before_mux,time_till_select) +after_mux;
            }

            /*
             * Calcuate the cycle time
             */
            precharge_del = precharge_delay(wordline_data);
            cycle_time = access_time + precharge_del;

            /*
             * The parameters are for a 0.8um process.  A quick way to
             * scale the results to another process is to divide all
             * the results by FUDGEFACTOR.  Normally, FUDGEFACTOR is 1.
             */
            /* The 1e-11 terms weight the organization parameters so that
               ties in cycle time are broken toward smaller subdivisions. */
            if (result->cycle_time+1e-11*(result->best_Ndwl+result->best_Ndbl+result->best_Nspd+result->best_Ntwl+result->best_Ntbl+result->best_Ntspd) > cycle_time/FUDGEFACTOR+1e-11*(Ndwl+Ndbl+Nspd+Ntwl+Ntbl+Ntspd)) {
               result->cycle_time = cycle_time/FUDGEFACTOR;
               result->access_time = access_time/FUDGEFACTOR;
               result->best_Ndwl = Ndwl;
               result->best_Ndbl = Ndbl;
               result->best_Nspd = Nspd;
               result->best_Ntwl = Ntwl;
               result->best_Ntbl = Ntbl;
               result->best_Ntspd = Ntspd;
               result->decoder_delay_data = decoder_data/FUDGEFACTOR;
               result->decoder_delay_tag = decoder_tag/FUDGEFACTOR;
               result->dec_tag_driver = decoder_tag_driver/FUDGEFACTOR;
               result->dec_tag_3to8 = decoder_tag_3to8/FUDGEFACTOR;
               result->dec_tag_inv = decoder_tag_inv/FUDGEFACTOR;
               result->dec_data_driver = decoder_data_driver/FUDGEFACTOR;
               result->dec_data_3to8 = decoder_data_3to8/FUDGEFACTOR;
               result->dec_data_inv = decoder_data_inv/FUDGEFACTOR;
               result->wordline_delay_data = wordline_data/FUDGEFACTOR;
               result->wordline_delay_tag = wordline_tag/FUDGEFACTOR;
               result->bitline_delay_data = bitline_data/FUDGEFACTOR;
               result->bitline_delay_tag = bitline_tag/FUDGEFACTOR;
               result->sense_amp_delay_data = sense_amp_data/FUDGEFACTOR;
               result->sense_amp_delay_tag = sense_amp_tag/FUDGEFACTOR;
               result->compare_part_delay = compare_tag/FUDGEFACTOR;
               result->drive_mux_delay = mux_driver/FUDGEFACTOR;
               result->selb_delay = selb/FUDGEFACTOR;
               result->drive_valid_delay = valid_driver/FUDGEFACTOR;
               result->data_output_delay = data_output/FUDGEFACTOR;
               result->precharge_delay = precharge_del/FUDGEFACTOR;
            }
         }
        }
       }
      }
     }
    }
   }
}
| gpl-2.0 |
Sony-Kitakami/android_kernel_sony_kitakami | drivers/usb/host/xhci.c | 6 | 148505 | /*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 val;

	/* Sample at least once, even if the caller passed usec <= 0. */
	do {
		val = xhci_readl(xhci, ptr);
		if (val == ~(u32)0)	/* all-ones read: card removed */
			return -ENODEV;
		if ((val & mask) == done)
			return 0;
		udelay(1);
	} while (--usec > 0);

	return -ETIMEDOUT;
}
/*
* Disable interrupts and begin the xHCI halting process.
*/
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 clear = XHCI_IRQS;
	u32 cmd;

	/* If the controller isn't already halted, clear run/stop too. */
	if (!(xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT))
		clear |= CMD_RUN;

	/* Mask interrupts (and possibly run/stop) in the command register. */
	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= ~clear;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
* Force HC into halt state.
*
* Disable any IRQs and clear the run/stop bit.
* HC will complete any current and actively pipelined transactions, and
* should halt within 16 ms of the run/stop bit being cleared.
* Read HC Halted bit in the status register to see when the HC is finished.
*/
int xhci_halt(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	/* Wait for the HCHalted status bit; the spec allows up to 16ms. */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
		/* Give the bus glue a chance to recover a wedged host. */
		if (hcd->driver->halt_failed_cleanup)
			hcd->driver->halt_failed_cleanup(hcd);
	} else {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	}

	return ret;
}
/*
* Set the run bit and wait for the host to be running.
*/
int xhci_start(struct xhci_hcd *xhci)
{
	u32 cmd;
	int ret;

	/* Set the run/stop bit to start the controller. */
	cmd = xhci_readl(xhci, &xhci->op_regs->command) | CMD_RUN;
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			cmd);
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	else if (!ret)
		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);

	return ret;
}
/*
* Reset a halted HC.
*
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
* will be set to their defaults.
*/
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	/* Resetting a running controller is not allowed; treat it as a
	 * no-op success so callers don't fail spuriously. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	/* Hardware clears CMD_RESET when the reset completes; allow up
	 * to 10 seconds. */
	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	/* Reset wiped all port state; clear the cached suspend/resume
	 * bookkeeping for both roothub buses (USB2 and USB3). */
	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}
#ifdef CONFIG_PCI
/* Release every MSI-X vector that was requested; -EINVAL when MSI-X
 * was never set up. */
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++) {
		if (!xhci->msix_entries[i].vector)
			continue;
		free_irq(xhci->msix_entries[i].vector, xhci_to_hcd(xhci));
	}
	return 0;
}
/*
* Set up MSI
*/
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (!ret)
		return 0;

	/* Couldn't grab the vector: back out of MSI mode. */
	xhci_dbg(xhci, "disable MSI interrupt\n");
	pci_disable_msi(pdev);
	return ret;
}
/*
* Free IRQs
* free all IRQs request
*/
static void xhci_free_irq(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
int ret;
/* return if using legacy interrupt */
if (xhci_to_hcd(xhci)->irq > 0)
return;
ret = xhci_free_msi(xhci);
if (!ret)
return;
if (pdev->irq > 0)
free_irq(pdev->irq, xhci_to_hcd(xhci));
return;
}
/*
* Set up MSI-X
*/
/*
 * Set up MSI-X: allocate the vector table, enable MSI-X on the PCI
 * device, and request one IRQ per vector.  Returns 0 on success or a
 * negative errno; on failure all partial state is rolled back.
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
	 *   Add additional 1 vector to ensure always available interrupt.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	/*
	 * kcalloc() checks the count * size multiplication for overflow
	 * (unlike the open-coded kmalloc(count * size)) and zeroes the
	 * array, so every .vector starts out 0 without an explicit loop.
	 */
	xhci->msix_entries = kcalloc(xhci->msix_count,
				sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++)
		xhci->msix_entries[i].entry = i;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* Free any IRQs and disable MSI-X */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/* Platform devices manage their IRQs through the bus glue. */
	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (!xhci->msix_entries) {
		pci_disable_msi(pdev);
	} else {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	}

	hcd->msix_enabled = 0;
}
/* Pick the best available interrupt mechanism: MSI-X, then MSI, then
 * the legacy PCI INTx line.  Returns 0 on success, negative errno if no
 * interrupt could be set up at all. */
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi*/
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	/* Neither MSI-X nor MSI worked and the BIOS gave us no INTx line. */
	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	/* fall back to legacy interrupt*/
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}
#else

/* Without PCI support, interrupts are wired up by the bus glue, so the
 * MSI setup and teardown helpers are no-ops. */
static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

#endif /* CONFIG_PCI */
/* Timer callback for the SN65LVPE502CP quirk: poll every USB3 port and,
 * when one is stuck in compliance mode, kick the roothub so the USB
 * core issues a warm reset.  Re-arms itself until every port has been
 * seen in U0. */
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = xhci_readl(xhci, xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg(xhci, "Compliance mode detected->port %d\n",
					i + 1);
			xhci_dbg(xhci, "Attempting compliance mode recovery\n");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	/* Keep polling until every USB3 port has reached U0 at least once. */
	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
/*
* Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
* that causes ports behind that hardware to enter compliance mode sometimes.
* The quirk creates a timer that polls every 2 seconds the link state of
* each host controller's port and recovers it by issuing a Warm reset
* if Compliance mode is detected, otherwise the port will become "dead" (no
* device connections or disconnections will be detected anymore). Becasue no
* status event is generated when entering compliance mode (per xhci spec),
* this quirk is needed on systems that have the failing hardware installed.
*/
/* Arm the compliance-mode polling timer (see compliance_mode_recovery)
 * with a COMP_MODE_RCVRY_MSECS period. */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);
	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	/* Expiry precision doesn't matter here; allow a full period of
	 * slack so the timer can be coalesced with others. */
	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg(xhci, "Compliance mode recovery timer initialized\n");
}
/*
* This function identifies the systems that have installed the SN65LVPE502CP
* USB3.0 re-driver and that need the Compliance Mode Quirk.
* Systems:
* Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
*/
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *product = dmi_get_system_info(DMI_PRODUCT_NAME);
	const char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);

	/* Bail out unless DMI identifies an HP machine. */
	if (!product || !vendor || !strstr(vendor, "Hewlett-Packard"))
		return false;

	/* Only these workstation models ship the affected re-driver. */
	return strstr(product, "Z420") ||
	       strstr(product, "Z620") ||
	       strstr(product, "Z820") ||
	       strstr(product, "Z1 Workstation");
}
/* True once every USB3 port has reported link state U0 at least once. */
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1);
}
/*
* Initialize memory for HCD and xHC (one-time init).
*
* Program the PAGESIZE register, initialize the device context array, create
* device contexts (?), set up a command ring segment (or two?), create event
* ring (one for now).
*/
/*
 * One-time HCD/xHC initialization: set up the lock, apply the 0.95 link
 * TRB quirk if requested, allocate all controller memory, and arm the
 * compliance-mode recovery timer on affected systems.
 *
 * Returns 0 on success or the error from xhci_mem_init().
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);

	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}

	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	/*
	 * Don't arm the compliance-mode recovery timer when memory init
	 * failed: the timer would later run against state that was never
	 * fully set up, and no teardown path would ever delete it.
	 */
	if (retval)
		return retval;

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Debug-only timer callback: periodically dump the event ring, command
 * ring and every device endpoint ring, then re-arm itself unless the
 * driver is shutting down (xhci->zombie). */
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	/* Stop polling (and don't re-arm) once the hardware is gone or the
	 * controller has been marked dying/halted. */
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	/* Walk every allocated device slot and dump all 31 endpoint rings. */
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Re-arm unless xhci_stop()/xhci_resume() flagged us to quit. */
	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/* Final stage of xhci_run(), executed for the USB3 (shared) roothub:
 * actually start the controller and mark it running. */
static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		/* Couldn't start; force the controller back to halted. */
		xhci_halt(xhci);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}
/*
* Start the HC after it was halted.
*
* This function is called by the USB core when the HC driver is added.
* Its opposite is xhci_stop().
*
* xhci_init() must be called once before this function can be called.
* Reset the HC, enable device slot contexts, program DCBAAP, and
* set command ring pointer and event ring pointer.
*
* Setup MSI-X vectors and enable interrupts.
*/
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	/* The second (USB3/shared) hcd registration finishes the start-up. */
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	ret = xhci_init(hcd);
	if (ret)
		return ret;
	xhci_dbg(xhci, "Called HCD init\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Debug builds poll and dump the event ring periodically. */
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	/* Interrupt moderation interval of 160 (in 250ns units per the
	 * register layout used here). */
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	/* NEC hosts answer a vendor command with their firmware version. */
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}
/* Halt the controller without freeing memory; used when only the shared
 * (USB3) hcd is being torn down. */
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}
/*
* Stop xHCI driver.
*
* This function is called by the USB core when the HC driver is removed.
* Its opposite is xhci_run().
*
* Disable device contexts, disable IRQs, and quiesce the HC.
* Reset the HC, finish any completed transactions, and cleanup memory.
*/
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* The USB3 (shared) roothub only needs a halt, not full teardown. */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	/* Free IRQs before deleting timers so no handler can re-arm them. */
	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*
* Shutdown HC (not bus-specific)
*
* This is called when the machine is rebooting or halting. We assume that the
* machine will be powered off, and the HC's internal state will be reset.
* Don't bother to free memory.
*
* This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Some hosts misbehave across reboot unless the xHCI ports are
	 * handed back to the companion controller first. */
	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
#ifdef CONFIG_PCI
/* Wait for any in-flight MSI-X handlers to finish before suspending. */
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

#else

/* No PCI: nothing to synchronize. */
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif /* CONFIG_PCI */
/* Snapshot the operational and interrupter registers into xhci->s3 so
 * they can be re-programmed after a suspend/resume cycle. */
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}
/* Write back the register snapshot taken by xhci_save_registers(). */
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
/* Point the hardware command ring dequeue pointer (CRCR) at the
 * software dequeue position, preserving the register's reserved low
 * bits and OR-ing in the ring's current cycle state. */
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
* The whole command ring must be cleared to zero when we suspend the host.
*
* The host doesn't save the command ring pointer in the suspend well, so we
* need to re-program it on resume. Unfortunately, the pointer must be 64-byte
* aligned, because of the reserved bits in the command ring dequeue pointer
* register. Therefore, we can't just set the dequeue pointer back in the
* middle of the ring (TRBs are 16-byte aligned).
*/
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	/* Zero every TRB in every segment, but keep each segment's trailing
	 * link TRB (minus its cycle bit) so the segments stay chained. */
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/* Full ring minus the link TRB, minus one so enqueue never catches
	 * dequeue. */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
/*
* Stop HC (not bus-specific)
*
* This is called when the machine transition into S3/S4 mode.
*
*/
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	/* Both roothubs must already be in bus suspend before the
	 * controller itself may be suspended. */
	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	/* The hardware doesn't save the command ring pointer across
	 * suspend, so zero it and re-program the dequeue pointer now. */
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag (tells the controller to save its own
	 * internal state) */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
/*
* start xHC (not bus-specific)
*
* This is called when the machine transition from S3/S4 mode.
*
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	/* Quirky hosts can't restore state; treat resume like hibernation
	 * so we take the full halt/reset/re-init path below.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		/* Lock is dropped here; the error return from xhci_init
		 * below therefore does not leak the lock.
		 */
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		/* NOTE(review): presumably xhci_init re-created the
		 * compliance mode recovery timer, so we must not init it
		 * again at the bottom of this function - confirm.
		 */
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		/* Resume root hubs only when have pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(hcd);
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
	}

	/*
	 * If system is subject to the Quirk, Compliance Mode Timer needs to
	 * be re-initialized Always after a system resume. Ports are subject
	 * to suffer the Compliance Mode issue again. It doesn't matter if
	 * ports have entered previously to U0 before system's suspension.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);

	return retval;
}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/**
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
* value to right shift 1 for the bitmask.
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
* For control endpoints, the IN index is used (OUT index is unused), so
* index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
*/
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int epnum = usb_endpoint_num(desc);

	/* Control endpoints use the IN form of the formula, which reduces
	 * to epnum * 2 (see the comment above this function).
	 */
	if (usb_endpoint_xfer_control(desc))
		return epnum * 2;
	if (usb_endpoint_dir_in(desc))
		return epnum * 2;	/* (epnum * 2) + 1 - 1 */
	return epnum * 2 - 1;		/* (epnum * 2) + 0 - 1 */
}
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	unsigned int ep_index = xhci_get_endpoint_index(desc);

	return 1 << (ep_index + 1);
}
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	/* 2 << n == 1 << (n + 1): skip over the slot-context bit. */
	return 2U << ep_index;
}
/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	/* fls() returns the 1-based position of the highest set bit;
	 * the context index is one less than that.
	 */
	unsigned int highest_flag_pos = fls(added_ctxs);

	return highest_flag_pos - 1;
}
/* Returns 1 if the arguments are OK;
* returns 0 this is a root hub; returns -EINVAL for NULL pointers.
*/
/* Validate the common arguments passed to xHCI hook functions.
 * Returns 1 when the arguments are usable, 0 when udev is a root hub
 * (callers should skip xHC-specific work), -EINVAL for NULL pointers or
 * an unaddressed/mismatched device, and -ENODEV when the host halted.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		/* No parent means this is the root hub itself. */
		printk(KERN_DEBUG "xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and virt_dev does not match\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_command *command,
bool ctx_change, bool must_succeed);
/*
* Full speed devices may have a max packet size greater than 8 bytes, but the
* USB core doesn't know that until it reads the first 8 bytes of the
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	/* What the hardware currently believes ep0's max packet size is
	 * (from the output/device context)...
	 */
	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	/* ...versus what the USB core read from the device descriptor. */
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		/* Patch only the max packet field of ep_info2. */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		/* ctx_change=true selects an Evaluate Context command;
		 * must_succeed=false (see the forward declaration above).
		 */
		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
/*
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*/
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	/* Decide how many TDs this URB needs:
	 * - one per packet for isochronous transfers,
	 * - two for bulk OUT that needs a trailing zero-length TD
	 *   (URB_ZERO_PACKET set and length a multiple of maxpacket),
	 * - one otherwise.
	 */
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	/* One contiguous allocation carved into per-TD pointers below. */
	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		/* Reject submissions while the endpoint is switching into or
		 * out of stream mode; its ring layout is in flux.
		 */
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		/* Remaining transfer type: isochronous. */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	/* Both error labels are reached with xhci->lock still held. */
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Get the right ring for the given URB.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
*/
/* Resolve the transfer ring an URB should be queued on.
 * For a stream-capable endpoint the URB's stream ID selects the ring;
 * otherwise the endpoint's singular ring is returned.  Returns NULL
 * (with a warning) for a missing or out-of-range stream ID.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id = urb->dev->slot_id;
	unsigned int ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	unsigned int stream_id = urb->stream_id;
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	/* Common case: endpoint without streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id != 0 && stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	if (stream_id == 0)
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
	else
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has "
				"stream IDs 1 to %u allocated, "
				"but stream ID %u is requested.\n",
				slot_id, ep_index,
				ep->stream_info->num_streams - 1,
				stream_id);
	return NULL;
}
/*
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
* should pick up where it left off in the TD, unless a Set Transfer Ring
* Dequeue Pointer is issued.
*
* The TRBs that make up the buffers for the canceled URB will be "removed" from
* the ring. Since the ring is a contiguous structure, they can't be physically
* removed. Instead, there are two options:
*
* 1) If the HC is in the middle of processing the URB to be canceled, we
* simply move the ring's dequeue pointer past those TRBs using the Set
* Transfer Ring Dequeue Pointer command. This will be the common case,
* when drivers timeout on the last submitted URB and attempt to cancel.
*
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
*
* 3) The TD may have completed by the time the Stop Endpoint Command
* completes, so software needs to handle that case too.
*
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
*
* Note that this function can be called in any context, or so says
* usb_hcd_unlink_urb()
*/
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		/* All-ones status means the controller is gone (e.g. PCI hot
		 * unplug); give the URB back right away rather than queueing
		 * a stop endpoint command the hardware can't process.
		 */
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		/* Unlink only the TDs that haven't completed yet. */
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists. We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	/* td_cnt TDs have already completed; only the remainder are put on
	 * the endpoint's cancelled list.
	 */
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		/* Arm the watchdog in case the stop command never completes. */
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
*
* The USB core will not allow URBs to be queued to an endpoint that is being
* disabled, so there's no need for mutual exclusion to protect
* the xhci->devs[slot_id] structure.
*/
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		/* The slot context and default control endpoint can never
		 * be dropped individually.
		 */
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* A dropped endpoint must not also be flagged as added. */
	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Wipe the input endpoint context so stale state isn't reused. */
	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
*
* The USB core will not allow URBs to be queued to an endpoint until the
* configuration or alt setting is installed in the device, so there's no need
* for mutual exclusion to protect the xhci->devs[slot_id] structure.
*/
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Clear the add/drop flags and all endpoint state from the input context
 * so that a later configure endpoint command doesn't pick up stale state.
 */
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx =
		xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched, so it is safe (and necessary) to wipe everything.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

	for (i = 1; i < 31; ++i) {
		struct xhci_ep_ctx *ep_ctx =
			xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);

		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
/* Translate a Configure Endpoint command completion code into an errno
 * value that can be returned to the USB core.
 */
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	switch (*cmd_status) {
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		return 0;
	case COMP_ENOMEM:
		/* FIXME: can we allocate more resources for the HC? */
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		return -ENOMEM;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		/* FIXME: can we go back to the old state? */
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		return -ENOSPC;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev,
			 "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		return -EINVAL;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		return -ENODEV;
	default:
		xhci_err(xhci,
			 "ERROR: unexpected command completion code 0x%x.\n",
			 *cmd_status);
		return -EINVAL;
	}
}
/* Translate an Evaluate Context command completion code into an errno
 * value that can be returned to the USB core.
 */
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		/* Fixed message: the adjacent literals previously
		 * concatenated to "...not enabled forevaluate..."
		 * (missing space between "for" and "evaluate").
		 */
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		/* Dump the output slot context to aid debugging. */
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Count the endpoints a pending configure endpoint command would add
 * (endpoints both added and dropped are "changed" and not counted).
 */
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 *
	 * The flags fields are little-endian; convert to CPU order before
	 * shifting so the two low bits discarded really are the slot and
	 * ep0 flags on big-endian hosts (matches the le32_to_cpu usage
	 * elsewhere in this file).
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/* Count the endpoints a pending configure endpoint command would drop
 * (endpoints both added and dropped are "changed" and not counted).
 */
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Convert the little-endian flag words before shifting out the slot
	 * and ep0 bits, so the right bits are discarded on big-endian hosts.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 * - the first configure endpoint command drops more endpoints than it adds
 * - a second configure endpoint command that adds more endpoints is queued
 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
/* Reserve endpoint contexts for the endpoints a pending configure endpoint
 * command would add.  Must be called with xhci->lock held; returns -ENOMEM
 * when the host's active endpoint limit would be exceeded.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	u32 new_active = xhci->num_active_eps + added_eps;

	if (new_active > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps = new_active;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}
/*
* The configure endpoint was failed by the xHC for some other reason, so we
* need to revert the resources that failed configuration would have used.
*
* Must be called with xhci->lock held.
*/
/* Give back the endpoint contexts reserved for a configure endpoint command
 * that the xHC rejected.  Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);

	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}
/*
* Now that the command has completed, clean up the active endpoint count by
* subtracting out the endpoints that were dropped (but not changed).
*
* Must be called with xhci->lock held.
*/
/* After a configure endpoint command completes, subtract the endpoints that
 * were dropped (but not merely changed) from the active endpoint count.
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);

	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}
/* Bandwidth block size used by the scheduling math, chosen by device speed. */
static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	if (udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL)
		return FS_BLOCK;
	if (udev->speed == USB_SPEED_HIGH)
		return HS_BLOCK;
	if (udev->speed == USB_SPEED_SUPER)
		return SS_BLOCK;
	/* USB_SPEED_UNKNOWN / USB_SPEED_WIRELESS: should never happen */
	return 1;
}
/* Worst-case per-packet overhead for an interval: low speed dominates full
 * speed, which dominates high speed.
 */
static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	unsigned int overhead = HS_OVERHEAD;

	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		overhead = FS_OVERHEAD;
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		overhead = LS_OVERHEAD;
	return overhead;
}
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 *
 * Returns -ENOMEM if activating the TT would overflow the root port's
 * high-speed bandwidth budget, 0 otherwise.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT enactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	/* TT is going from inactive to active: check that the fixed
	 * TT_HS_OVERHEAD still fits under the HS limit.
	 * NOTE(review): old_active_eps is always 0 here because of the early
	 * return above, so the first half of this condition is redundant.
	 */
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}
/*
 * Check the SuperSpeed periodic bandwidth totals for one device against the
 * IN and OUT limits, after subtracting the reserved percentage for
 * non-periodic traffic.  Returns -ENOMEM if either direction is over budget.
 */
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int reserved_in, reserved_out;

	reserved_in = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	reserved_out = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);

	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - reserved_in))
		return -ENOMEM;
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - reserved_out))
		return -ENOMEM;
	return 0;
}
/*
* This algorithm is a very conservative estimate of the worst-case scheduling
* scenario for any one interval. The hardware dynamically schedules the
* packets, so we can't tell which microframe could be the limiting factor in
* the bandwidth scheduling. This only takes into account periodic endpoints.
*
* Obviously, we can't solve an NP complete problem to find the minimum worst
* case scenario. Instead, we come up with an estimate that is no less than
* the worst case bandwidth used for any one microframe, but may be an
* over-estimate.
*
* We walk the requirements for each endpoint by interval, starting with the
* smallest interval, and place packets in the schedule where there is only one
* possible way to schedule packets for that interval. In order to simplify
* this algorithm, we record the largest max packet size for each interval, and
* assume all packets will be that size.
*
* For interval 0, we obviously must schedule all packets for each interval.
* The bandwidth for interval 0 is just the amount of data to be transmitted
* (the sum of all max ESIT payload sizes, plus any overhead per packet times
* the number of packets).
*
* For interval 1, we have two possible microframes to schedule those packets
* in. For this algorithm, if we can schedule the same number of packets for
* each possible scheduling opportunity (each microframe), we will do so. The
* remaining number of packets will be saved to be transmitted in the gaps in
* the next interval's scheduling sequence.
*
* As we move those remaining packets to be scheduled with interval 2 packets,
* we have to double the number of remaining packets to transmit. This is
* because the intervals are actually powers of 2, and we would be transmitting
* the previous interval's packets twice in this interval. We also have to be
* sure that when we look at the largest max packet size for this interval, we
* also look at the largest max packet size for the remaining packets and take
* the greater of the two.
*
* The algorithm continues to evenly distribute packets in each scheduling
* opportunity, and push the remaining packets out, until we get to the last
* interval. Then those packets and their associated overhead are just added
* to the bandwidth used.
*/
/*
 * Conservative worst-case bandwidth check for one root port (or TT)
 * bandwidth domain; see the algorithm description above.  Returns 0 and
 * records the total in bw_table->bw_used on success, -ENOMEM if the
 * proposed schedule exceeds the bus limit.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	/* SuperSpeed devices are accounted separately per direction. */
	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activing a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	/* Walk intervals smallest to largest, carrying unscheduled packets
	 * forward (doubled, since each interval is twice as long).
	 */
	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.  The endpoint list is kept sorted largest-first,
		 * so the head entry has the biggest max packet size.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
		"Available: %u " "percent\n",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	/* Schedule fits: commit the new total for this bandwidth domain. */
	bw_table->bw_used = bw_used;
	return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
ep_type != ISOC_IN_EP &&
ep_type != INT_IN_EP);
}
/* True for periodic (isoc or interrupt) IN endpoint types. */
static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return ep_type == ISOC_IN_EP || ep_type == INT_IN_EP;
}
/*
 * Bandwidth (in SS blocks) consumed by one SuperSpeed periodic endpoint.
 * Payload per service opportunity is mult * num_packets packets, each
 * costing SS_OVERHEAD plus its max-packet-size in blocks.
 */
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps_blocks = DIV_ROUND_UP(ep_bw->max_packet_size,
			SS_BLOCK);
	unsigned int payload = ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps_blocks);

	/* Interval 0: full payload plus one burst overhead every microframe */
	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST + payload;

	/* Otherwise average (payload + per-burst overhead) over the
	 * 2^interval microframes of the service interval, rounding up.
	 */
	return DIV_ROUND_UP(payload +
			ep_bw->mult * ep_bw->num_packets * SS_OVERHEAD_BURST,
			1 << ep_bw->ep_interval);
}
/*
 * Remove one periodic endpoint's bandwidth contribution from its interval
 * bandwidth table and take it off the per-interval endpoint list.
 * Async (control/bulk) endpoints are ignored; SuperSpeed endpoints are
 * accounted in the per-device ss_bw_in/ss_bw_out totals instead.
 */
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	/* Decrement the per-speed overhead refcount for this interval. */
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
/*
 * Mirror of xhci_drop_ep_from_interval_table(): add one periodic endpoint's
 * bandwidth contribution to its interval table and insert it into the
 * per-interval endpoint list, which is kept sorted by descending max packet
 * size so the worst-case entry is always at the head.
 */
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	/* Increment the per-speed overhead refcount for this interval. */
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int old_active_eps)
{
struct xhci_root_port_bw_info *rh_bw_info;
if (!virt_dev->tt_info)
return;
rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
} else if (old_active_eps != 0 &&
virt_dev->tt_info->active_eps == 0) {
rh_bw_info->num_active_tts -= 1;
rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
}
}
/*
 * Try to reserve bandwidth for the endpoint changes described by in_ctx:
 * drop old entries, install the new ones, and run the bandwidth check.
 * On failure every table change is undone and the saved per-endpoint
 * bw_info copies are restored, so the tables are left exactly as found.
 *
 * Returns 0 on success, -ENOMEM if the new configuration doesn't fit.
 * Must be called with xhci->lock held (caller serializes table access).
 */
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 *
 * @command: optional caller-supplied command (with its own input context and
 *           completion); if NULL the device's default in_ctx/completion are
 *           used.
 * @ctx_change: false -> Configure Endpoint, true -> Evaluate Context.
 * @must_succeed: passed through to the queueing function.
 *
 * Reserves host endpoint-context and bandwidth resources under xhci->lock
 * before queueing, and releases them again on any failure path.  Sleeps
 * waiting for command completion, so must not be called in atomic context.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;

	/* Reserve endpoint contexts up front on hosts with a hard limit. */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, in_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	/* Software bandwidth check; on failure give back any reserved
	 * endpoint contexts before bailing out.
	 */
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	/* Remember the TRB so the command can be cancelled on timeout. */
	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 *
 * On success, frees/caches the rings of dropped endpoints and installs
 * the new rings staged for added/changed endpoints.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
int i, ret;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
if (ret <= 0)
return;
xhci = hcd_to_xhci(hcd);
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) {
if (virt_dev->eps[i].new_ring) {
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->eps[i].new_ring = NULL;
}
}
xhci_zero_in_ctx(xhci, virt_dev);
}
/*
 * Prepare an input context for a configure endpoint command: set the
 * requested add/drop flags, copy the current slot context from the output
 * context, and force SLOT_FLAG on (the slot context must always be
 * evaluated).  All flag fields are stored little-endian.
 */
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
/*
 * For hosts with XHCI_RESET_EP_QUIRK: stage an input context whose endpoint
 * context carries the new dequeue pointer (and cycle state), so a later
 * configure endpoint command moves the hardware past a stalled TD.
 * Bails out with a warning if the dequeue segment/pointer don't map to a
 * valid DMA address.
 */
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	/* Low bits of the deq field carry the new cycle state. */
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
/*
 * Move the hardware dequeue pointer past the TD that stalled, either by
 * queueing a Set TR Dequeue Pointer command or (on hosts with the reset
 * endpoint quirk) by staging the state in the input context for a later
 * configure endpoint command.
 */
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 *
 * Only acts if a TD was recorded as stopped on the endpoint; control
 * endpoint stalls are handled elsewhere.  Queues a Reset Endpoint command
 * and, on success, repositions the ring dequeue pointer past the stalled TD.
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	/* Clear the stopped-TD bookkeeping whether or not the reset queued. */
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
/*
 * Validate that one endpoint is eligible for stream setup: it must have a
 * SuperSpeed companion descriptor that advertises streams, must not already
 * have (or be getting) streams, and must have no URBs pending on its ring.
 *
 * Returns 0 if the endpoint can get streams, -EINVAL otherwise.
 */
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
/*
 * Round the requested stream count up to a power-of-two context array size
 * and clamp both the array size and the stream count to what the host's
 * primary stream array supports.
 */
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int hw_max = HCC_MAX_PSA(xhci->hcc_params);

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * We only use primary stream array entries here (secondary stream
	 * arrays, similar to 2nd level page entries, are an optional xHCI
	 * feature).  xHCs must support at least 4 stream IDs.
	 */
	if (*num_stream_ctxs > hw_max) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				hw_max);
		*num_stream_ctxs = hw_max;
		*num_streams = hw_max;
	}
}
/* Returns an error code if one of the endpoint already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 *
 * On success, *num_streams may be lowered to the smallest limit any
 * endpoint supports (+1 for stream 0), and *changed_ep_bitmask accumulates
 * each endpoint's context flag.  Duplicate endpoints in eps[] are rejected
 * with -EINVAL.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		/* A repeated flag means the same endpoint appeared twice. */
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
/*
 * Build the bitmask of endpoint context flags for endpoints whose streams
 * are about to be freed.
 *
 * Returns 0 (after warning) if any endpoint is already having its streams
 * disabled, or never had streams set up at all — either indicates a driver
 * bug or a free/disable race, and nothing should be changed.
 *
 * Fix: the original xhci_warn() format strings embedded the newline in the
 * middle of the message ("0x%x\n, "), splitting each warning across two log
 * lines and leaving the second fragment without a terminating newline.  The
 * newline now ends the message.
 */
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
* The USB device drivers use this function (though the HCD interface in USB
* core) to prepare a set of bulk endpoints to use streams. Streams are used to
* coordinate mass storage command queueing across multiple endpoints (basically
* a stream ID == a task ID).
*
* Setting up streams involves allocating the same size stream context array
* for each endpoint and issuing a configure endpoint command for all endpoints.
*
* Don't allow the call to succeed if one endpoint only supports one stream
* (which means it doesn't support streams at all).
*
* Drivers may get less stream IDs than they asked for, if the host controller
* hardware or endpoints claim they can't support the number of requested
* stream IDs.
*/
/* Allocate stream contexts for a set of bulk endpoints and switch them to
 * streams mode via a single Configure Endpoint command.
 * Returns the number of usable stream IDs (excluding reserved stream 0) on
 * success, or a negative errno on failure.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags)
{
int i, ret;
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *config_cmd;
unsigned int ep_index;
unsigned int num_stream_ctxs;
unsigned long flags;
u32 changed_ep_bitmask = 0;
if (!eps)
return -EINVAL;
/* Add one to the number of streams requested to account for
* stream 0 that is reserved for xHCI usage.
*/
num_streams += 1;
xhci = hcd_to_xhci(hcd);
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
/* Check to make sure all endpoints are not already configured for
* streams. While we're at it, find the maximum number of streams that
* all the endpoints will support and check for duplicate endpoints.
*/
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
num_eps, &num_streams, &changed_ep_bitmask);
if (ret < 0) {
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
if (num_streams <= 1) {
xhci_warn(xhci, "WARN: endpoints can't handle "
"more than one stream.\n");
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
return -EINVAL;
}
vdev = xhci->devs[udev->slot_id];
/* Mark each endpoint as being in transition, so
* xhci_urb_enqueue() will reject all URBs.
*/
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
}
/* Memory allocation below may sleep, so it must happen unlocked. */
spin_unlock_irqrestore(&xhci->lock, flags);
/* Setup internal data structures and allocate HW data structures for
* streams (but don't install the HW structures in the input context
* until we're sure all memory allocation succeeded).
*/
xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
num_stream_ctxs, num_streams);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
num_streams, mem_flags);
if (!vdev->eps[ep_index].stream_info)
goto cleanup;
/* Set maxPstreams in endpoint context and update deq ptr to
* point to stream context array. FIXME
*/
}
/* Set up the input context for a configure endpoint command. */
for (i = 0; i < num_eps; i++) {
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
xhci_endpoint_copy(xhci, config_cmd->in_ctx,
vdev->out_ctx, ep_index);
xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
vdev->eps[ep_index].stream_info);
}
/* Tell the HW to drop its old copy of the endpoint context info
* and add the updated copy from the input context.
*/
xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
/* Issue and wait for the configure endpoint command */
ret = xhci_configure_endpoint(xhci, udev, config_cmd,
false, false);
/* xHC rejected the configure endpoint command for some reason, so we
* leave the old ring intact and free our internal streams data
* structure.
*/
if (ret < 0)
goto cleanup;
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
udev->slot_id, ep_index);
vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
}
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Subtract 1 for stream 0, which drivers can't use */
return num_streams - 1;
cleanup:
/* If it didn't work, free the streams! */
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
vdev->eps[ep_index].stream_info = NULL;
/* FIXME Unset maxPstreams in endpoint context and
* update deq ptr to point to normal string ring.
*/
vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
xhci_endpoint_zero(xhci, vdev, eps[i]);
}
xhci_free_command(xhci, config_cmd);
/* NOTE(review): any failure after the GETTING_STREAMS marking lands
 * here and reports -ENOMEM, even when xhci_configure_endpoint()
 * failed with a different error code — the original error is lost.
 */
return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
* without streams.
*
* Modify the endpoint context state, submit a configure endpoint command,
* and free all endpoint rings for streams if that completes successfully.
*/
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
/* NOTE: mem_flags is unused here; the configure-endpoint command was
 * preallocated when the streams were set up (free_streams_command).
 */
int i, ret;
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *command;
unsigned int ep_index;
unsigned long flags;
u32 changed_ep_bitmask;
xhci = hcd_to_xhci(hcd);
vdev = xhci->devs[udev->slot_id];
/* Set up a configure endpoint command to remove the streams rings */
spin_lock_irqsave(&xhci->lock, flags);
changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
udev, eps, num_eps);
if (changed_ep_bitmask == 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -EINVAL;
}
/* Use the xhci_command structure from the first endpoint. We may have
* allocated too many, but the driver may call xhci_free_streams() for
* each endpoint it grouped into one call to xhci_alloc_streams().
*/
ep_index = xhci_get_endpoint_index(&eps[0]->desc);
command = vdev->eps[ep_index].stream_info->free_streams_command;
for (i = 0; i < num_eps; i++) {
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
/* Block further stream teardown attempts on this endpoint. */
xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
EP_GETTING_NO_STREAMS;
xhci_endpoint_copy(xhci, command->in_ctx,
vdev->out_ctx, ep_index);
xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
&vdev->eps[ep_index]);
}
xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Issue and wait for the configure endpoint command,
* which must succeed.
*/
ret = xhci_configure_endpoint(xhci, udev, command,
false, true);
/* xHC rejected the configure endpoint command for some reason, so we
* leave the streams rings intact.
*/
if (ret < 0)
return ret;
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
vdev->eps[ep_index].stream_info = NULL;
/* FIXME Unset maxPstreams in endpoint context and
* update deq ptr to point to normal string ring.
*/
vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
}
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/*
* Deletes endpoint resources for endpoints that were active before a Reset
* Device command, or a Disable Slot command. The Reset Device command leaves
* the control endpoint intact, whereas the Disable Slot command deletes it.
*
* Must be called with xhci->lock held.
*/
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	unsigned int dropped = 0;
	unsigned int dropped_flags = 0;
	int ep;

	/* Count every endpoint with a ring; skip ep0 unless asked to drop it. */
	for (ep = drop_control_ep ? 0 : 1; ep < 31; ep++) {
		if (!virt_dev->eps[ep].ring)
			continue;
		dropped_flags |= 1 << ep;
		dropped++;
	}

	/* Release their host controller endpoint resources. */
	xhci->num_active_eps -= dropped;
	if (dropped)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				dropped, dropped_flags,
				xhci->num_active_eps);
}
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
* xhci_address_device(), and then re-set up the configuration. If this is
* called because of a usb_reset_and_verify_device(), then the old alternate
* settings will be re-installed through the normal bandwidth allocation
* functions.
*
* Wait for the Reset Device command to finish. Remove all structures
* associated with the endpoints that were disabled. Clear the input device
* structure? Cache the rings? Reset the control endpoint 0 max packet size?
*
* If the virt_dev to be reset does not exist or does not match the udev,
* it means the device is lost, possibly due to the xHC restore error and
* re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
* re-allocate the device.
*/
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
int ret, i;
unsigned long flags;
struct xhci_hcd *xhci;
unsigned int slot_id;
struct xhci_virt_device *virt_dev;
struct xhci_command *reset_device_cmd;
int timeleft;
int last_freed_endpoint;
struct xhci_slot_ctx *slot_ctx;
int old_active_eps = 0;
ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
slot_id = udev->slot_id;
virt_dev = xhci->devs[slot_id];
if (!virt_dev) {
xhci_dbg(xhci, "The device to be reset with slot ID %u does "
"not exist. Re-allocate the device\n", slot_id);
ret = xhci_alloc_dev(hcd, udev);
/* xhci_alloc_dev() returns 1 on success, 0 on failure. */
if (ret == 1)
return 0;
else
return -EINVAL;
}
if (virt_dev->tt_info)
old_active_eps = virt_dev->tt_info->active_eps;
if (virt_dev->udev != udev) {
/* If the virt_dev and the udev does not match, this virt_dev
* may belong to another udev.
* Re-allocate the device.
*/
xhci_dbg(xhci, "The device to be reset with slot ID %u does "
"not match the udev. Re-allocate the device\n",
slot_id);
ret = xhci_alloc_dev(hcd, udev);
if (ret == 1)
return 0;
else
return -EINVAL;
}
/* If device is not setup, there is no point in resetting it */
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
SLOT_STATE_DISABLED)
return 0;
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
/* Allocate the command structure that holds the struct completion.
* Assume we're in process context, since the normal device reset
* process has to wait for the device anyway. Storage devices are
* reset as part of error handling, so use GFP_NOIO instead of
* GFP_KERNEL.
*/
reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!reset_device_cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
return -ENOMEM;
}
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
ret = xhci_queue_reset_device(xhci, slot_id);
if (ret) {
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
list_del(&reset_device_cmd->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
goto command_cleanup;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the Reset Device command to finish */
timeleft = wait_for_completion_interruptible_timeout(
reset_device_cmd->completion,
USB_CTRL_SET_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for reset device command\n",
timeleft == 0 ? "Timeout" : "Signal");
spin_lock_irqsave(&xhci->lock, flags);
/* The timeout might have raced with the event ring handler, so
* only delete from the list if the item isn't poisoned.
*/
if (reset_device_cmd->cmd_list.next != LIST_POISON1)
list_del(&reset_device_cmd->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
ret = -ETIME;
goto command_cleanup;
}
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
* or the device wasn't in the addressed or configured state.
*/
ret = reset_device_cmd->status;
switch (ret) {
case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
case COMP_CTX_STATE: /* 0.96 completion code for same thing */
xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
slot_id,
xhci_get_slot_state(xhci, virt_dev->out_ctx));
xhci_info(xhci, "Not freeing device rings.\n");
/* Don't treat this as an error. May change my mind later. */
ret = 0;
goto command_cleanup;
case COMP_SUCCESS:
xhci_dbg(xhci, "Successful reset device command.\n");
break;
default:
if (xhci_is_vendor_info_code(xhci, ret))
break;
xhci_warn(xhci, "Unknown completion code %u for "
"reset device command.\n", ret);
ret = -EINVAL;
goto command_cleanup;
}
/* Free up host controller endpoint resources */
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
/* Don't delete the default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, false);
spin_unlock_irqrestore(&xhci->lock, flags);
}
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
struct xhci_virt_ep *ep = &virt_dev->eps[i];
/* Stream data must be freed before the ring it hangs off. */
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
if (ep->ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
xhci_drop_ep_from_interval_table(xhci,
&virt_dev->eps[i].bw_info,
virt_dev->bw_table,
udev,
&virt_dev->eps[i],
virt_dev->tt_info);
xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
}
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
ret = 0;
command_cleanup:
xhci_free_command(xhci, reset_device_cmd);
return ret;
}
/*
* At this point, the struct usb_device is about to go away, the device has
* disconnected, and all traffic has been stopped and the endpoints have been
* disabled. Free any HC data structures associated with that device.
*/
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
#ifndef CONFIG_USB_DEFAULT_PERSIST
struct device *dev = hcd->self.controller;
#endif
unsigned long flags;
u32 state;
int i, ret;
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
* Decrement the counter here to allow controller to runtime suspend
* if no devices remain.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_put_noidle(dev);
#endif
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
* device.
*/
if (ret <= 0 && ret != -ENODEV)
return;
virt_dev = xhci->devs[udev->slot_id];
/* Stop any wayward timer functions (which may grab the lock) */
for (i = 0; i < 31; ++i) {
virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
/* Disable hardware LPM before the slot goes away. */
if (udev->usb2_hw_lpm_enabled) {
xhci_set_usb2_hardware_lpm(hcd, udev, 0);
udev->usb2_hw_lpm_enabled = 0;
}
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
/* 0xffffffff reads back when the controller has been removed/unwired. */
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
return;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/*
* Event command completion handler will free any data structures
* associated with the slot. XXX Can free sleep?
*/
}
/*
* Checks if we have enough host controller resources for the default control
* endpoint.
*
* Must be called with xhci->lock held.
*/
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	/* One more endpoint context is needed for the default control ep. */
	unsigned int needed = xhci->num_active_eps + 1;

	if (needed > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps = needed;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}
/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
#ifndef CONFIG_USB_DEFAULT_PERSIST
struct device *dev = hcd->self.controller;
#endif
unsigned long flags;
int timeleft;
int ret;
union xhci_trb *cmd_trb;
spin_lock_irqsave(&xhci->lock, flags);
/* Remember the enqueue slot so the command can be cancelled later. */
cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* XXX: how much time for xHC slot assignment? */
timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
XHCI_CMD_DEFAULT_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for a slot\n",
timeleft == 0 ? "Timeout" : "Signal");
/* cancel the enable slot request */
return xhci_cancel_cmd(xhci, NULL, cmd_trb);
}
/* The command completion handler stores the assigned slot ID here. */
if (!xhci->slot_id) {
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_reserve_host_control_ep_resources(xhci);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_warn(xhci, "Not enough host resources, "
"active endpoint contexts = %u\n",
xhci->num_active_eps);
goto disable_slot;
}
spin_unlock_irqrestore(&xhci->lock, flags);
}
/* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
udev->slot_id = xhci->slot_id;
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
* suspend if there is a device attached.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_get_noresume(dev);
#endif
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripherial? */
return 1;
disable_slot:
/* Disable slot, if we can do it without mem alloc */
spin_lock_irqsave(&xhci->lock, flags);
if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/*
* Issue an Address Device command (which will issue a SetAddress request to
* the device).
* We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
* we should only issue and wait on one address command at the same time.
*
* We add one to the device address issued by the hardware because the USB core
* uses address 1 for the root hubs (even though they're not really devices).
*/
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
unsigned long flags;
int timeleft;
struct xhci_virt_device *virt_dev;
int ret = 0;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
union xhci_trb *cmd_trb;
if (!udev->slot_id) {
xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
return -EINVAL;
}
virt_dev = xhci->devs[udev->slot_id];
if (WARN_ON(!virt_dev)) {
/*
* In plug/unplug torture test with an NEC controller,
* a zero-dereference was observed once due to virt_dev = 0.
* Print useful debug rather than crash if it is observed again!
*/
xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
udev->slot_id);
return -EINVAL;
}
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
/*
* If this is the first Set Address since device plug-in or
* virt_device reallocation after a resume with an xHCI power loss,
* then set up the slot context.
*/
if (!slot_ctx->dev_info)
xhci_setup_addressable_virt_dev(xhci, udev);
/* Otherwise, update the control endpoint ring enqueue pointer. */
else
xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
/* Only the slot context and ep0 context are evaluated by the xHC. */
ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
spin_lock_irqsave(&xhci->lock, flags);
/* Remember the enqueue slot so the command can be cancelled later. */
cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
return ret;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
XHCI_CMD_DEFAULT_TIMEOUT);
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
*/
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for address device command\n",
timeleft == 0 ? "Timeout" : "Signal");
/* cancel the address device command */
ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
if (ret < 0)
return ret;
return -ETIME;
}
/* Map the command completion code to an errno for the USB core. */
switch (virt_dev->cmd_status) {
case COMP_CTX_STATE:
case COMP_EBADSLT:
xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
udev->slot_id);
ret = -EINVAL;
break;
case COMP_TX_ERR:
dev_warn(&udev->dev, "Device not responding to set address.\n");
ret = -EPROTO;
break;
case COMP_DEV_ERR:
dev_warn(&udev->dev, "ERROR: Incompatible device for address "
"device command.\n");
ret = -ENODEV;
break;
case COMP_SUCCESS:
xhci_dbg(xhci, "Successful Address Device command\n");
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", virt_dev->cmd_status);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
ret = -EINVAL;
break;
}
if (ret) {
return ret;
}
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
udev->slot_id,
&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
(unsigned long long)
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
(unsigned long long)virt_dev->out_ctx->dma);
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
/*
* USB core uses address 1 for the roothubs, so we add one to the
* address given back to us by the HC.
*/
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
/* Use kernel assigned address for devices; store xHC assigned
* address locally. */
virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
+ 1;
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
return 0;
}
/*
 * Translate the port index into the real index in the HW port status
 * registers. Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index. The raw port number is 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
__le32 __iomem *addr;
int raw_port;
if (hcd->speed != HCD_USB3)
addr = xhci->usb2_ports[port1 - 1];
else
addr = xhci->usb3_ports[port1 - 1];
raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
return raw_port;
}
#ifdef CONFIG_PM_RUNTIME
/* BESL to HIRD Encoding array for USB2 LPM.
 * Index is the 4-bit BESL/HIRD field value; entries are compared against
 * the host's U2 exit latency in xhci_calculate_hird_besl() (presumably
 * both are in microseconds — TODO confirm against the USB 2.0 LPM errata).
 */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
/* Calculate the HIRD/BESL field value for the USB2 PORTPMSC register.
 * Combines a host part (derived from the controller's U2 exit latency)
 * with a device part (from the device's BESL descriptor fields, when
 * the device advertises BESL support), clamped to the 4-bit maximum.
 */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	u32 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
	int u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	int host_part = 0;
	int dev_part = 0;
	int besl;

	if (field & USB_BESL_SUPPORT) {
		/* Smallest BESL index whose encoding covers the U2 latency. */
		while (host_part < 16 &&
				xhci_besl_encoding[host_part] < u2del)
			host_part++;
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			dev_part = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			dev_part = USB_GET_BESL_DEEP(field);
	} else if (u2del > 50) {
		host_part = (u2del - 51) / 75 + 1;
	}

	besl = host_part + dev_part;
	return besl > 15 ? 15 : besl;
}
/* Probe whether a USB2 device tolerates software-driven L1 (LPM): force the
 * port into U2(L1), check the L1 status handshake, resume to U0, and verify
 * the port state. Devices that fail are remembered in lpm_failed_devs so the
 * test is not repeated. Returns 0 on success, negative errno on failure.
 */
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct dev_info *dev_info;
__le32 __iomem **port_array;
__le32 __iomem *addr, *pm_addr;
u32 temp, dev_id;
unsigned int port_num;
unsigned long flags;
int hird;
int ret;
if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
!udev->lpm_capable)
return -EINVAL;
/* we only support lpm for non-hub device connected to root hub yet */
if (!udev->parent || udev->parent->parent ||
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EINVAL;
spin_lock_irqsave(&xhci->lock, flags);
/* Look for devices in lpm_failed_devs list */
dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
le16_to_cpu(udev->descriptor.idProduct);
list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
if (dev_info->dev_id == dev_id) {
ret = -EINVAL;
goto finish;
}
}
port_array = xhci->usb2_ports;
port_num = udev->portnum - 1;
if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
ret = -EINVAL;
goto finish;
}
/*
* Test USB 2.0 software LPM.
* FIXME: some xHCI 1.0 hosts may implement a new register to set up
* hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
* in the June 2011 errata release.
*/
xhci_dbg(xhci, "test port %d software LPM\n", port_num);
/*
* Set L1 Device Slot and HIRD/BESL.
* Check device's USB 2.0 extension descriptor to determine whether
* HIRD or BESL shoule be used. See USB2.0 LPM errata.
*/
/* PORTPMSC is the register immediately after PORTSC. */
pm_addr = port_array[port_num] + 1;
hird = xhci_calculate_hird_besl(xhci, udev);
temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
xhci_writel(xhci, temp, pm_addr);
if (xhci->quirks & XHCI_PORTSC_DELAY)
ndelay(100);
/* Set port link state to U2(L1) */
addr = port_array[port_num];
xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
/* wait for ACK */
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
spin_lock_irqsave(&xhci->lock, flags);
/* Check L1 Status */
ret = xhci_handshake(xhci, pm_addr,
PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
if (ret != -ETIMEDOUT) {
/* enter L1 successfully */
temp = xhci_readl(xhci, addr);
xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
port_num, temp);
ret = 0;
} else {
temp = xhci_readl(xhci, pm_addr);
xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
port_num, temp & PORT_L1S_MASK);
ret = -EINVAL;
}
/* Resume the port */
xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
/* Drop the lock while sleeping, as above. */
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
spin_lock_irqsave(&xhci->lock, flags);
/* Clear PLC */
xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
/* Check PORTSC to make sure the device is in the right state */
if (!ret) {
temp = xhci_readl(xhci, addr);
xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
(temp & PORT_PLS_MASK) != XDEV_U0) {
xhci_dbg(xhci, "port L1 resume fail\n");
ret = -EINVAL;
}
}
if (ret) {
/* Insert dev to lpm_failed_devs list */
xhci_warn(xhci, "device LPM test failed, may disconnect and "
"re-enumerate\n");
dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
if (!dev_info) {
ret = -ENOMEM;
goto finish;
}
dev_info->dev_id = dev_id;
INIT_LIST_HEAD(&dev_info->list);
list_add(&dev_info->list, &xhci->lpm_failed_devs);
} else {
xhci_ring_device(xhci, udev->slot_id);
}
finish:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
/* Enable or disable hardware-controlled USB2 LPM for a device by programming
 * HIRD/RWE/HLE in the port's PORTPMSC register. Only supported for non-hub
 * devices directly below the root hub. Returns 0 on success, -EPERM when the
 * host or device does not support hardware LPM.
 */
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
__le32 __iomem **port_array;
__le32 __iomem *pm_addr;
u32 temp;
unsigned int port_num;
unsigned long flags;
int hird;
bool delay = false;
if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
if (!udev->parent || udev->parent->parent ||
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
if (udev->usb2_hw_lpm_capable != 1)
return -EPERM;
if (xhci->quirks & XHCI_PORTSC_DELAY)
delay = true;
spin_lock_irqsave(&xhci->lock, flags);
port_array = xhci->usb2_ports;
port_num = udev->portnum - 1;
/* PORTPMSC is the register immediately after PORTSC. */
pm_addr = port_array[port_num] + 1;
temp = xhci_readl(xhci, pm_addr);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num);
hird = xhci_calculate_hird_besl(xhci, udev);
if (enable) {
/* Program HIRD and remote-wake enable first, then set HLE in a
* separate write; the quirk delay is applied after each write.
*/
temp &= ~PORT_HIRD_MASK;
temp |= PORT_HIRD(hird) | PORT_RWE;
xhci_writel(xhci, temp, pm_addr);
if (delay)
ndelay(100);
temp = xhci_readl(xhci, pm_addr);
temp |= PORT_HLE;
xhci_writel(xhci, temp, pm_addr);
if (delay)
ndelay(100);
} else {
temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
xhci_writel(xhci, temp, pm_addr);
if (delay)
ndelay(100);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Software LPM must pass before hardware LPM is attempted. */
	if (xhci_usb2_software_lpm_test(hcd, udev))
		return 0;

	xhci_dbg(xhci, "software LPM test succeed\n");
	if (xhci->hw_lpm_support == 1) {
		udev->usb2_hw_lpm_capable = 1;
		if (!xhci_set_usb2_hardware_lpm(hcd, udev, 1))
			udev->usb2_hw_lpm_enabled = 1;
	}
	/* LPM is best-effort: failures are not reported to the caller. */
	return 0;
}
#else
/* Stub used when CONFIG_PM_RUNTIME is disabled: hardware LPM is never
 * programmed, so report success without touching the hardware. */
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
return 0;
}
/* Stub used when CONFIG_PM_RUNTIME is disabled: no LPM probing is done. */
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return 0;
}
#endif /* CONFIG_PM_RUNTIME */
/*---------------------- USB 3.0 Link PM functions ------------------------*/
#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
struct usb_endpoint_descriptor *desc)
{
/* NOTE(review): a bInterval of 0 would shift by -1 (undefined
 * behavior); callers use this only for periodic endpoints, where
 * bInterval is presumably >= 1 — confirm against the USB spec.
 */
return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
/* Decide the link-state timeout when hub-initiated LPM cannot be used for
 * @state (U1 or U2): allow device-initiated LPM if the device's exit
 * latencies (SEL/PEL, converted from ns to us) fit within the spec maximum,
 * otherwise disable LPM for this state entirely.
 */
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu ms\n",
				state_name, sel);
	else
		/* Fixed: newline was misplaced mid-message ("%llu\n ms"),
		 * splitting the log line; it now terminates the message.
		 */
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu ms\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
/* Returns the hub-encoded U1 timeout value.
* The U1 timeout should be the maximum of the following values:
* - For control endpoints, U1 system exit latency (SEL) * 3
* - For bulk endpoints, U1 SEL * 5
* - For interrupt endpoints:
* - Notification EPs, U1 SEL * 3
* - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
* - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
*/
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
struct usb_endpoint_descriptor *desc)
{
unsigned long long timeout_ns;
int ep_type;
int intr_type;
ep_type = usb_endpoint_type(desc);
/* Pick the per-endpoint-type minimum timeout (in nanoseconds),
* following the rules in the function's header comment.
*/
switch (ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
timeout_ns = udev->u1_params.sel * 3;
break;
case USB_ENDPOINT_XFER_BULK:
timeout_ns = udev->u1_params.sel * 5;
break;
case USB_ENDPOINT_XFER_INT:
intr_type = usb_endpoint_interrupt_type(desc);
if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
timeout_ns = udev->u1_params.sel * 3;
break;
}
/* Otherwise the calculation is the same as isoc eps */
/* fallthrough */
case USB_ENDPOINT_XFER_ISOC:
/* max(105% of bInterval, U1 SEL * 2) */
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
if (timeout_ns < udev->u1_params.sel * 2)
timeout_ns = udev->u1_params.sel * 2;
break;
default:
return 0;
}
/* The U1 timeout is encoded in 1us intervals. */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
if (timeout_ns == USB3_LPM_DISABLED)
timeout_ns++;
/* If the necessary timeout value is bigger than what we can set in the
* USB 3.0 hub, we have to disable hub-initiated U1.
*/
if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
return timeout_ns;
dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
"due to long timeout %llu ms\n", timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
/* Returns the hub-encoded U2 timeout value.
* The U2 timeout should be the maximum of:
* - 10 ms (to avoid the bandwidth impact on the scheduler)
* - largest bInterval of any active periodic endpoint (to avoid going
* into lower power link states between intervals).
* - the U2 Exit Latency of the device
*/
/* Compute the Intel-specific hub-encoded U2 timeout for one endpoint:
 * the largest of 10ms, the endpoint's service interval (periodic EPs only)
 * and the device's U2 exit latency, encoded in 256us units.
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	/* 10 ms floor, to limit the bandwidth impact on the scheduler. */
	unsigned long long timeout_ns = 10ULL * 1000 * 1000;
	unsigned long long exit_lat_ns;

	/* Periodic endpoints must not drop into U2 between intervals. */
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		unsigned long long interval_ns =
			xhci_service_interval_to_ns(desc);
		if (interval_ns > timeout_ns)
			timeout_ns = interval_ns;
	}

	/* The device's own U2 exit latency is another lower bound. */
	exit_lat_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (exit_lat_ns > timeout_ns)
		timeout_ns = exit_lat_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;

	/* Too big for the hub field: fall back to device-initiated LPM. */
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu ms\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
/* Dispatch to the vendor-specific timeout calculation for @state.
 * Only Intel hosts publish an algorithm so far; everyone else gets
 * USB3_LPM_DISABLED. (@timeout is unused here; kept for the call shape.)
 */
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (xhci->quirks & XHCI_INTEL_HOST) {
		if (state == USB3_LPM_U1)
			return xhci_calculate_intel_u1_timeout(udev, desc);
		return xhci_calculate_intel_u2_timeout(udev, desc);
	}
	return USB3_LPM_DISABLED;
}
/* Fold one endpoint's timeout requirement into *timeout.
 * Returns -E2BIG when the timeout is final (LPM disabled, or only
 * device-initiated LPM possible) so callers stop scanning endpoints.
 */
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 ep_timeout = xhci_call_host_update_timeout_for_endpoint(xhci,
			udev, desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (ep_timeout == USB3_LPM_DISABLED ||
			ep_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = ep_timeout;
		return -E2BIG;
	}

	/* Otherwise keep the maximum requirement seen so far. */
	if (*timeout < ep_timeout)
		*timeout = ep_timeout;
	return 0;
}
/* Fold the timeout requirements of every endpoint of @alt into *timeout.
 * Returns -E2BIG as soon as one endpoint finalizes the timeout (see
 * xhci_update_timeout_for_endpoint()), 0 otherwise.
 */
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	/* The original body ended the loop with a dead "continue;" —
	 * removed, it had no effect.
	 */
	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
/* Intel tier policy: U2 is allowed anywhere, but U1 is only allowed for
 * devices attached to the root hub or a first-tier hub. Returns 0 when the
 * state is permitted, -E2BIG otherwise.
 */
static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent = udev->parent;
	unsigned int num_hubs = 0;

	if (state == USB3_LPM_U2)
		return 0;

	/* Count the hubs between this device and the root hub. */
	while (parent->parent) {
		num_hubs++;
		parent = parent->parent;
	}
	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
/* Dispatch tier-policy checks to the vendor implementation.
 * Hosts without a published policy reject LPM with -EINVAL.
 */
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (!(xhci->quirks & XHCI_INTEL_HOST))
		return -EINVAL;
	return xhci_check_intel_tier_policy(udev, state);
}
/* Returns the U1 or U2 timeout that should be enabled.
* If the tier check or timeout setting functions return with a non-zero exit
* code, that means the timeout value has been finalized and we shouldn't look
* at any more endpoints.
*/
/* Compute the hub-encoded U1/U2 timeout over all of @udev's active
 * endpoints, honoring the host tier policy and any bound driver that asked
 * for hub-initiated LPM to be disabled.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	/* Host policy may forbid this link state for this device outright. */
	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	/* No active configuration: ep0's requirement is all there is. */
	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < USB_MAXINTERFACES; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		/* Non-zero return means the timeout is final; stop scanning. */
		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
/*
* Issue an Evaluate Context command to change the Maximum Exit Latency in the
* slot context. If that succeeds, store the new MEL in the xhci_virt_device.
*/
static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
		struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	/*
	 * virt_dev might not exists yet if xHC resumed from hibernate (S4) and
	 * xHC was re-initialized. Exit latency will be set later after
	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
	 */
	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	command = xhci->lpm_command;
	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	/* Lock is dropped here; the rest of the work only touches the
	 * pre-allocated lpm_command input context, and the command below is
	 * issued-and-waited-for (see comment further down).
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Flag the slot context as changed and patch the new MEL into it. */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		/* Success: remember the MEL that is now programmed. */
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
/* Compute the Max Exit Latency (in microseconds) to program into the slot
 * context, given that @state_changed is about to get @hub_encoded_timeout.
 * Accounts for whichever of U1/U2 will be enabled after the change.
 * Returns the MEL in us, or -E2BIG if it does not fit the 16-bit field.
 */
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us;
	bool u1_changing = (state_changed == USB3_LPM_U1);
	bool u2_changing = (state_changed == USB3_LPM_U2);
	bool timeout_off = (hub_encoded_timeout == USB3_LPM_DISABLED);

	/* Count U1's exit latency if U1 stays enabled (already on and not
	 * being disabled) or is being enabled right now.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED &&
				!(u1_changing && timeout_off)) ||
			(u1_changing && !timeout_off))
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	/* Same reasoning for U2. */
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED &&
				!(u2_changing && timeout_off)) ||
			(u2_changing && !timeout_off))
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	mel_us = (u1_mel_us > u2_mel_us) ? u1_mel_us : u2_mel_us;

	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	/* Pick a timeout, then derive the Max Exit Latency it implies. */
	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	/* Program the MEL into the slot context before reporting the
	 * timeout back to the USB core.
	 */
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}
/* Disable hub-initiated LPM for @state and reprogram the slot's Max Exit
 * Latency accordingly. Returns 0 on success or when LPM was never
 * supported, a negative error code otherwise.
 */
int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u16 mel;

	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */
/* Without CONFIG_PM there is no LPM support: always report it disabled. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}
/* Without CONFIG_PM there is nothing to disable; report success. */
int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* High-speed hubs with a Transaction Translator need per-TT state. */
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	/* Build the input context from the current output context, then mark
	 * the slot as a hub.
	 */
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * refer to section 6.2.2: MTT should be 0 for full speed hub,
	 * but it may be already set to 1 when setup an xHCI virtual
	 * device, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* EHCI mods by the periodic size. Why? */
return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
/* Common setup for the PCI and platform glue: allocate the xhci_hcd on the
 * primary (USB2) roothub registration, map the register windows, cache the
 * capability registers, apply quirks, then halt and reset the controller.
 */
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;
	u32 temp;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;
	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		/* Secondary hcd: only the DMA mask needs setting; the shared
		 * xhci_hcd was already fully initialized by the primary path.
		 */
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	/* Map the three register windows off the capability base. */
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	/* hcc_params is first read from hc_capbase to extract the interface
	 * version, then overwritten with the real HCCPARAMS register.
	 */
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* In xhci controllers which follow xhci 1.0 spec gives a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious event.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	if (xhci->quirks & XHCI_RESET_DELAY)
		usleep_range(350, 1000);
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}
	return 0;
error:
	kfree(xhci);
	return retval;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
/* Module init: register the PCI and platform glue drivers.
 *
 * Fixes vs. the previous version: usb_disabled() was only checked AFTER
 * both drivers had been registered, leaving them registered when USB is
 * disabled on the kernel command line — check it first instead. The two
 * printk messages also lacked trailing newlines.
 */
static int __init xhci_hcd_init(void)
{
	int retval;

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	/* Don't register any drivers if USB support is turned off. */
	if (usb_disabled())
		return -ENODEV;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering platform driver.\n");
		goto unreg_pci;
	}
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);
/* Module exit: unregister both glue drivers registered in xhci_hcd_init(). */
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);
| gpl-2.0 |
StudTeam6/competition | sw/airborne/subsystems/ins/ins_float_invariant.c | 6 | 21889 | /*
* Copyright (C) 2012-2013 Jean-Philippe Condomines, Gautier Hattenberger
*
* This file is part of paparazzi.
*
* paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file subsystems/ins/ins_float_invariant.c
* @author Jean-Philippe Condomines <jp.condomines@gmail.com>
*
* INS using invariant filter.
*
*/
#include "subsystems/ins/ins_float_invariant.h"
#include "subsystems/ahrs/ahrs_int_utils.h"
#include "subsystems/ahrs/ahrs_aligner.h"
#include "subsystems/ins.h"
#include "subsystems/gps.h"
#include "generated/airframe.h"
#include "generated/flight_plan.h"
#if INS_FINV_USE_UTM
#include "firmwares/fixedwing/nav.h"
#endif
#include "math/pprz_algebra_float.h"
#include "math/pprz_algebra_int.h"
#include "math/pprz_rk_float.h"
#include "math/pprz_isa.h"
#include "state.h"
// for debugging
#if SEND_INVARIANT_FILTER || PERIODIC_TELEMETRY
#include "subsystems/datalink/telemetry.h"
#endif
#if LOG_INVARIANT_FILTER
#include "sdLog.h"
#include "subsystems/chibios-libopencm3/chibios_sdlog.h"
bool_t log_started = FALSE;
#endif
/*------------- =*= Invariant Observers =*= -------------*
*
* State vector :
*
* x = [q0 q1 q2 q3 vx vy vz px py pz wb1 wb2 wb3 hb as]'
*
* Dynamic model (dim = 15) :
*
* x_qdot = 0.5 * x_quat * ( x_rates - x_bias );
* x_Vdot = A + 1/as (q * am * (q)-1);
* x_Xdot = V;
* x_bias_dot = 0;
* x_asdot = 0;
* x_hbdot = 0;
*
* Observation model (dim = 10):
* yv = V;
* yx = X;
* yh = <X,e3> - hb;
* yb = (q)-1 *B * q; (B : magnetometers)
*
*------------------------------------------------------*/
// Default values for the tuning gains
// Tuning parameter of speed error on attitude (e-2)
#ifndef INS_INV_LV
#define INS_INV_LV 2.
#endif
// Tuning parameter of mag error on attitude (e-2)
#ifndef INS_INV_LB
#define INS_INV_LB 6.
#endif
// Tuning parameter of horizontal speed error on speed
#ifndef INS_INV_MV
#define INS_INV_MV 8.
#endif
// Tuning parameter of vertical speed error on speed
#ifndef INS_INV_MVZ
#define INS_INV_MVZ 15.
#endif
// Tuning parameter of baro error on vertical speed
#ifndef INS_INV_MH
#define INS_INV_MH 0.2
#endif
// Tuning parameter of horizontal position error on position
#ifndef INS_INV_NX
#define INS_INV_NX 0.8
#endif
// Tuning parameter of vertical position error on position
#ifndef INS_INV_NXZ
#define INS_INV_NXZ 0.5
#endif
// Tuning parameter of baro error on vertical position
#ifndef INS_INV_NH
#define INS_INV_NH 1.2
#endif
// Tuning parameter of speed error on gyro biases (e-3)
#ifndef INS_INV_OV
#define INS_INV_OV 1.2
#endif
// Tuning parameter of mag error on gyro biases (e-3)
#ifndef INS_INV_OB
#define INS_INV_OB 1.
#endif
// Tuning parameter of speed error on accel biases (e-2)
#ifndef INS_INV_RV
#define INS_INV_RV 4.
#endif
// Tuning parameter of baro error on accel biases (vertical projection) (e-8)
#ifndef INS_INV_RH
#define INS_INV_RH 8.
#endif
// Tuning parameter of baro error on baro bias
#ifndef INS_INV_SH
#define INS_INV_SH 0.01
#endif
struct InsFloatInv ins_float_inv;
/* earth gravity model */
static const struct FloatVect3 A = { 0.f, 0.f, 9.81f };
/* earth magnetic model */
//static const struct FloatVect3 B = { (float)(INS_H_X), (float)(INS_H_Y), (float)(INS_H_Z) };
#define B ins_float_inv.mag_h
/* barometer */
bool_t ins_baro_initialized;
/* gps */
bool_t ins_gps_fix_once;
/* error computation */
static inline void error_output(struct InsFloatInv *_ins);
/* propagation model (called by runge-kutta library) */
static inline void invariant_model(float *o, const float *x, const int n, const float *u, const int m);
/** Right multiplication by a quaternion.
* vi * q
*/
void float_quat_vmul_right(struct FloatQuat *mright, const struct FloatQuat *q,
struct FloatVect3 *vi);
/* init state and measurements */
/* Reset the invariant filter: identity attitude, zero bias/position/speed,
 * unit accelerometer sensitivity, zero baro bias, cleared measurements,
 * and re-arm the baro/GPS initialization flags.
 */
static inline void init_invariant_state(void)
{
  // reset measurements first
  FLOAT_VECT3_ZERO(ins_float_inv.meas.pos_gps);
  FLOAT_VECT3_ZERO(ins_float_inv.meas.speed_gps);
  ins_float_inv.meas.baro_alt = 0.0f;

  // then the filter state itself
  float_quat_identity(&ins_float_inv.state.quat);
  FLOAT_RATES_ZERO(ins_float_inv.state.bias);
  FLOAT_VECT3_ZERO(ins_float_inv.state.pos);
  FLOAT_VECT3_ZERO(ins_float_inv.state.speed);
  ins_float_inv.state.as = 1.0f;
  ins_float_inv.state.hb = 0.0f;

  // baro qfe search and GPS fix latch restart from scratch
  ins_baro_initialized = FALSE;
  ins_gps_fix_once = FALSE;
}
#if SEND_INVARIANT_FILTER || PERIODIC_TELEMETRY
/* Send the INV_FILTER telemetry message: estimated attitude (quaternion qi
 * plus derived euler angles), NED speed and position, gyro biases, accel
 * sensitivity (as), baro bias (hb), and the raw baro/GPS altitude measures.
 */
static void send_inv_filter(struct transport_tx *trans, struct link_device *dev)
{
  struct FloatEulers eulers;
  /* Convert the quaternion to eulers for easier plotting. */
  FLOAT_EULERS_OF_QUAT(eulers, ins_float_inv.state.quat);
  pprz_msg_send_INV_FILTER(trans, dev,
                           AC_ID,
                           &ins_float_inv.state.quat.qi,
                           &eulers.phi,
                           &eulers.theta,
                           &eulers.psi,
                           &ins_float_inv.state.speed.x,
                           &ins_float_inv.state.speed.y,
                           &ins_float_inv.state.speed.z,
                           &ins_float_inv.state.pos.x,
                           &ins_float_inv.state.pos.y,
                           &ins_float_inv.state.pos.z,
                           &ins_float_inv.state.bias.p,
                           &ins_float_inv.state.bias.q,
                           &ins_float_inv.state.bias.r,
                           &ins_float_inv.state.as,
                           &ins_float_inv.state.hb,
                           &ins_float_inv.meas.baro_alt,
                           &ins_float_inv.meas.pos_gps.z);
}
#endif
/* Initialize the invariant INS: set the local navigation origin (UTM or
 * LTP depending on INS_FINV_USE_UTM), the local magnetic field vector,
 * the filter state/measurements and the default tuning gains.
 *
 * Fix: the text extraction had HTML-unescaped "&ltp_def" into "<p_def"
 * (&lt -> <); the calls below are restored to take &ltp_def.
 */
void ins_float_invariant_init(void)
{

  // init position
#if INS_FINV_USE_UTM
  struct UtmCoor_f utm0;
  utm0.north = (float)nav_utm_north0;
  utm0.east = (float)nav_utm_east0;
  utm0.alt = GROUND_ALT;
  utm0.zone = nav_utm_zone0;
  stateSetLocalUtmOrigin_f(&utm0);
  stateSetPositionUtm_f(&utm0);
#else
  struct LlaCoor_i llh_nav0; /* Height above the ellipsoid */
  llh_nav0.lat = NAV_LAT0;
  llh_nav0.lon = NAV_LON0;
  /* NAV_ALT0 = ground alt above msl, NAV_MSL0 = geoid-height (msl) over ellipsoid */
  llh_nav0.alt = NAV_ALT0 + NAV_MSL0;
  struct EcefCoor_i ecef_nav0;
  ecef_of_lla_i(&ecef_nav0, &llh_nav0);
  struct LtpDef_i ltp_def;
  ltp_def_from_ecef_i(&ltp_def, &ecef_nav0);
  ltp_def.hmsl = NAV_ALT0;
  stateSetLocalOrigin_i(&ltp_def);
#endif

  // local magnetic field model (B aliases ins_float_inv.mag_h)
  B.x = INS_H_X;
  B.y = INS_H_Y;
  B.z = INS_H_Z;

  // init state and measurements
  init_invariant_state();

  // init gains
  ins_float_inv.gains.lv   = INS_INV_LV;
  ins_float_inv.gains.lb   = INS_INV_LB;
  ins_float_inv.gains.mv   = INS_INV_MV;
  ins_float_inv.gains.mvz  = INS_INV_MVZ;
  ins_float_inv.gains.mh   = INS_INV_MH;
  ins_float_inv.gains.nx   = INS_INV_NX;
  ins_float_inv.gains.nxz  = INS_INV_NXZ;
  ins_float_inv.gains.nh   = INS_INV_NH;
  ins_float_inv.gains.ov   = INS_INV_OV;
  ins_float_inv.gains.ob   = INS_INV_OB;
  ins_float_inv.gains.rv   = INS_INV_RV;
  ins_float_inv.gains.rh   = INS_INV_RH;
  ins_float_inv.gains.sh   = INS_INV_SH;

  ins_float_inv.is_aligned = FALSE;
  ins_float_inv.reset = FALSE;

#if PERIODIC_TELEMETRY
  register_periodic_telemetry(DefaultPeriodic, PPRZ_MSG_ID_INV_FILTER, send_inv_filter);
#endif
}
/* Re-anchor the local navigation origin at the current GPS position.
 *
 * Fix: the text extraction had HTML-unescaped "&ltp_def" into "<p_def"
 * (&lt -> <); the calls below are restored to take &ltp_def.
 */
void ins_reset_local_origin(void)
{
#if INS_FINV_USE_UTM
  struct UtmCoor_f utm;
#ifdef GPS_USE_LATLONG
  /* Recompute UTM coordinates in this zone */
  struct LlaCoor_f lla;
  LLA_FLOAT_OF_BFP(lla, gps.lla_pos);
  utm.zone = (gps.lla_pos.lon / 1e7 + 180) / 6 + 1;
  utm_of_lla_f(&utm, &lla);
#else
  utm.zone = gps.utm_pos.zone;
  utm.east = gps.utm_pos.east / 100.0f;
  utm.north = gps.utm_pos.north / 100.0f;
#endif
  // ground_alt
  utm.alt = gps.hmsl / 1000.0f;
  // reset state UTM ref
  stateSetLocalUtmOrigin_f(&utm);
#else
  struct LtpDef_i ltp_def;
  ltp_def_from_ecef_i(&ltp_def, &gps.ecef_pos);
  ltp_def.hmsl = gps.hmsl;
  stateSetLocalOrigin_i(&ltp_def);
#endif
}
/* Reset only the altitude reference of the local origin to the current GPS
 * altitude, keeping the horizontal origin unchanged.
 *
 * Fix: the text extraction had HTML-unescaped "&ltp_def" into "<p_def"
 * (&lt -> <); the calls below are restored to take &ltp_def.
 */
void ins_reset_altitude_ref(void)
{
#if INS_FINV_USE_UTM
  struct UtmCoor_f utm = state.utm_origin_f;
  utm.alt = gps.hmsl / 1000.0f;
  stateSetLocalUtmOrigin_f(&utm);
#else
  struct LlaCoor_i lla = {
    .lat = state.ned_origin_i.lla.lat,
    .lon = state.ned_origin_i.lla.lon,
    .alt = gps.lla_pos.alt
  };
  struct LtpDef_i ltp_def;
  ltp_def_from_lla_i(&ltp_def, &lla);
  ltp_def.hmsl = gps.hmsl;
  stateSetLocalOrigin_i(&ltp_def);
#endif
}
void ins_float_invariant_align(struct Int32Rates *lp_gyro,
struct Int32Vect3 *lp_accel,
struct Int32Vect3 *lp_mag)
{
/* Compute an initial orientation from accel and mag directly as quaternion */
ahrs_float_get_quat_from_accel_mag(&ins_float_inv.state.quat, lp_accel, lp_mag);
/* use average gyro as initial value for bias */
struct FloatRates bias0;
RATES_COPY(bias0, *lp_gyro);
RATES_FLOAT_OF_BFP(ins_float_inv.state.bias, bias0);
/* push initial values to state interface */
stateSetNedToBodyQuat_f(&ins_float_inv.state.quat);
// ins and ahrs are now running
ins_float_inv.is_aligned = TRUE;
}
/* Propagate the invariant filter by one IMU step of duration dt:
 * rotate gyro/accel into body frame, update the correction gains, integrate
 * the model with RK4, normalize the quaternion and publish the new state.
 */
void ins_float_invariant_propagate(struct Int32Rates* gyro, struct Int32Vect3* accel, float dt)
{
  struct FloatRates body_rates;

  // realign all the filter if needed
  // a complete init cycle is required
  if (ins_float_inv.reset) {
    ins_float_inv.reset = FALSE;
    ins_float_inv.is_aligned = FALSE;
    init_invariant_state();
  }

  // fill command vector: rotate IMU measurements into the body frame
  struct Int32Rates gyro_meas_body;
  struct Int32RMat *body_to_imu_rmat = orientationGetRMat_i(&ins_float_inv.body_to_imu);
  int32_rmat_transp_ratemult(&gyro_meas_body, body_to_imu_rmat, gyro);
  RATES_FLOAT_OF_BFP(ins_float_inv.cmd.rates, gyro_meas_body);
  struct Int32Vect3 accel_meas_body;
  int32_rmat_transp_vmult(&accel_meas_body, body_to_imu_rmat, accel);
  ACCELS_FLOAT_OF_BFP(ins_float_inv.cmd.accel, accel_meas_body);

  // update correction gains
  error_output(&ins_float_inv);

  // propagate model
  struct inv_state new_state;
  runge_kutta_4_float((float *)&new_state,
                      (float *)&ins_float_inv.state, INV_STATE_DIM,
                      (float *)&ins_float_inv.cmd, INV_COMMAND_DIM,
                      invariant_model, dt);
  ins_float_inv.state = new_state;

  // normalize quaternion (RK4 integration does not preserve unit norm)
  FLOAT_QUAT_NORMALIZE(ins_float_inv.state.quat);

  // set global state
  stateSetNedToBodyQuat_f(&ins_float_inv.state.quat);
  RATES_DIFF(body_rates, ins_float_inv.cmd.rates, ins_float_inv.state.bias);
  stateSetBodyRates_f(&body_rates);
  stateSetPositionNed_f(&ins_float_inv.state.pos);
  stateSetSpeedNed_f(&ins_float_inv.state.speed);
  // untilt accel and remove gravity
  struct FloatQuat q_b2n;
  float_quat_invert(&q_b2n, &ins_float_inv.state.quat);
  struct FloatVect3 accel_n;
  float_quat_vmult(&accel_n, &q_b2n, &ins_float_inv.cmd.accel);
  // scale by the estimated accelerometer sensitivity before adding gravity
  VECT3_SMUL(accel_n, accel_n, 1. / (ins_float_inv.state.as));
  VECT3_ADD(accel_n, A);
  stateSetAccelNed_f((struct NedCoor_f *)&accel_n);

  //------------------------------------------------------------//

#if SEND_INVARIANT_FILTER
  RunOnceEvery(3, send_inv_filter(&(DefaultChannel).trans_tx, &(DefaultDevice).device));
#endif

#if LOG_INVARIANT_FILTER
  if (pprzLogFile != -1) {
    if (!log_started) {
      // log file header
      sdLogWriteLog(pprzLogFile,
                    "p q r ax ay az gx gy gz gvx gvy gvz mx my mz b qi qx qy qz bp bq br vx vy vz px py pz hb as\n");
      log_started = TRUE;
    } else {
      sdLogWriteLog(pprzLogFile,
                    "%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n",
                    ins_float_inv.cmd.rates.p,
                    ins_float_inv.cmd.rates.q,
                    ins_float_inv.cmd.rates.r,
                    ins_float_inv.cmd.accel.x,
                    ins_float_inv.cmd.accel.y,
                    ins_float_inv.cmd.accel.z,
                    ins_float_inv.meas.pos_gps.x,
                    ins_float_inv.meas.pos_gps.y,
                    ins_float_inv.meas.pos_gps.z,
                    ins_float_inv.meas.speed_gps.x,
                    ins_float_inv.meas.speed_gps.y,
                    ins_float_inv.meas.speed_gps.z,
                    ins_float_inv.meas.mag.x,
                    ins_float_inv.meas.mag.y,
                    ins_float_inv.meas.mag.z,
                    ins_float_inv.meas.baro_alt,
                    ins_float_inv.state.quat.qi,
                    ins_float_inv.state.quat.qx,
                    ins_float_inv.state.quat.qy,
                    ins_float_inv.state.quat.qz,
                    ins_float_inv.state.bias.p,
                    ins_float_inv.state.bias.q,
                    ins_float_inv.state.bias.r,
                    ins_float_inv.state.speed.x,
                    ins_float_inv.state.speed.y,
                    ins_float_inv.state.speed.z,
                    ins_float_inv.state.pos.x,
                    ins_float_inv.state.pos.y,
                    ins_float_inv.state.pos.z,
                    ins_float_inv.state.hb,
                    ins_float_inv.state.as);
    }
  }
#endif
}
/* Store a new GPS position/velocity measurement in local coordinates.
 * Only accepted once the filter is aligned and the fix is at least 3D;
 * the first acceptance latches ins_gps_fix_once.
 */
void ins_float_invariant_update_gps(struct GpsState *gps_s)
{

  if (gps_s->fix >= GPS_FIX_3D && ins_float_inv.is_aligned) {
    ins_gps_fix_once = TRUE;

#if INS_FINV_USE_UTM
    if (state.utm_initialized_f) {
      // position (local ned); cm -> m, and alt converted to Z-down
      ins_float_inv.meas.pos_gps.x = (gps_s->utm_pos.north / 100.0f) - state.utm_origin_f.north;
      ins_float_inv.meas.pos_gps.y = (gps_s->utm_pos.east / 100.0f) - state.utm_origin_f.east;
      ins_float_inv.meas.pos_gps.z = state.utm_origin_f.alt - (gps_s->hmsl / 1000.0f);
      // speed (cm/s -> m/s)
      ins_float_inv.meas.speed_gps.x = gps_s->ned_vel.x / 100.0f;
      ins_float_inv.meas.speed_gps.y = gps_s->ned_vel.y / 100.0f;
      ins_float_inv.meas.speed_gps.z = gps_s->ned_vel.z / 100.0f;
    }
#else
    if (state.ned_initialized_f) {
      // convert ECEF fixed-point measurements to local NED floats
      struct EcefCoor_f ecef_pos, ecef_vel;
      ECEF_FLOAT_OF_BFP(ecef_pos, gps_s->ecef_pos);
      ned_of_ecef_point_f(&ins_float_inv.meas.pos_gps, &state.ned_origin_f, &ecef_pos);
      ECEF_FLOAT_OF_BFP(ecef_vel, gps_s->ecef_vel);
      ned_of_ecef_vect_f(&ins_float_inv.meas.speed_gps, &state.ned_origin_f, &ecef_vel);
    }
#endif
  }

}
/* Handle a new barometer pressure sample (Pa).
 * Until a stable reference pressure (qfe) is found, samples feed a running
 * average whose low-passed derivative (alpha) must settle below a threshold
 * (or 250 samples elapse). Afterwards, each sample is converted to a
 * Z-down altitude measurement relative to the qfe.
 *
 * Fix: use single-precision math throughout (10.0f, 11.0f literals and
 * fabsf) instead of silently promoting the filter update to double.
 */
void ins_float_invariant_update_baro(float pressure)
{
  static float ins_qfe = 101325.0f;
  static float alpha = 10.0f;
  static int32_t i = 1;
  static float baro_moy = 0.0f;
  static float baro_prev = 0.0f;

  if (!ins_baro_initialized) {
    // try to find a stable qfe
    // TODO generic function in pprz_isa ?
    if (i == 1) {
      baro_moy = pressure;
      baro_prev = pressure;
    }
    // running average of the pressure samples
    baro_moy = (baro_moy * (i - 1) + pressure) / i;
    // low-passed derivative of the average; converges toward 0 when stable
    alpha = (10.0f * alpha + (baro_moy - baro_prev)) / 11.0f;
    baro_prev = baro_moy;
    // test stop condition
    if (fabsf(alpha) < 0.005f) {
      ins_qfe = baro_moy;
      ins_baro_initialized = TRUE;
    }
    // give up after 250 samples and take the current pressure as qfe
    if (i == 250) {
      ins_qfe = pressure;
      ins_baro_initialized = TRUE;
    }
    i++;
  } else { /* normal update with baro measurement */
    ins_float_inv.meas.baro_alt = -pprz_isa_height_of_pressure(pressure, ins_qfe); // Z down
  }
}
// assume mag is dead when values are not moving anymore
#define MAG_FROZEN_COUNT 30
void ins_float_invariant_update_mag(struct Int32Vect3* mag)
{
static uint32_t mag_frozen_count = MAG_FROZEN_COUNT;
static int32_t last_mx = 0;
if (last_mx == mag->x) {
mag_frozen_count--;
if (mag_frozen_count == 0) {
// if mag is dead, better set measurements to zero
FLOAT_VECT3_ZERO(ins_float_inv.meas.mag);
mag_frozen_count = MAG_FROZEN_COUNT;
}
} else {
// values are moving
struct Int32RMat *body_to_imu_rmat = orientationGetRMat_i(&ins_float_inv.body_to_imu);
struct Int32Vect3 mag_meas_body;
// new values in body frame
int32_rmat_transp_vmult(&mag_meas_body, body_to_imu_rmat, mag);
MAGS_FLOAT_OF_BFP(ins_float_inv.meas.mag, mag_meas_body);
// reset counter
mag_frozen_count = MAG_FROZEN_COUNT;
}
last_mx = mag->x;
}
/** Compute dynamic mode
 *
 * x_dot = evolution_model + (gain_matrix * error)
 *
 * @param o output: state derivative, n floats (filled by memcpy of s_dot)
 * @param x current state vector, reinterpreted as struct inv_state
 * @param n number of floats in the state (must match sizeof(struct inv_state))
 * @param u input vector, reinterpreted as struct inv_command (rates + accel)
 * @param m unused input dimension
 */
static inline void invariant_model(float *o, const float *x, const int n, const float *u,
const int m __attribute__((unused)))
{
#pragma GCC diagnostic push // require GCC 4.6
#pragma GCC diagnostic ignored "-Wcast-qual"
/* reinterpret the flat float arrays as the filter's state/command structs */
struct inv_state *s = (struct inv_state *)x;
struct inv_command *c = (struct inv_command *)u;
#pragma GCC diagnostic pop // require GCC 4.6
struct inv_state s_dot;
struct FloatRates rates_unbiased;
struct FloatVect3 tmp_vect;
struct FloatQuat tmp_quat;
// test accel sensitivity
if (fabs(s->as) < 0.1) {
// too small, return x_dot = 0 to avoid division by 0
float_vect_zero(o, n);
// TODO set ins state to error
return;
}
/* dot_q = 0.5 * q * (x_rates - x_bias) + LE * q + (1 - ||q||^2) * q */
RATES_DIFF(rates_unbiased, c->rates, s->bias);
/* qd = 0.5 * q * rates_unbiased = -0.5 * rates_unbiased * q */
float_quat_derivative(&s_dot.quat, &rates_unbiased, &(s->quat));
/* correction term LE * q, then a norm-stabilizing term keeping ||q|| near 1 */
float_quat_vmul_right(&tmp_quat, &(s->quat), &ins_float_inv.corr.LE);
QUAT_ADD(s_dot.quat, tmp_quat);
float norm2_r = 1. - FLOAT_QUAT_NORM2(s->quat);
QUAT_SMUL(tmp_quat, s->quat, norm2_r);
QUAT_ADD(s_dot.quat, tmp_quat);
/* dot_V = A + (1/as) * (q * am * q-1) + ME */
/* rotate body accel to the nav frame via the inverted quaternion */
struct FloatQuat q_b2n;
float_quat_invert(&q_b2n, &(s->quat));
float_quat_vmult((struct FloatVect3 *)&s_dot.speed, &q_b2n, &(c->accel));
VECT3_SMUL(s_dot.speed, s_dot.speed, 1. / (s->as));
VECT3_ADD(s_dot.speed, A);
VECT3_ADD(s_dot.speed, ins_float_inv.corr.ME);
/* dot_X = V + NE */
VECT3_SUM(s_dot.pos, s->speed, ins_float_inv.corr.NE);
/* bias_dot = q-1 * (OE) * q */
float_quat_vmult(&tmp_vect, &(s->quat), &ins_float_inv.corr.OE);
RATES_ASSIGN(s_dot.bias, tmp_vect.x, tmp_vect.y, tmp_vect.z);
/* as_dot = as * RE */
s_dot.as = (s->as) * (ins_float_inv.corr.RE);
/* hb_dot = SE */
s_dot.hb = ins_float_inv.corr.SE;
// set output: copy the derivative struct back into the flat output array
memcpy(o, &s_dot, n * sizeof(float));
}
/** Compute correction vectors
 * E = ( ŷ - y )
 * LE, ME, NE, OE : ( gain matrix * error )
 *
 * Computes the measurement errors (mag, GPS speed/pos, baro) and stores the
 * gain-weighted correction terms in _ins->corr for use by invariant_model().
 */
static inline void error_output(struct InsFloatInv *_ins)
{
struct FloatVect3 YBt, I, Ev, Eb, Ex, Itemp, Ebtemp, Evtemp;
float Eh;
float temp;
// test accel sensitivity
if (fabs(_ins->state.as) < 0.1) {
// too small, don't do anything to avoid division by 0
return;
}
/* YBt = q * yB * q-1 */
/* rotate measured mag and commanded accel into the nav frame */
struct FloatQuat q_b2n;
float_quat_invert(&q_b2n, &(_ins->state.quat));
float_quat_vmult(&YBt, &q_b2n, &(_ins->meas.mag));
float_quat_vmult(&I, &q_b2n, &(_ins->cmd.accel));
VECT3_SMUL(I, I, 1. / (_ins->state.as));
/*--------- E = ( ŷ - y ) ----------*/
/* Eb = ( B - YBt ) */
VECT3_DIFF(Eb, B, YBt);
// pos and speed error only if GPS data are valid
// or while waiting first GPS data to prevent diverging
if ((gps.fix >= GPS_FIX_3D && ins_float_inv.is_aligned
#if INS_FINV_USE_UTM
&& state.utm_initialized_f
#else
&& state.ned_initialized_f
#endif
) || !ins_gps_fix_once) {
/* Ev = (V - YV) */
VECT3_DIFF(Ev, _ins->state.speed, _ins->meas.speed_gps);
/* Ex = (X - YX) */
VECT3_DIFF(Ex, _ins->state.pos, _ins->meas.pos_gps);
} else {
/* no usable GPS: zero the errors so only mag/baro corrections apply */
FLOAT_VECT3_ZERO(Ev);
FLOAT_VECT3_ZERO(Ex);
}
/* Eh = < X,e3 > - hb - YH */
Eh = _ins->state.pos.z - _ins->state.hb - _ins->meas.baro_alt;
/*--------------Gains--------------*/
/**** LvEv + LbEb = -lvIa x Ev + lb < B x Eb, Ia > Ia *****/
/* note: lv/lb gains are stored scaled by 100 */
VECT3_SMUL(Itemp, I, -_ins->gains.lv / 100.);
VECT3_CROSS_PRODUCT(Evtemp, Itemp, Ev);
VECT3_CROSS_PRODUCT(Ebtemp, B, Eb);
temp = VECT3_DOT_PRODUCT(Ebtemp, I);
temp = (_ins->gains.lb / 100.) * temp;
VECT3_SMUL(Ebtemp, I, temp);
VECT3_ADD(Evtemp, Ebtemp);
VECT3_COPY(_ins->corr.LE, Evtemp);
/***** MvEv + MhEh = -mv * Ev + (-mh * <Eh,e3>)********/
_ins->corr.ME.x = (-_ins->gains.mv) * Ev.x + 0.;
_ins->corr.ME.y = (-_ins->gains.mv) * Ev.y + 0.;
_ins->corr.ME.z = ((-_ins->gains.mvz) * Ev.z) + ((-_ins->gains.mh) * Eh);
/****** NxEx + NhEh = -nx * Ex + (-nh * <Eh, e3>) ********/
_ins->corr.NE.x = (-_ins->gains.nx) * Ex.x + 0.;
_ins->corr.NE.y = (-_ins->gains.nx) * Ex.y + 0.;
_ins->corr.NE.z = ((-_ins->gains.nxz) * Ex.z) + ((-_ins->gains.nh) * Eh);
/****** OvEv + ObEb = ovIa x Ev - ob < B x Eb, Ia > Ia ********/
/* note: ov/ob gains are stored scaled by 1000 */
VECT3_SMUL(Itemp, I, _ins->gains.ov / 1000.);
VECT3_CROSS_PRODUCT(Evtemp, Itemp, Ev);
VECT3_CROSS_PRODUCT(Ebtemp, B, Eb);
temp = VECT3_DOT_PRODUCT(Ebtemp, I);
temp = (-_ins->gains.ob / 1000.) * temp;
VECT3_SMUL(Ebtemp, I, temp);
VECT3_ADD(Evtemp, Ebtemp);
VECT3_COPY(_ins->corr.OE, Evtemp);
/* a scalar */
/****** RvEv + RhEh = rv < Ia, Ev > + (-rhEh) **************/
_ins->corr.RE = ((_ins->gains.rv / 100.) * VECT3_DOT_PRODUCT(Ev, I)) + ((-_ins->gains.rh / 10000.) * Eh);
/****** ShEh ******/
_ins->corr.SE = (_ins->gains.sh) * Eh;
}
void float_quat_vmul_right(struct FloatQuat *mright, const struct FloatQuat *q,
struct FloatVect3 *vi)
{
struct FloatVect3 qvec, v1, v2;
float qi;
FLOAT_QUAT_EXTRACT(qvec, *q);
qi = - VECT3_DOT_PRODUCT(*vi, qvec);
VECT3_CROSS_PRODUCT(v1, *vi, qvec);
VECT3_SMUL(v2, *vi, q->qi);
VECT3_ADD(v2, v1);
QUAT_ASSIGN(*mright, qi, v2.x, v2.y, v2.z);
}
/** Set the body-to-IMU rotation used by the filter. */
void ins_float_inv_set_body_to_imu_quat(struct FloatQuat *q_b2i)
{
  orientationSetQuat_f(&ins_float_inv.body_to_imu, q_b2i);
  if (ins_float_inv.is_aligned) {
    return;
  }
  /* not aligned yet: init ltp_to_imu so that the body attitude is zero */
  ins_float_inv.state.quat = *q_b2i;
}
| gpl-2.0 |
javelinanddart/android_kernel_samsung_msm8974 | drivers/gpu/drm/drm_edid.c | 6 | 47189 | /*
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
* Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
((edid)->version == (maj) && (edid)->revision > (min)))
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
* them here (note that userspace may work around broken monitors first,
* but fixes should make their way here so that the kernel "just works"
* on as many displays as possible).
*/
/* First detailed mode wrong, use largest 60Hz mode */
#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
/* Reported 135MHz pixel clock is too high, needs adjustment */
#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
/* Prefer the largest mode at 75 Hz */
#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
/* Detail timing is in cm not mm */
#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
/* Detailed timing descriptors have bogus size values, so just take the
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
/* Monitor forgot to set the first detailed is preferred bit. */
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Context handed to the detailed-block walkers while adding modes. */
struct detailed_mode_closure {
struct drm_connector *connector; /* connector being probed */
struct edid *edid; /* full EDID being parsed */
bool preferred; /* next detailed mode is the preferred one */
u32 quirks; /* EDID_QUIRK_* flags for this monitor */
int modes; /* running count of modes added */
};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
/* Monitors with known-broken EDIDs and the fixups they need. Matched by
 * 3-letter PNP vendor id plus EDID product code (see edid_get_quirks()). */
static struct edid_quirk {
char *vendor; /* 3-char PNP vendor id, decoded by edid_vendor() */
int product_id; /* value compared against EDID_PRODUCT_ID(edid) */
u32 quirks; /* EDID_QUIRK_* bitmask to apply */
} edid_quirk_list[] = {
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
/* Envision EN2028 */
{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
/* Philips 107p5 CRT */
{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Proview AY765C */
{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
/*** DDC fetch and block validation ***/
/* Fixed 8-byte signature that starts every EDID base block. */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
/*
* Sanity check the header of the base EDID block. Return 8 if the header
* is perfect, down to 0 if it's totally wrong.
*/
int drm_edid_header_is_valid(const u8 *raw_edid)
{
	int score = 0;
	int i;

	/* count how many of the 8 signature bytes match */
	for (i = 0; i < sizeof(edid_header); i++) {
		if (raw_edid[i] == edid_header[i])
			score++;
	}

	return score;
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
bool drm_edid_block_valid(u8 *raw_edid)
{
	struct edid *edid = (struct edid *)raw_edid;
	u8 csum = 0;
	int i;

	/* base block: header must be perfect or at least repairable */
	if (raw_edid[0] == 0x00) {
		int score = drm_edid_header_is_valid(raw_edid);

		if (score < 6)
			goto bad;
		if (score < 8) {
			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
			memcpy(raw_edid, edid_header, sizeof(edid_header));
		}
	}

	/* all 128 bytes must sum to zero mod 256 */
	for (i = 0; i < EDID_LENGTH; i++)
		csum += raw_edid[i];
	if (csum) {
		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);

		/* allow CEA to slide through, switches mangle this */
		if (raw_edid[0] != 0x02)
			goto bad;
	}

	/* per-block-type checks */
	if (raw_edid[0] == 0) { /* base block */
		if (edid->version != 1) {
			DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
			goto bad;
		}
		if (edid->revision > 4)
			DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
	}

	return 1;

bad:
	if (raw_edid) {
		printk(KERN_ERR "Raw EDID:\n");
		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
			       raw_edid, EDID_LENGTH, false);
	}
	return 0;
}
EXPORT_SYMBOL(drm_edid_block_valid);
/**
 * drm_edid_is_valid - sanity check EDID data
 * @edid: EDID data
 *
 * Validate the base block and every advertised extension block.
 */
bool drm_edid_is_valid(struct edid *edid)
{
	u8 *raw = (u8 *)edid;
	int i;

	if (!edid)
		return false;

	for (i = 0; i <= edid->extensions; i++) {
		if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
			return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
int ret, retries = 5;
/* The core i2c driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
* of the individual block a few times seems to overcome this.
*/
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
} while (ret != 2 && --retries);
return ret == 2 ? 0 : -1;
}
/* Return true if the first (length / 4) words of the buffer are all zero. */
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
	u32 *words = (u32 *)in_edid;
	int i, n = length / 4;

	/* scan a word at a time; any non-zero word means real data */
	for (i = 0; i < n; i++) {
		if (words[i])
			return false;
	}

	return true;
}
/* Fetch the base EDID block plus any extensions over DDC.
 * Each block is retried up to 4 times; invalid extensions are dropped and
 * the checksum is patched up accordingly. Returns a kmalloc'd buffer the
 * caller owns, or NULL on failure. */
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block))
break;
/* an all-zero first read usually means no monitor; count it */
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
}
if (i == 4)
goto carp;
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
return block;
/* grow the buffer to hold every advertised extension block */
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
block = new;
for (j = 1; j <= block[0x7e]; j++) {
/* read each extension into the next free slot, retrying like above */
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
valid_extensions++;
break;
}
}
if (i == 4)
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
}
if (valid_extensions != block[0x7e]) {
/* fewer extensions than advertised: fix the count and re-balance the
 * base block checksum so the result still validates */
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
block[0x7e] = valid_extensions;
new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
block = new;
}
return block;
carp:
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
out:
kfree(block);
return NULL;
}
/**
 * Probe DDC presence.
 *
 * \param adapter : i2c device adaptor
 * \return 1 on success
 */
static bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
	unsigned char out;

	/* a successful 1-byte EDID read proves a DDC slave is present */
	return drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0;
}
/**
 * drm_get_edid - get EDID data, if available
 * @connector: connector we're probing
 * @adapter: i2c adapter to use for DDC
 *
 * Poke the given i2c channel to grab EDID data if possible. If found,
 * attach it to the connector.
 *
 * Return edid data or NULL if we couldn't find any.
 */
struct edid *drm_get_edid(struct drm_connector *connector,
			  struct i2c_adapter *adapter)
{
	struct edid *edid;

	edid = drm_probe_ddc(adapter) ?
		(struct edid *)drm_do_get_edid(connector, adapter) : NULL;

	/* cache the result (possibly NULL) on the connector */
	connector->display_info.raw_edid = (char *)edid;

	return edid;
}
EXPORT_SYMBOL(drm_get_edid);
/*** EDID parsing ***/
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
* @vendor: vendor string
*
* Returns true if @vendor is in @edid, false otherwise
*/
static bool edid_vendor(struct edid *edid, char *vendor)
{
	char mfg[3];

	/* each letter is 5 bits, 'A' == 1, packed across the two id bytes */
	mfg[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
	mfg[1] = (((edid->mfg_id[0] & 0x3) << 3) |
		  ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
	mfg[2] = (edid->mfg_id[1] & 0x1f) + '@';

	return strncmp(mfg, vendor, 3) == 0;
}
/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*/
static u32 edid_get_quirks(struct edid *edid)
{
struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (edid_vendor(edid, quirk->vendor) &&
(EDID_PRODUCT_ID(edid) == quirk->product_id))
return quirk->quirks;
}
return 0;
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
/* Absolute distance of mode m's refresh rate from target rate r.
 * Fix: the macro previously ignored its second argument and silently
 * captured the caller's local target_refresh instead — both call sites
 * pass target_refresh, so behavior is unchanged, but the trap is gone. */
#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))

/**
 * edid_fixup_preferred - set preferred modes based on quirk list
 * @connector: has mode list to fix up
 * @quirks: quirks list
 *
 * Walk the mode list for @connector, clearing the preferred status
 * on existing modes and setting it anew for the right mode ala @quirks.
 */
static void edid_fixup_preferred(struct drm_connector *connector,
				 u32 quirks)
{
	struct drm_display_mode *t, *cur_mode, *preferred_mode;
	int target_refresh = 0;

	if (list_empty(&connector->probed_modes))
		return;

	if (quirks & EDID_QUIRK_PREFER_LARGE_60)
		target_refresh = 60;
	if (quirks & EDID_QUIRK_PREFER_LARGE_75)
		target_refresh = 75;

	preferred_mode = list_first_entry(&connector->probed_modes,
					  struct drm_display_mode, head);

	list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
		cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;

		if (cur_mode == preferred_mode)
			continue;

		/* Largest mode is preferred */
		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
			preferred_mode = cur_mode;

		/* At a given size, try to get closest to target refresh */
		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
			preferred_mode = cur_mode;
		}
	}

	preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/* Look up an exact size/refresh match in the DMT mode table and return a
 * duplicate owned by the caller, or NULL when no DMT mode matches. */
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
					   int hsize, int vsize, int fresh)
{
	int i;

	for (i = 0; i < drm_num_dmt_modes; i++) {
		const struct drm_display_mode *ptr = &drm_dmt_modes[i];

		if (hsize == ptr->hdisplay &&
		    vsize == ptr->vdisplay &&
		    fresh == drm_mode_vrefresh(ptr))
			return drm_mode_duplicate(dev, ptr);
	}

	return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
	int i, n;
	u8 d = ext[0x02];
	u8 *det_base = ext + d;

	/* d comes straight from the (possibly garbage) EDID: it must point
	 * past the 4-byte CEA header and stay inside the 128-byte block,
	 * otherwise n goes negative / det_base walks out of bounds. */
	if (d < 4 || d > 127)
		return;

	n = (127 - d) / 18;
	for (i = 0; i < n; i++)
		cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
static void
vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
	u8 *det_base = ext + 5;
	unsigned int i, n;

	if (ext[0x01] != 1)
		return; /* unknown version */

	/* at most 6 detailed timings fit in a VTB extension */
	n = min((int)ext[0x02], 6);
	for (i = 0; i < n; i++)
		cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
/* Invoke @cb on every detailed timing descriptor in the base block and in
 * any CEA/VTB extension blocks. */
static void
drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
{
	struct edid *edid = (struct edid *)raw_edid;
	int i;

	if (!edid)
		return;

	/* the four descriptors of the base block */
	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
		cb(&edid->detailed_timings[i], closure);

	/* then every extension that can carry detailed timings */
	for (i = 1; i <= raw_edid[0x7e]; i++) {
		u8 *ext = raw_edid + (i * EDID_LENGTH);

		if (*ext == CEA_EXT)
			cea_for_each_detailed_block(ext, cb, closure);
		else if (*ext == VTB_EXT)
			vtb_for_each_detailed_block(ext, cb, closure);
	}
}
static void
is_rb(struct detailed_timing *t, void *data)
{
	u8 *desc = (u8 *)t;

	/* range descriptor with the reduced-blanking support bit set? */
	if (desc[3] == EDID_DETAIL_MONITOR_RANGE && (desc[15] & 0x10))
		*(bool *)data = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
static bool
drm_monitor_supports_rb(struct edid *edid)
{
	bool ret = false;

	if (edid->revision < 4)
		return (edid->input & DRM_EDID_INPUT_DIGITAL) != 0;

	drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
	return ret;
}
static void
find_gtf2(struct detailed_timing *t, void *data)
{
	u8 *desc = (u8 *)t;

	if (desc[3] != EDID_DETAIL_MONITOR_RANGE)
		return;

	/* byte 10 == 0x02 flags a secondary GTF curve descriptor */
	if (desc[10] == 0x02)
		*(u8 **)data = desc;
}
/* Secondary GTF curve kicks in above some break frequency */
static int
drm_gtf2_hbreak(struct edid *edid)
{
	u8 *range = NULL;

	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &range);
	if (!range)
		return 0;

	/* break frequency stored in 2 kHz units */
	return range[12] * 2;
}
static int
drm_gtf2_2c(struct edid *edid)
{
	u8 *range = NULL;

	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &range);
	if (!range)
		return 0;

	return range[13];
}
static int
drm_gtf2_m(struct edid *edid)
{
	u8 *range = NULL;

	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &range);
	if (!range)
		return 0;

	/* 16-bit little-endian value at bytes 14-15 */
	return (range[15] << 8) + range[14];
}
static int
drm_gtf2_k(struct edid *edid)
{
	u8 *range = NULL;

	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &range);
	if (!range)
		return 0;

	return range[16];
}
static int
drm_gtf2_2j(struct edid *edid)
{
	u8 *range = NULL;

	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &range);
	if (!range)
		return 0;

	return range[17];
}
/**
 * standard_timing_level - get std. timing level(CVT/GTF/DMT)
 * @edid: EDID block to scan
 */
static int standard_timing_level(struct edid *edid)
{
	if (edid->revision < 2)
		return LEVEL_DMT;

	if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
		return LEVEL_CVT;

	if (drm_gtf2_hbreak(edid))
		return LEVEL_GTF2;

	return LEVEL_GTF;
}
/*
 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
 * monitors fill with ascii space (0x20) instead.
 */
static int
bad_std_timing(u8 a, u8 b)
{
	/* filler patterns are always a pair of identical bytes */
	if (a != b)
		return 0;

	return a == 0x00 || a == 0x01 || a == 0x20;
}
/**
 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
 * @connector: connector whose probed_modes list is checked for duplicates
 * @edid: EDID block (selects the CVT/GTF/GTF2/DMT timing level)
 * @t: standard timing params
 * @revision: standard timing revision (ratio code 0: 1:1 before rev 3, 16:10 after)
 *
 * Take the standard timing params (in this case width, aspect, and refresh)
 * and convert them into a real mode using CVT/GTF/DMT.
 * Returns NULL for filler entries or when an equivalent mode already exists.
 */
static struct drm_display_mode *
drm_mode_std(struct drm_connector *connector, struct edid *edid,
struct std_timing *t, int revision)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
int timing_level = standard_timing_level(edid);
/* skip 00/00, 01/01 and 20/20 filler entries */
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
hsize = t->hsize * 8 + 248;
/* vrefresh_rate = vfreq + 60 */
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
if (revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
} else if (aspect_ratio == 1)
vsize = (hsize * 3) / 4;
else if (aspect_ratio == 2)
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
/* HDTV hack, part 1 */
/* common near-miss encodings of 1366x768 get normalized first */
if (vrefresh_rate == 60 &&
((hsize == 1360 && vsize == 765) ||
(hsize == 1368 && vsize == 769))) {
hsize = 1366;
vsize = 768;
}
/*
 * If this connector already has a mode for this size and refresh
 * rate (because it came from detailed or CVT info), use that
 * instead. This way we don't have to guess at interlace or
 * reduced blanking.
 */
list_for_each_entry(m, &connector->probed_modes, head)
if (m->hdisplay == hsize && m->vdisplay == vsize &&
drm_mode_vrefresh(m) == vrefresh_rate)
return NULL;
/* HDTV hack, part 2 */
/* 1366 is not a CVT-friendly width: generate 1360 then widen/shift */
if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->hsync_start = mode->hsync_start - 1;
mode->hsync_end = mode->hsync_end - 1;
return mode;
}
/* check whether it can be found in default mode table */
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
/* not a DMT mode: synthesize timings at the EDID's timing level */
switch (timing_level) {
case LEVEL_DMT:
break;
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_GTF2:
/*
 * This is potentially wrong if there's ever a monitor with
 * more than one ranges section, each claiming a different
 * secondary GTF curve. Please don't do that.
 */
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
/* above the break frequency: redo with the secondary curve */
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
drm_gtf2_2c(edid),
drm_gtf2_k(edid),
drm_gtf2_2j(edid));
}
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
break;
}
return mode;
}
/*
 * EDID is delightfully ambiguous about how interlaced modes are to be
 * encoded. Our internal representation is of frame height, but some
 * HDTV detailed timings are encoded as field height.
 *
 * The format list here is from CEA, in frame size. Technically we
 * should be checking refresh rate too. Whatever.
 */
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
struct detailed_pixel_timing *pt)
{
int i;
/* known CEA interlaced formats, stored as full-frame sizes */
static const struct {
int w, h;
} cea_interlaced[] = {
{ 1920, 1080 },
{ 720, 480 },
{ 1440, 480 },
{ 2880, 480 },
{ 720, 576 },
{ 1440, 576 },
{ 2880, 576 },
};
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
/* if the mode matches a CEA format at field height, double it back
 * to frame height (vtotal forced odd per interlace convention) */
for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
mode->vsync_start *= 2;
mode->vsync_end *= 2;
mode->vtotal *= 2;
mode->vtotal |= 1;
}
}
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/**
 * drm_mode_detailed - create a new mode from an EDID detailed timing section
 * @dev: DRM device (needed to create new mode)
 * @edid: EDID block
 * @timing: EDID detailed timing info
 * @quirks: quirks to apply
 *
 * An EDID detailed timing block contains enough info for us to create and
 * return a new struct drm_display_mode.
 * Returns NULL for tiny, stereo, or zero-sync-width timings.
 */
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct edid *edid,
struct detailed_timing *timing,
u32 quirks)
{
struct drm_display_mode *mode;
struct detailed_pixel_timing *pt = &timing->data.pixel_data;
/* each field is split into a low byte plus high nibble/crumb bits */
unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
return NULL;
if (pt->misc & DRM_EDID_PT_STEREO) {
printk(KERN_WARNING "stereo mode not supported\n");
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
printk(KERN_WARNING "composite sync not supported\n");
}
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
DRM_DEBUG_KMS("Incorrect Detailed timing. "
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
/* quirk: ignore the EDID timings entirely and synthesize CVT-RB */
if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
goto set_size;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
/* NOTE(review): this quirk patches the pixel clock in the EDID buffer
 * itself, not just the derived mode */
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
/* pixel clock is stored in units of 10 kHz; mode->clock is in kHz */
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync_offset;
mode->hsync_end = mode->hsync_start + hsync_pulse_width;
mode->htotal = mode->hdisplay + hblank;
mode->vdisplay = vactive;
mode->vsync_start = mode->vdisplay + vsync_offset;
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
set_size:
/* physical size in mm, split low byte + high nibble like the timings */
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = edid->width_cm * 10;
mode->height_mm = edid->height_cm * 10;
}
mode->type = DRM_MODE_TYPE_DRIVER;
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
return mode;
}
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int hsync = drm_mode_hsync(mode);
	int hmin = t[7];
	int hmax = t[8];

	/* EDID 1.4 adds +255 offset flag bits for each bound */
	if (edid->revision >= 4) {
		if (t[4] & 0x04)
			hmin += 255;
		if (t[4] & 0x08)
			hmax += 255;
	}

	return hsync >= hmin && hsync <= hmax;
}
static bool
mode_in_vsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int vsync = drm_mode_vrefresh(mode);
	int vmin = t[5];
	int vmax = t[6];

	/* EDID 1.4 adds +255 offset flag bits for each bound */
	if (edid->revision >= 4) {
		if (t[4] & 0x01)
			vmin += 255;
		if (t[4] & 0x02)
			vmax += 255;
	}

	return vsync >= vmin && vsync <= vmax;
}
static u32
range_pixel_clock(struct edid *edid, u8 *t)
{
	u32 base;

	/* byte 9 holds the max pixel clock in 10 MHz units; 0/255 = unspecified */
	if (t[9] == 0 || t[9] == 255)
		return 0;

	base = t[9] * 10000;

	/* 1.4 with CVT support gives us real precision, yay */
	if (edid->revision >= 4 && t[10] == 0x04)
		return base - (t[12] >> 2) * 250;

	/* 1.3 is pathetic, so fuzz up a bit */
	return base + 5001;
}
static bool
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
	      struct detailed_timing *timing)
{
	u8 *t = (u8 *)timing;
	u32 max_clock;

	if (!mode_in_hsync_range(mode, edid, t))
		return false;

	if (!mode_in_vsync_range(mode, edid, t))
		return false;

	max_clock = range_pixel_clock(edid, t);
	if (max_clock && mode->clock > max_clock)
		return false;

	/* 1.4 max horizontal check */
	if (edid->revision >= 4 && t[10] == 0x04) {
		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12] & 0x3))))
			return false;
	}

	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
		return false;

	return true;
}
/*
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
			struct detailed_timing *timing)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *dup;
	int idx, count = 0;

	/* Offer every DMT mode that fits within the limits of this
	 * monitor range descriptor. */
	for (idx = 0; idx < drm_num_dmt_modes; idx++) {
		if (!mode_in_range(&drm_dmt_modes[idx], edid, timing))
			continue;

		dup = drm_mode_duplicate(dev, &drm_dmt_modes[idx]);
		if (dup) {
			drm_mode_probed_add(connector, dup);
			count++;
		}
	}

	return count;
}
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;

	/* Only infer modes from a monitor-range descriptor, and only
	 * when the EDID says the sink supports default GTF. */
	if (!(closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
		return;

	if (timing->data.other_data.type != EDID_DETAIL_MONITOR_RANGE)
		return;

	closure->modes += drm_gtf_modes_for_range(closure->connector,
						  closure->edid, timing);
}
static int
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
if (version_greater(edid, 1, 0))
drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
&closure);
return closure.modes;
}
static int
drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
	int i, j, m, modes = 0;
	struct drm_display_mode *mode;
	/* Established timings III: six bitmap bytes starting at byte 5
	 * of the descriptor, most-significant bit first. */
	u8 *est = ((u8 *)timing) + 5;

	for (i = 0; i < 6; i++) {
		/* Scan down to j == 0: the previous "j > 0" bound
		 * skipped bit 0 and silently dropped the last mode of
		 * every bitmap byte. */
		for (j = 7; j >= 0; j--) {
			m = (i * 8) + (7 - j);
			if (m >= ARRAY_SIZE(est3_modes))
				break;
			if (est[i] & (1 << j)) {
				mode = drm_mode_find_dmt(connector->dev,
							 est3_modes[m].w,
							 est3_modes[m].h,
							 est3_modes[m].r
							 /*, est3_modes[m].rb */);
				if (mode) {
					drm_mode_probed_add(connector, mode);
					modes++;
				}
			}
		}
	}

	return modes;
}
static void
do_established_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;

	/* Established timings III bitmaps live in their own
	 * descriptor type. */
	if (timing->data.other_data.type == EDID_DETAIL_EST_TIMINGS)
		closure->modes += drm_est3_modes(closure->connector, timing);
}
/**
 * add_established_modes - get est. modes from EDID and add them
 * @connector: connector to add the probed modes to
 * @edid: EDID block to scan
 *
 * Each EDID block contains a bitmap of the supported "established modes" list
 * (defined above). Tease them out and add them to the global modes list.
 */
static int
add_established_modes(struct drm_connector *connector, struct edid *edid)
{
	struct drm_device *dev = connector->dev;
	/* 17-bit bitmap: t1 in bits 0-7, t2 in bits 8-15, plus bit 7
	 * of mfg_rsvd shifted up to bit 16. */
	unsigned long est_bits = edid->established_timings.t1 |
				 (edid->established_timings.t2 << 8) |
				 ((edid->established_timings.mfg_rsvd & 0x80) << 9);
	int i, modes = 0;
	struct detailed_mode_closure closure = {
		connector, edid, 0, 0, 0
	};

	for (i = 0; i <= EDID_EST_TIMINGS; i++) {
		if (est_bits & (1<<i)) {
			struct drm_display_mode *newmode;
			newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
			if (newmode) {
				drm_mode_probed_add(connector, newmode);
				modes++;
			}
		}
	}

	/* EDID 1.1+ may also carry EST III bitmaps in detailed blocks. */
	if (version_greater(edid, 1, 0))
		drm_for_each_detailed_block((u8 *)edid,
					    do_established_modes, &closure);

	return modes + closure.modes;
}
static void
do_standard_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;
	struct drm_connector *connector = closure->connector;
	struct edid *edid = closure->edid;
	int i;

	if (data->type != EDID_DETAIL_STD_MODES)
		return;

	/* A standard-timings descriptor packs six 2-byte std codes. */
	for (i = 0; i < 6; i++) {
		struct drm_display_mode *newmode;

		newmode = drm_mode_std(connector, edid,
				       &data->data.timings[i],
				       edid->revision);
		if (newmode) {
			drm_mode_probed_add(connector, newmode);
			closure->modes++;
		}
	}
}
/**
* add_standard_modes - get std. modes from EDID and add them
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
* GTF or CVT. Grab them from @edid and add them to the list.
*/
static int
add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
int i, modes = 0;
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct drm_display_mode *newmode;
newmode = drm_mode_std(connector, edid,
&edid->standard_timings[i],
edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (version_greater(edid, 1, 0))
drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
&closure);
/* XXX should also look for standard codes in VTB blocks */
return modes + closure.modes;
}
/* Add modes from a CVT 3-byte-code descriptor.  Returns the number of
 * modes added to @connector's probed list. */
static int drm_cvt_modes(struct drm_connector *connector,
			 struct detailed_timing *timing)
{
	int i, j, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;
	struct cvt_timing *cvt;
	/* NOTE(review): rates[0] (reduced-blanking 60Hz) is never used
	 * because j starts at 1 below, so the "j == 0" RB argument to
	 * drm_cvt_mode() is always false -- confirm against the CVT
	 * 3-byte code spec whether bit 0 should be honoured. */
	const int rates[] = { 60, 85, 75, 60, 50 };
	const u8 empty[3] = { 0, 0, 0 };

	/* A descriptor packs up to four 3-byte CVT codes. */
	for (i = 0; i < 4; i++) {
		int uninitialized_var(width), height;
		cvt = &(timing->data.other_data.data.cvt[i]);

		/* An all-zero code marks an unused slot. */
		if (!memcmp(cvt->code, empty, 3))
			continue;

		/* Vertical size: low 8 bits in byte 0, high 4 bits in
		 * the top nibble of byte 1, stored as (lines/2 - 1). */
		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
		/* Bits 3:2 of byte 1 select the aspect ratio; all four
		 * possible values are covered, so width is always set. */
		switch (cvt->code[1] & 0x0c) {
		case 0x00:
			width = height * 4 / 3;
			break;
		case 0x04:
			width = height * 16 / 9;
			break;
		case 0x08:
			width = height * 16 / 10;
			break;
		case 0x0c:
			width = height * 15 / 9;
			break;
		}

		/* Byte 2 holds one supported-refresh bit per rates[] entry. */
		for (j = 1; j < 5; j++) {
			if (cvt->code[2] & (1 << j)) {
				newmode = drm_cvt_mode(dev, width, height,
						       rates[j], j == 0,
						       false, false);
				if (newmode) {
					drm_mode_probed_add(connector, newmode);
					modes++;
				}
			}
		}
	}

	return modes;
}
static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;

	/* Only CVT 3-byte-code descriptors carry CVT modes. */
	if (timing->data.other_data.type == EDID_DETAIL_CVT_3BYTE)
		closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
if (version_greater(edid, 1, 2))
drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
/* XXX should also look for CVT codes in VTB blocks */
return closure.modes;
}
static void
do_detailed_mode(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct drm_display_mode *newmode;

	/* A zero pixel clock marks a non-timing descriptor; skip it. */
	if (!timing->pixel_clock)
		return;

	newmode = drm_mode_detailed(closure->connector->dev,
				    closure->edid, timing,
				    closure->quirks);
	if (!newmode)
		return;

	/* Only the first successfully parsed detailed timing may
	 * carry the preferred flag; clear it once consumed. */
	if (closure->preferred)
		newmode->type |= DRM_MODE_TYPE_PREFERRED;

	drm_mode_probed_add(closure->connector, newmode);
	closure->modes++;
	closure->preferred = 0;
}
/*
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
*/
static int
add_detailed_modes(struct drm_connector *connector, struct edid *edid,
		   u32 quirks)
{
	struct detailed_mode_closure cl = {
		connector,
		edid,
		1,		/* preferred */
		quirks,
		0
	};

	/* Before EDID 1.4 the "preferred timing" flag is controlled by
	 * a feature bit; 1.4+ always treats the first detailed timing
	 * as preferred. */
	if (!version_greater(edid, 1, 3))
		cl.preferred =
			(edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);

	drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &cl);

	return cl.modes;
}
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
/**
* Search EDID for CEA extension block.
*/
u8 *drm_find_cea_extension(struct edid *edid)
{
	int i;

	/* Nothing to search without extension blocks. */
	if (edid == NULL || edid->extensions == 0)
		return NULL;

	/* Extension blocks follow the base block at EDID_LENGTH
	 * intervals; return the first one tagged CEA. */
	for (i = 0; i < edid->extensions; i++) {
		u8 *ext = (u8 *)edid + EDID_LENGTH * (i + 1);

		if (ext[0] == CEA_EXT)
			return ext;
	}

	return NULL;
}
EXPORT_SYMBOL(drm_find_cea_extension);
/* Add modes for each short video descriptor in a CEA video data block.
 * @db points at the first descriptor byte, @len is the block length. */
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
	struct drm_device *dev = connector->dev;
	u8 * mode, cea_mode;
	int modes = 0;

	for (mode = db; mode < db + len; mode++) {
		/* Low 7 bits are the VIC; convert to a 0-based table
		 * index.  A (malformed) VIC of 0 wraps to 255 in the
		 * u8 and is rejected by the range check below,
		 * assuming drm_num_cea_modes < 255. */
		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
		if (cea_mode < drm_num_cea_modes) {
			struct drm_display_mode *newmode;
			newmode = drm_mode_duplicate(dev,
						     &edid_cea_modes[cea_mode]);
			if (newmode) {
				drm_mode_probed_add(connector, newmode);
				modes++;
			}
		}
	}

	return modes;
}
/* Scan the CEA extension's data block collection for video blocks and
 * add the modes they advertise. */
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
	u8 * cea = drm_find_cea_extension(edid);
	u8 * db, dbl;
	int modes = 0;

	/* cea[1] is the CEA revision (data blocks exist from rev 3),
	 * cea[2] is the offset of the first detailed timing; data
	 * blocks occupy bytes 4 .. cea[2]-1. */
	if (cea && cea[1] >= 3) {
		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
			/* Block header byte: tag in bits 7:5, payload
			 * length in bits 4:0. */
			dbl = db[0] & 0x1f;
			if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
				modes += do_cea_modes (connector, db+1, dbl);
		}
	}

	return modes;
}
/* Parse the HDMI Vendor-Specific Data Block pointed at by @db (the
 * block header byte) and record the sink's capabilities on @connector. */
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
{
	u8 len = db[0] & 0x1f;	/* payload length from the block header */

	/*
	 * Guard every field against the declared block length: short
	 * (but valid) HDMI VSDBs omit the trailing latency bytes, and
	 * the previous unconditional reads picked up whatever data
	 * happened to follow the block.
	 */
	if (len >= 6) {
		connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
		connector->dvi_dual = db[6] & 1;
	}
	if (len >= 7)
		connector->max_tmds_clock = db[7] * 5;	/* stored in 5 MHz units */
	if (len >= 8) {
		connector->latency_present[0] = db[8] >> 7;
		connector->latency_present[1] = (db[8] >> 6) & 1;
	}
	if (len >= 9)
		connector->video_latency[0] = db[9];
	if (len >= 10)
		connector->audio_latency[0] = db[10];
	if (len >= 11)
		connector->video_latency[1] = db[11];
	if (len >= 12)
		connector->audio_latency[1] = db[12];

	DRM_LOG_KMS("HDMI: DVI dual %d, "
		    "max TMDS clock %d, "
		    "latency present %d %d, "
		    "video latency %d %d, "
		    "audio latency %d %d\n",
		    connector->dvi_dual,
		    connector->max_tmds_clock,
		    (int) connector->latency_present[0],
		    (int) connector->latency_present[1],
		    connector->video_latency[0],
		    connector->video_latency[1],
		    connector->audio_latency[0],
		    connector->audio_latency[1]);
}
static void
monitor_name(struct detailed_timing *t, void *data)
{
	/* Stash a pointer to the monitor-name string, if this
	 * descriptor carries one. */
	if (t->data.other_data.type != EDID_DETAIL_MONITOR_NAME)
		return;

	*(u8 **)data = t->data.other_data.data.str.str;
}
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
* Some ELD fields are left to the graphics driver caller:
* - Conn_Type
* - HDCP
* - Port_ID
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
	uint8_t *eld = connector->eld;
	u8 *cea;
	u8 *name;
	u8 *db;
	int sad_count = 0;
	int mnl;
	int dbl;

	memset(eld, 0, sizeof(connector->eld));

	/* The ELD is built entirely from the CEA extension block. */
	cea = drm_find_cea_extension(edid);
	if (!cea) {
		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
		return;
	}

	/* Copy the monitor name (up to 13 chars, 0x0a-terminated in
	 * the descriptor) into the ELD at offset 20. */
	name = NULL;
	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
	for (mnl = 0; name && mnl < 13; mnl++) {
		if (name[mnl] == 0x0a)
			break;
		eld[20 + mnl] = name[mnl];
	}
	/* Byte 4: CEA EDID version in bits 7:5, name length in 4:0. */
	eld[4] = (cea[1] << 5) | mnl;
	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);

	eld[0] = 2 << 3;		/* ELD version: 2 */

	eld[16] = edid->mfg_id[0];
	eld[17] = edid->mfg_id[1];
	eld[18] = edid->prod_code[0];
	eld[19] = edid->prod_code[1];

	/* Walk the CEA data blocks for SADs, speaker allocation and
	 * the HDMI vendor block (rev 3+ only). */
	if (cea[1] >= 3)
		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
			dbl = db[0] & 0x1f;

			switch ((db[0] & 0xe0) >> 5) {
			case AUDIO_BLOCK:
				/* Audio Data Block, contains SADs */
				sad_count = dbl / 3;
				/* SADs are placed right after the name. */
				memcpy(eld + 20 + mnl, &db[1], dbl);
				break;
			case SPEAKER_BLOCK:
				/* Speaker Allocation Data Block */
				eld[7] = db[1];
				break;
			case VENDOR_BLOCK:
				/* HDMI Vendor-Specific Data Block */
				if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
					parse_hdmi_vsdb(connector, db);
				break;
			default:
				break;
			}
		}
	eld[5] |= sad_count << 4;
	/* NOTE(review): this length includes the 4-byte ELD header;
	 * the spec's Baseline_ELD_Len arguably covers only the
	 * baseline block -- confirm against the HDA/ELD spec. */
	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;

	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*/
int drm_av_sync_delay(struct drm_connector *connector,
		      struct drm_display_mode *mode)
{
	int sel = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
	int audio_ms, video_ms;

	if (!connector->latency_present[0])
		return 0;
	/* Fall back to the progressive latencies when no interlaced
	 * set was advertised. */
	if (!connector->latency_present[1])
		sel = 0;

	audio_ms = connector->audio_latency[sel];
	video_ms = connector->video_latency[sel];

	/*
	 * HDMI/DP sink doesn't support audio or video?
	 */
	if (audio_ms == 255 || video_ms == 255)
		return 0;

	/*
	 * Convert raw EDID values to millisecond: the encoding is
	 * (ms / 2) + 1, clamped to 500ms.  Treat unknown (0) as 0ms.
	 */
	if (audio_ms)
		audio_ms = min(2 * (audio_ms - 1), 500);
	if (video_ms)
		video_ms = min(2 * (video_ms - 1), 500);

	return max(video_ms - audio_ms, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);
/**
* drm_select_eld - select one ELD from multiple HDMI/DP sinks
* @encoder: the encoder just changed display mode
* @mode: the adjusted display mode
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
				     struct drm_display_mode *mode)
{
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;

	/* Return the first connector driven by this encoder that has a
	 * non-empty ELD (eld[0] carries the version field; 0 means no
	 * ELD was built). */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		if (connector->encoder == encoder && connector->eld[0])
			return connector;

	return NULL;
}
EXPORT_SYMBOL(drm_select_eld);
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
	u8 *edid_ext;
	int i, hdmi_id;
	int start_offset, end_offset;
	bool is_hdmi = false;

	edid_ext = drm_find_cea_extension(edid);
	if (!edid_ext)
		goto end;

	/* Data block offset in CEA extension block */
	start_offset = 4;
	end_offset = edid_ext[2];

	/*
	 * Because HDMI identifier is in Vendor Specific Block,
	 * search it from all data blocks of CEA extension.
	 */
	for (i = start_offset; i < end_offset;
			/* Increased by data block len */
			i += ((edid_ext[i] & 0x1f) + 1)) {
		/* Find vendor specific block */
		if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
			/* The 24-bit IEEE registration id lives in the
			 * first three payload bytes, little-endian. */
			hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
				  edid_ext[i + 3] << 16;
			/* Find HDMI identifier */
			if (hdmi_id == HDMI_IDENTIFIER)
				is_hdmi = true;
			/* Only the first vendor block is examined. */
			break;
		}
	}

end:
	return is_hdmi;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_detect_monitor_audio - check monitor audio capability
*
* Monitor should have CEA extension block.
* If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
* audio' only. If there is any audio extension block and supported
* audio format, assume at least 'basic audio' support, even if 'basic
* audio' is not defined in EDID.
*
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
	u8 *edid_ext;
	int i, j;
	bool has_audio = false;
	int start_offset, end_offset;

	edid_ext = drm_find_cea_extension(edid);
	if (!edid_ext)
		goto end;

	/* Byte 3, bit 6 of the CEA block is the "basic audio" flag. */
	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);

	if (has_audio) {
		DRM_DEBUG_KMS("Monitor has basic audio support\n");
		goto end;
	}

	/* Data block offset in CEA extension block */
	start_offset = 4;
	end_offset = edid_ext[2];

	/* No basic-audio flag: any audio data block still implies at
	 * least basic audio support. */
	for (i = start_offset; i < end_offset;
			i += ((edid_ext[i] & 0x1f) + 1)) {
		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
			has_audio = true;
			/* Log each 3-byte SAD's coded format (bits 6:3
			 * of the first SAD byte). */
			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
				DRM_DEBUG_KMS("CEA audio format %d\n",
					      (edid_ext[i + j] >> 3) & 0xf);
			goto end;
		}
	}

end:
	return has_audio;
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
				 struct drm_display_info *info)
{
	u8 *edid_ext;

	info->width_mm = edid->width_cm * 10;
	info->height_mm = edid->height_cm * 10;

	/* driver figures it out in this case */
	info->bpc = 0;
	info->color_formats = 0;

	/* Only defined for 1.4 with digital displays */
	if (edid->revision < 4)
		return;

	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
		return;

	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
	case DRM_EDID_DIGITAL_DEPTH_6:
		info->bpc = 6;
		break;
	case DRM_EDID_DIGITAL_DEPTH_8:
		info->bpc = 8;
		break;
	case DRM_EDID_DIGITAL_DEPTH_10:
		info->bpc = 10;
		break;
	case DRM_EDID_DIGITAL_DEPTH_12:
		info->bpc = 12;
		break;
	case DRM_EDID_DIGITAL_DEPTH_14:
		info->bpc = 14;
		break;
	case DRM_EDID_DIGITAL_DEPTH_16:
		info->bpc = 16;
		break;
	case DRM_EDID_DIGITAL_DEPTH_UNDEF:
	default:
		info->bpc = 0;
		break;
	}

	/*
	 * RGB 4:4:4 is always supported; YCbCr support is advertised
	 * via the EDID feature byte.  The previous code tested the
	 * feature bits against info->color_formats (just set to
	 * RGB444) and overwrote instead of OR-ing, so the YCbCr checks
	 * could never trigger correctly and RGB444 would be lost.
	 */
	info->color_formats = DRM_COLOR_FORMAT_RGB444;
	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;

	/* Get data from CEA blocks if present */
	edid_ext = drm_find_cea_extension(edid);
	if (!edid_ext)
		return;

	info->cea_rev = edid_ext[1];
}
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
*
* Add the specified modes to the connector's mode list.
*
* Return number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
	int num_modes = 0;
	u32 quirks;

	/* A missing EDID is not an error, just no modes. */
	if (edid == NULL) {
		return 0;
	}
	/* Reject EDIDs with bad header/checksum outright. */
	if (!drm_edid_is_valid(edid)) {
		dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
			 drm_get_connector_name(connector));
		return 0;
	}

	/* Per-monitor fixups applied throughout the parsers below. */
	quirks = edid_get_quirks(edid);

	/*
	 * EDID spec says modes should be preferred in this order:
	 * - preferred detailed mode
	 * - other detailed modes from base block
	 * - detailed modes from extension blocks
	 * - CVT 3-byte code modes
	 * - standard timing codes
	 * - established timing codes
	 * - modes inferred from GTF or CVT range information
	 *
	 * We get this pretty much right.
	 *
	 * XXX order for additional mode types in extension blocks?
	 */
	num_modes += add_detailed_modes(connector, edid, quirks);
	num_modes += add_cvt_modes(connector, edid);
	num_modes += add_standard_modes(connector, edid);
	num_modes += add_established_modes(connector, edid);
	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
		num_modes += add_inferred_modes(connector, edid);
	num_modes += add_cea_modes(connector, edid);

	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
		edid_fixup_preferred(connector, quirks);

	/* Record bpp/color-format/size info on the connector. */
	drm_add_display_info(edid, &connector->display_info);

	if (quirks & EDID_QUIRK_FORCE_8BPC)
		connector->display_info.bpc = 8;

	return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
/**
* drm_add_modes_noedid - add modes for the connectors without EDID
* @connector: connector we're probing
* @hdisplay: the horizontal display limit
* @vdisplay: the vertical display limit
*
* Add the specified modes to the connector's mode list. Only when the
* hdisplay/vdisplay is not beyond the given limit, it will be added.
*
* Return number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
			 int hdisplay, int vdisplay)
{
	int i, num_modes = 0;
	struct drm_display_mode *dup;
	struct drm_device *dev = connector->dev;
	const int count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);

	/* Negative limits mean "no limit". */
	if (hdisplay < 0)
		hdisplay = 0;
	if (vdisplay < 0)
		vdisplay = 0;

	for (i = 0; i < count; i++) {
		const struct drm_display_mode *ptr = &drm_dmt_modes[i];

		/*
		 * Only when both limits are valid are they used to
		 * decide whether the mode should be added to the
		 * connector's mode list.
		 */
		if (hdisplay && vdisplay &&
		    (ptr->hdisplay > hdisplay || ptr->vdisplay > vdisplay))
			continue;

		/* Skip high-refresh modes. */
		if (drm_mode_vrefresh(ptr) > 61)
			continue;

		dup = drm_mode_duplicate(dev, ptr);
		if (dup) {
			drm_mode_probed_add(connector, dup);
			num_modes++;
		}
	}

	return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
| gpl-2.0 |
meg23/gnucash | src/gnome-utils/gnc-period-select.c | 6 | 25162 | /*
* gnc-period-select.c -- Accounting Period selection widget
*
* Copyright (c) 2005 David Hampton <hampton@employees.org>
* All rights reserved.
*
* Gnucash is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public License
* as published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* Gnucash is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, contact:
*
* Free Software Foundation Voice: +1-617-542-5942
* 51 Franklin Street, Fifth Floor Fax: +1-617-542-2652
* Boston, MA 02110-1301, USA gnu@gnu.org
*/
/** @addtogroup GUI
@{ */
/** @file gnc-period-select.c
@brief A custom widget for selecting accounting periods.
@author David Hampton <hampton@employees.org>
*/
#include "config.h"
#include <gtk/gtk.h>
#include <glib/gi18n.h>
#include "gnc-date.h"
#include "gnc-period-select.h"
#include "gnc-prefs.h"
#include <gnc-gdate-utils.h>
enum
{
PROP_0,
PROP_FY_END,
PROP_SHOW_DATE,
PROP_DATE_BASE,
PROP_PS_ACTIVE,
};
enum
{
CHANGED,
LAST_SIGNAL
};
static guint signals[LAST_SIGNAL] = { 0 };
/** Declarations *********************************************************/
static void gnc_period_select_init (GncPeriodSelect *gce);
static void gnc_period_select_class_init (GncPeriodSelectClass *klass);
static void gnc_period_select_finalize (GObject *object);
static GtkComboBoxClass *parent_class;
/* Combo box labels for the start-of-period variant, indexed by
 * GncAccountingPeriod.  The first seven entries are calendar-year
 * periods; the last two (fiscal) entries are only appended to the
 * selector when a fiscal year end has been set. */
const gchar *start_strings[GNC_ACCOUNTING_PERIOD_LAST] =
{
    /* CY (current year) Strings */
    N_("Today"),
    N_("Start of this month"),
    N_("Start of previous month"),
    N_("Start of this quarter"),
    N_("Start of previous quarter"),
    N_("Start of this year"),
    N_("Start of previous year"),
    /* FY (fiscal year) Strings */
    N_("Start of this accounting period"),
    N_("Start of previous accounting period"),
};

/* Combo box labels for the end-of-period variant; same indexing and
 * fiscal-entry rules as start_strings above. */
const gchar *end_strings[GNC_ACCOUNTING_PERIOD_LAST] =
{
    /* CY (current year) Strings */
    N_("Today"),
    N_("End of this month"),
    N_("End of previous month"),
    N_("End of this quarter"),
    N_("End of previous quarter"),
    N_("End of this year"),
    N_("End of previous year"),
    /* FY (fiscal year) Strings */
    N_("End of this accounting period"),
    N_("End of previous accounting period"),
};
/** Private Data Structure ***********************************************/

typedef struct _GncPeriodSelectPrivate GncPeriodSelectPrivate;
struct _GncPeriodSelectPrivate
{
    GtkWidget *selector;    /* GtkComboBoxText listing the period names */
    gboolean start;         /* TRUE: periods are starts; FALSE: ends */
    GDate *fy_end;          /* fiscal year end (day/month only), or NULL */
    GDate *date_base;       /* base date for sample calculations, or NULL */
    GtkWidget *date_label;  /* sample-date label, NULL when hidden */
    GtkWidget *date_align;  /* alignment wrapping date_label, NULL when hidden */
};

#define GNC_PERIOD_SELECT_GET_PRIVATE(o)  \
   (G_TYPE_INSTANCE_GET_PRIVATE ((o), GNC_TYPE_PERIOD_SELECT, GncPeriodSelectPrivate))
/************************************************************/
/* Signal Functions */
/************************************************************/
/* Tells a GncPeriodSelect object to emit a "changed" signal.
*/
static void
gnc_period_select_changed (GncPeriodSelect *period)
{
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));

    /* Forward the notification to anyone watching this widget. */
    g_signal_emit(G_OBJECT(period), signals[CHANGED], 0);
}
/** Update the user visible sample date label if it exists on this
* widget. This label is for user feedback only.
*
* @param period The GncPeriodSelect object to update.
*/
static void
gnc_period_sample_update_date_label (GncPeriodSelect *period)
{
    GncPeriodSelectPrivate *priv;
    gchar time_string[MAX_DATE_LENGTH];
    GDate *date;
    GncAccountingPeriod which;

    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));
    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    /* Nothing to do while the sample label is hidden. */
    if (!priv->date_label)
        return;
    which = gtk_combo_box_get_active (GTK_COMBO_BOX (priv->selector));

    /* No selection yet: use an arbitrary fixed sample date so the
     * label still demonstrates the current date format. */
    if (which == -1)
        date = g_date_new_dmy (31, 7, 2013);
    else if (priv->start)
        date = gnc_accounting_period_start_gdate (which, priv->fy_end,
                priv->date_base);
    else
        date = gnc_accounting_period_end_gdate (which, priv->fy_end,
                priv->date_base);

    /* Render the date and free the temporary GDate. */
    qof_print_gdate (time_string, MAX_DATE_LENGTH, date);
    gtk_label_set_label (GTK_LABEL(priv->date_label), time_string);
    g_date_free (date);
}
/** Handle the "changed" signal from the GtkComboBox that is embedded
* in this GncPeriodSelect object. When called, this function
* will delegate the actual update work to the GncPeriodSelect widget
* to do the necessary updates of internal widgets and state.
*
* @param box The combo box that changed.
*
* @param period The GncPeriodSelect containing the combo box.
*/
static void
gnc_period_sample_combobox_changed (GtkComboBox *box, GncPeriodSelect *period)
{
    gint selection;

    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));

    /* Route the change through the "active" property so all the
     * normal property-change handling runs. */
    selection = gtk_combo_box_get_active(box);
    g_object_set(G_OBJECT(period), "active", selection, NULL);
}
/** Handle an application wide change in the date format. This
* function will be called when the preference for the date format is
* updated. It doesn't really care what the new format is, because
* the date string is generated elsewhere. It just needs to know to
* update the date label so that it matches the newly selected format.
*
* @param prefs Unused.
*
* @param pref Unused.
*
* @param period The GncPeriodSelect that needs to be updated.
*/
static void
gnc_period_sample_new_date_format (gpointer prefs, gchar *pref,
                                   GncPeriodSelect *period)
{
    /* Re-render the sample date; the format is applied when the
     * label text is regenerated. */
    gnc_period_sample_update_date_label(period);
}
/************************************************************/
/* Property Functions */
/************************************************************/
/* Set an item in the GncPeriodSelect to be the active one.
* This will first update the internal GtkCombobox (blocking
* its "changed" callback to prevent an infinite loop).
* Then it will update the sample label and finally it will
* emit a "changed" signal of it's own for other objects
* listening for this signal.
*/
static void
gnc_period_select_set_active_internal (GncPeriodSelect *period,
                                       GncAccountingPeriod which)
{
    GncPeriodSelectPrivate *priv;

    g_return_if_fail(period != NULL);
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));
    g_return_if_fail(which >= 0);
    g_return_if_fail(which < GNC_ACCOUNTING_PERIOD_LAST);

    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);

    /* Update the combo box with the "changed" handler blocked so the
     * selection change does not re-enter this code path. */
    g_signal_handlers_block_by_func(G_OBJECT(period),
                                    G_CALLBACK(gnc_period_sample_combobox_changed), period);
    gtk_combo_box_set_active(GTK_COMBO_BOX(priv->selector), which);
    g_signal_handlers_unblock_by_func(G_OBJECT(period),
                                      G_CALLBACK(gnc_period_sample_combobox_changed), period);

    /* Update this widget */
    gnc_period_sample_update_date_label(period);

    /* Pass it on... */
    gnc_period_select_changed(period);
}
/** @name GncPeriodSelect Properties
@{ */
/* Get the current value of the fiscal year end setting from a
* GncPeriodSelect widget. If the result is NULL then fiscal years
* are not currently supported.
*/
GDate *
gnc_period_select_get_fy_end (GncPeriodSelect *period)
{
    GncPeriodSelectPrivate *priv;

    g_return_val_if_fail(period != NULL, NULL);
    g_return_val_if_fail(GNC_IS_PERIOD_SELECT(period), NULL);

    /* Fetch the private data only after the argument checks; the old
     * code dereferenced the instance before validating it and then
     * fetched the private data a second time. */
    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    if (!priv->fy_end)
        return NULL;

    /* Caller owns the returned copy; only day/month are meaningful. */
    return g_date_new_dmy(g_date_get_day(priv->fy_end),
                          g_date_get_month(priv->fy_end),
                          G_DATE_BAD_YEAR);
}
/* Set the fiscal year end on a GncPeriodSelect widget. If set to a
* value other than NULL then widget will include fiscal accounting
* period like "this fiscal year".
*/
void
gnc_period_select_set_fy_end (GncPeriodSelect *period, const GDate *fy_end)
{
    GncPeriodSelectPrivate *priv;
    const gchar *label;
    gint i;

    g_return_if_fail(period != NULL);
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));

    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    if (priv->fy_end)
        g_date_free(priv->fy_end);

    if (fy_end)
    {
        /* Only day/month matter for a recurring fiscal year end. */
        priv->fy_end = g_date_new_dmy(g_date_get_day(fy_end),
                                      g_date_get_month(fy_end),
                                      G_DATE_BAD_YEAR);
    }
    else
    {
        priv->fy_end = NULL;
    }

    if (fy_end)
    {
        /* Append the fiscal-period entries to the selector. */
        for (i = GNC_ACCOUNTING_PERIOD_CYEAR_LAST; i < GNC_ACCOUNTING_PERIOD_FYEAR_LAST; i++)
        {
            label = priv->start ? _(start_strings[i]) : _(end_strings[i]);
            gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(priv->selector), label);
        }
    }
    else
    {
        /* Remove the fiscal-period entries again, last first so the
         * indices stay valid.  The old loop compared against
         * GNC_ACCOUNTING_PERIOD_FYEAR_LAST and therefore never ran,
         * leaving stale fiscal entries in the combo box. */
        for (i = GNC_ACCOUNTING_PERIOD_FYEAR_LAST - 1; i >= GNC_ACCOUNTING_PERIOD_CYEAR_LAST; i--)
        {
            gtk_combo_box_text_remove(GTK_COMBO_BOX_TEXT(priv->selector), i);
        }
    }
}
/* Shared worker for the date-base and show-date setters.  A non-NULL
 * @date stores a copy as the new base date and (lazily) creates the
 * sample label; NULL drops the base date and tears the label down. */
static void
gnc_period_select_set_date_common (GncPeriodSelect *period, const GDate *date)
{
    GncPeriodSelectPrivate *priv;

    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    if (date)
    {
        /* Replace any previous base date with a copy of the new one. */
        if (priv->date_base)
            g_date_free(priv->date_base);
        priv->date_base = g_date_new_dmy(g_date_get_day(date),
                                         g_date_get_month(date),
                                         g_date_get_year(date));

        /* Build the sample label (inside an alignment for padding)
         * the first time a base date is set. */
        if (priv->date_label == NULL)
        {
            priv->date_align = gtk_alignment_new(0.5, 0.5, 0, 0);
            gtk_alignment_set_padding(GTK_ALIGNMENT(priv->date_align), 0, 0, 6, 0);
            gtk_box_pack_start(GTK_BOX(period), priv->date_align, TRUE, TRUE, 0);
            priv->date_label = gtk_label_new("");
            gtk_container_add(GTK_CONTAINER(priv->date_align), priv->date_label);
            gtk_widget_show_all(priv->date_align);
        }
        gnc_period_sample_update_date_label(period);
        return;
    }

    /* date == NULL: free the base date and destroy the label. */
    if (priv->date_base)
    {
        g_date_free(priv->date_base);
        priv->date_base = NULL;
        gtk_widget_destroy(priv->date_align);
        priv->date_align = NULL;
        priv->date_label = NULL;
    }
}
/* Get the current value of the "show date" setting from a
* GncPeriodSelect widget.
*/
gboolean
gnc_period_select_get_show_date (GncPeriodSelect *period)
{
    GncPeriodSelectPrivate *priv;

    g_return_val_if_fail(period != NULL, FALSE);
    g_return_val_if_fail(GNC_IS_PERIOD_SELECT(period), FALSE);

    /* The sample date is shown exactly when a base date is set. */
    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    return priv->date_base != NULL;
}
/* Set the "show date" setting on a GncPeriodSelect widget. If set
* to TRUE then a GtkLabel will be used to show the date
* corresponding to the selected time period.
*/
void
gnc_period_select_set_show_date (GncPeriodSelect *period, const gboolean show_date)
{
    GDate today;

    g_return_if_fail(period != NULL);
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));

    if (!show_date)
    {
        gnc_period_select_set_date_common(period, NULL);
        return;
    }

    /* Use today's date as the base for the sample label. */
    g_date_clear(&today, 1);
    gnc_gdate_set_time64(&today, gnc_time (NULL));
    gnc_period_select_set_date_common(period, &today);
}
GDate *
gnc_period_select_get_date_base (GncPeriodSelect *period)
{
    GncPeriodSelectPrivate *priv;

    g_return_val_if_fail(period != NULL, NULL);
    g_return_val_if_fail(GNC_IS_PERIOD_SELECT(period), NULL);

    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    if (priv->date_base == NULL)
        return NULL;

    /* Hand back a copy; the caller owns (and frees) it. */
    return g_date_new_dmy(g_date_get_day(priv->date_base),
                          g_date_get_month(priv->date_base),
                          g_date_get_year(priv->date_base));
}
/* Set the base date used by a GncPeriodSelect widget. All example
* dates presented by the widget will be computed from this date.
*/
void
gnc_period_select_set_date_base (GncPeriodSelect *period, const GDate *date_base)
{
    g_return_if_fail(period != NULL);
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));

    /* All of the real work is shared with the show-date setter. */
    gnc_period_select_set_date_common(period, date_base);
}
/** Retrieve a property specific to this GncPeriodSelect object. This is
* nothing more than a dispatch function for routines that can be
* called directly. It has the nice feature of allowing a single
* function call to retrieve multiple properties.
*
* @internal
*/
static void
gnc_period_select_get_property (GObject *object,
                                guint prop_id,
                                GValue *value,
                                GParamSpec *pspec)
{
    GncPeriodSelect *period = GNC_PERIOD_SELECT(object);

    /* Each property simply delegates to the matching public getter. */
    switch (prop_id)
    {
    case PROP_FY_END:
        g_value_set_pointer(value, gnc_period_select_get_fy_end(period));
        break;
    case PROP_SHOW_DATE:
        g_value_set_boolean(value, gnc_period_select_get_show_date(period));
        break;
    case PROP_DATE_BASE:
        g_value_set_pointer(value, gnc_period_select_get_date_base(period));
        break;
    case PROP_PS_ACTIVE:
        g_value_set_int(value, gnc_period_select_get_active(period));
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
        break;
    }
}
/** Set a property specific to this GncPeriodSelect object. This is
* nothing more than a dispatch function for routines that can be
* called directly. It has the nice feature of allowing a new widget
* to be created with a varargs list specifying the properties,
* instead of having to explicitly call each property function.
*
* @internal
*/
static void
gnc_period_select_set_property (GObject *object,
                                guint prop_id,
                                const GValue *value,
                                GParamSpec *pspec)
{
    GncPeriodSelect *period = GNC_PERIOD_SELECT(object);

    /* Each property simply delegates to the matching setter. */
    switch (prop_id)
    {
    case PROP_FY_END:
        gnc_period_select_set_fy_end(period, g_value_get_pointer(value));
        break;
    case PROP_SHOW_DATE:
        gnc_period_select_set_show_date(period, g_value_get_boolean(value));
        break;
    case PROP_DATE_BASE:
        gnc_period_select_set_date_base(period, g_value_get_pointer(value));
        break;
    case PROP_PS_ACTIVE:
        gnc_period_select_set_active_internal(period, g_value_get_int(value));
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
        break;
    }
}
/** @} */
/************************************************************/
/* Core Implementation */
/************************************************************/
/** @name GncPeriodSelect Core Implementation
@{ */
/* Returns the GType of a GncPeriodSelect widget, registering it with
 * the GObject type system on first use.
 */
GType
gnc_period_select_get_type (void)
{
    static GType type = 0;

    if (type == 0)
    {
        static const GTypeInfo info =
        {
            sizeof (GncPeriodSelectClass),
            NULL,                                   /* base_init */
            NULL,                                   /* base_finalize */
            (GClassInitFunc) gnc_period_select_class_init,
            NULL,                                   /* class_finalize */
            NULL,                                   /* class_data */
            sizeof (GncPeriodSelect),
            0,                                      /* n_preallocs */
            (GInstanceInitFunc) gnc_period_select_init,
            NULL                                    /* value_table */
        };

        type = g_type_register_static(GTK_TYPE_HBOX, "GncPeriodSelect",
                                      &info, 0);
    }
    return type;
}
/** Initialize the class for the Period Selection widget.  This
 *  will set up any function pointers that override functions in the
 *  parent class, and also installs the properties that are unique to
 *  this class.
 *
 *  @param klass The new class structure created by the object system.
 *
 *  @internal
 */
static void
gnc_period_select_class_init (GncPeriodSelectClass *klass)
{
    GObjectClass *gobject_class;

    /* Remember the parent class so finalize can chain up to it. */
    parent_class = g_type_class_peek_parent(klass);

    /* Wire the GObject vtable entries to this widget's handlers. */
    gobject_class = G_OBJECT_CLASS(klass);
    gobject_class->set_property = gnc_period_select_set_property;
    gobject_class->get_property = gnc_period_select_get_property;
    gobject_class->finalize = gnc_period_select_finalize;

    /* "changed" has no arguments; it only announces that the selection
     * (or something affecting it) was modified. */
    signals[CHANGED] = g_signal_new("changed",
                                    G_OBJECT_CLASS_TYPE (klass),
                                    G_SIGNAL_RUN_FIRST,
                                    G_STRUCT_OFFSET(GncPeriodSelectClass, changed),
                                    NULL, NULL,
                                    g_cclosure_marshal_VOID__VOID,
                                    G_TYPE_NONE,
                                    0);

    /* Install the four object properties handled by
     * gnc_period_select_get/set_property above. */
    g_object_class_install_property(gobject_class,
                                    PROP_FY_END,
                                    g_param_spec_pointer("fy-end",
                                            "Fiscal Year End",
                                            "The fiscal year to use for this widget",
                                            G_PARAM_READWRITE));
    g_object_class_install_property(gobject_class,
                                    PROP_SHOW_DATE,
                                    g_param_spec_boolean("show-date",
                                            "Show Date",
                                            "Show the start/end date of the accounting period in this widget",
                                            FALSE,
                                            G_PARAM_READWRITE));
    g_object_class_install_property(gobject_class,
                                    PROP_DATE_BASE,
                                    g_param_spec_pointer("date-base",
                                            "Date Base",
                                            "The starting date to use for display calculations",
                                            G_PARAM_READWRITE));
    g_object_class_install_property(gobject_class,
                                    PROP_PS_ACTIVE,
                                    g_param_spec_int("active",
                                            "Active period",
                                            "The currently selected period in the list of periods",
                                            -1,
                                            G_MAXINT,
                                            0,
                                            G_PARAM_READWRITE));

    g_type_class_add_private(klass, sizeof(GncPeriodSelectPrivate));
}
/** Instance initializer for a gnucash accounting period selection
 *  widget.  Only seeds the private data; the real widget construction
 *  happens in gnc_period_select_new().
 *
 *  @param period The new object instance created by the object system.
 *
 *  @internal
 */
static void
gnc_period_select_init (GncPeriodSelect *period)
{
    /* Default to "Start of..." style labels until told otherwise. */
    GNC_PERIOD_SELECT_GET_PRIVATE(period)->start = TRUE;
}
/** Finalize the GncPeriodSelect object.  This function is called from
 *  the G_Object level to complete the destruction of the object.  It
 *  should release any memory not previously released by the destroy
 *  function (i.e. the private data structure), then chain up to the
 *  parent's destroy function.
 *
 *  @param object The object being destroyed.
 *
 *  @internal
 */
static void
gnc_period_select_finalize (GObject *object)
{
    GncPeriodSelectPrivate *priv;
    GncPeriodSelect *period;

    g_return_if_fail (object != NULL);
    g_return_if_fail (GNC_IS_PERIOD_SELECT (object));

    period = GNC_PERIOD_SELECT(object);
    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);

    /* Stop tracking changes to date formatting; the callback was
     * registered in gnc_period_select_new(). */
    gnc_prefs_remove_cb_by_func (GNC_PREFS_GROUP_GENERAL, GNC_PREF_DATE_FORMAT,
                                 gnc_period_sample_new_date_format, period);

    /* The selector and date_label were added to the hbox.  They will be
     * freed automatically. */

    /* Release the GDates duplicated by the property setters. */
    if (priv->fy_end)
        g_date_free(priv->fy_end);
    if (priv->date_base)
        g_date_free(priv->date_base);

    /* Do not free the private data structure.  It is part of a larger
     * memory block allocated by the type system. */

    if (G_OBJECT_CLASS(parent_class)->finalize)
        (* G_OBJECT_CLASS(parent_class)->finalize) (object);
}
/* Create a new GncPeriodSelect widget which is used to select an
 * accounting period like "previous month" or "this year".
 *
 * @param starting_labels If TRUE all labels read "Start of..."; if
 * FALSE they read "End of...".
 *
 * @return A GncPeriodSelect widget.
 */
GtkWidget *
gnc_period_select_new (gboolean starting_labels)
{
    GncPeriodSelectPrivate *priv;
    GncPeriodSelect *period;
    gint idx;

    period = g_object_new(GNC_TYPE_PERIOD_SELECT, NULL);

    /* Set up private data structures */
    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    priv->selector = gtk_combo_box_text_new();
    priv->start = starting_labels;

    /* Pack the combo box into the hbox and make it visible */
    gtk_box_pack_start(GTK_BOX(period), priv->selector, TRUE, TRUE, 0);
    gtk_widget_show(priv->selector);

    /* Re-emit combo box changes as this widget's "changed" signal */
    g_signal_connect(G_OBJECT(priv->selector), "changed",
                     G_CALLBACK(gnc_period_sample_combobox_changed), period);

    /* Populate every entry except the fiscal year ones */
    for (idx = 0; idx < GNC_ACCOUNTING_PERIOD_CYEAR_LAST; idx++)
    {
        const gchar *text = starting_labels ? _(start_strings[idx])
                                            : _(end_strings[idx]);
        gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(priv->selector), text);
    }

    /* Track changes to date formatting */
    gnc_prefs_register_cb (GNC_PREFS_GROUP_GENERAL, GNC_PREF_DATE_FORMAT,
                           gnc_period_sample_new_date_format, period);

    return GTK_WIDGET (period);
}
/* Glade constructor for GncPeriodSelect.  int1 selects start (non-zero)
 * versus end labels; a non-zero int2 additionally shows an example
 * date.  The name/string arguments are required by the glade custom
 * widget signature but are unused.
 */
GtkWidget *
gnc_period_select_new_glade (gchar *widget_name,
                             gchar *string1, gchar *string2,
                             gint int1, gint int2)
{
    GtkWidget *widget = gnc_period_select_new(int1 != 0);

    if (int2)
        gnc_period_select_set_show_date(GNC_PERIOD_SELECT(widget), TRUE);
    gtk_widget_show(widget);
    return widget;
}
/** @} */
/************************************************************/
/* Auxiliary Functions */
/************************************************************/
/* Select which item in the GncPeriodSelect is active.  Typically used
 * to establish the initial selection before the widget is shown.
 */
void
gnc_period_select_set_active (GncPeriodSelect *period,
                              GncAccountingPeriod which)
{
    g_return_if_fail(period != NULL);
    g_return_if_fail(GNC_IS_PERIOD_SELECT(period));
    g_return_if_fail(which >= 0);
    g_return_if_fail(which < GNC_ACCOUNTING_PERIOD_LAST);

    /* Route through the property system so setter logic and change
     * notification stay in one place. */
    g_object_set (G_OBJECT (period), "active", which, NULL);
}
/* Return the user's current selection as a GncAccountingPeriod enum
 * value, or -1 when nothing is selected or the argument is invalid.
 */
GncAccountingPeriod
gnc_period_select_get_active (GncPeriodSelect *period)
{
    GtkComboBox *combo;

    g_return_val_if_fail(period != NULL, -1);
    g_return_val_if_fail(GNC_IS_PERIOD_SELECT(period), -1);

    combo = GTK_COMBO_BOX(GNC_PERIOD_SELECT_GET_PRIVATE(period)->selector);
    return gtk_combo_box_get_active(combo);
}
/* Return the user's current selection as a freshly computed GDate, or
 * NULL when nothing is selected.  Whether the start or end date of the
 * period is produced depends on how the widget was created.
 */
GDate *
gnc_period_select_get_date (GncPeriodSelect *period)
{
    GncPeriodSelectPrivate *priv;
    GncAccountingPeriod which;

    g_return_val_if_fail(period != NULL, NULL);
    g_return_val_if_fail(GNC_IS_PERIOD_SELECT(period), NULL);

    priv = GNC_PERIOD_SELECT_GET_PRIVATE(period);
    which = gtk_combo_box_get_active(GTK_COMBO_BOX(priv->selector));
    if (which == -1)
        return NULL;

    return priv->start
           ? gnc_accounting_period_start_gdate(which, priv->fy_end,
                                               priv->date_base)
           : gnc_accounting_period_end_gdate(which, priv->fy_end,
                                             priv->date_base);
}
/** @} */
| gpl-2.0 |
richardtrip/noteII | drivers/gpio/pca953x.c | 1542 | 17438 | /*
* pca953x.c - 4/8/16 bit I/O ports
*
* Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
* Copyright (C) 2007 Marvell International Ltd.
*
* Derived from drivers/i2c/chips/pca9539.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/slab.h>
#ifdef CONFIG_OF_GPIO
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#endif
#define PCA953X_INPUT 0
#define PCA953X_OUTPUT 1
#define PCA953X_INVERT 2
#define PCA953X_DIRECTION 3
#define PCA957X_IN 0
#define PCA957X_INVRT 1
#define PCA957X_BKEN 2
#define PCA957X_PUPD 3
#define PCA957X_CFG 4
#define PCA957X_OUT 5
#define PCA957X_MSK 6
#define PCA957X_INTS 7
#define PCA_GPIO_MASK 0x00FF
#define PCA_INT 0x0100
#define PCA953X_TYPE 0x1000
#define PCA957X_TYPE 0x2000
/* Supported parts.  driver_data packs the GPIO count (low byte,
 * PCA_GPIO_MASK), an interrupt-capable flag (PCA_INT), and the
 * register-layout family (PCA953X_TYPE or PCA957X_TYPE). */
static const struct i2c_device_id pca953x_id[] = {
	{ "pca9534", 8  | PCA953X_TYPE | PCA_INT, },
	{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
	{ "pca9536", 4  | PCA953X_TYPE, },
	{ "pca9537", 4  | PCA953X_TYPE | PCA_INT, },
	{ "pca9538", 8  | PCA953X_TYPE | PCA_INT, },
	{ "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
	{ "pca9554", 8  | PCA953X_TYPE | PCA_INT, },
	{ "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
	{ "pca9556", 8  | PCA953X_TYPE, },
	{ "pca9557", 8  | PCA953X_TYPE, },
	{ "pca9574", 8  | PCA957X_TYPE | PCA_INT, },
	{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
	{ "max7310", 8  | PCA953X_TYPE, },
	{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
	{ "max7313", 16 | PCA953X_TYPE | PCA_INT, },
	{ "max7315", 8  | PCA953X_TYPE | PCA_INT, },
	{ "pca6107", 8  | PCA953X_TYPE | PCA_INT, },
	{ "tca6408", 8  | PCA953X_TYPE | PCA_INT, },
	{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
	/* NYET:  { "tca6424", 24, }, */
	{ }
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
/* Per-device state for one expander. */
struct pca953x_chip {
	unsigned gpio_start;		/* first GPIO number handed to gpiolib */
	uint16_t reg_output;		/* cached copy of the output register */
	uint16_t reg_direction;		/* cached copy of the direction register */
	struct mutex i2c_lock;		/* serializes register read-modify-write */

#ifdef CONFIG_GPIO_PCA953X_IRQ
	struct mutex irq_lock;		/* held from bus_lock to bus_sync_unlock */
	uint16_t irq_mask;		/* software enable mask; consulted in
					 * pca953x_irq_pending(), never written
					 * to the hardware */
	uint16_t irq_stat;		/* last observed input state */
	uint16_t irq_trig_raise;	/* lines triggering on rising edge */
	uint16_t irq_trig_fall;		/* lines triggering on falling edge */
	int irq_base;			/* first Linux irq number, -1 if unused */
#endif

	struct i2c_client *client;
	struct pca953x_platform_data *dyn_pdata;	/* pdata built from OF;
							 * kfree'd by this driver */
	struct gpio_chip gpio_chip;
	const char *const *names;	/* optional per-line names for gpiolib */
	int chip_type;			/* PCA953X_TYPE or PCA957X_TYPE */
};
/* Write one logical register.  Narrow chips take a single byte; wide
 * PCA953x chips accept a 16-bit SMBus word, while wide PCA957x chips
 * have no word access and get two consecutive byte writes.
 * Returns 0 on success or a negative errno.
 */
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
{
	int ret = 0;

	if (chip->gpio_chip.ngpio <= 8) {
		ret = i2c_smbus_write_byte_data(chip->client, reg, val);
	} else {
		switch (chip->chip_type) {
		case PCA953X_TYPE:
			ret = i2c_smbus_write_word_data(chip->client,
							reg << 1, val);
			break;
		case PCA957X_TYPE:
			/* low byte first, then high byte */
			ret = i2c_smbus_write_byte_data(chip->client,
							reg << 1, val & 0xff);
			if (ret >= 0)
				ret = i2c_smbus_write_byte_data(chip->client,
								(reg << 1) + 1,
								val >> 8);
			break;
		}
	}

	if (ret < 0) {
		dev_err(&chip->client->dev, "failed writing register\n");
		return ret;
	}

	return 0;
}
/* Read one logical register into *val (byte for narrow chips, SMBus
 * word for wide ones).  Returns 0 on success or a negative errno, in
 * which case *val is left untouched.
 */
static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
{
	int ret;

	ret = (chip->gpio_chip.ngpio <= 8)
		? i2c_smbus_read_byte_data(chip->client, reg)
		: i2c_smbus_read_word_data(chip->client, reg << 1);
	if (ret < 0) {
		dev_err(&chip->client->dev, "failed reading register\n");
		return ret;
	}

	*val = (uint16_t)ret;
	return 0;
}
/* gpiolib callback: configure one line as an input by setting its
 * direction bit.  Returns 0 on success or the i2c write error.
 */
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
	struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
						 gpio_chip);
	uint16_t dir;
	int ret, reg = 0;

	/* chip_type is fixed at probe time, so pick the register first */
	switch (chip->chip_type) {
	case PCA953X_TYPE:
		reg = PCA953X_DIRECTION;
		break;
	case PCA957X_TYPE:
		reg = PCA957X_CFG;
		break;
	}

	mutex_lock(&chip->i2c_lock);
	dir = chip->reg_direction | (1u << off);
	ret = pca953x_write_reg(chip, reg, dir);
	if (!ret)
		chip->reg_direction = dir;	/* keep cache in sync */
	mutex_unlock(&chip->i2c_lock);
	return ret;
}
/* gpiolib callback: drive one line as an output at the given level.
 * The output latch is written before the direction bit so the pin
 * never glitches to a stale level.  Returns 0 or an i2c write error.
 */
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
		unsigned off, int val)
{
	struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
						 gpio_chip);
	int out_reg = 0, dir_reg = 0;
	uint16_t bits;
	int ret;

	switch (chip->chip_type) {
	case PCA953X_TYPE:
		out_reg = PCA953X_OUTPUT;
		dir_reg = PCA953X_DIRECTION;
		break;
	case PCA957X_TYPE:
		out_reg = PCA957X_OUT;
		dir_reg = PCA957X_CFG;
		break;
	}

	mutex_lock(&chip->i2c_lock);

	/* set output level first ... */
	bits = val ? (chip->reg_output | (1u << off))
		   : (chip->reg_output & ~(1u << off));
	ret = pca953x_write_reg(chip, out_reg, bits);
	if (ret)
		goto unlock;
	chip->reg_output = bits;

	/* ... then switch the pin to output */
	bits = chip->reg_direction & ~(1u << off);
	ret = pca953x_write_reg(chip, dir_reg, bits);
	if (ret)
		goto unlock;
	chip->reg_direction = bits;
	ret = 0;
unlock:
	mutex_unlock(&chip->i2c_lock);
	return ret;
}
/* gpiolib callback: read the current level of one line from the input
 * register.  Returns 0 or 1; on a failed i2c read it returns 0, since
 * the callback cannot report errors (diagnostic already printed by
 * pca953x_read_reg()).
 *
 * Fix: the third argument of pca953x_read_reg() had been corrupted to
 * the mojibake "(R)_val" (U+00AE) — restored to &reg_val so the file
 * compiles again.
 */
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
	struct pca953x_chip *chip;
	uint16_t reg_val;
	int ret, offset = 0;

	chip = container_of(gc, struct pca953x_chip, gpio_chip);

	mutex_lock(&chip->i2c_lock);
	switch (chip->chip_type) {
	case PCA953X_TYPE:
		offset = PCA953X_INPUT;
		break;
	case PCA957X_TYPE:
		offset = PCA957X_IN;
		break;
	}
	ret = pca953x_read_reg(chip, offset, &reg_val);
	mutex_unlock(&chip->i2c_lock);
	if (ret < 0) {
		/* NOTE: diagnostic already emitted; that's all we should
		 * do unless gpio_*_value_cansleep() calls become different
		 * from their nonsleeping siblings (and report faults).
		 */
		return 0;
	}

	return (reg_val & (1u << off)) ? 1 : 0;
}
/* gpiolib callback: set the output latch for one line.  Write failures
 * are silently dropped (the callback returns void); on success the
 * cached output register is updated to match the hardware.
 */
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
	struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
						 gpio_chip);
	uint16_t out;
	int reg = 0;

	switch (chip->chip_type) {
	case PCA953X_TYPE:
		reg = PCA953X_OUTPUT;
		break;
	case PCA957X_TYPE:
		reg = PCA957X_OUT;
		break;
	}

	mutex_lock(&chip->i2c_lock);
	out = val ? (chip->reg_output | (1u << off))
		  : (chip->reg_output & ~(1u << off));
	if (pca953x_write_reg(chip, reg, out) == 0)
		chip->reg_output = out;
	mutex_unlock(&chip->i2c_lock);
}
/* Fill in the gpio_chip descriptor prior to gpiochip_add(). */
static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
{
	struct gpio_chip *gc = &chip->gpio_chip;

	/* identity */
	gc->label = chip->client->name;
	gc->dev = &chip->client->dev;
	gc->owner = THIS_MODULE;
	gc->names = chip->names;

	/* numbering */
	gc->base = chip->gpio_start;
	gc->ngpio = gpios;
	gc->can_sleep = 1;	/* accessors perform i2c transfers */

	/* operations */
	gc->direction_input = pca953x_gpio_direction_input;
	gc->direction_output = pca953x_gpio_direction_output;
	gc->get = pca953x_gpio_get_value;
	gc->set = pca953x_gpio_set_value;
}
#ifdef CONFIG_GPIO_PCA953X_IRQ
/* gpiolib callback: map a GPIO offset onto its Linux irq number. */
static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
{
	struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
						 gpio_chip);

	return chip->irq_base + off;
}
static void pca953x_irq_mask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
chip->irq_mask |= 1 << (d->irq - chip->irq_base);
}
/* Taken by the irq core before any of the (sleeping) chip callbacks;
 * released again in pca953x_irq_bus_sync_unlock(). */
static void pca953x_irq_bus_lock(struct irq_data *d)
{
	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);

	mutex_lock(&chip->irq_lock);
}
/* Counterpart of pca953x_irq_bus_lock().  Now that sleeping is allowed
 * again, switch any line that has a trigger configured but is still an
 * output over to input mode, then drop the lock. */
static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
	uint16_t todo;

	todo = (chip->irq_trig_fall | chip->irq_trig_raise) &
	       ~chip->reg_direction;
	for (; todo; todo &= todo - 1)	/* clear lowest set bit each pass */
		pca953x_gpio_direction_input(&chip->gpio_chip, __ffs(todo));

	mutex_unlock(&chip->irq_lock);
}
/* Record the requested edge trigger(s) for one line in the cached
 * trigger masks.  Only edge types are supported; anything else is
 * rejected with -EINVAL. */
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
	uint16_t bit = 1 << (d->irq - chip->irq_base);

	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
			d->irq, type);
		return -EINVAL;
	}

	if (type & IRQ_TYPE_EDGE_FALLING)
		chip->irq_trig_fall |= bit;
	else
		chip->irq_trig_fall &= ~bit;

	if (type & IRQ_TYPE_EDGE_RISING)
		chip->irq_trig_raise |= bit;
	else
		chip->irq_trig_raise &= ~bit;

	return 0;
}
/* irqchip callbacks.  All state changes are cached in pca953x_chip;
 * bus traffic is deferred to irq_bus_sync_unlock because i2c access
 * can sleep. */
static struct irq_chip pca953x_irq_chip = {
	.name			= "pca953x",
	.irq_mask		= pca953x_irq_mask,
	.irq_unmask		= pca953x_irq_unmask,
	.irq_bus_lock		= pca953x_irq_bus_lock,
	.irq_bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
	.irq_set_type		= pca953x_irq_set_type,
};
/* Read the input register and compute which lines should fire an
 * interrupt, based on the cached previous state and the configured
 * edge triggers.  Updates chip->irq_stat as a side effect.  Returns a
 * bitmask of pending lines, or 0 on read failure / no relevant change. */
static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
{
	uint16_t cur_stat;
	uint16_t old_stat;
	uint16_t pending;
	uint16_t trigger;
	int ret, offset = 0;

	switch (chip->chip_type) {
	case PCA953X_TYPE:
		offset = PCA953X_INPUT;
		break;
	case PCA957X_TYPE:
		offset = PCA957X_IN;
		break;
	}
	ret = pca953x_read_reg(chip, offset, &cur_stat);
	if (ret)
		return 0;

	/* Remove output pins from the equation */
	cur_stat &= chip->reg_direction;

	old_stat = chip->irq_stat;
	/* Only unmasked lines that actually changed level can trigger. */
	trigger = (cur_stat ^ old_stat) & chip->irq_mask;
	if (!trigger)
		return 0;

	chip->irq_stat = cur_stat;

	/* A falling edge shows up as a previously-high line, a rising
	 * edge as a currently-high one; intersect with the configured
	 * trigger types and the actual changes. */
	pending = (old_stat & chip->irq_trig_fall) |
	(cur_stat & chip->irq_trig_raise);
	pending &= trigger;

	return pending;
}
/* Threaded handler for the shared interrupt line: fan the pending mask
 * out into one nested irq per asserted GPIO. */
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
	struct pca953x_chip *chip = devid;
	uint16_t pending = pca953x_irq_pending(chip);

	for (; pending; pending &= pending - 1)	/* strip lowest bit */
		handle_nested_irq(chip->irq_base + __ffs(pending));

	return IRQ_HANDLED;
}
/* Set up the nested irq domain for an interrupt-capable part: snapshot
 * the input state (needed because the chip cannot report which line
 * fired), register an irq_chip for every line, and request the shared
 * interrupt as a threaded handler.  Returns 0 (also when interrupts
 * are not configured) or a negative errno.
 *
 * Fix: platform data synthesized from the device tree lives only in
 * chip->dyn_pdata while client->dev.platform_data stays NULL, so the
 * old unconditional pdata->irq_base dereference could oops; fall back
 * to dyn_pdata and skip irq setup when neither exists.
 */
static int pca953x_irq_setup(struct pca953x_chip *chip,
			     const struct i2c_device_id *id)
{
	struct i2c_client *client = chip->client;
	struct pca953x_platform_data *pdata = client->dev.platform_data;
	int ret, offset = 0;

	if (pdata == NULL)
		pdata = chip->dyn_pdata;

	if (pdata && pdata->irq_base != -1
			&& (id->driver_data & PCA_INT)) {
		int lvl;

		switch (chip->chip_type) {
		case PCA953X_TYPE:
			offset = PCA953X_INPUT;
			break;
		case PCA957X_TYPE:
			offset = PCA957X_IN;
			break;
		}
		ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
		if (ret)
			goto out_failed;

		/*
		 * There is no way to know which GPIO line generated the
		 * interrupt.  We have to rely on the previous read for
		 * this purpose.
		 */
		chip->irq_stat &= chip->reg_direction;
		chip->irq_base = pdata->irq_base;
		mutex_init(&chip->irq_lock);

		for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
			int irq = lvl + chip->irq_base;

			irq_set_chip_data(irq, chip);
			irq_set_chip(irq, &pca953x_irq_chip);
			irq_set_nested_thread(irq, true);
#ifdef CONFIG_ARM
			set_irq_flags(irq, IRQF_VALID);
#else
			irq_set_noprobe(irq);
#endif
		}

		ret = request_threaded_irq(client->irq,
					   NULL,
					   pca953x_irq_handler,
					   IRQF_TRIGGER_RISING |
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   dev_name(&client->dev), chip);
		if (ret) {
			dev_err(&client->dev, "failed to request irq %d\n",
				client->irq);
			goto out_failed;
		}

		chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
	}

	return 0;

out_failed:
	chip->irq_base = -1;	/* tells teardown there is nothing to free */
	return ret;
}
/* Release the shared interrupt line if pca953x_irq_setup() claimed it
 * (irq_base is left at -1 on failure or when irqs are unused). */
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
	if (chip->irq_base == -1)
		return;

	free_irq(chip->client->irq, chip);
}
#else /* CONFIG_GPIO_PCA953X_IRQ */
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id)
{
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
}
/* Interrupt support compiled out: nothing to tear down. */
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
}
#endif
/*
* Handlers for alternative sources of platform_data
*/
#ifdef CONFIG_OF_GPIO
/*
 * Translate OpenFirmware node properties into platform_data.  Returns
 * a kzalloc'd structure (caller frees, see chip->dyn_pdata) or NULL
 * when there is no OF node or allocation fails.
 */
static struct pca953x_platform_data *
pca953x_get_alt_pdata(struct i2c_client *client)
{
	struct device_node *node = client->dev.of_node;
	struct pca953x_platform_data *pdata;
	const __be32 *prop;
	int len;

	if (!node)
		return NULL;

	pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL);
	if (!pdata) {
		dev_err(&client->dev, "Unable to allocate platform_data\n");
		return NULL;
	}

	/* -1 asks gpiolib to pick a dynamic base */
	pdata->gpio_base = -1;
	prop = of_get_property(node, "linux,gpio-base", &len);
	if (prop) {
		if (len != sizeof(*prop))
			dev_warn(&client->dev, "%s: wrong linux,gpio-base\n",
				 node->full_name);
		else
			pdata->gpio_base = be32_to_cpup(prop);
	}

	prop = of_get_property(node, "polarity", NULL);
	if (prop)
		pdata->invert = *prop;

	return pdata;
}
#else
/* No OF support compiled in: there is no alternate source of
 * platform data. */
static struct pca953x_platform_data *
pca953x_get_alt_pdata(struct i2c_client *client)
{
	return NULL;
}
#endif
/* Seed the register caches from a PCA953x-family chip and program the
 * platform's polarity inversion.  Returns 0 or a negative errno. */
static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
{
	int ret;

	ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
	if (ret)
		return ret;

	ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
	if (ret)
		return ret;

	/* set platform specific polarity inversion */
	return pca953x_write_reg(chip, PCA953X_INVERT, invert);
}
/* Bring a PCA957x-family chip into a defined low-power state, seed the
 * register caches, program polarity inversion and enable the pull
 * up/down control registers.  Returns 0 or a negative errno from the
 * cache-seeding reads (the remaining writes are best effort, matching
 * the original behaviour). */
static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
{
	uint16_t val = 0;
	int ret;

	/* Put every port in a proper state; that can also save power. */
	pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
	pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
	pca953x_write_reg(chip, PCA957X_OUT, 0x0);

	ret = pca953x_read_reg(chip, PCA957X_IN, &val);
	if (ret)
		return ret;
	ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
	if (ret)
		return ret;
	ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
	if (ret)
		return ret;

	/* set platform specific polarity inversion */
	pca953x_write_reg(chip, PCA957X_INVRT, invert);
	/* enable registers 6 and 7 to control pull up and pull down */
	pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
	return 0;
}
/* Bind one expander: obtain platform data (from the board file or the
 * device tree), seed register caches, wire up the irq domain and
 * register with gpiolib.  Returns 0 or a negative errno; all resources
 * are released on failure.
 *
 * Fixes: (1) an unrecognized chip_type used to "goto out_failed" with
 * ret still 0, so probe freed the chip yet reported success — the i2c
 * core would then hand out freed clientdata; it now fails with
 * -ENODEV.  (2) the return values of the device_*_init helpers were
 * ignored, so probe "succeeded" on unreachable hardware; they are now
 * propagated.
 */
static int __devinit pca953x_probe(struct i2c_client *client,
				   const struct i2c_device_id *id)
{
	struct pca953x_platform_data *pdata;
	struct pca953x_chip *chip;
	int ret = 0;

	chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	pdata = client->dev.platform_data;
	if (pdata == NULL) {
		pdata = pca953x_get_alt_pdata(client);
		/*
		 * Unlike normal platform_data, this is allocated
		 * dynamically and must be freed in the driver
		 */
		chip->dyn_pdata = pdata;
	}

	if (pdata == NULL) {
		dev_dbg(&client->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_failed;
	}

	chip->client = client;
	chip->gpio_start = pdata->gpio_base;
	chip->names = pdata->names;
	chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);

	mutex_init(&chip->i2c_lock);

	/* initialize cached registers from their original values.
	 * we can't share this chip with another i2c master.
	 */
	pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);

	if (chip->chip_type == PCA953X_TYPE)
		ret = device_pca953x_init(chip, pdata->invert);
	else if (chip->chip_type == PCA957X_TYPE)
		ret = device_pca957x_init(chip, pdata->invert);
	else
		ret = -ENODEV;	/* id table and type mask disagree */
	if (ret)
		goto out_failed;

	ret = pca953x_irq_setup(chip, id);
	if (ret)
		goto out_failed;

	ret = gpiochip_add(&chip->gpio_chip);
	if (ret)
		goto out_failed_irq;

	if (pdata->setup) {
		ret = pdata->setup(client, chip->gpio_chip.base,
				   chip->gpio_chip.ngpio, pdata->context);
		if (ret < 0)
			dev_warn(&client->dev, "setup failed, %d\n", ret);
	}

	i2c_set_clientdata(client, chip);
	return 0;

out_failed_irq:
	pca953x_irq_teardown(chip);
out_failed:
	kfree(chip->dyn_pdata);
	kfree(chip);
	return ret;
}
static int pca953x_remove(struct i2c_client *client)
{
struct pca953x_platform_data *pdata = client->dev.platform_data;
struct pca953x_chip *chip = i2c_get_clientdata(client);
int ret = 0;
if (pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0) {
dev_err(&client->dev, "%s failed, %d\n",
"teardown", ret);
return ret;
}
}
ret = gpiochip_remove(&chip->gpio_chip);
if (ret) {
dev_err(&client->dev, "%s failed, %d\n",
"gpiochip_remove()", ret);
return ret;
}
pca953x_irq_teardown(chip);
kfree(chip->dyn_pdata);
kfree(chip);
return 0;
}
/* i2c driver glue; matched against pca953x_id above. */
static struct i2c_driver pca953x_driver = {
	.driver = {
		.name	= "pca953x",
	},
	.probe		= pca953x_probe,
	.remove		= pca953x_remove,
	.id_table	= pca953x_id,
};
/* Module entry point: register with the i2c core. */
static int __init pca953x_init(void)
{
	return i2c_add_driver(&pca953x_driver);
}
/* register after i2c postcore initcall and before
 * subsys initcalls that may rely on these GPIOs
 */
subsys_initcall(pca953x_init);
/* Module exit point: unregister from the i2c core. */
static void __exit pca953x_exit(void)
{
	i2c_del_driver(&pca953x_driver);
}
module_exit(pca953x_exit);
/* Module metadata. */
MODULE_AUTHOR("eric miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("GPIO expander driver for PCA953x");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CalcProgrammer1/ubuntu-kernel-quincyatt | drivers/tty/serial/imx.c | 2310 | 36315 | /*
* Driver for Motorola IMX serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Author: Sascha Hauer <sascha@saschahauer.de>
* Copyright (C) 2004 Pengutronix
*
* Copyright (C) 2009 emlix GmbH
* Author: Fabian Godehardt (added IrDA support for iMX)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* [29-Mar-2005] Mike Lee
* Added hardware handshake
*/
#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
/* Register definitions */
#define URXD0 0x0 /* Receiver Register */
#define URTX0 0x40 /* Transmitter Register */
#define UCR1 0x80 /* Control Register 1 */
#define UCR2 0x84 /* Control Register 2 */
#define UCR3 0x88 /* Control Register 3 */
#define UCR4 0x8c /* Control Register 4 */
#define UFCR 0x90 /* FIFO Control Register */
#define USR1 0x94 /* Status Register 1 */
#define USR2 0x98 /* Status Register 2 */
#define UESC 0x9c /* Escape Character Register */
#define UTIM 0xa0 /* Escape Timer Register */
#define UBIR 0xa4 /* BRM Incremental Register */
#define UBMR 0xa8 /* BRM Modulator Register */
#define UBRC 0xac /* Baud Rate Count Register */
#define MX2_ONEMS 0xb0 /* One Millisecond register */
#define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */
/* UART Control Register Bit Fields.*/
#define URXD_CHARRDY (1<<15)
#define URXD_ERR (1<<14)
#define URXD_OVRRUN (1<<13)
#define URXD_FRMERR (1<<12)
#define URXD_BRK (1<<11)
#define URXD_PRERR (1<<10)
#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_IREN (1<<7) /* Infrared interface enable */
#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
#define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
#define UCR2_CTSC (1<<13) /* CTS pin control */
#define UCR2_CTS (1<<12) /* Clear to send */
#define UCR2_ESCEN (1<<11) /* Escape enable */
#define UCR2_PREN (1<<8) /* Parity enable */
#define UCR2_PROE (1<<7) /* Parity odd/even */
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
#define UCR3_PARERREN (1<<12) /* Parity enable */
#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
#define UCR3_DSR (1<<10) /* Data set ready */
#define UCR3_DCD (1<<9) /* Data carrier detect */
#define UCR3_RI (1<<8) /* Ring indicator */
#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
#define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
#define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
#define UCR4_INVR (1<<9) /* Inverted infrared reception */
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
#define UCR4_IRSC (1<<5) /* IR special case */
#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
/* UART status register 1 (USR1) bits */
#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
#define USR1_RTSS (1<<14) /* RTS pin status */
#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
#define USR1_RTSD (1<<12) /* RTS delta */
#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
#define USR1_AWAKE (1<<4) /* Async wake interrupt flag */
/* UART status register 2 (USR2) bits */
#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
#define USR2_IDLE (1<<12) /* Idle condition */
#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
#define USR2_WAKE (1<<7) /* Wake */
#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
#define USR2_TXDC (1<<3) /* Transmitter complete */
#define USR2_BRCD (1<<2) /* Break condition */
#define USR2_ORE (1<<1) /* Overrun error */
#define USR2_RDR (1<<0) /* Recv data ready */
/* UART test register (UTS) bits */
#define UTS_FRCPERR (1<<13) /* Force parity error */
#define UTS_LOOP (1<<12) /* Loop tx and rx */
#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
#define UTS_TXFULL (1<<4) /* TxFIFO full */
#define UTS_RXFULL (1<<3) /* RxFIFO full */
#define UTS_SOFTRST (1<<0) /* Software reset */

/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
#define DEV_NAME "ttymxc"
#define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS

/*
 * This determines how often we check the modem status signals
 * for any change. They generally aren't connected to an IRQ
 * so we have to poll them. We also check immediately before
 * filling the TX fifo incase CTS has been dropped.
 */
#define MCTRL_TIMEOUT (250*HZ/1000)

#define DRIVER_NAME "IMX-uart"

#define UART_NR 8

/*
 * Driver-private per-port state.  'port' must stay the first member:
 * the uart_ops callbacks cast the received uart_port pointer straight
 * to struct imx_port.
 */
struct imx_port {
	struct uart_port port;		/* core serial port state; MUST be first */
	struct timer_list timer;	/* modem-status polling timer (imx_timeout) */
	unsigned int old_status;	/* last mctrl snapshot, see imx_mctrl_check() */
	int txirq, rxirq, rtsirq;	/* split IRQs (used when txirq > 0) */
	unsigned int have_rtscts:1;	/* RTS/CTS handshake lines are wired */
	unsigned int use_irda:1;	/* port drives an IrDA transceiver */
	unsigned int irda_inv_rx:1;	/* invert RX polarity in IrDA mode */
	unsigned int irda_inv_tx:1;	/* invert TX polarity in IrDA mode */
	unsigned short trcv_delay; /* transceiver delay */
	struct clk *clk;		/* module clock, feeds the baud generator */
};

/* Collapses to 0 at compile time when IrDA support is not built in. */
#ifdef CONFIG_IRDA
#define USE_IRDA(sport) ((sport)->use_irda)
#else
#define USE_IRDA(sport) (0)
#endif
/*
 * Compare the current modem-status lines against the previous snapshot,
 * account any deltas and wake sleepers waiting on TIOCMIWAIT.
 * Caller holds the port lock.
 */
static void imx_mctrl_check(struct imx_port *sport)
{
	unsigned int mctrl, delta;

	mctrl = sport->port.ops->get_mctrl(&sport->port);
	delta = mctrl ^ sport->old_status;
	if (!delta)
		return;

	sport->old_status = mctrl;

	if (delta & TIOCM_RI)
		sport->port.icount.rng++;
	if (delta & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (delta & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, mctrl & TIOCM_CAR);
	if (delta & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, mctrl & TIOCM_CTS);

	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
/*
 * Per-port poll timer: sample the (non-interrupting) modem status
 * lines and re-arm ourselves while the port is open.
 */
static void imx_timeout(unsigned long data)
{
	struct imx_port *sport = (struct imx_port *)data;
	unsigned long flags;

	if (!sport->port.state)
		return;

	spin_lock_irqsave(&sport->port.lock, flags);
	imx_mctrl_check(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
/*
 * Stop transmitting: mask the transmitter-empty interrupt.
 *
 * In IrDA mode the link is half duplex, so this is also the point where
 * we switch back to receive: wait (bounded) for transmitter-complete,
 * add a transceiver-specific settle delay, drain our own echo out of the
 * RX pipe and re-enable the receiver interrupt sources.
 *
 * interrupts disabled on entry
 */
static void imx_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex - wait for end of transmission (bounded spin) */
		int n = 256;
		while ((--n > 0) &&
		      !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
			udelay(5);
			barrier();
		}
		/*
		 * irda transceiver - wait a bit more to avoid
		 * cutoff, hardware dependent
		 */
		udelay(sport->trcv_delay);
		/*
		 * half duplex - reactivate receive mode,
		 * flush receive pipe echo crap
		 */
		if (readl(sport->port.membase + USR2) & USR2_TXDC) {
			/* mask TX-side interrupts first */
			temp = readl(sport->port.membase + UCR1);
			temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp &= ~(UCR4_TCEN);
			writel(temp, sport->port.membase + UCR4);

			/* drain echoed characters out of the RX FIFO */
			while (readl(sport->port.membase + URXD0) &
			       URXD_CHARRDY)
				barrier();

			/* re-enable receiver-ready interrupt */
			temp = readl(sport->port.membase + UCR1);
			temp |= UCR1_RRDYEN;
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp |= UCR4_DREN;
			writel(temp, sport->port.membase + UCR4);
		}
		return;
	}

	/* non-IrDA: just mask the transmitter-empty interrupt */
	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
/*
 * Stop receiving by clearing the receiver-enable bit.
 *
 * interrupts disabled on entry
 */
static void imx_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long ucr2;

	ucr2 = readl(sport->port.membase + UCR2);
	ucr2 &= ~UCR2_RXEN;
	writel(ucr2, sport->port.membase + UCR2);
}
/*
 * Enable modem-status reporting: since the status lines have no IRQ,
 * simply make the poll timer fire immediately.
 */
static void imx_enable_ms(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	mod_timer(&sport->timer, jiffies);
}
/*
 * Push characters from the write circular buffer into the TX FIFO until
 * either the buffer is empty or the FIFO reports full.  Wakes writers
 * once the buffer drains below WAKEUP_CHARS and stops the transmitter
 * when nothing is left.  Caller holds the port lock.
 */
static inline void imx_transmit_buffer(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	while (!uart_circ_empty(xmit) &&
	       !(readl(sport->port.membase + UTS) & UTS_TXFULL)) {
		/* send xmit->buf[xmit->tail]
		 * out the port here */
		writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		imx_stop_tx(&sport->port);
}
/*
 * Start transmitting: unmask the transmitter-empty interrupt and, if
 * the FIFO is currently empty, prime it right away.  In IrDA
 * (half-duplex) mode the receiver is disabled first, and TRDY/TCEN are
 * enabled so imx_stop_tx() can switch back to receive afterwards.
 *
 * interrupts disabled on entry
 */
static void imx_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex in IrDA mode; have to disable receive mode */
		temp = readl(sport->port.membase + UCR4);
		temp &= ~(UCR4_DREN);
		writel(temp, sport->port.membase + UCR4);

		temp = readl(sport->port.membase + UCR1);
		temp &= ~(UCR1_RRDYEN);
		writel(temp, sport->port.membase + UCR1);
	}

	temp = readl(sport->port.membase + UCR1);
	writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);

	if (USE_IRDA(sport)) {
		temp = readl(sport->port.membase + UCR1);
		temp |= UCR1_TRDYEN;
		writel(temp, sport->port.membase + UCR1);

		temp = readl(sport->port.membase + UCR4);
		temp |= UCR4_TCEN;
		writel(temp, sport->port.membase + UCR4);
	}

	/* if the FIFO is already empty, kick transmission off now */
	if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
		imx_transmit_buffer(sport);
}
/*
 * RTS-delta interrupt: acknowledge the edge, sample the current pin
 * state and report it to the serial core as a CTS change.
 */
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned long flags;
	unsigned int rts;

	spin_lock_irqsave(&sport->port.lock, flags);

	/* ack the delta condition, then read the live pin status */
	writel(USR1_RTSD, sport->port.membase + USR1);
	rts = readl(sport->port.membase + USR1) & USR1_RTSS;
	uart_handle_cts_change(&sport->port, !!rts);
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

	spin_unlock_irqrestore(&sport->port.lock, flags);
	return IRQ_HANDLED;
}
/*
 * Transmitter interrupt: send a pending XON/XOFF (x_char) first, then
 * refill the TX FIFO from the circular buffer.
 *
 * Fix: the original wrote x_char to the FIFO but never cleared it nor
 * counted it, so the same flow-control character was re-sent on every
 * TX interrupt while the buffer drained.
 */
static irqreturn_t imx_txint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (sport->port.x_char) {
		/* Send next char, account it and clear the request so it
		 * is transmitted exactly once. */
		writel(sport->port.x_char, sport->port.membase + URTX0);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		goto out;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		imx_stop_tx(&sport->port);
		goto out;
	}

	imx_transmit_buffer(sport);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

out:
	spin_unlock_irqrestore(&sport->port.lock, flags);
	return IRQ_HANDLED;
}
/*
 * Receiver interrupt: drain the RX FIFO into the tty flip buffer.
 * Break, sysrq, parity/frame/overrun errors are accounted and mapped to
 * tty flags; characters matching ignore_status_mask are dropped (with a
 * 100-character safety valve so a storm of ignored errors cannot wedge
 * the handler).
 */
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int rx, flg, ignored = 0;
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned long flags, temp;

	spin_lock_irqsave(&sport->port.lock, flags);

	while (readl(sport->port.membase + USR2) & USR2_RDR) {
		flg = TTY_NORMAL;
		sport->port.icount.rx++;

		/* rx carries the data byte plus per-character status bits */
		rx = readl(sport->port.membase + URXD0);

		temp = readl(sport->port.membase + USR2);
		if (temp & USR2_BRCD) {
			/* break: ack the condition and let the core decide */
			writel(USR2_BRCD, sport->port.membase + USR2);
			if (uart_handle_break(&sport->port))
				continue;
		}

		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
			continue;

		if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR)) {
			if (rx & URXD_PRERR)
				sport->port.icount.parity++;
			else if (rx & URXD_FRMERR)
				sport->port.icount.frame++;
			if (rx & URXD_OVRRUN)
				sport->port.icount.overrun++;

			if (rx & sport->port.ignore_status_mask) {
				if (++ignored > 100)
					goto out;
				continue;
			}

			/* keep only the status bits user space asked for */
			rx &= sport->port.read_status_mask;

			if (rx & URXD_PRERR)
				flg = TTY_PARITY;
			else if (rx & URXD_FRMERR)
				flg = TTY_FRAME;
			if (rx & URXD_OVRRUN)
				flg = TTY_OVERRUN;

#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}

		tty_insert_flip_char(tty, rx, flg);
	}

out:
	spin_unlock_irqrestore(&sport->port.lock, flags);
	tty_flip_buffer_push(tty);
	return IRQ_HANDLED;
}
/*
 * Combined interrupt handler for chips with a single UART IRQ:
 * demultiplex USR1 into the rx/tx/rts sub-handlers.
 */
static irqreturn_t imx_int(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int usr1 = readl(sport->port.membase + USR1);

	if (usr1 & USR1_RRDY)
		imx_rxint(irq, dev_id);

	/* only service TX when the TX-empty interrupt is enabled */
	if ((usr1 & USR1_TRDY) &&
	    (readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN))
		imx_txint(irq, dev_id);

	if (usr1 & USR1_RTSD)
		imx_rtsint(irq, dev_id);

	return IRQ_HANDLED;
}
/*
 * Return TIOCSER_TEMT when transmitter is not busy.
 */
static unsigned int imx_tx_empty(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int usr2 = readl(sport->port.membase + USR2);

	return (usr2 & USR2_TXDC) ? TIOCSER_TEMT : 0;
}
/*
 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
 * DSR and carrier are reported as permanently asserted.
 */
static unsigned int imx_get_mctrl(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;

	if (readl(sport->port.membase + USR1) & USR1_RTSS)
		mctrl |= TIOCM_CTS;

	if (readl(sport->port.membase + UCR2) & UCR2_CTS)
		mctrl |= TIOCM_RTS;

	return mctrl;
}
/*
 * Drive the (inverted, see imx_get_mctrl) RTS output: TIOCM_RTS maps
 * onto the UCR2 CTS-control bit.
 */
static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long ucr2;

	ucr2 = readl(sport->port.membase + UCR2);
	if (mctrl & TIOCM_RTS)
		ucr2 |= UCR2_CTS;
	else
		ucr2 &= ~UCR2_CTS;
	writel(ucr2, sport->port.membase + UCR2);
}
/*
 * Assert or release the break condition on the line.
 *
 * Interrupts always disabled.
 */
static void imx_break_ctl(struct uart_port *port, int break_state)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags, ucr1;

	spin_lock_irqsave(&sport->port.lock, flags);

	ucr1 = readl(sport->port.membase + UCR1);
	if (break_state)
		ucr1 |= UCR1_SNDBRK;
	else
		ucr1 &= ~UCR1_SNDBRK;
	writel(ucr1, sport->port.membase + UCR1);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
#define TXTL 2 /* reset default */
#define RXTL 1 /* reset default */
static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
{
unsigned int val;
unsigned int ufcr_rfdiv;
/* set receiver / transmitter trigger level.
* RFDIV is set such way to satisfy requested uartclk value
*/
val = TXTL << 10 | RXTL;
ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
/ sport->port.uartclk;
if(!ufcr_rfdiv)
ufcr_rfdiv = 1;
val |= UFCR_RFDIV_REG(ufcr_rfdiv);
writel(val, sport->port.membase + UFCR);
return 0;
}
/* half the RX buffer size */
#define CTSTL 16

/*
 * Bring the port up: program the FIFO control register, optionally
 * reset the IrDA state machines, request the IRQ(s), then clear stale
 * status and enable receiver, transmitter and interrupt sources.
 * Returns 0 on success or a negative errno.
 *
 * Fix: on request_irq(port.irq) failure the old code called free_irq()
 * on that very IRQ even though it was never acquired; we now just
 * propagate the error.
 */
static int imx_startup(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	int retval;
	unsigned long flags, temp;

	imx_setup_ufcr(sport, 0);

	/* disable the DREN bit (Data Ready interrupt enable) before
	 * requesting IRQs
	 */
	temp = readl(sport->port.membase + UCR4);

	if (USE_IRDA(sport))
		temp |= UCR4_IRSC;

	/* set the trigger level for CTS */
	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
	temp |= CTSTL << UCR4_CTSTL_SHF;

	writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);

	if (USE_IRDA(sport)) {
		/* reset fifo's and state machines */
		int i = 100;

		temp = readl(sport->port.membase + UCR2);
		temp &= ~UCR2_SRST;
		writel(temp, sport->port.membase + UCR2);

		while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
		       (--i > 0)) {
			udelay(1);
		}
	}

	/*
	 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
	 * chips only have one interrupt.
	 */
	if (sport->txirq > 0) {
		retval = request_irq(sport->rxirq, imx_rxint, 0,
				DRIVER_NAME, sport);
		if (retval)
			goto error_out1;

		retval = request_irq(sport->txirq, imx_txint, 0,
				DRIVER_NAME, sport);
		if (retval)
			goto error_out2;

		/* do not use RTS IRQ on IrDA */
		if (!USE_IRDA(sport)) {
			retval = request_irq(sport->rtsirq, imx_rtsint,
				     (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
				       IRQF_TRIGGER_FALLING |
				       IRQF_TRIGGER_RISING,
					DRIVER_NAME, sport);
			if (retval)
				goto error_out3;
		}
	} else {
		retval = request_irq(sport->port.irq, imx_int, 0,
				DRIVER_NAME, sport);
		if (retval)
			/* nothing was acquired, so nothing to free here */
			goto error_out1;
	}

	/*
	 * Finally, clear and enable interrupts
	 */
	writel(USR1_RTSD, sport->port.membase + USR1);

	temp = readl(sport->port.membase + UCR1);
	temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;

	if (USE_IRDA(sport)) {
		temp |= UCR1_IREN;
		temp &= ~(UCR1_RTSDEN);
	}

	writel(temp, sport->port.membase + UCR1);

	temp = readl(sport->port.membase + UCR2);
	temp |= (UCR2_RXEN | UCR2_TXEN);
	writel(temp, sport->port.membase + UCR2);

	if (USE_IRDA(sport)) {
		/* clear RX-FIFO */
		int i = 64;
		while ((--i > 0) &&
			(readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
			barrier();
		}
	}

	if (!cpu_is_mx1()) {
		temp = readl(sport->port.membase + UCR3);
		temp |= MX2_UCR3_RXDMUXSEL;
		writel(temp, sport->port.membase + UCR3);
	}

	if (USE_IRDA(sport)) {
		temp = readl(sport->port.membase + UCR4);
		if (sport->irda_inv_rx)
			temp |= UCR4_INVR;
		else
			temp &= ~(UCR4_INVR);
		writel(temp | UCR4_DREN, sport->port.membase + UCR4);

		temp = readl(sport->port.membase + UCR3);
		if (sport->irda_inv_tx)
			temp |= UCR3_INVT;
		else
			temp &= ~(UCR3_INVT);
		writel(temp, sport->port.membase + UCR3);
	}

	/*
	 * Enable modem status interrupts
	 */
	spin_lock_irqsave(&sport->port.lock, flags);
	imx_enable_ms(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	if (USE_IRDA(sport)) {
		struct imxuart_platform_data *pdata;
		pdata = sport->port.dev->platform_data;
		sport->irda_inv_rx = pdata->irda_inv_rx;
		sport->irda_inv_tx = pdata->irda_inv_tx;
		sport->trcv_delay = pdata->transceiver_delay;
		if (pdata->irda_enable)
			pdata->irda_enable(1);
	}

	return 0;

error_out3:
	if (sport->txirq)
		free_irq(sport->txirq, sport);
error_out2:
	if (sport->rxirq)
		free_irq(sport->rxirq, sport);
error_out1:
	return retval;
}
/*
 * Shut the port down: stop the transmitter, power down an IrDA
 * transceiver if present, cancel the modem-status timer, release the
 * IRQ(s) and finally mask all interrupt sources and disable the UART.
 */
static void imx_shutdown(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	temp = readl(sport->port.membase + UCR2);
	temp &= ~(UCR2_TXEN);
	writel(temp, sport->port.membase + UCR2);

	if (USE_IRDA(sport)) {
		struct imxuart_platform_data *pdata;
		pdata = sport->port.dev->platform_data;
		if (pdata->irda_enable)
			pdata->irda_enable(0);
	}

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupts (mirrors the split/combined IRQ setup
	 * performed in imx_startup()).
	 */
	if (sport->txirq > 0) {
		if (!USE_IRDA(sport))
			free_irq(sport->rtsirq, sport);
		free_irq(sport->txirq, sport);
		free_irq(sport->rxirq, sport);
	} else
		free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */
	temp = readl(sport->port.membase + UCR1);
	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
	if (USE_IRDA(sport))
		temp &= ~(UCR1_IREN);

	writel(temp, sport->port.membase + UCR1);
}
/*
 * Apply new line settings (character size, stop bits, parity, flow
 * control, baud rate) to the hardware.  The sequence is deliberately
 * ordered: masks are computed, interrupts are disabled, the transmitter
 * is drained, TX/RX are disabled, the baud divider (RFDIV + UBIR/UBMR
 * rational divisor) is reprogrammed, and only then is everything
 * re-enabled.
 */
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned int div, ufcr;
	unsigned long num, denom;
	uint64_t tdiv64;

	/*
	 * If we don't support modem control lines, don't allow
	 * these to be set.
	 * NOTE(review): this branch is compiled out (if (0)); kept as-is
	 * from the original driver.
	 */
	if (0) {
		termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
		termios->c_cflag |= CLOCAL;
	}

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
	else
		ucr2 = UCR2_SRST | UCR2_IRTS;

	if (termios->c_cflag & CRTSCTS) {
		if (sport->have_rtscts) {
			/* hardware flow control: CTS controlled by receiver */
			ucr2 &= ~UCR2_IRTS;
			ucr2 |= UCR2_CTSC;
		} else {
			termios->c_cflag &= ~CRTSCTS;
		}
	}

	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 * (quot is computed for symmetry with other drivers but the
	 * actual divider is derived below from div/num/denom.)
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ucr1 = readl(sport->port.membase + UCR1);
	writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
			sport->port.membase + UCR1);

	while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
		barrier();

	/* then, disable everything */
	old_txrxen = readl(sport->port.membase + UCR2);
	writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
			sport->port.membase + UCR2);
	old_txrxen &= (UCR2_TXEN | UCR2_RXEN);

	if (USE_IRDA(sport)) {
		/*
		 * use maximum available submodule frequency to
		 * avoid missing short pulses due to low sampling rate
		 */
		div = 1;
	} else {
		div = sport->port.uartclk / (baud * 16);
		if (div > 7)
			div = 7;
		if (!div)
			div = 1;
	}

	/* best rational approximation of baud*16*div / uartclk for UBIR/UBMR */
	rational_best_approximation(16 * div * baud, sport->port.uartclk,
		1 << 16, 1 << 16, &num, &denom);

	/* report the baud rate actually achieved back to the termios */
	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				(speed_t)tdiv64, (speed_t)tdiv64);

	/* registers hold value-minus-one */
	num -= 1;
	denom -= 1;

	ufcr = readl(sport->port.membase + UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	writel(ufcr, sport->port.membase + UFCR);

	writel(num, sport->port.membase + UBIR);
	writel(denom, sport->port.membase + UBMR);

	if (!cpu_is_mx1())
		writel(sport->port.uartclk / div / 1000,
				sport->port.membase + MX2_ONEMS);

	writel(old_ucr1, sport->port.membase + UCR1);

	/* set the parity, stop bits and data size */
	writel(ucr2 | old_txrxen, sport->port.membase + UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
/* Report the port type string, or NULL if the type is not ours. */
static const char *imx_type(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (sport->port.type == PORT_IMX)
		return "IMX";
	return NULL;
}
/*
 * Release the memory region(s) being used by 'port'.
 *
 * Fix: use the resource_size() helper instead of the open-coded
 * end - start + 1 calculation.
 */
static void imx_release_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *mmres;

	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mmres->start, resource_size(mmres));
}
/*
 * Request the memory region(s) being used by 'port'.
 * Returns 0 on success, -ENODEV if no MEM resource exists, -EBUSY if
 * the region is already claimed.
 *
 * Fix: use the resource_size() helper instead of the open-coded
 * end - start + 1 calculation.
 */
static int imx_request_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *mmres;
	void *ret;

	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmres)
		return -ENODEV;

	ret = request_mem_region(mmres->start, resource_size(mmres),
			"imx-uart");

	return ret ? 0 : -EBUSY;
}
/*
 * Configure/autoconfigure the port: claim the MMIO region and mark the
 * port as PORT_IMX when asked to probe the type.
 */
static void imx_config_port(struct uart_port *port, int flags)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (!(flags & UART_CONFIG_TYPE))
		return;

	if (imx_request_port(&sport->port) == 0)
		sport->port.type = PORT_IMX;
}
/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only change we allow are to the flags and type, and
 * even then only between PORT_IMX and PORT_UNKNOWN.
 */
static int
imx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
		return -EINVAL;
	if (sport->port.irq != ser->irq)
		return -EINVAL;
	if (ser->io_type != UPIO_MEM)
		return -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		return -EINVAL;
	if ((void *)sport->port.mapbase != ser->iomem_base)
		return -EINVAL;
	if (sport->port.iobase != ser->port)
		return -EINVAL;
	if (ser->hub6 != 0)
		return -EINVAL;
	return 0;
}
/* uart_ops table wiring the serial core to this driver's callbacks. */
static struct uart_ops imx_pops = {
	.tx_empty = imx_tx_empty,
	.set_mctrl = imx_set_mctrl,
	.get_mctrl = imx_get_mctrl,
	.stop_tx = imx_stop_tx,
	.start_tx = imx_start_tx,
	.stop_rx = imx_stop_rx,
	.enable_ms = imx_enable_ms,
	.break_ctl = imx_break_ctl,
	.startup = imx_startup,
	.shutdown = imx_shutdown,
	.set_termios = imx_set_termios,
	.type = imx_type,
	.release_port = imx_release_port,
	.request_port = imx_request_port,
	.config_port = imx_config_port,
	.verify_port = imx_verify_port,
};

/* per-line lookup table, indexed by pdev->id (also the console index) */
static struct imx_port *imx_ports[UART_NR];
#ifdef CONFIG_SERIAL_IMX_CONSOLE
/* Console output helper: busy-wait for FIFO space, then emit one char. */
static void imx_console_putchar(struct uart_port *port, int ch)
{
	struct imx_port *sport = (struct imx_port *)port;

	while (readl(sport->port.membase + UTS) & UTS_TXFULL)
		barrier();

	writel(ch, sport->port.membase + URTX0);
}
/*
 * Write a console message: temporarily force the UART/transmitter on
 * with all interrupt sources masked, emit the characters, wait for the
 * transmitter to drain, then restore the saved UCR1/UCR2 state.
 *
 * Interrupts are disabled on entering
 */
static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_ports[co->index];
	unsigned int old_ucr1, old_ucr2, ucr1;

	/*
	 * First, save UCR1/2 and then disable interrupts
	 */
	ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
	old_ucr2 = readl(sport->port.membase + UCR2);

	/* i.MX1 needs the UART clock enable bit as well */
	if (cpu_is_mx1())
		ucr1 |= MX1_UCR1_UARTCLKEN;
	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	writel(ucr1, sport->port.membase + UCR1);

	writel(old_ucr2 | UCR2_TXEN, sport->port.membase + UCR2);

	uart_console_write(&sport->port, s, count, imx_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UCR1/2
	 */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC));

	writel(old_ucr1, sport->port.membase + UCR1);
	writel(old_ucr2, sport->port.membase + UCR2);
}
/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup: parity, word size and the baud
 * rate are read back from UCR2, UBIR/UBMR and the RFDIV field.
 */
static void __init
imx_console_get_options(struct imx_port *sport, int *baud,
			int *parity, int *bits)
{
	if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
		/* ok, the port was enabled */
		unsigned int ucr2, ubir, ubmr, uartclk;
		unsigned int baud_raw;
		unsigned int ucfr_rfdiv;

		ucr2 = readl(sport->port.membase + UCR2);

		*parity = 'n';
		if (ucr2 & UCR2_PREN) {
			if (ucr2 & UCR2_PROE)
				*parity = 'o';
			else
				*parity = 'e';
		}

		if (ucr2 & UCR2_WS)
			*bits = 8;
		else
			*bits = 7;

		ubir = readl(sport->port.membase + UBIR) & 0xffff;
		ubmr = readl(sport->port.membase + UBMR) & 0xffff;

		/* decode the RFDIV field (bits 9:7) back into a divider */
		ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
		if (ucfr_rfdiv == 6)
			ucfr_rfdiv = 7;
		else
			ucfr_rfdiv = 6 - ucfr_rfdiv;

		uartclk = clk_get_rate(sport->clk);
		uartclk /= ucfr_rfdiv;

		{	/*
			 * The next code provides exact computation of
			 *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
			 * without need of float support or long long division,
			 * which would be required to prevent 32bit arithmetic overflow
			 */
			unsigned int mul = ubir + 1;
			unsigned int div = 16 * (ubmr + 1);
			unsigned int rem = uartclk % div;

			baud_raw = (uartclk / div) * mul;
			baud_raw += (rem * mul + div / 2) / div;
			/* round to the nearest multiple of 100 */
			*baud = (baud_raw + 50) / 100 * 100;
		}

		if (*baud != baud_raw)
			printk(KERN_INFO "Serial: Console IMX rounded baud rate from %d to %d\n",
				baud_raw, *baud);
	}
}
/*
 * Console setup: pick the requested port, take the line settings from
 * the command line or probe them from the hardware, then hand them to
 * the serial core.
 */
static int __init
imx_console_setup(struct console *co, char *options)
{
	struct imx_port *sport;
	int baud = 9600, bits = 8;
	int parity = 'n', flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
		co->index = 0;

	sport = imx_ports[co->index];
	if (!sport)
		return -ENODEV;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		imx_console_get_options(sport, &baud, &parity, &bits);

	imx_setup_ufcr(sport, 0);

	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
/* forward declaration: the console needs the driver it attaches to */
static struct uart_driver imx_reg;

/* console descriptor; IMX_CONSOLE below feeds it into imx_reg.cons */
static struct console imx_console = {
	.name = DEV_NAME,
	.write = imx_console_write,
	.device = uart_console_device,
	.setup = imx_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &imx_reg,
};

#define IMX_CONSOLE &imx_console
#else
#define IMX_CONSOLE NULL
#endif

/* the uart_driver registered with the serial core */
static struct uart_driver imx_reg = {
	.owner = THIS_MODULE,
	.driver_name = DRIVER_NAME,
	.dev_name = DEV_NAME,
	.major = SERIAL_IMX_MAJOR,
	.minor = MINOR_START,
	.nr = ARRAY_SIZE(imx_ports),
	.cons = IMX_CONSOLE,
};
/* Platform suspend hook: hand the port to the serial core, if bound. */
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
	struct imx_port *sport = platform_get_drvdata(dev);

	if (!sport)
		return 0;

	uart_suspend_port(&imx_reg, &sport->port);
	return 0;
}
/* Platform resume hook: let the serial core restart the port, if bound. */
static int serial_imx_resume(struct platform_device *dev)
{
	struct imx_port *sport = platform_get_drvdata(dev);

	if (!sport)
		return 0;

	uart_resume_port(&imx_reg, &sport->port);
	return 0;
}
/*
 * Probe one i.MX UART platform device: allocate state, map registers,
 * wire up IRQs/clock/platform data and register the port with the
 * serial core.  Returns 0 or a negative errno (with all partially
 * acquired resources released).
 *
 * Fixes: the error path previously called clk_put() *before*
 * clk_disable(), touching the clock after dropping the reference; the
 * order is now disable-then-put.  Also replaced the stray comma
 * operator in 'sport->port.type = PORT_IMX,' with a semicolon.
 */
static int serial_imx_probe(struct platform_device *pdev)
{
	struct imx_port *sport;
	struct imxuart_platform_data *pdata;
	void __iomem *base;
	int ret = 0;
	struct resource *res;

	sport = kzalloc(sizeof(*sport), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto free;
	}

	base = ioremap(res->start, PAGE_SIZE);
	if (!base) {
		ret = -ENOMEM;
		goto free;
	}

	sport->port.dev = &pdev->dev;
	sport->port.mapbase = res->start;
	sport->port.membase = base;
	sport->port.type = PORT_IMX;
	sport->port.iotype = UPIO_MEM;
	sport->port.irq = platform_get_irq(pdev, 0);
	/* i.MX1 exposes three IRQs; later chips report -ENXIO for 1/2 */
	sport->rxirq = platform_get_irq(pdev, 0);
	sport->txirq = platform_get_irq(pdev, 1);
	sport->rtsirq = platform_get_irq(pdev, 2);
	sport->port.fifosize = 32;
	sport->port.ops = &imx_pops;
	sport->port.flags = UPF_BOOT_AUTOCONF;
	sport->port.line = pdev->id;
	init_timer(&sport->timer);
	sport->timer.function = imx_timeout;
	sport->timer.data = (unsigned long)sport;

	sport->clk = clk_get(&pdev->dev, "uart");
	if (IS_ERR(sport->clk)) {
		ret = PTR_ERR(sport->clk);
		goto unmap;
	}
	clk_enable(sport->clk);

	sport->port.uartclk = clk_get_rate(sport->clk);

	imx_ports[pdev->id] = sport;

	pdata = pdev->dev.platform_data;
	if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
		sport->have_rtscts = 1;

#ifdef CONFIG_IRDA
	if (pdata && (pdata->flags & IMXUART_IRDA))
		sport->use_irda = 1;
#endif

	if (pdata && pdata->init) {
		ret = pdata->init(pdev);
		if (ret)
			goto clkput;
	}

	ret = uart_add_one_port(&imx_reg, &sport->port);
	if (ret)
		goto deinit;
	platform_set_drvdata(pdev, &sport->port);

	return 0;
deinit:
	if (pdata && pdata->exit)
		pdata->exit(pdev);
clkput:
	/* disable before dropping the reference */
	clk_disable(sport->clk);
	clk_put(sport->clk);
unmap:
	iounmap(sport->port.membase);
free:
	kfree(sport);

	return ret;
}
/*
 * Unbind one i.MX UART device: unregister the port and release the
 * clock and register mapping acquired in probe().
 *
 * Fixes: the old code called clk_disable(), iounmap() and kfree()
 * unconditionally, dereferencing a NULL 'sport' when no drvdata was
 * set, and ran clk_disable() *after* clk_put().  All per-port teardown
 * is now inside the NULL check, with disable-before-put ordering.
 */
static int serial_imx_remove(struct platform_device *pdev)
{
	struct imxuart_platform_data *pdata;
	struct imx_port *sport = platform_get_drvdata(pdev);

	pdata = pdev->dev.platform_data;

	platform_set_drvdata(pdev, NULL);

	if (sport) {
		uart_remove_one_port(&imx_reg, &sport->port);
		clk_disable(sport->clk);
		clk_put(sport->clk);
		iounmap(sport->port.membase);
		kfree(sport);
	}

	if (pdata && pdata->exit)
		pdata->exit(pdev);

	return 0;
}
/* Platform driver glue; matches devices named "imx-uart". */
static struct platform_driver serial_imx_driver = {
	.probe = serial_imx_probe,
	.remove = serial_imx_remove,

	.suspend = serial_imx_suspend,
	.resume = serial_imx_resume,
	.driver = {
		.name = "imx-uart",
		.owner = THIS_MODULE,
	},
};
/*
 * Module init: register the uart driver, then the platform driver.
 * On platform-driver failure the uart driver is unregistered again.
 *
 * Fix: the function previously returned 0 even when
 * platform_driver_register() failed; it now propagates the error.
 */
static int __init imx_serial_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: IMX driver\n");

	ret = uart_register_driver(&imx_reg);
	if (ret)
		return ret;

	ret = platform_driver_register(&serial_imx_driver);
	if (ret != 0)
		uart_unregister_driver(&imx_reg);

	return ret;
}
/* Module exit: tear down in the reverse order of imx_serial_init(). */
static void __exit imx_serial_exit(void)
{
	platform_driver_unregister(&serial_imx_driver);
	uart_unregister_driver(&imx_reg);
}
/* module entry points and metadata */
module_init(imx_serial_init);
module_exit(imx_serial_exit);

MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("IMX generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-uart");
| gpl-2.0 |
placiano/linux-3.10.y | drivers/tty/cyclades.c | 2310 | 112895 | #undef BLOCKMOVE
#define Z_WAKE
#undef Z_EXT_CHARS_IN_BUFFER
/*
* This file contains the driver for the Cyclades async multiport
* serial boards.
*
* Initially written by Randolph Bentson <bentson@grieg.seaslug.org>.
* Modified and maintained by Marcio Saito <marcio@cyclades.com>.
*
* Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com>
*
* Much of the design and some of the code came from serial.c
* which was copyright (C) 1991, 1992 Linus Torvalds. It was
* extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92,
* and then fixed as suggested by Michael K. Johnson 12/12/92.
* Converted to pci probing and cleaned up by Jiri Slaby.
*
*/
#define CY_VERSION "2.6"
/* If you need to install more boards than NR_CARDS, change the constant
in the definition below. No other change is necessary to support up to
eight boards. Beyond that you'll have to extend cy_isa_addresses. */
#define NR_CARDS 4
/*
If the total number of ports is larger than NR_PORTS, change this
constant in the definition below. No other change is necessary to
support more boards/ports. */
#define NR_PORTS 256
#define ZO_V1 0
#define ZO_V2 1
#define ZE_V1 2
#define SERIAL_PARANOIA_CHECK
#undef CY_DEBUG_OPEN
#undef CY_DEBUG_THROTTLE
#undef CY_DEBUG_OTHER
#undef CY_DEBUG_IO
#undef CY_DEBUG_COUNT
#undef CY_DEBUG_DTR
#undef CY_DEBUG_INTERRUPTS
#undef CY_16Y_HACK
#undef CY_ENABLE_MONITORING
#undef CY_PCI_DEBUG
/*
* Include section
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/cyclades.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* forward declaration, used before its definition later in the file */
static void cy_send_xchar(struct tty_struct *tty, char ch);

#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif

#define STD_COM_FLAGS (0)

/* firmware stuff */
#define ZL_MAX_BLOCKS 16
#define DRIVER_VERSION 0x02010203
#define RAM_SIZE 0x80000

/* kind of a loadable firmware block: CPU program or FPGA bitstream */
enum zblock_type {
	ZBLOCK_PRG = 0,
	ZBLOCK_FPGA = 1
};

/* header of the Cyclades-Z firmware image file (packed on-disk layout) */
struct zfile_header {
	char name[64];
	char date[32];
	char aux[32];
	u32 n_config;		/* number of zfile_config entries */
	u32 config_offset;	/* file offset of the config table */
	u32 n_blocks;		/* number of zfile_block entries */
	u32 block_offset;	/* file offset of the block table */
	u32 reserved[9];
} __attribute__ ((packed));

/* one firmware configuration: which blocks belong to it */
struct zfile_config {
	char name[64];
	u32 mailbox;
	u32 function;
	u32 n_blocks;
	u32 block_list[ZL_MAX_BLOCKS];	/* presumably indices into the block table */
} __attribute__ ((packed));

/* one loadable chunk: position in the file and destination in card RAM */
struct zfile_block {
	u32 type;		/* enum zblock_type */
	u32 file_offset;
	u32 ram_offset;
	u32 size;
} __attribute__ ((packed));
/* the tty driver instance registered for all Cyclades ports */
static struct tty_driver *cy_serial_driver;

#ifdef CONFIG_ISA
/* This is the address lookup table. The driver will probe for
   Cyclom-Y/ISA boards at all addresses in here. If you want the
   driver to probe addresses at a different address, add it to
   this table. If the driver is probing some other board and
   causing problems, remove the offending address from this table.
*/

static unsigned int cy_isa_addresses[] = {
	0xD0000,
	0xD2000,
	0xD4000,
	0xD6000,
	0xD8000,
	0xDA000,
	0xDC000,
	0xDE000,
	0, 0, 0, 0, 0, 0, 0, 0
};

#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)

/* module parameters: per-card ISA memory address and IRQ overrides */
static long maddr[NR_CARDS];
static int irq[NR_CARDS];

module_param_array(maddr, long, NULL, 0);
module_param_array(irq, int, NULL, 0);

#endif /* CONFIG_ISA */

/* This is the per-card data structure containing address, irq, number of
   channels, etc. This driver supports a maximum of NR_CARDS cards.
*/
static struct cyclades_card cy_card[NR_CARDS];

static int cy_next_channel;	/* next minor available */

/*
 * This is used to look up the divisor speeds and the timeouts
 * We're normally limited to 15 distinct baud rates. The extra
 * are accessed via settings in info->port.flags.
 * 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
 * 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
 * HI VHI
 * 20
 */
static const int baud_table[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
	1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000,
	230400, 0
};
static const char baud_co_25[] = { /* 25 MHz clock option table */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02,
0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */
0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3,
0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15
};
static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
0x03, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00
};
static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */
0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62,
0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32,
0x21
};
static const char baud_cor3[] = { /* receive threshold */
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07,
0x07
};
/*
* The Cyclades driver implements HW flow control as any serial driver.
* The cyclades_port structure member rflow and the vector rflow_thr
* allows us to take advantage of a special feature in the CD1400 to avoid
* data loss even when the system interrupt latency is too high. These flags
* are to be used only with very special applications. Setting these flags
* requires the use of a special cable (DTR and RTS reversed). In the new
* CD1400-based boards (rev. 6.00 or later), there is no need for special
* cables.
*/
/* Per-baud RTS flow threshold, indexed by termios speed code (see the
 * explanation in the comment block above). */
static const char rflow_thr[] = {	/* rflow threshold */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
	0x0a
};
/* The Cyclom-Ye has placed the sequential chips in non-sequential
* address order. This look-up table overcomes that problem.
*/
/* Address offset of each CD1400 chip, indexed by logical chip number;
 * maps the logical order back onto the board's non-sequential wiring
 * (see comment above). */
static const unsigned int cy_chip_offset[] = { 0x0000,
	0x0400,
	0x0800,
	0x0C00,
	0x0200,
	0x0600,
	0x0A00,
	0x0E00
};
/* PCI related definitions */
/* PCI related definitions */

#ifdef CONFIG_PCI
/*
 * PCI IDs of all supported Cyclades boards; one _Lo/_Hi pair per board
 * family (the existing comments distinguish "< 1Mb" and "> 1Mb"
 * variants).  Exported so the module autoloads on matching hardware.
 */
static const struct pci_device_id cy_pci_dev_id[] = {
	/* PCI < 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
	/* PCI > 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },
	/* 4Y PCI < 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },
	/* 4Y PCI > 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },
	/* 8Y PCI < 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },
	/* 8Y PCI > 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },
	/* Z PCI < 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },
	/* Z PCI > 1Mb */
	{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },
	{ }			/* end of table */
};
MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
#endif
/* Forward declarations for routines referenced before their definition. */
static void cy_start(struct tty_struct *);
static void cy_set_line_char(struct cyclades_port *, struct tty_struct *);
static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32);
#ifdef CONFIG_ISA
static unsigned detect_isa_irq(void __iomem *);
#endif /* CONFIG_ISA */

#ifndef CONFIG_CYZ_INTR
/* Polled mode: one global timer scans every Cyclades-Z board. */
static void cyz_poll(unsigned long);

/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;

static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0);

#else /* CONFIG_CYZ_INTR */
/* Interrupt mode: a per-port timer re-arms RX service when the flip
 * buffer was full. */
static void cyz_rx_restart(unsigned long);
static struct timer_list cyz_rx_full_timer[NR_PORTS];
#endif /* CONFIG_CYZ_INTR */
/*
 * Write the 8-bit value @val to CD1400 register @reg of @port.  The
 * register offset is shifted by the card's bus_index to account for the
 * board's register spacing.
 */
static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
{
	cy_writeb(port->u.cyy.base_addr +
			(reg << port->card->bus_index), val);
}
/*
 * Read the 8-bit CD1400 register @reg of @port, with the register offset
 * shifted by the card's bus_index register-spacing factor.
 */
static inline u8 cyy_readb(struct cyclades_port *port, u32 reg)
{
	void __iomem *addr;

	addr = port->u.cyy.base_addr + (reg << port->card->bus_index);
	return readb(addr);
}
/*
 * True if @card is a Cyclades-Z board.  Z boards are marked by storing
 * the sentinel (unsigned int)-1 in num_chips at probe time.
 */
static inline bool cy_is_Z(struct cyclades_card *card)
{
	return card->num_chips == (unsigned int)-1;
}
/*
 * True if the Cyclades-Z FPGA microcode is loaded: tests bit 17 of the
 * PLX 9060 INIT_CTRL register.
 */
static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
{
	u32 init_ctrl = readl(&ctl_addr->init_ctrl);

	return (init_ctrl & (1 << 17)) != 0;
}
/* Card-level wrapper for __cyz_fpga_loaded(). */
static inline bool cyz_fpga_loaded(struct cyclades_card *card)
{
	return __cyz_fpga_loaded(card->ctl_addr.p9060);
}
static inline bool cyz_is_loaded(struct cyclades_card *card)
{
struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
readl(&fw_id->signature) == ZFIRM_ID;
}
/*
 * Sanity-check @info before use: warn and return 1 on a NULL port or a
 * corrupted magic number.  Compiles to "return 0" unless
 * SERIAL_PARANOIA_CHECK is defined.
 */
static inline int serial_paranoia_check(struct cyclades_port *info,
		const char *name, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
	if (!info) {
		printk(KERN_WARNING "cyc Warning: null cyclades_port for (%s) "
				"in %s\n", name, routine);
		return 1;
	}

	if (info->magic != CYCLADES_MAGIC) {
		printk(KERN_WARNING "cyc Warning: bad magic number for serial "
				"struct (%s) in %s\n", name, routine);
		return 1;
	}
#endif
	return 0;
}
/***********************************************************/
/********* Start of block of Cyclom-Y specific code ********/
/* This routine waits up to 1000 micro-seconds for the previous
command to the Cirrus chip to complete and then issues the
new command. An error is returned if the previous command
didn't finish within the time limit.
This function is only called from inside spinlock-protected code.
*/
/*
 * Issue @cmd to the CD1400 at @base_addr after waiting up to 1000 us
 * (100 polls x 10 us) for the previous command to drain from CCR.
 * Returns 0 on success, -1 if the chip never went idle.
 * Only called from spinlock-protected code.
 */
static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index)
{
	void __iomem *ccr = base_addr + (CyCCR << index);
	unsigned int tries;

	/* A non-zero CCR means the previous command is still pending. */
	for (tries = 100; tries != 0; tries--) {
		if (readb(ccr) == 0)
			break;
		udelay(10L);
	}

	if (tries == 0)
		return -1;	/* chip never became idle */

	cy_writeb(ccr, cmd);

	return 0;
}
/* Per-port convenience wrapper for __cyy_issue_cmd(). */
static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd)
{
	return __cyy_issue_cmd(port->u.cyy.base_addr, cmd,
			port->card->bus_index);
}
#ifdef CONFIG_ISA
/* ISA interrupt detection code */
/*
 * Probe which ISA IRQ line the board at @address uses: force the CD1400
 * to raise a Tx-ready interrupt while probe_irq_on()/probe_irq_off()
 * watch the IRQ lines, then undo every register change.
 * Returns the detected IRQ, or 0 if none fired.
 */
static unsigned detect_isa_irq(void __iomem *address)
{
	int irq;
	unsigned long irqs, flags;
	int save_xir, save_car;
	int index = 0;		/* IRQ probing is only for ISA */

	/* forget possible initially masked and pending IRQ */
	irq = probe_irq_off(probe_irq_on());

	/* Clear interrupts on the board first */
	cy_writeb(address + (Cy_ClrIntr << index), 0);
	/* Cy_ClrIntr is 0x1800 */

	irqs = probe_irq_on();
	/* Wait ... */
	msleep(5);

	/* Enable the Tx interrupts on the CD1400 */
	local_irq_save(flags);
	cy_writeb(address + (CyCAR << index), 0);
	__cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index);

	cy_writeb(address + (CyCAR << index), 0);
	cy_writeb(address + (CySRER << index),
		readb(address + (CySRER << index)) | CyTxRdy);
	local_irq_restore(flags);

	/* Wait ... */
	msleep(5);

	/* Check which interrupt is in use */
	irq = probe_irq_off(irqs);

	/* Clean up: ack the Tx interrupt and restore SRER/TIR/CAR. */
	save_xir = (u_char) readb(address + (CyTIR << index));
	save_car = readb(address + (CyCAR << index));
	cy_writeb(address + (CyCAR << index), (save_xir & 0x3));
	cy_writeb(address + (CySRER << index),
		readb(address + (CySRER << index)) & ~CyTxRdy);
	cy_writeb(address + (CyTIR << index), (save_xir & 0x3f));
	cy_writeb(address + (CyCAR << index), (save_car));
	cy_writeb(address + (Cy_ClrIntr << index), 0);
	/* Cy_ClrIntr is 0x1800 */

	return (irq > 0) ? irq : 0;
}
#endif /* CONFIG_ISA */
/*
 * Service a receive interrupt from one CD1400 @chip on card @cinfo.
 * Determines the interrupting channel, pushes the received characters
 * (or an error flag, for exception interrupts) into the tty flip buffer,
 * updates per-port statistics and acknowledges the interrupt.
 * Called with cinfo->card_lock held (from cyy_interrupt()).
 */
static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
		void __iomem *base_addr)
{
	struct cyclades_port *info;
	struct tty_port *port;
	int len, index = cinfo->bus_index;
	u8 ivr, save_xir, channel, save_car, data, char_count;

#ifdef CY_DEBUG_INTERRUPTS
	printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip);
#endif
	/* determine the channel & change to that context */
	save_xir = readb(base_addr + (CyRIR << index));
	channel = save_xir & CyIRChannel;
	info = &cinfo->ports[channel + chip * 4];	/* 4 channels per chip */
	port = &info->port;
	save_car = cyy_readb(info, CyCAR);
	cyy_writeb(info, CyCAR, save_xir);
	ivr = cyy_readb(info, CyRIVR) & CyIVRMask;

	/* there is an open port for this data */
	if (ivr == CyIVRRxEx) {	/* exception */
		/* reading CyRDSR pops the exception status/character */
		data = cyy_readb(info, CyRDSR);

		/* For statistics only */
		if (data & CyBREAK)
			info->icount.brk++;
		else if (data & CyFRAME)
			info->icount.frame++;
		else if (data & CyPARITY)
			info->icount.parity++;
		else if (data & CyOVERRUN)
			info->icount.overrun++;

		if (data & info->ignore_status_mask) {
			info->icount.rx++;
			/* NOTE(review): this early return skips the
			 * end-of-service CyRIR/CyCAR restore below; it
			 * matches the historical code but looks
			 * suspicious -- confirm against upstream. */
			return;
		}
		if (tty_buffer_request_room(port, 1)) {
			if (data & info->read_status_mask) {
				if (data & CyBREAK) {
					tty_insert_flip_char(port,
						cyy_readb(info, CyRDSR),
						TTY_BREAK);
					info->icount.rx++;
					if (port->flags & ASYNC_SAK) {
						struct tty_struct *tty =
							tty_port_tty_get(port);
						if (tty) {
							do_SAK(tty);
							tty_kref_put(tty);
						}
					}
				} else if (data & CyFRAME) {
					tty_insert_flip_char(port,
						cyy_readb(info, CyRDSR),
						TTY_FRAME);
					info->icount.rx++;
					info->idle_stats.frame_errs++;
				} else if (data & CyPARITY) {
					/* Pieces of seven... */
					tty_insert_flip_char(port,
						cyy_readb(info, CyRDSR),
						TTY_PARITY);
					info->icount.rx++;
					info->idle_stats.parity_errs++;
				} else if (data & CyOVERRUN) {
					tty_insert_flip_char(port, 0,
						TTY_OVERRUN);
					info->icount.rx++;
					/* If the flip buffer itself is
					   overflowing, we still lose
					   the next incoming character.
					 */
					tty_insert_flip_char(port,
						cyy_readb(info, CyRDSR),
						TTY_FRAME);
					info->icount.rx++;
					info->idle_stats.overruns++;
				/* These two conditions may imply */
				/* a normal read should be done. */
				/* } else if(data & CyTIMEOUT) { */
				/* } else if(data & CySPECHAR) { */
				} else {
					tty_insert_flip_char(port, 0,
						TTY_NORMAL);
					info->icount.rx++;
				}
			} else {
				tty_insert_flip_char(port, 0, TTY_NORMAL);
				info->icount.rx++;
			}
		} else {
			/* there was a software buffer overrun and nothing
			 * could be done about it!!! */
			info->icount.buf_overrun++;
			info->idle_stats.overruns++;
		}
	} else {	/* normal character reception */
		/* load # chars available from the chip */
		char_count = cyy_readb(info, CyRDCR);

#ifdef CY_ENABLE_MONITORING
		++info->mon.int_count;
		info->mon.char_count += char_count;
		if (char_count > info->mon.char_max)
			info->mon.char_max = char_count;
		info->mon.char_last = char_count;
#endif
		/* drain the RX FIFO into the flip buffer */
		len = tty_buffer_request_room(port, char_count);
		while (len--) {
			data = cyy_readb(info, CyRDSR);
			tty_insert_flip_char(port, data, TTY_NORMAL);
			info->idle_stats.recv_bytes++;
			info->icount.rx++;
#ifdef CY_16Y_HACK
			udelay(10L);
#endif
		}
		info->idle_stats.recv_idle = jiffies;
	}
	tty_schedule_flip(port);

	/* end of service */
	cyy_writeb(info, CyRIR, save_xir & 0x3f);
	cyy_writeb(info, CyCAR, save_car);
}
/*
 * Service a transmit interrupt from one CD1400 @chip on card @cinfo:
 * refill the chip's TX FIFO from the port's xmit ring, handling the
 * special x_char, break on/off sequences and the NULL-escape doubling
 * required by the chip's Embedded Transmit Commands.
 * Called with cinfo->card_lock held (from cyy_interrupt()).
 */
static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
		void __iomem *base_addr)
{
	struct cyclades_port *info;
	struct tty_struct *tty;
	int char_count, index = cinfo->bus_index;
	u8 save_xir, channel, save_car, outch;

	/* Since we only get here when the transmit buffer
	   is empty, we know we can always stuff a dozen
	   characters. */
#ifdef CY_DEBUG_INTERRUPTS
	printk(KERN_DEBUG "cyy_interrupt: xmit intr, chip %d\n", chip);
#endif

	/* determine the channel & change to that context */
	save_xir = readb(base_addr + (CyTIR << index));
	channel = save_xir & CyIRChannel;
	save_car = readb(base_addr + (CyCAR << index));
	cy_writeb(base_addr + (CyCAR << index), save_xir);
	info = &cinfo->ports[channel + chip * 4];

	tty = tty_port_tty_get(&info->port);
	if (tty == NULL) {
		/* nobody listening: mask further TX-ready interrupts */
		cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
		goto end;
	}

	/* load the on-chip space for outbound data */
	char_count = info->xmit_fifo_size;

	if (info->x_char) {	/* send special char */
		outch = info->x_char;
		cyy_writeb(info, CyTDR, outch);
		char_count--;
		info->icount.tx++;
		info->x_char = 0;
	}

	/* pending break requests are sent as 2-byte embedded commands */
	if (info->breakon || info->breakoff) {
		if (info->breakon) {
			cyy_writeb(info, CyTDR, 0);
			cyy_writeb(info, CyTDR, 0x81);
			info->breakon = 0;
			char_count -= 2;
		}
		if (info->breakoff) {
			cyy_writeb(info, CyTDR, 0);
			cyy_writeb(info, CyTDR, 0x83);
			info->breakoff = 0;
			char_count -= 2;
		}
	}

	while (char_count-- > 0) {
		if (!info->xmit_cnt) {
			/* ring empty: switch from TxRdy to TxMpty (or off) */
			if (cyy_readb(info, CySRER) & CyTxMpty) {
				cyy_writeb(info, CySRER,
					cyy_readb(info, CySRER) & ~CyTxMpty);
			} else {
				cyy_writeb(info, CySRER, CyTxMpty |
					(cyy_readb(info, CySRER) & ~CyTxRdy));
			}
			goto done;
		}
		if (info->port.xmit_buf == NULL) {
			cyy_writeb(info, CySRER,
				cyy_readb(info, CySRER) & ~CyTxRdy);
			goto done;
		}
		if (tty->stopped || tty->hw_stopped) {
			cyy_writeb(info, CySRER,
				cyy_readb(info, CySRER) & ~CyTxRdy);
			goto done;
		}
		/* Because the Embedded Transmit Commands have been enabled,
		 * we must check to see if the escape character, NULL, is being
		 * sent. If it is, we must ensure that there is room for it to
		 * be doubled in the output stream. Therefore we no longer
		 * advance the pointer when the character is fetched, but
		 * rather wait until after the check for a NULL output
		 * character. This is necessary because there may not be room
		 * for the two chars needed to send a NULL.
		 */
		outch = info->port.xmit_buf[info->xmit_tail];
		if (outch) {
			info->xmit_cnt--;
			info->xmit_tail = (info->xmit_tail + 1) &
					(SERIAL_XMIT_SIZE - 1);
			cyy_writeb(info, CyTDR, outch);
			info->icount.tx++;
		} else {
			if (char_count > 1) {
				info->xmit_cnt--;
				info->xmit_tail = (info->xmit_tail + 1) &
					(SERIAL_XMIT_SIZE - 1);
				cyy_writeb(info, CyTDR, outch);
				cyy_writeb(info, CyTDR, 0);
				info->icount.tx++;
				char_count--;
			}
			/* else: not enough room to double the NULL;
			 * leave it for the next TX interrupt */
		}
	}

done:
	tty_wakeup(tty);
	tty_kref_put(tty);
end:
	/* end of service */
	cyy_writeb(info, CyTIR, save_xir & 0x3f);
	cyy_writeb(info, CyCAR, save_car);
}
/*
 * Service a modem-signal interrupt from one CD1400 @chip: account the
 * DCD/CTS/DSR/RI deltas, handle carrier loss/gain and CTS flow control,
 * then acknowledge the interrupt.
 * Called with cinfo->card_lock held (from cyy_interrupt()).
 */
static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
		void __iomem *base_addr)
{
	struct cyclades_port *info;
	struct tty_struct *tty;
	int index = cinfo->bus_index;
	u8 save_xir, channel, save_car, mdm_change, mdm_status;

	/* determine the channel & change to that context */
	save_xir = readb(base_addr + (CyMIR << index));
	channel = save_xir & CyIRChannel;
	info = &cinfo->ports[channel + chip * 4];
	save_car = cyy_readb(info, CyCAR);
	cyy_writeb(info, CyCAR, save_xir);

	mdm_change = cyy_readb(info, CyMISR);	/* which signals changed */
	mdm_status = cyy_readb(info, CyMSVR1);	/* current signal levels */

	tty = tty_port_tty_get(&info->port);
	if (!tty)
		goto end;

	if (mdm_change & CyANY_DELTA) {
		/* For statistics only */
		if (mdm_change & CyDCD)
			info->icount.dcd++;
		if (mdm_change & CyCTS)
			info->icount.cts++;
		if (mdm_change & CyDSR)
			info->icount.dsr++;
		if (mdm_change & CyRI)
			info->icount.rng++;

		wake_up_interruptible(&info->port.delta_msr_wait);
	}

	/* carrier transition: wake openers or hang up */
	if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) {
		if (mdm_status & CyDCD)
			wake_up_interruptible(&info->port.open_wait);
		else
			tty_hangup(tty);
	}
	/* CTS transition: start/stop the transmitter for HW flow control */
	if ((mdm_change & CyCTS) && tty_port_cts_enabled(&info->port)) {
		if (tty->hw_stopped) {
			if (mdm_status & CyCTS) {
				/* cy_start isn't used
				   because... !!! */
				tty->hw_stopped = 0;
				cyy_writeb(info, CySRER,
					cyy_readb(info, CySRER) | CyTxRdy);
				tty_wakeup(tty);
			}
		} else {
			if (!(mdm_status & CyCTS)) {
				/* cy_stop isn't used
				   because ... !!! */
				tty->hw_stopped = 1;
				cyy_writeb(info, CySRER,
					cyy_readb(info, CySRER) & ~CyTxRdy);
			}
		}
	}
/*	if (mdm_change & CyDSR) {
	}
	if (mdm_change & CyRI) {
	}*/
	tty_kref_put(tty);
end:
	/* end of service */
	cyy_writeb(info, CyMIR, save_xir & 0x3f);
	cyy_writeb(info, CyCAR, save_car);
}
/* The real interrupt service routine is called
whenever the card wants its hand held--chars
received, out buffer empty, modem change, etc.
*/
/*
 * Top-level Cyclom-Y interrupt handler.  Round-robins over all CD1400
 * chips on the card, dispatching RX/TX/modem service until a full pass
 * finds no pending work, then acks the board-level interrupt.
 * @dev_id is the struct cyclades_card registered with request_irq().
 */
static irqreturn_t cyy_interrupt(int irq, void *dev_id)
{
	int status;
	struct cyclades_card *cinfo = dev_id;
	void __iomem *base_addr, *card_base_addr;
	unsigned int chip, too_many, had_work;
	int index;

	if (unlikely(cinfo == NULL)) {
#ifdef CY_DEBUG_INTERRUPTS
		printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",
				irq);
#endif
		return IRQ_NONE;	/* spurious interrupt */
	}

	card_base_addr = cinfo->base_addr;
	index = cinfo->bus_index;

	/* card was not initialized yet (e.g. DEBUG_SHIRQ) */
	if (unlikely(card_base_addr == NULL))
		return IRQ_HANDLED;

	/* This loop checks all chips in the card.  Make a note whenever
	   _any_ chip had some work to do, as this is considered an
	   indication that there will be more to do.  Only when no chip
	   has any work does this outermost loop exit.
	 */
	do {
		had_work = 0;
		for (chip = 0; chip < cinfo->num_chips; chip++) {
			base_addr = cinfo->base_addr +
					(cy_chip_offset[chip] << index);
			too_many = 0;
			/* SVRR reports which service requests are pending */
			while ((status = readb(base_addr +
						(CySVRR << index))) != 0x00) {
				had_work++;
			/* The purpose of the following test is to ensure that
			   no chip can monopolize the driver.  This forces the
			   chips to be checked in a round-robin fashion (after
			   draining each of a bunch (1000) of characters).
			 */
				if (1000 < too_many++)
					break;
				spin_lock(&cinfo->card_lock);
				if (status & CySRReceive)	/* rx intr */
					cyy_chip_rx(cinfo, chip, base_addr);
				if (status & CySRTransmit)	/* tx intr */
					cyy_chip_tx(cinfo, chip, base_addr);
				if (status & CySRModem)		/* modem intr */
					cyy_chip_modem(cinfo, chip, base_addr);
				spin_unlock(&cinfo->card_lock);
			}
		}
	} while (had_work);

	/* clear interrupts */
	spin_lock(&cinfo->card_lock);
	cy_writeb(card_base_addr + (Cy_ClrIntr << index), 0);
	/* Cy_ClrIntr is 0x1800 */
	spin_unlock(&cinfo->card_lock);
	return IRQ_HANDLED;
}				/* cyy_interrupt */
/*
 * Raise (@set) and/or drop (@clear) RTS and DTR on a Cyclom-Y port.
 * Only TIOCM_RTS and TIOCM_DTR bits are honoured.  On ports wired with
 * the special reversed cable (info->rtsdtr_inv) the two signals swap
 * MSVR registers.  Caller must hold the card lock.
 */
static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set,
		unsigned int clear)
{
	struct cyclades_card *card = info->card;
	int channel = info->line - card->first_line;
	u32 rts, dtr, msvrr, msvrd;

	channel &= 0x03;	/* channel index within this CD1400 chip */

	if (info->rtsdtr_inv) {
		msvrr = CyMSVR2;
		msvrd = CyMSVR1;
		rts = CyDTR;
		dtr = CyRTS;
	} else {
		msvrr = CyMSVR1;
		msvrd = CyMSVR2;
		rts = CyRTS;
		dtr = CyDTR;
	}
	if (set & TIOCM_RTS) {
		cyy_writeb(info, CyCAR, channel);
		cyy_writeb(info, msvrr, rts);
	}
	if (clear & TIOCM_RTS) {
		cyy_writeb(info, CyCAR, channel);
		/* writing the complement clears the signal */
		cyy_writeb(info, msvrr, ~rts);
	}
	if (set & TIOCM_DTR) {
		cyy_writeb(info, CyCAR, channel);
		cyy_writeb(info, msvrd, dtr);
#ifdef CY_DEBUG_DTR
		printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n");
		printk(KERN_DEBUG "     status: 0x%x, 0x%x\n",
			cyy_readb(info, CyMSVR1),
			cyy_readb(info, CyMSVR2));
#endif
	}
	if (clear & TIOCM_DTR) {
		cyy_writeb(info, CyCAR, channel);
		cyy_writeb(info, msvrd, ~dtr);
#ifdef CY_DEBUG_DTR
		printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n");
		printk(KERN_DEBUG "     status: 0x%x, 0x%x\n",
			cyy_readb(info, CyMSVR1),
			cyy_readb(info, CyMSVR2));
#endif
	}
}
/***********************************************************/
/********* End of block of Cyclom-Y specific code **********/
/******** Start of block of Cyclades-Z specific code *******/
/***********************************************************/
/*
 * Fetch one pending firmware message from a Cyclades-Z board.  If the
 * local doorbell is rung, returns 1 and fills in the command byte, the
 * channel and the parameter (reading them BEFORE acking the doorbell,
 * so the firmware does not overwrite them); returns 0 if no message.
 */
static int
cyz_fetch_msg(struct cyclades_card *cinfo,
		__u32 *channel, __u8 *cmd, __u32 *param)
{
	struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
	unsigned long loc_doorbell;

	loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
	if (loc_doorbell) {
		*cmd = (char)(0xff & loc_doorbell);
		*channel = readl(&board_ctrl->fwcmd_channel);
		*param = (__u32) readl(&board_ctrl->fwcmd_param);
		/* ack: clear all doorbell bits */
		cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
		return 1;
	}
	return 0;
}				/* cyz_fetch_msg */
/*
 * Send @cmd for @channel (with @param) to the Cyclades-Z firmware via
 * the PCI doorbell.  Waits up to 1000 * 50 us for any previous command
 * to be consumed.  Returns 0 on success, -1 if the firmware is not
 * loaded, or the still-pending doorbell value on timeout.
 */
static int
cyz_issue_cmd(struct cyclades_card *cinfo,
		__u32 channel, __u8 cmd, __u32 param)
{
	struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
	__u32 __iomem *pci_doorbell;
	unsigned int index;

	if (!cyz_is_loaded(cinfo))
		return -1;

	index = 0;
	pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
	/* a non-zero doorbell means the previous command is unconsumed */
	while ((readl(pci_doorbell) & 0xff) != 0) {
		if (index++ == 1000)
			return (int)(readl(pci_doorbell) & 0xff);
		udelay(50L);
	}
	/* arguments must be in place before the doorbell is rung */
	cy_writel(&board_ctrl->hcmd_channel, channel);
	cy_writel(&board_ctrl->hcmd_param, param);
	cy_writel(pci_doorbell, (long)cmd);

	return 0;
}				/* cyz_issue_cmd */
/*
 * Drain the Cyclades-Z firmware RX ring of @info into the tty flip
 * buffer.  The ring lives in board memory and is described by the
 * rx_get/rx_put/rx_bufsize/rx_bufaddr fields of the BUF_CTRL block;
 * only rx_get is written back by the host.
 */
static void cyz_handle_rx(struct cyclades_port *info)
{
	struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
	struct cyclades_card *cinfo = info->card;
	struct tty_port *port = &info->port;
	unsigned int char_count;
	int len;
#ifdef BLOCKMOVE
	unsigned char *buf;
#else
	char data;
#endif
	__u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr;

	rx_get = new_rx_get = readl(&buf_ctrl->rx_get);
	rx_put = readl(&buf_ctrl->rx_put);
	rx_bufsize = readl(&buf_ctrl->rx_bufsize);
	rx_bufaddr = readl(&buf_ctrl->rx_bufaddr);
	/* bytes pending in the circular buffer (put may have wrapped) */
	if (rx_put >= rx_get)
		char_count = rx_put - rx_get;
	else
		char_count = rx_put - rx_get + rx_bufsize;

	if (!char_count)
		return;

#ifdef CY_ENABLE_MONITORING
	info->mon.int_count++;
	info->mon.char_count += char_count;
	if (char_count > info->mon.char_max)
		info->mon.char_max = char_count;
	info->mon.char_last = char_count;
#endif
#ifdef BLOCKMOVE
	/* we'd like to use memcpy(t, f, n) and memset(s, c, count)
	   for performance, but because of buffer boundaries, there
	   may be several steps to the operation */
	while (1) {
		len = tty_prepare_flip_string(port, &buf,
				char_count);
		if (!len)
			break;

		len = min_t(unsigned int, min(len, char_count),
				rx_bufsize - new_rx_get);

		memcpy_fromio(buf, cinfo->base_addr +
				rx_bufaddr + new_rx_get, len);

		new_rx_get = (new_rx_get + len) &
				(rx_bufsize - 1);
		char_count -= len;
		info->icount.rx += len;
		info->idle_stats.recv_bytes += len;
	}
#else
	/* byte-at-a-time fallback */
	len = tty_buffer_request_room(port, char_count);
	while (len--) {
		data = readb(cinfo->base_addr + rx_bufaddr +
				new_rx_get);
		new_rx_get = (new_rx_get + 1) &
				(rx_bufsize - 1);
		tty_insert_flip_char(port, data, TTY_NORMAL);
		info->idle_stats.recv_bytes++;
		info->icount.rx++;
	}
#endif
#ifdef CONFIG_CYZ_INTR
	/* Recalculate the number of chars in the RX buffer and issue
	   a cmd in case it's higher than the RX high water mark */
	rx_put = readl(&buf_ctrl->rx_put);
	if (rx_put >= rx_get)
		char_count = rx_put - rx_get;
	else
		char_count = rx_put - rx_get + rx_bufsize;
	if (char_count >= readl(&buf_ctrl->rx_threshold) &&
			!timer_pending(&cyz_rx_full_timer[
					info->line]))
		mod_timer(&cyz_rx_full_timer[info->line],
				jiffies + 1);
#endif
	info->idle_stats.recv_idle = jiffies;
	tty_schedule_flip(&info->port);

	/* Update rx_get */
	cy_writel(&buf_ctrl->rx_get, new_rx_get);
}
/*
 * Move pending output of @info from the port's xmit ring into the
 * Cyclades-Z firmware TX ring in board memory.  Only tx_put is written
 * back by the host; one slot is always kept free to distinguish a full
 * ring from an empty one.
 */
static void cyz_handle_tx(struct cyclades_port *info)
{
	struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
	struct cyclades_card *cinfo = info->card;
	struct tty_struct *tty;
	u8 data;
	unsigned int char_count;
#ifdef BLOCKMOVE
	int small_count;
#endif
	__u32 tx_put, tx_get, tx_bufsize, tx_bufaddr;

	if (info->xmit_cnt <= 0)	/* Nothing to transmit */
		return;

	tx_get = readl(&buf_ctrl->tx_get);
	tx_put = readl(&buf_ctrl->tx_put);
	tx_bufsize = readl(&buf_ctrl->tx_bufsize);
	tx_bufaddr = readl(&buf_ctrl->tx_bufaddr);
	/* free space in the circular buffer, keeping one slot unused */
	if (tx_put >= tx_get)
		char_count = tx_get - tx_put - 1 + tx_bufsize;
	else
		char_count = tx_get - tx_put - 1;

	if (!char_count)
		return;

	tty = tty_port_tty_get(&info->port);
	if (tty == NULL)
		goto ztxdone;

	if (info->x_char) {	/* send special char */
		data = info->x_char;

		cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
		tx_put = (tx_put + 1) & (tx_bufsize - 1);
		info->x_char = 0;
		char_count--;
		info->icount.tx++;
	}
#ifdef BLOCKMOVE
	/* copy the largest contiguous run possible each iteration,
	 * bounded by board ring wrap, host ring wrap and free space */
	while (0 < (small_count = min_t(unsigned int,
			tx_bufsize - tx_put, min_t(unsigned int,
				(SERIAL_XMIT_SIZE - info->xmit_tail),
				min_t(unsigned int, info->xmit_cnt,
					char_count))))) {

		memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + tx_put),
				&info->port.xmit_buf[info->xmit_tail],
				small_count);

		tx_put = (tx_put + small_count) & (tx_bufsize - 1);
		char_count -= small_count;
		info->icount.tx += small_count;
		info->xmit_cnt -= small_count;
		info->xmit_tail = (info->xmit_tail + small_count) &
				(SERIAL_XMIT_SIZE - 1);
	}
#else
	while (info->xmit_cnt && char_count) {
		data = info->port.xmit_buf[info->xmit_tail];
		info->xmit_cnt--;
		info->xmit_tail = (info->xmit_tail + 1) &
				(SERIAL_XMIT_SIZE - 1);

		cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
		tx_put = (tx_put + 1) & (tx_bufsize - 1);
		char_count--;
		info->icount.tx++;
	}
#endif
	tty_wakeup(tty);
	tty_kref_put(tty);
ztxdone:
	/* Update tx_put */
	cy_writel(&buf_ctrl->tx_put, tx_put);
}
/*
 * Drain and dispatch every pending firmware message on Z card @cinfo:
 * line errors become flip-buffer flag characters, modem deltas update
 * the counters (and carrier handling), and RX/TX interrupts fall
 * through to cyz_handle_rx()/cyz_handle_tx().
 */
static void cyz_handle_cmd(struct cyclades_card *cinfo)
{
	struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
	struct cyclades_port *info;
	__u32 channel, param, fw_ver;
	__u8 cmd;
	int special_count;
	int delta_count;

	fw_ver = readl(&board_ctrl->fw_version);

	while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
		special_count = 0;
		delta_count = 0;
		info = &cinfo->ports[channel];

		switch (cmd) {
		case C_CM_PR_ERROR:
			tty_insert_flip_char(&info->port, 0, TTY_PARITY);
			info->icount.rx++;
			special_count++;
			break;
		case C_CM_FR_ERROR:
			tty_insert_flip_char(&info->port, 0, TTY_FRAME);
			info->icount.rx++;
			special_count++;
			break;
		case C_CM_RXBRK:
			tty_insert_flip_char(&info->port, 0, TTY_BREAK);
			info->icount.rx++;
			special_count++;
			break;
		case C_CM_MDCD:
			info->icount.dcd++;
			delta_count++;
			if (info->port.flags & ASYNC_CHECK_CD) {
				/* firmware > 241 reports DCD in param;
				 * older versions need a register read */
				u32 dcd = fw_ver > 241 ? param :
					readl(&info->u.cyz.ch_ctrl->rs_status);
				if (dcd & C_RS_DCD)
					wake_up_interruptible(&info->port.open_wait);
				else
					tty_port_tty_hangup(&info->port, false);
			}
			break;
		case C_CM_MCTS:
			info->icount.cts++;
			delta_count++;
			break;
		case C_CM_MRI:
			info->icount.rng++;
			delta_count++;
			break;
		case C_CM_MDSR:
			info->icount.dsr++;
			delta_count++;
			break;
#ifdef Z_WAKE
		case C_CM_IOCTLW:
			complete(&info->shutdown_wait);
			break;
#endif
#ifdef CONFIG_CYZ_INTR
		case C_CM_RXHIWM:
		case C_CM_RXNNDT:
		case C_CM_INTBACK2:
			/* Reception Interrupt */
#ifdef CY_DEBUG_INTERRUPTS
			/* NOTE(review): format specifiers look wrong here --
			 * info->card is a pointer printed with %d and channel
			 * is __u32 printed with %ld (debug-only code). */
			printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
					"port %ld\n", info->card, channel);
#endif
			cyz_handle_rx(info);
			break;
		case C_CM_TXBEMPTY:
		case C_CM_TXLOWWM:
		case C_CM_INTBACK:
			/* Transmission Interrupt */
#ifdef CY_DEBUG_INTERRUPTS
			printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
					"port %ld\n", info->card, channel);
#endif
			cyz_handle_tx(info);
			break;
#endif				/* CONFIG_CYZ_INTR */
		case C_CM_FATAL:
			/* should do something with this !!! */
			break;
		default:
			break;
		}
		if (delta_count)
			wake_up_interruptible(&info->port.delta_msr_wait);
		if (special_count)
			tty_schedule_flip(&info->port);
	}
}
#ifdef CONFIG_CYZ_INTR
/*
 * Cyclades-Z interrupt handler (interrupt mode only): all real work is
 * message-driven, so just drain the firmware message queue.
 */
static irqreturn_t cyz_interrupt(int irq, void *dev_id)
{
	struct cyclades_card *cinfo = dev_id;

	if (unlikely(!cyz_is_loaded(cinfo))) {
#ifdef CY_DEBUG_INTERRUPTS
		printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
				"(IRQ%d).\n", irq);
#endif
		return IRQ_NONE;
	}

	/* Handle the interrupts */
	cyz_handle_cmd(cinfo);

	return IRQ_HANDLED;
}				/* cyz_interrupt */
/*
 * Timer callback (cyz_rx_full_timer): ask the firmware for another RX
 * interrupt (C_CM_INTBACK2) after the flip buffer was previously full.
 * @arg is the struct cyclades_port the timer was armed for.
 */
static void cyz_rx_restart(unsigned long arg)
{
	struct cyclades_port *info = (struct cyclades_port *)arg;
	struct cyclades_card *card = info->card;
	int retval;
	__u32 channel = info->line - card->first_line;
	unsigned long flags;

	spin_lock_irqsave(&card->card_lock, flags);
	retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L);
	if (retval != 0) {
		printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n",
			info->line, retval);
	}
	spin_unlock_irqrestore(&card->card_lock, flags);
}
#else /* CONFIG_CYZ_INTR */
/*
 * Timer callback for polled (non-interrupt) Z operation: for every
 * loaded Z card, process firmware messages and service RX/TX on all
 * its ports, then re-arm the timer.
 */
static void cyz_poll(unsigned long arg)
{
	struct cyclades_card *cinfo;
	struct cyclades_port *info;
	unsigned long expires = jiffies + HZ;	/* slow rate while idle */
	unsigned int port, card;

	for (card = 0; card < NR_CARDS; card++) {
		cinfo = &cy_card[card];

		if (!cy_is_Z(cinfo))
			continue;
		if (!cyz_is_loaded(cinfo))
			continue;

	/* Skip first polling cycle to avoid racing conditions with the FW */
		if (!cinfo->intr_enabled) {
			cinfo->intr_enabled = 1;
			continue;
		}

		cyz_handle_cmd(cinfo);

		for (port = 0; port < cinfo->nports; port++) {
			info = &cinfo->ports[port];

			if (!info->throttle)
				cyz_handle_rx(info);
			cyz_handle_tx(info);
		}
		/* poll every 'cyz_polling_cycle' period */
		expires = jiffies + cyz_polling_cycle;
	}
	mod_timer(&cyz_timerlist, expires);
}				/* cyz_poll */
#endif /* CONFIG_CYZ_INTR */
/********** End of block of Cyclades-Z specific code *********/
/***********************************************************/
/* This is called whenever a port becomes active;
interrupts are enabled and DTR & RTS are turned on.
*/
/*
 * Bring a port up for the first opener: allocate the transmit page,
 * program the line settings, enable receiver/transmitter (Y) or the
 * firmware channel (Z), raise DTR/RTS and mark the port initialized.
 * Returns 0 on success, -ENOMEM/-ENODEV on failure.
 */
static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
{
	struct cyclades_card *card;
	unsigned long flags;
	int retval = 0;
	int channel;
	unsigned long page;

	card = info->card;
	channel = info->line - card->first_line;

	/* allocate before taking the lock: get_zeroed_page can sleep */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	spin_lock_irqsave(&card->card_lock, flags);

	if (info->port.flags & ASYNC_INITIALIZED)
		goto errout;

	if (!info->type) {
		set_bit(TTY_IO_ERROR, &tty->flags);
		goto errout;
	}

	/* another opener may have installed a buffer while we slept */
	if (info->port.xmit_buf)
		free_page(page);
	else
		info->port.xmit_buf = (unsigned char *)page;

	spin_unlock_irqrestore(&card->card_lock, flags);

	cy_set_line_char(info, tty);

	if (!cy_is_Z(card)) {
		channel &= 0x03;	/* channel within this CD1400 chip */

		spin_lock_irqsave(&card->card_lock, flags);

		cyy_writeb(info, CyCAR, channel);

		cyy_writeb(info, CyRTPR,
			(info->default_timeout ? info->default_timeout : 0x02));
		/* 10ms rx timeout */

		cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR);

		cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0);

		cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData);
	} else {
		struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;

		/* lock already dropped above; the xmit page was consumed
		 * (installed or freed), so nothing leaks on this path */
		if (!cyz_is_loaded(card))
			return -ENODEV;

#ifdef CY_DEBUG_OPEN
		/* NOTE(review): "card %d" prints a pointer with %d
		 * (debug-only code). */
		printk(KERN_DEBUG "cyc startup Z card %d, channel %d, "
			"base_addr %p\n", card, channel, card->base_addr);
#endif
		spin_lock_irqsave(&card->card_lock, flags);

		cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE);
#ifdef Z_WAKE
#ifdef CONFIG_CYZ_INTR
		cy_writel(&ch_ctrl->intr_enable,
			C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
			C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD);
#else
		cy_writel(&ch_ctrl->intr_enable,
			C_IN_IOCTLW | C_IN_MDCD);
#endif				/* CONFIG_CYZ_INTR */
#else
#ifdef CONFIG_CYZ_INTR
		cy_writel(&ch_ctrl->intr_enable,
			C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
			C_IN_RXNNDT | C_IN_MDCD);
#else
		cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD);
#endif				/* CONFIG_CYZ_INTR */
#endif				/* Z_WAKE */

		/* C_CM_IOCTL makes the firmware pick up the new settings */
		retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
		if (retval != 0) {
			printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was "
				"%x\n", info->line, retval);
		}

		/* Flush RX buffers before raising DTR and RTS */
		retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L);
		if (retval != 0) {
			printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was "
				"%x\n", info->line, retval);
		}

		/* set timeout !!! */
		/* set RTS and DTR !!! */
		tty_port_raise_dtr_rts(&info->port);

		/* enable send, recv, modem !!! */
	}

	info->port.flags |= ASYNC_INITIALIZED;

	clear_bit(TTY_IO_ERROR, &tty->flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	info->breakon = info->breakoff = 0;
	memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
	info->idle_stats.in_use =
	info->idle_stats.recv_idle =
	info->idle_stats.xmit_idle = jiffies;

	spin_unlock_irqrestore(&card->card_lock, flags);

#ifdef CY_DEBUG_OPEN
	printk(KERN_DEBUG "cyc startup done\n");
#endif
	return 0;

errout:
	spin_unlock_irqrestore(&card->card_lock, flags);
	free_page(page);
	return retval;
}				/* startup */
/*
 * Kick the transmitter of @info: on Cyclom-Y, re-enable the TxRdy
 * interrupt; on Cyclades-Z in interrupt mode, ask the firmware for a
 * TX interrupt (polled mode needs no kick -- cyz_poll will find the
 * data).
 */
static void start_xmit(struct cyclades_port *info)
{
	struct cyclades_card *card = info->card;
	unsigned long flags;
	int channel = info->line - card->first_line;

	if (!cy_is_Z(card)) {
		spin_lock_irqsave(&card->card_lock, flags);
		cyy_writeb(info, CyCAR, channel & 0x03);
		cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
		spin_unlock_irqrestore(&card->card_lock, flags);
	} else {
#ifdef CONFIG_CYZ_INTR
		int retval;

		spin_lock_irqsave(&card->card_lock, flags);
		retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L);
		if (retval != 0) {
			printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was "
				"%x\n", info->line, retval);
		}
		spin_unlock_irqrestore(&card->card_lock, flags);
#else				/* CONFIG_CYZ_INTR */
		/* Don't have to do anything at this time */
#endif				/* CONFIG_CYZ_INTR */
	}
}				/* start_xmit */
/*
* This routine shuts down a serial port; interrupts are disabled,
* and DTR is dropped if the hangup on close termio flag is on.
*/
/*
 * Shut down a port when the last user closes it: free the transmit
 * page, optionally drop DTR/RTS (HUPCL), disable the receiver (Y only)
 * and clear ASYNC_INITIALIZED.  No-op if the port was never started.
 */
static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
{
	struct cyclades_card *card;
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	card = info->card;
	if (!cy_is_Z(card)) {
		spin_lock_irqsave(&card->card_lock, flags);

		/* Clear delta_msr_wait queue to avoid mem leaks. */
		wake_up_interruptible(&info->port.delta_msr_wait);

		if (info->port.xmit_buf) {
			unsigned char *temp;
			temp = info->port.xmit_buf;
			info->port.xmit_buf = NULL;
			free_page((unsigned long)temp);
		}
		if (tty->termios.c_cflag & HUPCL)
			cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);

		cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
		/* it may be appropriate to clear _XMIT at
		   some later date (after testing)!!! */

		set_bit(TTY_IO_ERROR, &tty->flags);
		info->port.flags &= ~ASYNC_INITIALIZED;
		spin_unlock_irqrestore(&card->card_lock, flags);
	} else {
#ifdef CY_DEBUG_OPEN
		int channel = info->line - card->first_line;
		printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, "
			"base_addr %p\n", card, channel, card->base_addr);
#endif
		/* firmware gone: nothing to tell the hardware */
		if (!cyz_is_loaded(card))
			return;

		spin_lock_irqsave(&card->card_lock, flags);

		if (info->port.xmit_buf) {
			unsigned char *temp;
			temp = info->port.xmit_buf;
			info->port.xmit_buf = NULL;
			free_page((unsigned long)temp);
		}

		if (tty->termios.c_cflag & HUPCL)
			tty_port_lower_dtr_rts(&info->port);

		set_bit(TTY_IO_ERROR, &tty->flags);
		info->port.flags &= ~ASYNC_INITIALIZED;

		spin_unlock_irqrestore(&card->card_lock, flags);
	}

#ifdef CY_DEBUG_OPEN
	printk(KERN_DEBUG "cyc shutdown done\n");
#endif
}				/* shutdown */
/*
* ------------------------------------------------------------
* cy_open() and friends
* ------------------------------------------------------------
*/
/*
 * This routine is called whenever a serial port is opened.  It
 * performs the serial-specific initialization for the tty structure.
 *
 * Maps the tty index onto a card/port pair, refuses ports that do not
 * exist or (Z cards) whose firmware is not loaded, increments the port
 * use count, runs cy_startup() and then blocks until the port is ready.
 * Returns 0 on success or a negative errno.
 */
static int cy_open(struct tty_struct *tty, struct file *filp)
{
	struct cyclades_port *info;
	unsigned int i, line = tty->index;
	int retval;

	/* Locate the card whose line range contains this tty index. */
	for (i = 0; i < NR_CARDS; i++)
		if (line < cy_card[i].first_line + cy_card[i].nports &&
				line >= cy_card[i].first_line)
			break;
	if (i >= NR_CARDS)
		return -ENODEV;
	info = &cy_card[i].ports[line - cy_card[i].first_line];
	if (info->line < 0)
		return -ENODEV;

	/* If the card's firmware hasn't been loaded,
	   treat it as absent from the system.  This
	   will make the user pay attention.
	 */
	if (cy_is_Z(info->card)) {
		struct cyclades_card *cinfo = info->card;
		struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;

		if (!cyz_is_loaded(cinfo)) {
			/* A ZFIRM_HLT signature on a ZE_V1 board with the FPGA
			   loaded means the firmware halted itself (power
			   supply insufficient for this many ports). */
			if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
					readl(&firm_id->signature) ==
					ZFIRM_HLT) {
				printk(KERN_ERR "cyc:Cyclades-Z Error: you "
					"need an external power supply for "
					"this number of ports.\nFirmware "
					"halted.\n");
			} else {
				printk(KERN_ERR "cyc:Cyclades-Z firmware not "
					"yet loaded\n");
			}
			return -ENODEV;
		}
#ifdef CONFIG_CYZ_INTR
		else {
		/* In case this Z board is operating in interrupt mode, its
		   interrupts should be enabled as soon as the first open
		   happens to one of its ports. */
			if (!cinfo->intr_enabled) {
				u16 intr;

				/* Enable interrupts on the PLX chip */
				intr = readw(&cinfo->ctl_addr.p9060->
						intr_ctrl_stat) | 0x0900;
				cy_writew(&cinfo->ctl_addr.p9060->
						intr_ctrl_stat, intr);
				/* Enable interrupts on the FW */
				retval = cyz_issue_cmd(cinfo, 0,
						C_CM_IRQ_ENBL, 0L);
				if (retval != 0) {
					printk(KERN_ERR "cyc:IRQ enable retval "
						"was %x\n", retval);
				}
				cinfo->intr_enabled = 1;
			}
		}
#endif				/* CONFIG_CYZ_INTR */
		/* Make sure this Z port really exists in hardware */
		if (info->line > (cinfo->first_line + cinfo->nports - 1))
			return -ENODEV;
	}
#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line);
#endif
	tty->driver_data = info;
	if (serial_paranoia_check(info, tty->name, "cy_open"))
		return -ENODEV;

#ifdef CY_DEBUG_OPEN
	printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
			info->port.count);
#endif
	info->port.count++;
#ifdef CY_DEBUG_COUNT
	printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
		current->pid, info->port.count);
#endif
	/*
	 * If the port is the middle of closing, bail out now
	 */
	if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
		wait_event_interruptible_tty(tty, info->port.close_wait,
				!(info->port.flags & ASYNC_CLOSING));
		return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
	}

	/*
	 * Start up serial port
	 */
	retval = cy_startup(info, tty);
	if (retval)
		return retval;

	/* Standard serial open semantics: wait for carrier etc. */
	retval = tty_port_block_til_ready(&info->port, tty, filp);
	if (retval) {
#ifdef CY_DEBUG_OPEN
		printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready "
			"with %d\n", retval);
#endif
		return retval;
	}

	info->throttle = 0;
	tty_port_tty_set(&info->port, tty);

#ifdef CY_DEBUG_OPEN
	printk(KERN_DEBUG "cyc:cy_open done\n");
#endif
	return 0;
}				/* cy_open */
/*
 * cy_wait_until_sent() --- wait until the transmitter is empty
 *
 * Polls the CD1400 transmit-ready condition (Y cards only; there is no
 * equivalent poll here for Z cards) in char_time steps until the FIFO
 * drains, the caller's timeout expires, or a signal interrupts the
 * sleep, then sleeps one extra character time to let the last character
 * leave the wire.
 */
static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct cyclades_card *card;
	struct cyclades_port *info = tty->driver_data;
	unsigned long orig_jiffies;
	int char_time;

	if (serial_paranoia_check(info, tty->name, "cy_wait_until_sent"))
		return;

	if (info->xmit_fifo_size == 0)
		return;		/* Just in case.... */

	orig_jiffies = jiffies;
	/*
	 * Set the check interval to be 1/5 of the estimated time to
	 * send a single character, and make it at least 1.  The check
	 * interval should also be less than the timeout.
	 *
	 * Note: we have to use pretty tight timings here to satisfy
	 * the NIST-PCTS.
	 */
	char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
	char_time = char_time / 5;
	if (char_time <= 0)
		char_time = 1;
	if (timeout < 0)
		timeout = 0;
	if (timeout)
		char_time = min(char_time, timeout);
	/*
	 * If the transmitter hasn't cleared in twice the approximate
	 * amount of time to send the entire FIFO, it probably won't
	 * ever clear.  This assumes the UART isn't doing flow
	 * control, which is currently the case.  Hence, if it ever
	 * takes longer than info->timeout, this is probably due to a
	 * UART bug of some kind.  So, we clamp the timeout parameter at
	 * 2*info->timeout.
	 */
	if (!timeout || timeout > 2 * info->timeout)
		timeout = 2 * info->timeout;

	card = info->card;
	if (!cy_is_Z(card)) {
		/* Sleep in char_time quanta while the chip still reports
		   transmit-ready; a delivered signal aborts the wait. */
		while (cyy_readb(info, CySRER) & CyTxRdy) {
			if (msleep_interruptible(jiffies_to_msecs(char_time)))
				break;
			if (timeout && time_after(jiffies, orig_jiffies +
					timeout))
				break;
		}
	}
	/* Run one more char cycle */
	msleep_interruptible(jiffies_to_msecs(char_time * 5));
}
/*
 * cy_flush_buffer() - discard all data pending in the transmit path.
 *
 * Empties the driver's software transmit ring and, on Cyclades-Z cards,
 * also asks the firmware to flush its on-board transmit buffer; finally
 * wakes any writers waiting for room.
 */
static void cy_flush_buffer(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	int channel, retval;
	unsigned long flags;

#ifdef CY_DEBUG_IO
	printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
		return;

	card = info->card;
	channel = info->line - card->first_line;

	/* Drop everything queued in the software ring. */
	spin_lock_irqsave(&card->card_lock, flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	spin_unlock_irqrestore(&card->card_lock, flags);

	if (cy_is_Z(card)) {	/* If it is a Z card, flush the on-board
				   buffers as well */
		spin_lock_irqsave(&card->card_lock, flags);
		retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
		if (retval != 0)
			printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
				"was %x\n", info->line, retval);
		spin_unlock_irqrestore(&card->card_lock, flags);
	}

	tty_wakeup(tty);
}				/* cy_flush_buffer */
/*
 * cy_do_close() - tty_port shutdown callback.
 *
 * Stops the receiver (Y cards) or waits for the firmware's transmit
 * buffer to drain (Z cards, when built with Z_WAKE), so pending output
 * can leave the wire before cy_shutdown() tears the port down.  The
 * card lock is deliberately dropped around the sleeping calls.
 */
static void cy_do_close(struct tty_port *port)
{
	struct cyclades_port *info = container_of(port, struct cyclades_port,
								port);
	struct cyclades_card *card;
	unsigned long flags;
	int channel;

	card = info->card;
	channel = info->line - card->first_line;
	spin_lock_irqsave(&card->card_lock, flags);

	if (!cy_is_Z(card)) {
		/* Stop accepting input */
		cyy_writeb(info, CyCAR, channel & 0x03);
		cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData);
		if (info->port.flags & ASYNC_INITIALIZED) {
			/* Waiting for on-board buffers to be empty before
			   closing the port */
			/* cy_wait_until_sent() sleeps, so the spinlock must
			   not be held across it. */
			spin_unlock_irqrestore(&card->card_lock, flags);
			cy_wait_until_sent(port->tty, info->timeout);
			spin_lock_irqsave(&card->card_lock, flags);
		}
	} else {
#ifdef Z_WAKE
		/* Waiting for on-board buffers to be empty before closing
		   the port */
		struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
		int retval;

		if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) {
			retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L);
			if (retval != 0) {
				printk(KERN_DEBUG "cyc:cy_close retval on "
					"ttyC%d was %x\n", info->line, retval);
			}
			/* Sleep until the firmware signals shutdown_wait;
			   lock dropped across the wait. */
			spin_unlock_irqrestore(&card->card_lock, flags);
			wait_for_completion_interruptible(&info->shutdown_wait);
			spin_lock_irqsave(&card->card_lock, flags);
		}
#endif
	}
	spin_unlock_irqrestore(&card->card_lock, flags);
	cy_shutdown(info, port->tty);
}
/*
* This routine is called when a particular tty device is closed.
*/
static void cy_close(struct tty_struct *tty, struct file *filp)
{
struct cyclades_port *info = tty->driver_data;
if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
return;
tty_port_close(&info->port, tty, filp);
} /* cy_close */
/* This routine gets called when tty_write has put something into
 * the write_queue.  The characters may come from user space or
 * kernel space.
 *
 * This routine will return the number of characters actually
 * accepted for writing.
 *
 * If the port is not already transmitting stuff, start it off by
 * enabling interrupts.  The interrupt service routine will then
 * ensure that the characters are sent.
 * If the port is already active, there is no need to kick it.
 */
static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct cyclades_port *info = tty->driver_data;
	unsigned long flags;
	int chunk, written = 0;

#ifdef CY_DEBUG_IO
	printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_write"))
		return 0;
	if (!info->port.xmit_buf)
		return 0;

	spin_lock_irqsave(&info->card->card_lock, flags);
	for (;;) {
		/* Each pass copies at most the free space in the ring
		   (one slot kept free) and at most the distance to the
		   physical end of the buffer. */
		chunk = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1));
		chunk = min(chunk, (int)(SERIAL_XMIT_SIZE - info->xmit_head));
		if (chunk <= 0)
			break;

		memcpy(info->port.xmit_buf + info->xmit_head, buf, chunk);
		info->xmit_head = (info->xmit_head + chunk) &
				(SERIAL_XMIT_SIZE - 1);
		info->xmit_cnt += chunk;

		buf += chunk;
		count -= chunk;
		written += chunk;
	}
	spin_unlock_irqrestore(&info->card->card_lock, flags);

	info->idle_stats.xmit_bytes += written;
	info->idle_stats.xmit_idle = jiffies;

	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped)
		start_xmit(info);

	return written;
}				/* cy_write */
/*
 * This routine is called by the kernel to write a single
 * character to the tty device.  If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.  If there is no room
 * in the queue, the character is ignored.
 *
 * Returns 1 when the character was queued, 0 otherwise.
 */
static int cy_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct cyclades_port *info = tty->driver_data;
	unsigned long flags;
	int queued = 0;

#ifdef CY_DEBUG_IO
	printk(KERN_DEBUG "cyc:cy_put_char ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_put_char"))
		return 0;
	if (!info->port.xmit_buf)
		return 0;

	spin_lock_irqsave(&info->card->card_lock, flags);
	if (info->xmit_cnt < (int)(SERIAL_XMIT_SIZE - 1)) {
		/* Ring has room (one slot always kept free). */
		info->port.xmit_buf[info->xmit_head++] = ch;
		info->xmit_head &= SERIAL_XMIT_SIZE - 1;
		info->xmit_cnt++;
		info->idle_stats.xmit_bytes++;
		info->idle_stats.xmit_idle = jiffies;
		queued = 1;
	}
	spin_unlock_irqrestore(&info->card->card_lock, flags);

	return queued;
}				/* cy_put_char */
/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 *
 * Kicks the transmitter if there is anything queued and the line is
 * not stopped.
 */
static void cy_flush_chars(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;

#ifdef CY_DEBUG_IO
	printk(KERN_DEBUG "cyc:cy_flush_chars ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_flush_chars"))
		return;
	if (info->xmit_cnt <= 0)
		return;
	if (tty->stopped || tty->hw_stopped)
		return;
	if (!info->port.xmit_buf)
		return;

	start_xmit(info);
}				/* cy_flush_chars */
/*
 * This routine returns the numbers of characters the tty driver
 * will accept for queuing to be written.  This number is subject
 * to change as output buffers get emptied, or if the output flow
 * control is activated.
 */
static int cy_write_room(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	int space;

#ifdef CY_DEBUG_IO
	printk(KERN_DEBUG "cyc:cy_write_room ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_write_room"))
		return 0;

	/* One slot is always kept free to distinguish full from empty. */
	space = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
	return space < 0 ? 0 : space;
}				/* cy_write_room */
/*
 * cy_chars_in_buffer() - number of bytes still queued for transmission.
 *
 * Always counts the driver's software ring; when built with
 * Z_EXT_CHARS_IN_BUFFER it additionally adds whatever is pending in the
 * Z firmware's on-board circular transmit buffer.  Note the braces
 * intentionally span the #ifdef so both configurations compile.
 */
static int cy_chars_in_buffer(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;

	if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer"))
		return 0;

#ifdef Z_EXT_CHARS_IN_BUFFER
	if (!cy_is_Z(info->card)) {
#endif				/* Z_EXT_CHARS_IN_BUFFER */
#ifdef CY_DEBUG_IO
		printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
			info->line, info->xmit_cnt);
#endif
		return info->xmit_cnt;
#ifdef Z_EXT_CHARS_IN_BUFFER
	} else {
		struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
		int char_count;
		__u32 tx_put, tx_get, tx_bufsize;

		tx_get = readl(&buf_ctrl->tx_get);
		tx_put = readl(&buf_ctrl->tx_put);
		tx_bufsize = readl(&buf_ctrl->tx_bufsize);
		/* Circular on-board buffer: handle put-pointer wraparound. */
		if (tx_put >= tx_get)
			char_count = tx_put - tx_get;
		else
			char_count = tx_put - tx_get + tx_bufsize;
#ifdef CY_DEBUG_IO
		printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
			info->line, info->xmit_cnt + char_count);
#endif
		return info->xmit_cnt + char_count;
	}
#endif				/* Z_EXT_CHARS_IN_BUFFER */
}				/* cy_chars_in_buffer */
/*
* ------------------------------------------------------------
* cy_ioctl() and friends
* ------------------------------------------------------------
*/
/*
 * cyy_baud_calc() - compute CD1400 clock-option and baud-rate-period
 * values for the requested baud rate.
 *
 * Stores identical Tx/Rx settings into info->{t,r}co and
 * info->{t,r}bpr; a baud of zero clears all four fields.
 */
static void cyy_baud_calc(struct cyclades_port *info, __u32 baud)
{
	/* Rev. J and later CD1400s run from a 60 MHz clock, older
	   parts from 25 MHz. */
	__u32 clock = (info->chip_rev >= CD1400_REV_J) ? 60000000 :
			25000000;
	int prescaler = 4;
	int presc_val = 2048;
	int rate_divisor;

	if (baud == 0) {
		info->tbpr = info->tco = info->rbpr = info->rco = 0;
		return;
	}

	/* Walk down the prescaler options until the resulting divisor
	   exceeds 63 (or we run out of options). */
	while (prescaler && clock / presc_val / baud <= 63) {
		prescaler--;
		presc_val >>= 2;
	}

	/* Round the divisor to nearest and clamp to the 8-bit register. */
	rate_divisor = (clock / presc_val * 2 / baud + 1) / 2;
	if (rate_divisor > 255)
		rate_divisor = 255;

	info->tbpr = info->rbpr = rate_divisor;
	info->tco = info->rco = prescaler;
}
/*
 * This routine finds or computes the various line characteristics.
 * It used to be called config_setup
 *
 * Translates the tty's termios (baud, character size, stop bits,
 * parity, flow control, CLOCAL) into hardware state: directly into the
 * CD1400 registers on Y cards, or via firmware channel-control commands
 * on Z cards.  A baud rate of zero drops DTR (hang-up convention); any
 * other rate raises it.
 */
static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
{
	struct cyclades_card *card;
	unsigned long flags;
	int channel;
	unsigned cflag, iflag;
	int baud, baud_rate = 0;
	int i;

	if (info->line == -1)
		return;

	cflag = tty->termios.c_cflag;
	iflag = tty->termios.c_iflag;

	/*
	 * Set up the tty->alt_speed kludge
	 */
	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
		tty->alt_speed = 57600;
	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
		tty->alt_speed = 115200;
	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
		tty->alt_speed = 230400;
	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
		tty->alt_speed = 460800;

	card = info->card;
	channel = info->line - card->first_line;

	if (!cy_is_Z(card)) {
		u32 cflags;

		/* baud rate */
		baud = tty_get_baud_rate(tty);
		/* 38400 + ASYNC_SPD_CUST selects the custom-divisor rate. */
		if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
				ASYNC_SPD_CUST) {
			if (info->custom_divisor)
				baud_rate = info->baud / info->custom_divisor;
			else
				baud_rate = info->baud;
		} else if (baud > CD1400_MAX_SPEED) {
			baud = CD1400_MAX_SPEED;
		}
		/* find the baud index */
		for (i = 0; i < 20; i++) {
			if (baud == baud_table[i])
				break;
		}
		if (i == 20)
			i = 19;	/* CD1400_MAX_SPEED */

		if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
				ASYNC_SPD_CUST) {
			cyy_baud_calc(info, baud_rate);
		} else {
			if (info->chip_rev >= CD1400_REV_J) {
				/* It is a CD1400 rev. J or later */
				info->tbpr = baud_bpr_60[i];	/* Tx BPR */
				info->tco = baud_co_60[i];	/* Tx CO */
				info->rbpr = baud_bpr_60[i];	/* Rx BPR */
				info->rco = baud_co_60[i];	/* Rx CO */
			} else {
				info->tbpr = baud_bpr_25[i];	/* Tx BPR */
				info->tco = baud_co_25[i];	/* Tx CO */
				info->rbpr = baud_bpr_25[i];	/* Rx BPR */
				info->rco = baud_co_25[i];	/* Rx CO */
			}
		}
		if (baud_table[i] == 134) {
			/* get it right for 134.5 baud */
			info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
					2;
		} else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
				ASYNC_SPD_CUST) {
			info->timeout = (info->xmit_fifo_size * HZ * 15 /
					baud_rate) + 2;
		} else if (baud_table[i]) {
			info->timeout = (info->xmit_fifo_size * HZ * 15 /
					baud_table[i]) + 2;
			/* this needs to be propagated into the card info */
		} else {
			info->timeout = 0;
		}
		/* By tradition (is it a standard?) a baud rate of zero
		   implies the line should be/has been closed.  A bit
		   later in this routine such a test is performed. */

		/* byte size and parity */
		info->cor5 = 0;
		info->cor4 = 0;
		/* receive threshold */
		info->cor3 = (info->default_threshold ?
				info->default_threshold : baud_cor3[i]);
		info->cor2 = CyETC;
		switch (cflag & CSIZE) {
		case CS5:
			info->cor1 = Cy_5_BITS;
			break;
		case CS6:
			info->cor1 = Cy_6_BITS;
			break;
		case CS7:
			info->cor1 = Cy_7_BITS;
			break;
		case CS8:
			info->cor1 = Cy_8_BITS;
			break;
		}
		if (cflag & CSTOPB)
			info->cor1 |= Cy_2_STOP;
		if (cflag & PARENB) {
			if (cflag & PARODD)
				info->cor1 |= CyPARITY_O;
			else
				info->cor1 |= CyPARITY_E;
		} else
			info->cor1 |= CyPARITY_NONE;

		/* CTS flow control flag */
		if (cflag & CRTSCTS) {
			info->port.flags |= ASYNC_CTS_FLOW;
			info->cor2 |= CyCtsAE;
		} else {
			info->port.flags &= ~ASYNC_CTS_FLOW;
			info->cor2 &= ~CyCtsAE;
		}
		if (cflag & CLOCAL)
			info->port.flags &= ~ASYNC_CHECK_CD;
		else
			info->port.flags |= ASYNC_CHECK_CD;

	 /***********************************************
	    The hardware option, CyRtsAO, presents RTS when
	    the chip has characters to send.  Since most modems
	    use RTS as reverse (inbound) flow control, this
	    option is not used.  If inbound flow control is
	    necessary, DTR can be programmed to provide the
	    appropriate signals for use with a non-standard
	    cable.  Contact Marcio Saito for details.
	  ***********************************************/

		/* NOTE(review): only the low two bits go into CyCAR —
		   presumably they select the channel within a CD1400;
		   confirm against the datasheet. */
		channel &= 0x03;

		spin_lock_irqsave(&card->card_lock, flags);
		cyy_writeb(info, CyCAR, channel);

		/* tx and rx baud rate */

		cyy_writeb(info, CyTCOR, info->tco);
		cyy_writeb(info, CyTBPR, info->tbpr);
		cyy_writeb(info, CyRCOR, info->rco);
		cyy_writeb(info, CyRBPR, info->rbpr);

		/* set line characteristics  according configuration */

		cyy_writeb(info, CySCHR1, START_CHAR(tty));
		cyy_writeb(info, CySCHR2, STOP_CHAR(tty));
		cyy_writeb(info, CyCOR1, info->cor1);
		cyy_writeb(info, CyCOR2, info->cor2);
		cyy_writeb(info, CyCOR3, info->cor3);
		cyy_writeb(info, CyCOR4, info->cor4);
		cyy_writeb(info, CyCOR5, info->cor5);

		cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
				CyCOR3ch);

		/* !!! Is this needed? */
		cyy_writeb(info, CyCAR, channel);
		cyy_writeb(info, CyRTPR,
			(info->default_timeout ? info->default_timeout : 0x02));
		/* 10ms rx timeout */

		cflags = CyCTS;
		if (!C_CLOCAL(tty))
			cflags |= CyDSR | CyRI | CyDCD;
		/* without modem intr */
		cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh);
		/* act on 1->0 modem transitions */
		if ((cflag & CRTSCTS) && info->rflow)
			cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]);
		else
			cyy_writeb(info, CyMCOR1, cflags);
		/* act on 0->1 modem transitions */
		cyy_writeb(info, CyMCOR2, cflags);

		if (i == 0)	/* baud rate is zero, turn off line */
			cyy_change_rts_dtr(info, 0, TIOCM_DTR);
		else
			cyy_change_rts_dtr(info, TIOCM_DTR, 0);

		clear_bit(TTY_IO_ERROR, &tty->flags);
		spin_unlock_irqrestore(&card->card_lock, flags);

	} else {
		struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
		__u32 sw_flow;
		int retval;

		if (!cyz_is_loaded(card))
			return;

		/* baud rate */
		baud = tty_get_baud_rate(tty);
		if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
				ASYNC_SPD_CUST) {
			if (info->custom_divisor)
				baud_rate = info->baud / info->custom_divisor;
			else
				baud_rate = info->baud;
		} else if (baud > CYZ_MAX_SPEED) {
			baud = CYZ_MAX_SPEED;
		}
		cy_writel(&ch_ctrl->comm_baud, baud);

		if (baud == 134) {
			/* get it right for 134.5 baud */
			info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
					2;
		} else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
				ASYNC_SPD_CUST) {
			info->timeout = (info->xmit_fifo_size * HZ * 15 /
					baud_rate) + 2;
		} else if (baud) {
			info->timeout = (info->xmit_fifo_size * HZ * 15 /
					baud) + 2;
			/* this needs to be propagated into the card info */
		} else {
			info->timeout = 0;
		}

		/* byte size and parity */
		switch (cflag & CSIZE) {
		case CS5:
			cy_writel(&ch_ctrl->comm_data_l, C_DL_CS5);
			break;
		case CS6:
			cy_writel(&ch_ctrl->comm_data_l, C_DL_CS6);
			break;
		case CS7:
			cy_writel(&ch_ctrl->comm_data_l, C_DL_CS7);
			break;
		case CS8:
			cy_writel(&ch_ctrl->comm_data_l, C_DL_CS8);
			break;
		}
		if (cflag & CSTOPB) {
			cy_writel(&ch_ctrl->comm_data_l,
				  readl(&ch_ctrl->comm_data_l) | C_DL_2STOP);
		} else {
			cy_writel(&ch_ctrl->comm_data_l,
				  readl(&ch_ctrl->comm_data_l) | C_DL_1STOP);
		}
		if (cflag & PARENB) {
			if (cflag & PARODD)
				cy_writel(&ch_ctrl->comm_parity, C_PR_ODD);
			else
				cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN);
		} else
			cy_writel(&ch_ctrl->comm_parity, C_PR_NONE);

		/* CTS flow control flag */
		if (cflag & CRTSCTS) {
			cy_writel(&ch_ctrl->hw_flow,
				readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS);
		} else {
			cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) &
					~(C_RS_CTS | C_RS_RTS));
		}
		/* As the HW flow control is done in firmware, the driver
		   doesn't need to care about it */
		info->port.flags &= ~ASYNC_CTS_FLOW;

		/* XON/XOFF/XANY flow control flags */
		sw_flow = 0;
		if (iflag & IXON) {
			sw_flow |= C_FL_OXX;
			if (iflag & IXANY)
				sw_flow |= C_FL_OIXANY;
		}
		cy_writel(&ch_ctrl->sw_flow, sw_flow);

		/* Tell the firmware to apply the channel settings. */
		retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
		if (retval != 0) {
			printk(KERN_ERR "cyc:set_line_char retval on ttyC%d "
				"was %x\n", info->line, retval);
		}

		/* CD sensitivity */
		if (cflag & CLOCAL)
			info->port.flags &= ~ASYNC_CHECK_CD;
		else
			info->port.flags |= ASYNC_CHECK_CD;

		if (baud == 0) {	/* baud rate is zero, turn off line */
			cy_writel(&ch_ctrl->rs_control,
				  readl(&ch_ctrl->rs_control) & ~C_RS_DTR);
#ifdef CY_DEBUG_DTR
			printk(KERN_DEBUG "cyc:set_line_char dropping Z DTR\n");
#endif
		} else {
			cy_writel(&ch_ctrl->rs_control,
				  readl(&ch_ctrl->rs_control) | C_RS_DTR);
#ifdef CY_DEBUG_DTR
			printk(KERN_DEBUG "cyc:set_line_char raising Z DTR\n");
#endif
		}

		/* Apply the modem-control (DTR) change. */
		retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
		if (retval != 0) {
			printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d "
				"was %x\n", info->line, retval);
		}

		clear_bit(TTY_IO_ERROR, &tty->flags);
	}
}				/* set_line_char */
/*
 * cy_get_serial_info() - TIOCGSERIAL handler.
 *
 * Fills a serial_struct snapshot of the port's configuration and copies
 * it out to user space.  Returns 0 or -EFAULT.
 */
static int cy_get_serial_info(struct cyclades_port *info,
		struct serial_struct __user *retinfo)
{
	struct cyclades_card *cinfo = info->card;
	struct serial_struct tmp;

	memset(&tmp, 0, sizeof(tmp));
	tmp.type = info->type;
	tmp.line = info->line;
	/* Encode card index in the high byte, channel in the low byte. */
	tmp.port = (cinfo - cy_card) * 0x100 + info->line -
			cinfo->first_line;
	tmp.irq = cinfo->irq;
	tmp.flags = info->port.flags;
	tmp.close_delay = info->port.close_delay;
	tmp.closing_wait = info->port.closing_wait;
	tmp.baud_base = info->baud;
	tmp.custom_divisor = info->custom_divisor;
	tmp.hub6 = 0;		/*!!! */

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}
/*
 * cy_set_serial_info() - TIOCSSERIAL handler.
 *
 * Copies a serial_struct from user space and applies it under the port
 * mutex.  Callers without CAP_SYS_ADMIN may only change the
 * ASYNC_USR_MASK flags and the custom divisor; anything else returns
 * -EPERM.  If the port is already initialized the new line settings
 * take effect immediately, otherwise the port is started up.
 */
static int
cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
		struct serial_struct __user *new_info)
{
	struct serial_struct new_serial;
	int ret;

	if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
		return -EFAULT;

	mutex_lock(&info->port.mutex);
	if (!capable(CAP_SYS_ADMIN)) {
		/* Refuse any change to privileged fields. */
		if (new_serial.close_delay != info->port.close_delay ||
				new_serial.baud_base != info->baud ||
				(new_serial.flags & ASYNC_FLAGS &
					~ASYNC_USR_MASK) !=
				(info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK))
		{
			mutex_unlock(&info->port.mutex);
			return -EPERM;
		}
		info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) |
				(new_serial.flags & ASYNC_USR_MASK);
		info->baud = new_serial.baud_base;
		info->custom_divisor = new_serial.custom_divisor;
		goto check_and_exit;
	}

	/*
	 * OK, past this point, all the error checking has been done.
	 * At this point, we start making changes.....
	 */
	info->baud = new_serial.baud_base;
	info->custom_divisor = new_serial.custom_divisor;
	info->port.flags = (info->port.flags & ~ASYNC_FLAGS) |
			(new_serial.flags & ASYNC_FLAGS);
	/* User-space values are in centiseconds; convert to jiffies. */
	info->port.close_delay = new_serial.close_delay * HZ / 100;
	info->port.closing_wait = new_serial.closing_wait * HZ / 100;

check_and_exit:
	if (info->port.flags & ASYNC_INITIALIZED) {
		cy_set_line_char(info, tty);
		ret = 0;
	} else {
		ret = cy_startup(info, tty);
	}
	mutex_unlock(&info->port.mutex);
	return ret;
}				/* set_serial_info */
/*
 * get_lsr_info - get line status register info
 *
 * Purpose: Let user call ioctl() to get info when the UART physically
 * is emptied.  On bus types like RS485, the transmitter must
 * release the bus after transmitting.  This must be done when
 * the transmit shift register is empty, not be done when the
 * transmit holding register is empty.  This functionality
 * allows an RS485 driver to be written in user space.
 *
 * Only implemented for Y cards; Z cards return -EINVAL.
 */
static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
{
	struct cyclades_card *card = info->card;
	unsigned int result;
	unsigned long flags;
	u8 txbits;

	if (cy_is_Z(card))
		return -EINVAL;	/* Not supported yet */

	spin_lock_irqsave(&card->card_lock, flags);
	txbits = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty);
	spin_unlock_irqrestore(&card->card_lock, flags);

	/* Transmitter idle when neither tx bit is set. */
	result = txbits ? 0 : TIOCSER_TEMT;
	return put_user(result, value);
}
/*
 * cy_tiocmget() - TIOCMGET: report modem control/status lines.
 *
 * Y cards: reads MSVR1/MSVR2 from the CD1400, swapping the reported
 * RTS/DTR bits when the board wires them inverted (info->rtsdtr_inv).
 * Z cards: reads the firmware's rs_status word.  Returns a TIOCM_* bit
 * mask, or -ENODEV on failure.
 */
static int cy_tiocmget(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	int result;

	if (serial_paranoia_check(info, tty->name, __func__))
		return -ENODEV;

	card = info->card;
	if (!cy_is_Z(card)) {
		unsigned long flags;
		int channel = info->line - card->first_line;
		u8 status;

		spin_lock_irqsave(&card->card_lock, flags);
		cyy_writeb(info, CyCAR, channel & 0x03);
		status = cyy_readb(info, CyMSVR1);
		status |= cyy_readb(info, CyMSVR2);
		spin_unlock_irqrestore(&card->card_lock, flags);

		/* Some boards have RTS and DTR wired the other way round. */
		if (info->rtsdtr_inv) {
			result = ((status & CyRTS) ? TIOCM_DTR : 0) |
				((status & CyDTR) ? TIOCM_RTS : 0);
		} else {
			result = ((status & CyRTS) ? TIOCM_RTS : 0) |
				((status & CyDTR) ? TIOCM_DTR : 0);
		}
		result |= ((status & CyDCD) ? TIOCM_CAR : 0) |
			((status & CyRI) ? TIOCM_RNG : 0) |
			((status & CyDSR) ? TIOCM_DSR : 0) |
			((status & CyCTS) ? TIOCM_CTS : 0);
	} else {
		u32 lstatus;

		if (!cyz_is_loaded(card)) {
			result = -ENODEV;
			goto end;
		}

		lstatus = readl(&info->u.cyz.ch_ctrl->rs_status);
		result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) |
			((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) |
			((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) |
			((lstatus & C_RS_RI) ? TIOCM_RNG : 0) |
			((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) |
			((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
	}
end:	/* only jumped to from the Z branch; Y path falls through */
	return result;
}				/* cy_tiomget */
/*
 * cy_tiocmset() - TIOCMSET/TIOCMBIS/TIOCMBIC: change RTS/DTR.
 *
 * Y cards: delegates to cyy_change_rts_dtr() under the card lock.
 * Z cards: updates the firmware's rs_control word and issues
 * C_CM_IOCTLM so the change takes effect.  Always returns 0 unless the
 * port or firmware is missing.
 */
static int
cy_tiocmset(struct tty_struct *tty,
		unsigned int set, unsigned int clear)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	unsigned long flags;

	if (serial_paranoia_check(info, tty->name, __func__))
		return -ENODEV;

	card = info->card;
	if (!cy_is_Z(card)) {
		spin_lock_irqsave(&card->card_lock, flags);
		cyy_change_rts_dtr(info, set, clear);
		spin_unlock_irqrestore(&card->card_lock, flags);
	} else {
		struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
		int retval, channel = info->line - card->first_line;
		u32 rs;

		if (!cyz_is_loaded(card))
			return -ENODEV;

		spin_lock_irqsave(&card->card_lock, flags);
		/* Read-modify-write the firmware's line-control word. */
		rs = readl(&ch_ctrl->rs_control);
		if (set & TIOCM_RTS)
			rs |= C_RS_RTS;
		if (clear & TIOCM_RTS)
			rs &= ~C_RS_RTS;
		if (set & TIOCM_DTR) {
			rs |= C_RS_DTR;
#ifdef CY_DEBUG_DTR
			printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n");
#endif
		}
		if (clear & TIOCM_DTR) {
			rs &= ~C_RS_DTR;
#ifdef CY_DEBUG_DTR
			printk(KERN_DEBUG "cyc:set_modem_info clearing "
					"Z DTR\n");
#endif
		}
		cy_writel(&ch_ctrl->rs_control, rs);
		/* Ask the firmware to apply the new modem-control state. */
		retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
		spin_unlock_irqrestore(&card->card_lock, flags);
		if (retval != 0) {
			printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d "
				"was %x\n", info->line, retval);
		}
	}
	return 0;
}
/*
 * cy_break() --- routine which turns the break handling on or off
 *
 * break_state == -1 starts a break, any other value ends it.
 * Y cards: break is generated by the transmit ISR, so only the
 * breakon/breakoff request flags are set here; if the transmitter is
 * idle it is kicked (the lock is dropped around start_xmit()).
 * Z cards: the firmware C_CM_SET_BREAK/C_CM_CLR_BREAK commands are
 * issued directly.
 */
static int cy_break(struct tty_struct *tty, int break_state)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	unsigned long flags;
	int retval = 0;

	if (serial_paranoia_check(info, tty->name, "cy_break"))
		return -EINVAL;

	card = info->card;

	spin_lock_irqsave(&card->card_lock, flags);
	if (!cy_is_Z(card)) {
		/* Let the transmit ISR take care of this (since it
		   requires stuffing characters into the output stream).
		 */
		if (break_state == -1) {
			if (!info->breakon) {
				info->breakon = 1;
				if (!info->xmit_cnt) {
					spin_unlock_irqrestore(&card->card_lock, flags);
					start_xmit(info);
					spin_lock_irqsave(&card->card_lock, flags);
				}
			}
		} else {
			if (!info->breakoff) {
				info->breakoff = 1;
				if (!info->xmit_cnt) {
					spin_unlock_irqrestore(&card->card_lock, flags);
					start_xmit(info);
					spin_lock_irqsave(&card->card_lock, flags);
				}
			}
		}
	} else {
		if (break_state == -1) {
			retval = cyz_issue_cmd(card,
				info->line - card->first_line,
				C_CM_SET_BREAK, 0L);
			if (retval != 0) {
				printk(KERN_ERR "cyc:cy_break (set) retval on "
					"ttyC%d was %x\n", info->line, retval);
			}
		} else {
			retval = cyz_issue_cmd(card,
				info->line - card->first_line,
				C_CM_CLR_BREAK, 0L);
			if (retval != 0) {
				printk(KERN_DEBUG "cyc:cy_break (clr) retval "
					"on ttyC%d was %x\n", info->line,
					retval);
			}
		}
	}
	spin_unlock_irqrestore(&card->card_lock, flags);
	return retval;
}				/* cy_break */
/*
 * set_threshold() - program the CD1400 receive-FIFO threshold.
 *
 * No-op on Z cards, where the threshold is managed by firmware.
 * Always returns 0.
 */
static int set_threshold(struct cyclades_port *info, unsigned long value)
{
	struct cyclades_card *card = info->card;
	unsigned long flags;

	if (cy_is_Z(card))
		return 0;

	info->cor3 = (info->cor3 & ~CyREC_FIFO) | (value & CyREC_FIFO);

	spin_lock_irqsave(&card->card_lock, flags);
	cyy_writeb(info, CyCOR3, info->cor3);
	cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch);
	spin_unlock_irqrestore(&card->card_lock, flags);
	return 0;
}				/* set_threshold */
/*
 * get_threshold() - report the CD1400 receive-FIFO threshold.
 *
 * Z cards return 0 without touching user memory (threshold is in
 * firmware there).
 */
static int get_threshold(struct cyclades_port *info,
						unsigned long __user *value)
{
	struct cyclades_card *card = info->card;
	u8 rxth;

	if (cy_is_Z(card))
		return 0;

	rxth = cyy_readb(info, CyCOR3) & CyREC_FIFO;
	return put_user(rxth, value);
}				/* get_threshold */
/*
 * set_timeout() - program the CD1400 receive timeout register (RTPR).
 *
 * No-op on Z cards.  Always returns 0.
 */
static int set_timeout(struct cyclades_port *info, unsigned long value)
{
	struct cyclades_card *card = info->card;
	unsigned long flags;

	if (cy_is_Z(card))
		return 0;

	spin_lock_irqsave(&card->card_lock, flags);
	cyy_writeb(info, CyRTPR, value & 0xff);
	spin_unlock_irqrestore(&card->card_lock, flags);
	return 0;
}				/* set_timeout */
/*
 * get_timeout() - report the CD1400 receive timeout register (RTPR).
 *
 * Z cards return 0 without touching user memory.
 */
static int get_timeout(struct cyclades_port *info,
						unsigned long __user *value)
{
	struct cyclades_card *card = info->card;
	u8 rtpr;

	if (cy_is_Z(card))
		return 0;

	rtpr = cyy_readb(info, CyRTPR);
	return put_user(rtpr, value);
}				/* get_timeout */
static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg,
struct cyclades_icount *cprev)
{
struct cyclades_icount cnow;
unsigned long flags;
int ret;
spin_lock_irqsave(&info->card->card_lock, flags);
cnow = info->icount; /* atomic copy */
spin_unlock_irqrestore(&info->card->card_lock, flags);
ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
/*
 * This routine allows the tty driver to implement device-
 * specific ioctl's.  If the ioctl number passed in cmd is
 * not recognized by the driver, it should return ENOIOCTLCMD.
 *
 * Handles the Cyclades-private CY* ioctls (monitor counters, receive
 * threshold/timeout and their defaults, flow settings, polling cycle)
 * plus the standard serial ioctls TIOCGSERIAL, TIOCSSERIAL,
 * TIOCSERGETLSR and TIOCMIWAIT.
 */
static int
cy_ioctl(struct tty_struct *tty,
	 unsigned int cmd, unsigned long arg)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_icount cnow;	/* kernel counter temps */
	int ret_val = 0;
	unsigned long flags;
	void __user *argp = (void __user *)arg;

	if (serial_paranoia_check(info, tty->name, "cy_ioctl"))
		return -ENODEV;

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
			info->line, cmd, arg);
#endif

	switch (cmd) {
	case CYGETMON:
		/* Copy out the throughput monitor counters, then reset them. */
		if (copy_to_user(argp, &info->mon, sizeof(info->mon))) {
			ret_val = -EFAULT;
			break;
		}
		memset(&info->mon, 0, sizeof(info->mon));
		break;
	case CYGETTHRESH:
		ret_val = get_threshold(info, argp);
		break;
	case CYSETTHRESH:
		ret_val = set_threshold(info, arg);
		break;
	case CYGETDEFTHRESH:
		ret_val = put_user(info->default_threshold,
				(unsigned long __user *)argp);
		break;
	case CYSETDEFTHRESH:
		info->default_threshold = arg & 0x0f;
		break;
	case CYGETTIMEOUT:
		ret_val = get_timeout(info, argp);
		break;
	case CYSETTIMEOUT:
		ret_val = set_timeout(info, arg);
		break;
	case CYGETDEFTIMEOUT:
		ret_val = put_user(info->default_timeout,
				(unsigned long __user *)argp);
		break;
	case CYSETDEFTIMEOUT:
		info->default_timeout = arg & 0xff;
		break;
	case CYSETRFLOW:
		info->rflow = (int)arg;
		break;
	case CYGETRFLOW:
		ret_val = info->rflow;
		break;
	case CYSETRTSDTR_INV:
		info->rtsdtr_inv = (int)arg;
		break;
	case CYGETRTSDTR_INV:
		ret_val = info->rtsdtr_inv;
		break;
	case CYGETCD1400VER:
		ret_val = info->chip_rev;
		break;
#ifndef CONFIG_CYZ_INTR
	case CYZSETPOLLCYCLE:
		/* Polling period is passed in milliseconds. */
		cyz_polling_cycle = (arg * HZ) / 1000;
		break;
	case CYZGETPOLLCYCLE:
		ret_val = (cyz_polling_cycle * 1000) / HZ;
		break;
#endif				/* CONFIG_CYZ_INTR */
	case CYSETWAIT:
		info->port.closing_wait = (unsigned short)arg * HZ / 100;
		break;
	case CYGETWAIT:
		ret_val = info->port.closing_wait / (HZ / 100);
		break;
	case TIOCGSERIAL:
		ret_val = cy_get_serial_info(info, argp);
		break;
	case TIOCSSERIAL:
		ret_val = cy_set_serial_info(info, tty, argp);
		break;
	case TIOCSERGETLSR:	/* Get line status register */
		ret_val = get_lsr_info(info, argp);
		break;
		/*
		 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
		 * - mask passed in arg for lines of interest
		 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
		 * Caller should use TIOCGICOUNT to see which one it was
		 */
	case TIOCMIWAIT:
		spin_lock_irqsave(&info->card->card_lock, flags);
		/* note the counters on entry */
		cnow = info->icount;
		spin_unlock_irqrestore(&info->card->card_lock, flags);
		ret_val = wait_event_interruptible(info->port.delta_msr_wait,
				cy_cflags_changed(info, arg, &cnow));
		break;

		/*
		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
		 * Return: write counters to the user passed counter struct
		 * NB: both 1->0 and 0->1 transitions are counted except for
		 *     RI where only 0->1 is counted.
		 */
	default:
		ret_val = -ENOIOCTLCMD;
	}

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_ioctl done\n");
#endif
	return ret_val;
}				/* cy_ioctl */
static int cy_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *sic)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_icount cnow; /* Used to snapshot */
unsigned long flags;
spin_lock_irqsave(&info->card->card_lock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->card->card_lock, flags);
sic->cts = cnow.cts;
sic->dsr = cnow.dsr;
sic->rng = cnow.rng;
sic->dcd = cnow.dcd;
sic->rx = cnow.rx;
sic->tx = cnow.tx;
sic->frame = cnow.frame;
sic->overrun = cnow.overrun;
sic->parity = cnow.parity;
sic->brk = cnow.brk;
sic->buf_overrun = cnow.buf_overrun;
return 0;
}
/*
 * This routine allows the tty driver to be notified when
 * device's termios settings have changed. Note that a
 * well-designed tty driver should be prepared to accept the case
 * where old == NULL, and try to do something rational.
 *
 * NOTE(review): old_termios is dereferenced below without a NULL
 * check, despite the comment above — confirm callers never pass NULL.
 */
static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct cyclades_port *info = tty->driver_data;

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line);
#endif

	/* Reprogram the channel hardware for the new termios settings. */
	cy_set_line_char(info, tty);

	/* If hardware flow control (CRTSCTS) was just switched off, clear
	 * any CTS-induced stop condition and restart transmission. */
	if ((old_termios->c_cflag & CRTSCTS) &&
			!(tty->termios.c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		cy_start(tty);
	}
#if 0
	/*
	 * No need to wake up processes in open wait, since they
	 * sample the CLOCAL flag once, and don't recheck it.
	 * XXX  It's not clear whether the current behavior is correct
	 * or not.  Hence, this may change.....
	 */
	if (!(old_termios->c_cflag & CLOCAL) &&
	    (tty->termios.c_cflag & CLOCAL))
		wake_up_interruptible(&info->port.open_wait);
#endif
}				/* cy_set_termios */
/*
 * cy_send_xchar() - send a high-priority XON/XOFF character to the device.
 *
 * The character is stashed in info->x_char for the Y-card transmit path;
 * on Z cards the firmware is told directly to emit XON/XOFF.
 */
static void cy_send_xchar(struct tty_struct *tty, char ch)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	int channel;

	if (serial_paranoia_check(info, tty->name, "cy_send_xchar"))
		return;

	info->x_char = ch;
	if (ch)
		cy_start(tty);

	card = info->card;
	channel = info->line - card->first_line;

	if (!cy_is_Z(card))
		return;

	/* Z boards: hand flow-control characters to the firmware. */
	if (ch == STOP_CHAR(tty))
		cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
	else if (ch == START_CHAR(tty))
		cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L);
}
/* This routine is called by the upper-layer tty layer to signal
   that incoming characters should be throttled because the input
   buffers are close to full.
 */
static void cy_throttle(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
	char buf[64];

	/* Fixed: use tty_chars_in_buffer() as cy_unthrottle() already
	 * does.  The old "tty->ldisc.chars_in_buffer(tty)" no longer
	 * compiles (tty->ldisc is a pointer and the hook lives in ops). */
	printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf),
			tty_chars_in_buffer(tty), info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_throttle"))
		return;

	card = info->card;
	/* Software flow control: Y cards send XOFF themselves; on Z cards
	 * the firmware does it once info->throttle is set. */
	if (I_IXOFF(tty)) {
		if (!cy_is_Z(card))
			cy_send_xchar(tty, STOP_CHAR(tty));
		else
			info->throttle = 1;
	}
	/* Hardware flow control: drop RTS directly on Y cards. */
	if (tty->termios.c_cflag & CRTSCTS) {
		if (!cy_is_Z(card)) {
			spin_lock_irqsave(&card->card_lock, flags);
			cyy_change_rts_dtr(info, 0, TIOCM_RTS);
			spin_unlock_irqrestore(&card->card_lock, flags);
		} else {
			info->throttle = 1;
		}
	}
}				/* cy_throttle */
/*
 * cy_unthrottle() - the tty core signals that the line disciplines can
 * accept input again, so resume the remote sender.
 */
static void cy_unthrottle(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *card;
	unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
	char buf[64];

	printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
		tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
		return;

	/* Software flow control: cancel a still-pending XOFF if it has
	 * not gone out yet, otherwise queue an XON. */
	if (I_IXOFF(tty)) {
		if (info->x_char)
			info->x_char = 0;
		else
			cy_send_xchar(tty, START_CHAR(tty));
	}

	if (!(tty->termios.c_cflag & CRTSCTS))
		return;

	card = info->card;
	if (cy_is_Z(card)) {
		/* Z firmware re-raises RTS once the throttle flag clears. */
		info->throttle = 0;
	} else {
		spin_lock_irqsave(&card->card_lock, flags);
		cyy_change_rts_dtr(info, TIOCM_RTS, 0);
		spin_unlock_irqrestore(&card->card_lock, flags);
	}
}				/* cy_unthrottle */
/* cy_start and cy_stop provide software output flow control as a
   function of XON/XOFF, software CTS, and other such stuff.
 */
static void cy_stop(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *cinfo;
	int channel;
	unsigned long flags;

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_stop ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_stop"))
		return;

	cinfo = info->card;
	channel = info->line - cinfo->first_line;
	if (cy_is_Z(cinfo))
		return;

	/* Y cards: select the channel via CyCAR, then mask the Tx-ready
	 * interrupt in CySRER so transmission pauses. */
	spin_lock_irqsave(&cinfo->card_lock, flags);
	cyy_writeb(info, CyCAR, channel & 0x03);
	cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
	spin_unlock_irqrestore(&cinfo->card_lock, flags);
}				/* cy_stop */
/* Re-enable transmission on a port previously paused by cy_stop(). */
static void cy_start(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;
	struct cyclades_card *cinfo;
	int channel;
	unsigned long flags;

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_start ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_start"))
		return;

	cinfo = info->card;
	channel = info->line - cinfo->first_line;
	if (cy_is_Z(cinfo))
		return;

	/* Y cards: select the channel, then unmask the Tx-ready
	 * interrupt so output resumes. */
	spin_lock_irqsave(&cinfo->card_lock, flags);
	cyy_writeb(info, CyCAR, channel & 0x03);
	cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
	spin_unlock_irqrestore(&cinfo->card_lock, flags);
}				/* cy_start */
/*
 * cy_hangup() --- called by tty_hangup() when a hangup is signaled.
 */
static void cy_hangup(struct tty_struct *tty)
{
	struct cyclades_port *info = tty->driver_data;

#ifdef CY_DEBUG_OTHER
	printk(KERN_DEBUG "cyc:cy_hangup ttyC%d\n", info->line);
#endif
	if (serial_paranoia_check(info, tty->name, "cy_hangup"))
		return;

	/* Discard pending output, shut the channel hardware down, then
	 * let the tty_port core wake blocked openers and reset the port
	 * state.  Order matters: flush before shutdown. */
	cy_flush_buffer(tty);
	cy_shutdown(info, tty);
	tty_port_hangup(&info->port);
}				/* cy_hangup */
/* Sample DCD on a Cyclom-Y channel; non-zero when carrier is present. */
static int cyy_carrier_raised(struct tty_port *port)
{
	struct cyclades_port *info = container_of(port, struct cyclades_port,
			port);
	struct cyclades_card *cinfo = info->card;
	unsigned long flags;
	int channel = info->line - cinfo->first_line;
	u32 dcd;

	/* Select the channel in CyCAR, then read the modem state bits. */
	spin_lock_irqsave(&cinfo->card_lock, flags);
	cyy_writeb(info, CyCAR, channel & 0x03);
	dcd = cyy_readb(info, CyMSVR1) & CyDCD;
	spin_unlock_irqrestore(&cinfo->card_lock, flags);

	return dcd;
}
/* Assert (@raise != 0) or drop both DTR and RTS on a Cyclom-Y port. */
static void cyy_dtr_rts(struct tty_port *port, int raise)
{
	struct cyclades_port *info = container_of(port, struct cyclades_port,
			port);
	struct cyclades_card *cinfo = info->card;
	unsigned long flags;
	unsigned int both = TIOCM_RTS | TIOCM_DTR;

	spin_lock_irqsave(&cinfo->card_lock, flags);
	if (raise)
		cyy_change_rts_dtr(info, both, 0);
	else
		cyy_change_rts_dtr(info, 0, both);
	spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
static int cyz_carrier_raised(struct tty_port *port)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD;
}
/*
 * Assert (@raise != 0) or drop both DTR and RTS on a Cyclades-Z port.
 *
 * The RS control word is rewritten and a C_CM_IOCTLM command is issued
 * so the firmware applies the new modem-line state.
 */
static void cyz_dtr_rts(struct tty_port *port, int raise)
{
	struct cyclades_port *info = container_of(port, struct cyclades_port,
			port);
	struct cyclades_card *cinfo = info->card;
	struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
	int ret, channel = info->line - cinfo->first_line;
	u32 rs;

	rs = readl(&ch_ctrl->rs_control);
	if (raise)
		rs |= C_RS_RTS | C_RS_DTR;
	else
		rs &= ~(C_RS_RTS | C_RS_DTR);
	cy_writel(&ch_ctrl->rs_control, rs);
	/* Tell the firmware to latch the new rs_control value. */
	ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L);
	if (ret != 0)
		printk(KERN_ERR "%s: retval on ttyC%d was %x\n",
			__func__, info->line, ret);
#ifdef CY_DEBUG_DTR
	/* Fixed: report the actual direction; the old message claimed
	 * "raising" even when the lines were being dropped. */
	printk(KERN_DEBUG "%s: %s Z DTR\n", __func__,
			raise ? "raising" : "dropping");
#endif
}
/* tty_port callbacks for Cyclom-Y (CD1400-based) ports. */
static const struct tty_port_operations cyy_port_ops = {
	.carrier_raised = cyy_carrier_raised,
	.dtr_rts = cyy_dtr_rts,
	.shutdown = cy_do_close,
};

/* tty_port callbacks for Cyclades-Z (firmware-driven) ports. */
static const struct tty_port_operations cyz_port_ops = {
	.carrier_raised = cyz_carrier_raised,
	.dtr_rts = cyz_dtr_rts,
	.shutdown = cy_do_close,
};
/*
 * ---------------------------------------------------------------------
 * cy_init() and friends
 *
 * cy_init() is called at boot-time to initialize the serial driver.
 * ---------------------------------------------------------------------
 */

/*
 * cy_init_card() - allocate and initialise the per-port data of a board.
 *
 * Fills cinfo->ports[] for either the Cyclades-Z (firmware-driven) or
 * the Cyclom-Y (CD1400-based) variant.  Returns 0 on success or
 * -ENOMEM when the port array cannot be allocated.
 */
static int cy_init_card(struct cyclades_card *cinfo)
{
	struct cyclades_port *info;
	unsigned int channel, port;

	spin_lock_init(&cinfo->card_lock);
	cinfo->intr_enabled = 0;

	cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
			GFP_KERNEL);
	if (cinfo->ports == NULL) {
		printk(KERN_ERR "Cyclades: cannot allocate ports\n");
		return -ENOMEM;
	}

	for (channel = 0, port = cinfo->first_line; channel < cinfo->nports;
			channel++, port++) {
		info = &cinfo->ports[channel];
		tty_port_init(&info->port);
		info->magic = CYCLADES_MAGIC;
		info->card = cinfo;
		info->line = port;

		info->port.closing_wait = CLOSING_WAIT_DELAY;
		info->port.close_delay = 5 * HZ / 10;	/* 0.5 s */
		info->port.flags = STD_COM_FLAGS;
		init_completion(&info->shutdown_wait);

		if (cy_is_Z(cinfo)) {
			struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS;
			struct ZFW_CTRL *zfw_ctrl;

			info->port.ops = &cyz_port_ops;
			info->type = PORT_STARTECH;

			/* Locate the firmware control area in board RAM and
			 * hook up this channel's control/buffer blocks. */
			zfw_ctrl = cinfo->base_addr +
				(readl(&firm_id->zfwctrl_addr) & 0xfffff);
			info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel];
			info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel];

			if (cinfo->hw_ver == ZO_V1)
				info->xmit_fifo_size = CYZ_FIFO_SIZE;
			else
				info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
#ifdef CONFIG_CYZ_INTR
			setup_timer(&cyz_rx_full_timer[port],
					cyz_rx_restart, (unsigned long)info);
#endif
		} else {
			unsigned short chip_number;
			int index = cinfo->bus_index;

			info->port.ops = &cyy_port_ops;
			info->type = PORT_CIRRUS;
			info->xmit_fifo_size = CyMAX_CHAR_FIFO;
			info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
			info->cor2 = CyETC;
			info->cor3 = 0x08; /* _very_ small rcv threshold */

			/* Each CD1400 chip serves CyPORTS_PER_CHIP ports. */
			chip_number = channel / CyPORTS_PER_CHIP;
			info->u.cyy.base_addr = cinfo->base_addr +
				(cy_chip_offset[chip_number] << index);
			info->chip_rev = cyy_readb(info, CyGFRCR);

			/* Baud-rate tables differ between the 60 MHz
			 * (rev. J+) and 25 MHz CD1400 variants. */
			if (info->chip_rev >= CD1400_REV_J) {
				/* It is a CD1400 rev. J or later */
				info->tbpr = baud_bpr_60[13]; /* Tx BPR */
				info->tco = baud_co_60[13]; /* Tx CO */
				info->rbpr = baud_bpr_60[13]; /* Rx BPR */
				info->rco = baud_co_60[13]; /* Rx CO */
				info->rtsdtr_inv = 1;
			} else {
				info->tbpr = baud_bpr_25[13]; /* Tx BPR */
				info->tco = baud_co_25[13]; /* Tx CO */
				info->rbpr = baud_bpr_25[13]; /* Rx BPR */
				info->rco = baud_co_25[13]; /* Rx CO */
				info->rtsdtr_inv = 0;
			}
			info->read_status_mask = CyTIMEOUT | CySPECHAR |
				CyBREAK | CyPARITY | CyFRAME | CyOVERRUN;
		}
	}

#ifndef CONFIG_CYZ_INTR
	/* Polled-mode Z boards: start the global poll timer if idle. */
	if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
		mod_timer(&cyz_timerlist, jiffies + 1);
#ifdef CY_PCI_DEBUG
		printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
#endif
	}
#endif
	return 0;
}
/* initialize chips on Cyclom-Y card -- return number of valid
   chips (which is number of ports/4) */
static unsigned short cyy_init_card(void __iomem *true_base_addr,
		int index)
{
	unsigned int chip_number;
	void __iomem *base_addr;

	/* Hardware-reset the whole card and clear any pending interrupt
	 * before probing the individual CD1400 chips. */
	cy_writeb(true_base_addr + (Cy_HwReset << index), 0);
	/* Cy_HwReset is 0x1400 */
	cy_writeb(true_base_addr + (Cy_ClrIntr << index), 0);
	/* Cy_ClrIntr is 0x1800 */
	udelay(500L);

	for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD;
			chip_number++) {
		base_addr =
			true_base_addr + (cy_chip_offset[chip_number] << index);
		mdelay(1);
		/* A present, idle chip must show CCR == 0 after reset. */
		if (readb(base_addr + (CyCCR << index)) != 0x00) {
			/*************
			printk(" chip #%d at %#6lx is never idle (CCR != 0)\n",
			chip_number, (unsigned long)base_addr);
			*************/
			return chip_number;
		}

		cy_writeb(base_addr + (CyGFRCR << index), 0);
		udelay(10L);

		/* The Cyclom-16Y does not decode address bit 9 and therefore
		   cannot distinguish between references to chip 0 and a non-
		   existent chip 4. If the preceding clearing of the supposed
		   chip 4 GFRCR register appears at chip 0, there is no chip 4
		   and this must be a Cyclom-16Y, not a Cyclom-32Ye.
		 */
		if (chip_number == 4 && readb(true_base_addr +
				(cy_chip_offset[0] << index) +
				(CyGFRCR << index)) == 0) {
			return chip_number;
		}

		cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET);
		mdelay(1);

		/* GFRCR becomes the non-zero revision code once reset
		 * completes; zero means the chip did not respond. */
		if (readb(base_addr + (CyGFRCR << index)) == 0x00) {
			/*
			printk(" chip #%d at %#6lx is not responding ",
			chip_number, (unsigned long)base_addr);
			printk("(GFRCR stayed 0)\n",
			 */
			return chip_number;
		}
		/* Valid CD1400 revision codes carry 0x4 in the high nibble. */
		if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) !=
				0x40) {
			/*
			printk(" chip #%d at %#6lx is not valid (GFRCR == "
			"%#2x)\n",
			chip_number, (unsigned long)base_addr,
			base_addr[CyGFRCR<<index]);
			 */
			return chip_number;
		}
		cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL);
		/* Program the prescaler period to suit the chip's clock. */
		if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) {
			/* It is a CD1400 rev. J or later */
			/* Impossible to reach 5ms with this chip.
			   Changed to 2ms instead (f = 500 Hz). */
			cy_writeb(base_addr + (CyPPR << index), CyCLOCK_60_2MS);
		} else {
			/* f = 200 Hz */
			cy_writeb(base_addr + (CyPPR << index), CyCLOCK_25_5MS);
		}

		/*
		printk(" chip #%d at %#6lx is rev 0x%2x\n",
		chip_number, (unsigned long)base_addr,
		readb(base_addr+(CyGFRCR<<index)));
		 */
	}
	return chip_number;
}				/* cyy_init_card */
/*
 * ---------------------------------------------------------------------
 * cy_detect_isa() - Probe for Cyclom-Y/ISA boards.
 * sets global variables and return the number of ISA boards found.
 * ---------------------------------------------------------------------
 */
static int __init cy_detect_isa(void)
{
#ifdef CONFIG_ISA
	struct cyclades_card *card;
	unsigned short cy_isa_irq, nboard;
	void __iomem *cy_isa_address;
	unsigned short i, j, k, cy_isa_nchan;
	int isparam = 0;

	nboard = 0;

	/* Check for module parameters: a user-supplied maddr[] list
	 * overrides the built-in probe address table.
	 * NOTE(review): the "maddr[i] || i" condition also marks isparam
	 * and zeroes the table entry that terminates the list — confirm
	 * this truncation of cy_isa_addresses[] is intended. */
	for (i = 0; i < NR_CARDS; i++) {
		if (maddr[i] || i) {
			isparam = 1;
			cy_isa_addresses[i] = maddr[i];
		}
		if (!maddr[i])
			break;
	}

	/* scan the address table probing for Cyclom-Y/ISA boards */
	for (i = 0; i < NR_ISA_ADDRS; i++) {
		unsigned int isa_address = cy_isa_addresses[i];
		if (isa_address == 0x0000)
			return nboard;

		/* probe for CD1400... */
		cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
		if (cy_isa_address == NULL) {
			printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
					"address\n");
			continue;
		}
		cy_isa_nchan = CyPORTS_PER_CHIP *
			cyy_init_card(cy_isa_address, 0);
		if (cy_isa_nchan == 0) {
			iounmap(cy_isa_address);
			continue;
		}

		/* Use the IRQ given as a module parameter when present,
		 * otherwise probe the hardware for it. */
		if (isparam && i < NR_CARDS && irq[i])
			cy_isa_irq = irq[i];
		else
			/* find out the board's irq by probing */
			cy_isa_irq = detect_isa_irq(cy_isa_address);
		if (cy_isa_irq == 0) {
			printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but the "
				"IRQ could not be detected.\n",
				(unsigned long)cy_isa_address);
			iounmap(cy_isa_address);
			continue;
		}

		if ((cy_next_channel + cy_isa_nchan) > NR_PORTS) {
			printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
				"more channels are available. Change NR_PORTS "
				"in cyclades.c and recompile kernel.\n",
				(unsigned long)cy_isa_address);
			iounmap(cy_isa_address);
			return nboard;
		}
		/* fill the next cy_card structure available */
		for (j = 0; j < NR_CARDS; j++) {
			card = &cy_card[j];
			if (card->base_addr == NULL)
				break;
		}
		if (j == NR_CARDS) {	/* no more cy_cards available */
			printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
				"more cards can be used. Change NR_CARDS in "
				"cyclades.c and recompile kernel.\n",
				(unsigned long)cy_isa_address);
			iounmap(cy_isa_address);
			return nboard;
		}

		/* allocate IRQ */
		if (request_irq(cy_isa_irq, cyy_interrupt,
				0, "Cyclom-Y", card)) {
			printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but "
				"could not allocate IRQ#%d.\n",
				(unsigned long)cy_isa_address, cy_isa_irq);
			iounmap(cy_isa_address);
			return nboard;
		}

		/* set cy_card */
		card->base_addr = cy_isa_address;
		card->ctl_addr.p9050 = NULL;
		card->irq = (int)cy_isa_irq;
		card->bus_index = 0;
		card->first_line = cy_next_channel;
		card->num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
		card->nports = cy_isa_nchan;
		if (cy_init_card(card)) {
			/* Port allocation failed: undo everything for this
			 * board and keep probing the next address. */
			card->base_addr = NULL;
			free_irq(cy_isa_irq, card);
			iounmap(cy_isa_address);
			continue;
		}

		nboard++;

		printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: "
			"%d channels starting from port %d\n",
			j + 1, (unsigned long)cy_isa_address,
			(unsigned long)(cy_isa_address + (CyISA_Ywin - 1)),
			cy_isa_irq, cy_isa_nchan, cy_next_channel);

		/* Register one tty device per channel on this board. */
		for (k = 0, j = cy_next_channel;
				j < cy_next_channel + cy_isa_nchan; j++, k++)
			tty_port_register_device(&card->ports[k].port,
					cy_serial_driver, j, NULL);
		cy_next_channel += cy_isa_nchan;
	}
	return nboard;
#else
	return 0;
#endif				/* CONFIG_ISA */
}				/* cy_detect_isa */
#ifdef CONFIG_PCI
static inline int cyc_isfwstr(const char *str, unsigned int size)
{
unsigned int a;
for (a = 0; a < size && *str; a++, str++)
if (*str & 0x80)
return -EINVAL;
for (; a < size; a++, str++)
if (*str)
return -EINVAL;
return 0;
}
/* Feed a firmware image to the FPGA one byte at a time; every write
 * lands on the same FPGA load port, with a short delay to pace it. */
static inline void cyz_fpga_copy(void __iomem *fpga, const u8 *data,
		unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		cy_writel(fpga, data[i]);
		udelay(10);
	}
}
/*
 * plx_init() - reset the PLX 9060 PCI bridge and reload its EEPROM config.
 *
 * @pdev: PCI device the bridge belongs to
 * @irq:  interrupt line to restore after the reset
 * @addr: mapped PLX runtime register block
 */
static void plx_init(struct pci_dev *pdev, int irq,
		struct RUNTIME_9060 __iomem *addr)
{
	/* Reset PLX: pulse the software-reset bit in init_ctrl. */
	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
	udelay(100L);
	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);

	/* Reload Config. Registers from EEPROM: pulse the reload bit. */
	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
	udelay(100L);
	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);

	/* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
	 * the IRQ is lost and, thus, we have to re-write it to the PCI config.
	 * registers. This will remain here until we find a permanent fix.
	 */
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
}
/*
 * __cyz_load_fw() - parse and upload one Cyclades-Z firmware image.
 *
 * @fw:      raw firmware blob (zfile container format)
 * @name:    expected firmware name recorded in the image header
 * @mailbox: board mailbox value used to pick the matching configuration
 * @base:    board RAM window, or NULL to skip RAM blocks
 * @fpga:    FPGA load port, or NULL to skip FPGA blocks
 *
 * All header/config/block offsets are range-checked before anything is
 * written to the hardware.  Returns 0 on success or -EINVAL when the
 * image is malformed or matches no configuration.
 */
static int __cyz_load_fw(const struct firmware *fw,
		const char *name, const u32 mailbox, void __iomem *base,
		void __iomem *fpga)
{
	const void *ptr = fw->data;
	const struct zfile_header *h = ptr;
	const struct zfile_config *c, *cs;
	const struct zfile_block *b, *bs;
	unsigned int a, tmp, len = fw->size;
#define BAD_FW KERN_ERR "Bad firmware: "
	if (len < sizeof(*h)) {
		printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
		return -EINVAL;
	}

	cs = ptr + h->config_offset;
	bs = ptr + h->block_offset;

	/* Config and block tables must lie entirely inside the blob. */
	if ((void *)(cs + h->n_config) > ptr + len ||
			(void *)(bs + h->n_blocks) > ptr + len) {
		printk(BAD_FW "too short");
		return -EINVAL;
	}

	if (cyc_isfwstr(h->name, sizeof(h->name)) ||
			cyc_isfwstr(h->date, sizeof(h->date))) {
		printk(BAD_FW "bad formatted header string\n");
		return -EINVAL;
	}

	if (strncmp(name, h->name, sizeof(h->name))) {
		printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
		return -EINVAL;
	}

	/* Validate every config's block references and count the configs
	 * applicable to this board (matching mailbox, "normal" function). */
	tmp = 0;
	for (c = cs; c < cs + h->n_config; c++) {
		for (a = 0; a < c->n_blocks; a++)
			if (c->block_list[a] > h->n_blocks) {
				printk(BAD_FW "bad block ref number in cfgs\n");
				return -EINVAL;
			}
		if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
			tmp++;
	}
	if (!tmp) {
		printk(BAD_FW "nothing appropriate\n");
		return -EINVAL;
	}

	/* Every block's payload must fit inside the blob. */
	for (b = bs; b < bs + h->n_blocks; b++)
		if (b->file_offset + b->size > len) {
			printk(BAD_FW "bad block data offset\n");
			return -EINVAL;
		}

	/* everything is OK, let's seek'n'load it */
	for (c = cs; c < cs + h->n_config; c++)
		if (c->mailbox == mailbox && c->function == 0)
			break;

	/* Upload each referenced block to its destination: FPGA blocks go
	 * through the byte-wise load port, RAM blocks straight to memory. */
	for (a = 0; a < c->n_blocks; a++) {
		b = &bs[c->block_list[a]];
		if (b->type == ZBLOCK_FPGA) {
			if (fpga != NULL)
				cyz_fpga_copy(fpga, ptr + b->file_offset,
						b->size);
		} else {
			if (base != NULL)
				memcpy_toio(base + b->ram_offset,
						ptr + b->file_offset, b->size);
		}
	}
#undef BAD_FW
	return 0;
}
/*
 * cyz_load_fw() - boot a Cyclades-Z board: load FPGA/firmware, start CPU.
 *
 * Returns the number of channels reported by the started firmware
 * (positive), 0 when the board was skipped or came up with no ports,
 * or a negative errno on failure.
 */
static int cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
		struct RUNTIME_9060 __iomem *ctl_addr, int irq)
{
	const struct firmware *fw;
	struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
	struct CUSTOM_REG __iomem *cust = base_addr;
	struct ZFW_CTRL __iomem *pt_zfwctrl;
	void __iomem *tmp;
	u32 mailbox, status, nchan;
	unsigned int i;
	int retval;

	retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
	if (retval) {
		dev_err(&pdev->dev, "can't get firmware\n");
		goto err;
	}

	/* Check whether the firmware is already loaded and running. If
	   positive, skip this board */
	if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
		u32 cntval = readl(base_addr + 0x190);

		udelay(100);
		if (cntval != readl(base_addr + 0x190)) {
			/* FW counter is working, FW is running */
			dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
					"Skipping board.\n");
			retval = 0;
			goto err_rel;
		}
	}

	/* start boot */
	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
			~0x00030800UL);

	mailbox = readl(&ctl_addr->mail_box_0);

	if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
		/* stops CPU and set window to beginning of RAM */
		cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
		cy_writel(&cust->cpu_stop, 0);
		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
		udelay(100);
	}

	plx_init(pdev, irq, ctl_addr);

	if (mailbox != 0) {
		/* load FPGA */
		retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
				base_addr);
		if (retval)
			goto err_rel;
		/* NOTE(review): on this path retval is still 0 from the
		 * successful upload, so the error return below is 0 —
		 * confirm whether -EIO was intended here. */
		if (!__cyz_fpga_loaded(ctl_addr)) {
			dev_err(&pdev->dev, "fw upload successful, but fw is "
					"not loaded\n");
			goto err_rel;
		}
	}

	/* stops CPU and set window to beginning of RAM */
	cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
	cy_writel(&cust->cpu_stop, 0);
	cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
	udelay(100);

	/* clear memory */
	for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
		cy_writeb(tmp, 255);
	if (mailbox != 0) {
		/* set window to last 512K of RAM */
		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
		for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
			cy_writeb(tmp, 255);
		/* set window to beginning of RAM */
		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
	}

	/* Now upload the operational firmware into board RAM. */
	retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
	release_firmware(fw);
	if (retval)
		goto err;

	/* finish boot and start boards */
	cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
	cy_writel(&cust->cpu_start, 0);
	cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);

	/* Poll for the firmware signature: first up to ~4 s, then a
	 * second, longer wait before giving up (20 s total). */
	i = 0;
	while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
		msleep(100);
	if (status != ZFIRM_ID) {
		if (status == ZFIRM_HLT) {
			dev_err(&pdev->dev, "you need an external power supply "
				"for this number of ports. Firmware halted and "
				"board reset.\n");
			retval = -EIO;
			goto err;
		}
		dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
				"some more time\n", status);
		while ((status = readl(&fid->signature)) != ZFIRM_ID &&
				i++ < 200)
			msleep(100);
		if (status != ZFIRM_ID) {
			dev_err(&pdev->dev, "Board not started in 20 seconds! "
					"Giving up. (fid->signature = 0x%x)\n",
					status);
			dev_info(&pdev->dev, "*** Warning ***: if you are "
				"upgrading the FW, please power cycle the "
				"system before loading the new FW to the "
				"Cyclades-Z.\n");

			if (__cyz_fpga_loaded(ctl_addr))
				plx_init(pdev, irq, ctl_addr);

			retval = -EIO;
			goto err;
		}
		dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
				i / 10);
	}

	pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);

	dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
			base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
			base_addr + readl(&fid->zfwctrl_addr));

	nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
	dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
		readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);

	if (nchan == 0) {
		dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
			"check the connection between the Z host card and the "
			"serial expanders.\n");

		if (__cyz_fpga_loaded(ctl_addr))
			plx_init(pdev, irq, ctl_addr);

		dev_info(&pdev->dev, "Null number of ports detected. Board "
				"reset.\n");
		retval = 0;
		goto err;
	}

	cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
	cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);

	/*
	   Early firmware failed to start looking for commands.
	   This enables firmware interrupts for those commands.
	 */
	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
			(1 << 17));
	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
			0x00030800UL);

	return nchan;
err_rel:
	release_firmware(fw);
err:
	return retval;
}
/*
 * cy_pci_probe() - bind one Cyclom-Y or Cyclades-Z PCI board.
 *
 * Maps the control (BAR 0) and base (BAR 2) windows, probes/boots the
 * board, claims an IRQ, fills a free cy_card slot and registers the tty
 * devices.  Errors unwind via the goto ladder at the bottom.
 */
static int cy_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct cyclades_card *card;
	void __iomem *addr0 = NULL, *addr2 = NULL;
	char *card_name = NULL;
	u32 uninitialized_var(mailbox);
	unsigned int device_id, nchan = 0, card_no, i, j;
	unsigned char plx_ver;
	int retval, irq;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "cannot enable device\n");
		goto err;
	}

	/* read PCI configuration area */
	irq = pdev->irq;
	device_id = pdev->device & ~PCI_DEVICE_ID_MASK;

#if defined(__alpha__)
	if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo) {	/* below 1M? */
		dev_err(&pdev->dev, "Cyclom-Y/PCI not supported for low "
				"addresses on Alpha systems.\n");
		retval = -EIO;
		goto err_dis;
	}
#endif
	if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Lo) {
		dev_err(&pdev->dev, "Cyclades-Z/PCI not supported for low "
				"addresses\n");
		retval = -EIO;
		goto err_dis;
	}

	/* Some boards report BAR 2 as I/O space by mistake. */
	if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
		dev_warn(&pdev->dev, "PCI I/O bit incorrectly set. Ignoring "
				"it...\n");
		pdev->resource[2].flags &= ~IORESOURCE_IO;
	}

	retval = pci_request_regions(pdev, "cyclades");
	if (retval) {
		dev_err(&pdev->dev, "failed to reserve resources\n");
		goto err_dis;
	}

	retval = -EIO;
	if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
			device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
		card_name = "Cyclom-Y";

		addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
				CyPCI_Yctl);
		if (addr0 == NULL) {
			dev_err(&pdev->dev, "can't remap ctl region\n");
			goto err_reg;
		}
		addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
				CyPCI_Ywin);
		if (addr2 == NULL) {
			dev_err(&pdev->dev, "can't remap base region\n");
			goto err_unmap;
		}

		/* Probe the CD1400 chips; bus_index 1 => 32-bit strides. */
		nchan = CyPORTS_PER_CHIP * cyy_init_card(addr2, 1);
		if (nchan == 0) {
			dev_err(&pdev->dev, "Cyclom-Y PCI host card with no "
					"Serial-Modules\n");
			goto err_unmap;
		}
	} else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
		struct RUNTIME_9060 __iomem *ctl_addr;

		ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
				CyPCI_Zctl);
		if (addr0 == NULL) {
			dev_err(&pdev->dev, "can't remap ctl region\n");
			goto err_reg;
		}

		/* Disable interrupts on the PLX before resetting it */
		cy_writew(&ctl_addr->intr_ctrl_stat,
				readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);

		plx_init(pdev, irq, addr0);

		/* Mailbox 0 identifies the board flavor (Ze vs. 8Zo). */
		mailbox = readl(&ctl_addr->mail_box_0);

		addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
				mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
		if (addr2 == NULL) {
			dev_err(&pdev->dev, "can't remap base region\n");
			goto err_unmap;
		}

		if (mailbox == ZE_V1) {
			card_name = "Cyclades-Ze";
		} else {
			card_name = "Cyclades-8Zo";
#ifdef CY_PCI_DEBUG
			if (mailbox == ZO_V1) {
				cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
				dev_info(&pdev->dev, "Cyclades-8Zo/PCI: FPGA "
					"id %lx, ver %lx\n", (ulong)(0xff &
					readl(&((struct CUSTOM_REG *)addr2)->
						fpga_id)), (ulong)(0xff &
					readl(&((struct CUSTOM_REG *)addr2)->
						fpga_version)));
				cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
			} else {
				dev_info(&pdev->dev, "Cyclades-Z/PCI: New "
					"Cyclades-Z board.  FPGA not loaded\n");
			}
#endif
			/* The following clears the firmware id word.  This
			   ensures that the driver will not attempt to talk to
			   the board until it has been properly initialized.
			 */
			if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
				cy_writel(addr2 + ID_ADDRESS, 0L);
		}

		/* Boot the board; positive return is the channel count. */
		retval = cyz_load_fw(pdev, addr2, addr0, irq);
		if (retval <= 0)
			goto err_unmap;
		nchan = retval;
	}

	if ((cy_next_channel + nchan) > NR_PORTS) {
		dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
			"channels are available. Change NR_PORTS in "
			"cyclades.c and recompile kernel.\n");
		goto err_unmap;
	}
	/* fill the next cy_card structure available */
	for (card_no = 0; card_no < NR_CARDS; card_no++) {
		card = &cy_card[card_no];
		if (card->base_addr == NULL)
			break;
	}
	if (card_no == NR_CARDS) {	/* no more cy_cards available */
		dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
			"more cards can be used. Change NR_CARDS in "
			"cyclades.c and recompile kernel.\n");
		goto err_unmap;
	}

	if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
			device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
		/* allocate IRQ */
		retval = request_irq(irq, cyy_interrupt,
				IRQF_SHARED, "Cyclom-Y", card);
		if (retval) {
			dev_err(&pdev->dev, "could not allocate IRQ\n");
			goto err_unmap;
		}
		card->num_chips = nchan / CyPORTS_PER_CHIP;
	} else {
		struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS;
		struct ZFW_CTRL __iomem *zfw_ctrl;

		zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff);

		card->hw_ver = mailbox;
		card->num_chips = (unsigned int)-1;
		card->board_ctrl = &zfw_ctrl->board_ctrl;
#ifdef CONFIG_CYZ_INTR
		/* allocate IRQ only if board has an IRQ */
		if (irq != 0 && irq != 255) {
			retval = request_irq(irq, cyz_interrupt,
					IRQF_SHARED, "Cyclades-Z", card);
			if (retval) {
				dev_err(&pdev->dev, "could not allocate IRQ\n");
				goto err_unmap;
			}
		}
#endif				/* CONFIG_CYZ_INTR */
	}

	/* set cy_card */
	card->base_addr = addr2;
	card->ctl_addr.p9050 = addr0;
	card->irq = irq;
	card->bus_index = 1;
	card->first_line = cy_next_channel;
	card->nports = nchan;
	retval = cy_init_card(card);
	if (retval)
		goto err_null;

	pci_set_drvdata(pdev, card);

	if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
			device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
		/* enable interrupts in the PCI interface */
		plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
		switch (plx_ver) {
		case PLX_9050:
			cy_writeb(addr0 + 0x4c, 0x43);
			break;

		case PLX_9060:
		case PLX_9080:
		default:	/* Old boards, use PLX_9060 */
		{
			struct RUNTIME_9060 __iomem *ctl_addr = addr0;
			plx_init(pdev, irq, ctl_addr);
			cy_writew(&ctl_addr->intr_ctrl_stat,
				readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
			break;
		}
		}
	}

	dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
		"port %d.\n", card_name, card_no + 1, nchan, cy_next_channel);
	for (j = 0, i = cy_next_channel; i < cy_next_channel + nchan; i++, j++)
		tty_port_register_device(&card->ports[j].port,
				cy_serial_driver, i, &pdev->dev);
	cy_next_channel += nchan;

	return 0;
err_null:
	card->base_addr = NULL;
	free_irq(irq, card);
err_unmap:
	iounmap(addr0);
	if (addr2)
		iounmap(addr2);
err_reg:
	pci_release_regions(pdev);
err_dis:
	pci_disable_device(pdev);
err:
	return retval;
}
/*
 * cy_pci_remove() - tear down one PCI board on driver unbind.
 *
 * Disables board interrupts at the PCI bridge, unmaps the BARs, frees
 * the IRQ (when one was requested), and unregisters every tty device.
 */
static void cy_pci_remove(struct pci_dev *pdev)
{
	struct cyclades_card *cinfo = pci_get_drvdata(pdev);
	unsigned int i, channel;

	/* non-Z with old PLX */
	if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
			PLX_9050)
		cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
	else
		/* Without CONFIG_CYZ_INTR, Z boards are polled and never
		 * enabled bridge interrupts, so skip the disable there. */
#ifndef CONFIG_CYZ_INTR
		if (!cy_is_Z(cinfo))
#endif
			cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
				readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
				~0x0900);

	iounmap(cinfo->base_addr);
	if (cinfo->ctl_addr.p9050)
		iounmap(cinfo->ctl_addr.p9050);
	/* Polled Z boards never requested an IRQ — only free a real one. */
	if (cinfo->irq
#ifndef CONFIG_CYZ_INTR
		&& !cy_is_Z(cinfo)
#endif /* CONFIG_CYZ_INTR */
		)
		free_irq(cinfo->irq, cinfo);
	pci_release_regions(pdev);

	cinfo->base_addr = NULL;
	for (channel = 0, i = cinfo->first_line; i < cinfo->first_line +
			cinfo->nports; i++, channel++) {
		tty_unregister_device(cy_serial_driver, i);
		tty_port_destroy(&cinfo->ports[channel].port);
	}
	cinfo->nports = 0;
	kfree(cinfo->ports);
}
/* PCI glue: match table and probe/remove hooks for Cyclades boards. */
static struct pci_driver cy_pci_driver = {
	.name = "cyclades",
	.id_table = cy_pci_dev_id,
	.probe = cy_pci_probe,
	.remove = cy_pci_remove
};
#endif
/*
 * cyclades_proc_show() - render /proc/tty/driver statistics.
 *
 * Emits one line per known port: open time, byte counters, idle times,
 * overruns and the current line-discipline number (0 when closed).
 */
static int cyclades_proc_show(struct seq_file *m, void *v)
{
	struct cyclades_port *info;
	unsigned int i, j;
	__u32 cur_jifs = jiffies;

	seq_puts(m, "Dev TimeOpen   BytesOut  IdleOut    BytesIn   "
			"IdleIn  Overruns  Ldisc\n");

	/* Output one line for each known port */
	for (i = 0; i < NR_CARDS; i++)
		for (j = 0; j < cy_card[i].nports; j++) {
			info = &cy_card[i].ports[j];

			if (info->port.count) {
				/* XXX is the ldisc num worth this? */
				struct tty_struct *tty;
				struct tty_ldisc *ld;
				int num = 0;

				/* Take tty and ldisc references just long
				 * enough to read the ldisc number. */
				tty = tty_port_tty_get(&info->port);
				if (tty) {
					ld = tty_ldisc_ref(tty);
					if (ld) {
						num = ld->ops->num;
						tty_ldisc_deref(ld);
					}
					tty_kref_put(tty);
				}
				seq_printf(m, "%3d %8lu %10lu %8lu "
					"%10lu %8lu %9lu %6d\n", info->line,
					(cur_jifs - info->idle_stats.in_use) /
					HZ, info->idle_stats.xmit_bytes,
					(cur_jifs - info->idle_stats.xmit_idle)/
					HZ, info->idle_stats.recv_bytes,
					(cur_jifs - info->idle_stats.recv_idle)/
					HZ, info->idle_stats.overruns,
					num);
			} else
				seq_printf(m, "%3d %8lu %10lu %8lu "
					"%10lu %8lu %9lu %6ld\n",
					info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
		}
	return 0;
}
/* seq_file open hook: bind the proc entry to cyclades_proc_show(). */
static int cyclades_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cyclades_proc_show, NULL);
}
/* File operations for the driver's proc entry (single_open seq_file). */
static const struct file_operations cyclades_proc_fops = {
	.owner = THIS_MODULE,
	.open = cyclades_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* The serial driver boot-time initialization code!
Hardware I/O ports are mapped to character special devices on a
first found, first allocated manner. That is, this code searches
for Cyclom cards in the system. As each is found, it is probed
to discover how many chips (and thus how many ports) are present.
These ports are mapped to the tty ports 32 and upward in monotonic
fashion. If an 8-port card is replaced with a 16-port card, the
port mapping on a following card will shift.
This approach is different from what is used in the other serial
device driver because the Cyclom is more properly a multiplexer,
not just an aggregation of serial ports on one card.
If there are more cards with more ports than have been
statically allocated above, a warning is printed and the
extra ports are ignored.
*/
/* tty callbacks registered with the tty core for every Cyclades port. */
static const struct tty_operations cy_ops = {
	.open = cy_open,
	.close = cy_close,
	.write = cy_write,
	.put_char = cy_put_char,
	.flush_chars = cy_flush_chars,
	.write_room = cy_write_room,
	.chars_in_buffer = cy_chars_in_buffer,
	.flush_buffer = cy_flush_buffer,
	.ioctl = cy_ioctl,
	.throttle = cy_throttle,
	.unthrottle = cy_unthrottle,
	.set_termios = cy_set_termios,
	.stop = cy_stop,
	.start = cy_start,
	.hangup = cy_hangup,
	.break_ctl = cy_break,
	.wait_until_sent = cy_wait_until_sent,
	.tiocmget = cy_tiocmget,
	.tiocmset = cy_tiocmset,
	.get_icount = cy_get_icount,
	.proc_fops = &cyclades_proc_fops,
};
/*
 * Module init: allocate and register the tty driver, then probe for
 * ISA and (when configured) PCI boards.  Returns 0 on success or a
 * negative errno; on failure the tty driver is fully unwound.
 */
static int __init cy_init(void)
{
	unsigned int nboards;
	int retval = -ENOMEM;
	cy_serial_driver = alloc_tty_driver(NR_PORTS);
	if (!cy_serial_driver)
		goto err;
	printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
	/* Initialize the tty_driver structure */
	cy_serial_driver->driver_name = "cyclades";
	cy_serial_driver->name = "ttyC";
	cy_serial_driver->major = CYCLADES_MAJOR;
	cy_serial_driver->minor_start = 0;
	cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	cy_serial_driver->subtype = SERIAL_TYPE_NORMAL;
	cy_serial_driver->init_termios = tty_std_termios;
	cy_serial_driver->init_termios.c_cflag =
	    B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	cy_serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(cy_serial_driver, &cy_ops);
	retval = tty_register_driver(cy_serial_driver);
	if (retval) {
		printk(KERN_ERR "Couldn't register Cyclades serial driver\n");
		goto err_frtty;
	}
	/* the code below is responsible to find the boards. Each different
	   type of board has its own detection routine. If a board is found,
	   the next cy_card structure available is set by the detection
	   routine. These functions are responsible for checking the
	   availability of cy_card and cy_port data structures and updating
	   the cy_next_channel. */
	/* look for isa boards */
	nboards = cy_detect_isa();
#ifdef CONFIG_PCI
	/* look for pci boards */
	retval = pci_register_driver(&cy_pci_driver);
	/* Only treat a PCI registration failure as fatal when no ISA
	 * boards were found either; with ISA boards present the driver
	 * is still useful. */
	if (retval && !nboards) {
		tty_unregister_driver(cy_serial_driver);
		goto err_frtty;
	}
#endif
	return 0;
err_frtty:
	put_tty_driver(cy_serial_driver);
err:
	return retval;
} /* cy_init */
/*
 * Module exit: unregister the tty driver and the PCI driver, then
 * release per-card resources (interrupts, mappings, tty devices,
 * port arrays) for every detected board.
 */
static void __exit cy_cleanup_module(void)
{
	struct cyclades_card *card;
	unsigned int i, e1;
#ifndef CONFIG_CYZ_INTR
	/* polling mode: stop the Z-series poll timer before teardown */
	del_timer_sync(&cyz_timerlist);
#endif /* CONFIG_CYZ_INTR */
	e1 = tty_unregister_driver(cy_serial_driver);
	if (e1)
		printk(KERN_ERR "failed to unregister Cyclades serial "
				"driver(%d)\n", e1);
#ifdef CONFIG_PCI
	pci_unregister_driver(&cy_pci_driver);
#endif
	for (i = 0; i < NR_CARDS; i++) {
		card = &cy_card[i];
		/* base_addr non-NULL marks a slot that holds a real board */
		if (card->base_addr) {
			/* clear interrupt */
			cy_writeb(card->base_addr + Cy_ClrIntr, 0);
			iounmap(card->base_addr);
			if (card->ctl_addr.p9050)
				iounmap(card->ctl_addr.p9050);
			/* without CYZ_INTR, Z-series boards run polled and
			 * never requested an IRQ */
			if (card->irq
#ifndef CONFIG_CYZ_INTR
				&& !cy_is_Z(card)
#endif /* CONFIG_CYZ_INTR */
				)
				free_irq(card->irq, card);
			for (e1 = card->first_line; e1 < card->first_line +
					card->nports; e1++)
				tty_unregister_device(cy_serial_driver, e1);
			kfree(card->ports);
		}
	}
	put_tty_driver(cy_serial_driver);
} /* cy_cleanup_module */
/* Module entry/exit points and modinfo metadata. */
module_init(cy_init);
module_exit(cy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(CY_VERSION);
MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
MODULE_FIRMWARE("cyzfirm.bin");
| gpl-2.0 |
BlissRoms-Kernels/android_kernel_huawei_kiwi | sound/core/device.c | 2566 | 6751 | /*
* Device management routines
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <sound/core.h>
/**
 * snd_device_new - create an ALSA device component
 * @card: the card instance
 * @type: the device type, SNDRV_DEV_XXX
 * @device_data: the data pointer of this device
 * @ops: the operator table
 *
 * Allocates a new component, binds it to @card and queues it at the
 * head of the card's device list (so later teardown walks newest
 * first).  The @device_data pointer doubles as the component's unique
 * identifier, so it must stay valid and unchanged.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_device_new(struct snd_card *card, snd_device_type_t type,
		   void *device_data, struct snd_device_ops *ops)
{
	struct snd_device *dev;

	if (snd_BUG_ON(!card || !device_data || !ops))
		return -ENXIO;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		snd_printk(KERN_ERR "Cannot allocate device\n");
		return -ENOMEM;
	}

	dev->card = card;
	dev->type = type;
	dev->state = SNDRV_DEV_BUILD;
	dev->device_data = device_data;
	dev->ops = ops;
	list_add(&dev->list, &card->devices);	/* add to the head of list */
	return 0;
}
EXPORT_SYMBOL(snd_device_new);
/**
 * snd_device_free - release the device from the card
 * @card: the card instance
 * @device_data: the data pointer to release
 *
 * Looks the component up by its @device_data identifier, unlinks it
 * from the card's device list, runs the dev_disconnect callback (if
 * the device was registered) and the dev_free callback, then frees
 * the component itself.
 *
 * Return: Zero if successful, or a negative error code on failure or if the
 * device not found.
 */
int snd_device_free(struct snd_card *card, void *device_data)
{
	struct snd_device *dev;

	if (snd_BUG_ON(!card || !device_data))
		return -ENXIO;
	list_for_each_entry(dev, &card->devices, list) {
		if (dev->device_data == device_data) {
			/* unlink before running the callbacks */
			list_del(&dev->list);
			if (dev->state == SNDRV_DEV_REGISTERED &&
			    dev->ops->dev_disconnect &&
			    dev->ops->dev_disconnect(dev))
				snd_printk(KERN_ERR
					   "device disconnect failure\n");
			if (dev->ops->dev_free &&
			    dev->ops->dev_free(dev))
				snd_printk(KERN_ERR "device free failure\n");
			kfree(dev);
			return 0;
		}
	}
	snd_printd("device free %p (from %pF), not found\n", device_data,
		   __builtin_return_address(0));
	return -ENXIO;
}
EXPORT_SYMBOL(snd_device_free);
/**
 * snd_device_disconnect - disconnect the device
 * @card: the card instance
 * @device_data: the data pointer to disconnect
 *
 * Moves the component into the disconnected state, invoking the
 * dev_disconnect callback when the component had been registered.
 *
 * Usually called from snd_card_disconnect().
 *
 * Return: Zero if successful, or a negative error code on failure or if the
 * device not found.
 */
int snd_device_disconnect(struct snd_card *card, void *device_data)
{
	struct snd_device *dev;

	if (snd_BUG_ON(!card || !device_data))
		return -ENXIO;
	list_for_each_entry(dev, &card->devices, list) {
		if (dev->device_data == device_data) {
			if (dev->state == SNDRV_DEV_REGISTERED &&
			    dev->ops->dev_disconnect) {
				if (dev->ops->dev_disconnect(dev))
					snd_printk(KERN_ERR "device disconnect failure\n");
				dev->state = SNDRV_DEV_DISCONNECTED;
			}
			return 0;
		}
	}
	snd_printd("device disconnect %p (from %pF), not found\n", device_data,
		   __builtin_return_address(0));
	return -ENXIO;
}
/**
 * snd_device_register - register the device
 * @card: the card instance
 * @device_data: the data pointer to register
 *
 * Registers the device which was already created via
 * snd_device_new().  Usually this is called from snd_card_register(),
 * but it can be called later if any new devices are created after
 * invocation of snd_card_register().
 *
 * Return: Zero if successful, or a negative error code on failure or if the
 * device not found.
 */
int snd_device_register(struct snd_card *card, void *device_data)
{
	struct snd_device *dev;
	int err;

	if (snd_BUG_ON(!card || !device_data))
		return -ENXIO;
	list_for_each_entry(dev, &card->devices, list) {
		if (dev->device_data != device_data)
			continue;
		/* only a freshly built device with a register op can
		 * be registered; anything else is a state error */
		if (dev->state != SNDRV_DEV_BUILD || !dev->ops->dev_register) {
			snd_printd("snd_device_register busy\n");
			return -EBUSY;
		}
		err = dev->ops->dev_register(dev);
		if (err < 0)
			return err;
		dev->state = SNDRV_DEV_REGISTERED;
		return 0;
	}
	snd_BUG();
	return -ENXIO;
}
EXPORT_SYMBOL(snd_device_register);
/*
 * register all the devices on the card.
 * called from init.c
 */
int snd_device_register_all(struct snd_card *card)
{
	struct snd_device *dev;
	int err;

	if (snd_BUG_ON(!card))
		return -ENXIO;
	list_for_each_entry(dev, &card->devices, list) {
		/* skip anything already registered or lacking a register op */
		if (dev->state != SNDRV_DEV_BUILD || !dev->ops->dev_register)
			continue;
		err = dev->ops->dev_register(dev);
		if (err < 0)
			return err;
		dev->state = SNDRV_DEV_REGISTERED;
	}
	return 0;
}
/*
* disconnect all the devices on the card.
* called from init.c
*/
int snd_device_disconnect_all(struct snd_card *card)
{
struct snd_device *dev;
int err = 0;
if (snd_BUG_ON(!card))
return -ENXIO;
list_for_each_entry(dev, &card->devices, list) {
if (snd_device_disconnect(card, dev->device_data) < 0)
err = -ENXIO;
}
return err;
}
/*
 * release all the devices on the card.
 * called from init.c
 */
int snd_device_free_all(struct snd_card *card, snd_device_cmd_t cmd)
{
	struct snd_device *dev;
	int err;
	unsigned int range_low, range_high, type;
	if (snd_BUG_ON(!card))
		return -ENXIO;
	/* each command selects one contiguous range of device types */
	range_low = (__force unsigned int)cmd * SNDRV_DEV_TYPE_RANGE_SIZE;
	range_high = range_low + SNDRV_DEV_TYPE_RANGE_SIZE - 1;
      __again:
	list_for_each_entry(dev, &card->devices, list) {
		type = (__force unsigned int)dev->type;
		if (type >= range_low && type <= range_high) {
			if ((err = snd_device_free(card, dev->device_data)) < 0)
				return err;
			/* snd_device_free() unlinked and freed the current
			 * entry, invalidating the iterator: restart the walk */
			goto      __again;
		}
	}
	return 0;
}
| gpl-2.0 |
Split-Screen/android_kernel_huawei_angler | drivers/uwb/lc-rc.c | 2822 | 11167 | /*
* Ultra Wide Band
* Life cycle of radio controllers
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*
* A UWB radio controller is also a UWB device, so it embeds one...
*
* List of RCs comes from the 'struct class uwb_rc_class'.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "uwb-internal.h"
/* class_find_device() match callback: true when the RC bound to @dev
 * carries the index passed in @data. */
static int uwb_rc_index_match(struct device *dev, const void *data)
{
	const int *index = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	return rc->index == *index;
}
/* Look up a radio controller by its numeric index.
 *
 * Returns the uwb_rc pointer (no extra RC reference is taken) or NULL
 * when no RC with that index exists.
 */
static struct uwb_rc *uwb_rc_find_by_index(int index)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
	if (dev) {
		rc = dev_get_drvdata(dev);
		/* class_find_device() returned @dev with a reference held;
		 * drop it — we only needed the lookup, otherwise the device
		 * reference is leaked on every call. */
		put_device(dev);
	}

	return rc;
}
/* Return the lowest index not yet used by any registered RC,
 * wrapping back to zero on signed overflow. */
static int uwb_rc_new_index(void)
{
	int candidate = 0;

	while (uwb_rc_find_by_index(candidate)) {
		if (++candidate < 0)
			candidate = 0;
	}
	return candidate;
}
/**
 * Release the backing device of a uwb_rc that has been dynamically allocated.
 * Device-model release callback: frees the IE buffer and the uwb_rc
 * that embeds the uwb_dev embedding @dev.
 */
static void uwb_rc_sys_release(struct device *dev)
{
	struct uwb_rc *rc;

	rc = container_of(container_of(dev, struct uwb_dev, dev),
			  struct uwb_rc, uwb_dev);
	uwb_rc_ie_release(rc);
	kfree(rc);
}
/* Initialize an (already allocated) radio controller: wire it into the
 * uwb_rc class, set up the notification/beacon lists and all RC
 * subsystems (DRP availability, IEs, reservations, PALs). */
void uwb_rc_init(struct uwb_rc *rc)
{
	struct uwb_dev *uwb_dev = &rc->uwb_dev;
	uwb_dev_init(uwb_dev);
	rc->uwb_dev.dev.class = &uwb_rc_class;
	rc->uwb_dev.dev.release = uwb_rc_sys_release;
	uwb_rc_neh_create(rc);
	rc->beaconing = -1;		/* -1: not beaconing */
	rc->scan_type = UWB_SCAN_DISABLED;
	INIT_LIST_HEAD(&rc->notifs_chain.list);
	mutex_init(&rc->notifs_chain.mutex);
	INIT_LIST_HEAD(&rc->uwb_beca.list);
	mutex_init(&rc->uwb_beca.mutex);
	uwb_drp_avail_init(rc);
	uwb_rc_ie_init(rc);
	uwb_rsv_init(rc);
	uwb_rc_pal_init(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_init);
/* Allocate and initialize a fresh radio controller.
 * Returns NULL on allocation failure. */
struct uwb_rc *uwb_rc_alloc(void)
{
	struct uwb_rc *rc = kzalloc(sizeof(*rc), GFP_KERNEL);

	if (rc)
		uwb_rc_init(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_alloc);
/* sysfs attributes exposed on every RC device node. */
static struct attribute *rc_attrs[] = {
	&dev_attr_mac_address.attr,
	&dev_attr_scan.attr,
	&dev_attr_beacon.attr,
	NULL,
};
static struct attribute_group rc_attr_group = {
	.attrs = rc_attrs,
};
/*
 * Registration of sysfs specific stuff
 */
/* Attach the RC attribute group to the device's sysfs directory. */
static int uwb_rc_sys_add(struct uwb_rc *rc)
{
	return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
/* Detach the RC attribute group again; counterpart of uwb_rc_sys_add(). */
static void __uwb_rc_sys_rm(struct uwb_rc *rc)
{
	sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
/**
 * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
 * @rc: the radio controller.
 *
 * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
 * then a random locally administered EUI-48 is generated and set on
 * the device. The probability of address collisions is sufficiently
 * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
 *
 * Returns 0 on success, or the negative error from the get/set call.
 */
static
int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_dev *uwb_dev = &rc->uwb_dev;
	char devname[UWB_ADDR_STRSIZE];
	struct uwb_mac_addr addr;
	result = uwb_rc_mac_addr_get(rc, &addr);
	if (result < 0) {
		dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
		return result;
	}
	if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
		addr.data[0] = 0x02; /* locally administered and unicast */
		get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
		result = uwb_rc_mac_addr_set(rc, &addr);
		if (result < 0) {
			uwb_mac_addr_print(devname, sizeof(devname), &addr);
			dev_err(dev, "cannot set EUI-48 address %s: %d\n",
				devname, result);
			return result;
		}
	}
	/* cache the (possibly newly generated) address on the uwb_dev */
	uwb_dev->mac_addr = addr;
	return 0;
}
/* Bring up an RC's subsystems in order: radio, EUI-48, DevAddr, IE
 * handling, reservations, then debugfs.  On failure the already
 * initialized IE subsystem is unwound via the goto chain; earlier
 * steps have no teardown of their own here. */
static int uwb_rc_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;
	result = uwb_radio_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB radio: %d\n", result);
		goto error;
	}
	result = uwb_rc_mac_addr_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
		goto error;
	}
	result = uwb_rc_dev_addr_assign(rc);
	if (result < 0) {
		dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
		goto error;
	}
	result = uwb_rc_ie_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup IE subsystem: %d\n", result);
		goto error_ie_setup;
	}
	result = uwb_rsv_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
		goto error_rsv_setup;
	}
	uwb_dbg_add_rc(rc);
	return 0;

error_rsv_setup:
	uwb_rc_ie_release(rc);
error_ie_setup:
error:
	return result;
}
/**
 * Register a new UWB radio controller
 *
 * Did you call uwb_rc_init() on your rc?
 *
 * We assume that this is being called with a > 0 refcount on
 * it [through ops->{get|put}_device(). We'll take our own, though.
 *
 * @parent_dev is our real device, the one that provides the actual UWB device
 *
 * Sequence: assign an index, start the event thread and the hardware
 * (rc->start), run uwb_rc_setup(), register the uwb_dev and the sysfs
 * attributes; finally mark the RC ready.  Failures unwind in reverse
 * via the labels at the bottom.
 */
int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
{
	int result;
	struct device *dev;
	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
	rc->index = uwb_rc_new_index();
	dev = &rc->uwb_dev.dev;
	dev_set_name(dev, "uwb%d", rc->index);
	rc->priv = priv;
	init_waitqueue_head(&rc->uwbd.wq);
	INIT_LIST_HEAD(&rc->uwbd.event_list);
	spin_lock_init(&rc->uwbd.event_list_lock);
	uwbd_start(rc);
	result = rc->start(rc);
	if (result < 0)
		goto error_rc_start;
	result = uwb_rc_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
		goto error_rc_setup;
	}
	/* -EADDRNOTAVAIL is tolerated: the device can be added without
	 * an assigned address at this point */
	result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
	if (result < 0 && result != -EADDRNOTAVAIL)
		goto error_dev_add;
	result = uwb_rc_sys_add(rc);
	if (result < 0) {
		dev_err(parent_dev, "cannot register UWB radio controller "
			"dev attributes: %d\n", result);
		goto error_sys_add;
	}
	uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
	dev_info(dev,
		 "new uwb radio controller (mac %s dev %s) on %s %s\n",
		 macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
	/* ready: lookups such as __uwb_rc_try_get() may now hand it out */
	rc->ready = 1;
	return 0;

error_sys_add:
	uwb_dev_rm(&rc->uwb_dev);
error_dev_add:
error_rc_setup:
	rc->stop(rc);
error_rc_start:
	uwbd_stop(rc);
	return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_add);
/* uwb_dev_for_each() helper: mark one child device as gone off the air. */
static int uwb_dev_offair_helper(struct device *dev, void *priv)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
}
/*
 * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
 *
 * Clears rc->ready first so no new references are handed out, then
 * tears down in roughly reverse order of uwb_rc_add(): debugfs,
 * reservations, radio, hardware stop, event thread, NEH, and finally
 * the child devices and the uwb_dev itself.
 */
void uwb_rc_rm(struct uwb_rc *rc)
{
	rc->ready = 0;

	uwb_dbg_del_rc(rc);
	uwb_rsv_remove_all(rc);
	uwb_radio_shutdown(rc);

	rc->stop(rc);

	uwbd_stop(rc);
	uwb_rc_neh_destroy(rc);

	/* clear the callback state under the device lock so concurrent
	 * users see a consistent quiesced RC */
	uwb_dev_lock(&rc->uwb_dev);
	rc->priv = NULL;
	rc->cmd = NULL;
	uwb_dev_unlock(&rc->uwb_dev);

	mutex_lock(&rc->uwb_beca.mutex);
	uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
	__uwb_rc_sys_rm(rc);
	mutex_unlock(&rc->uwb_beca.mutex);
	uwb_rsv_cleanup(rc);
	uwb_beca_release(rc);
	uwb_dev_rm(&rc->uwb_dev);
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
/* class_find_device() match callback: true only when @dev is backed by
 * the target RC and that RC is not quiescing (ready != 0). */
static int find_rc_try_get(struct device *dev, const void *data)
{
	const struct uwb_rc *target_rc = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	if (rc == NULL) {
		WARN_ON(1);
		return 0;
	}
	return rc == target_rc && rc->ready != 0;
}
/**
 * Given a radio controller descriptor, validate and refcount it
 *
 * @returns NULL if the rc does not exist or is quiescing; the ptr to
 * it otherwise.
 */
struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, target_rc,
				find_rc_try_get);
	if (dev) {
		rc = dev_get_drvdata(dev);
		__uwb_rc_get(rc);
		/* class_find_device() took a struct-device reference on
		 * top of the RC reference we just acquired; release it
		 * to avoid leaking the device. */
		put_device(dev);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
/*
 * RC get for external refcount acquirers...
 *
 * Increments the refcount of the device and its backend modules.
 * Returns NULL (and takes no reference) when the RC is quiescing.
 */
static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
{
	if (rc->ready == 0)
		return NULL;
	uwb_dev_get(&rc->uwb_dev);
	return rc;
}
/* class_find_device() match callback: true when @dev's RC hangs two
 * levels below @data (the grandparent device); takes an RC reference
 * on match via uwb_rc_get(). */
static int find_rc_grandpa(struct device *dev, const void *data)
{
	const struct device *grandpa_dev = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	if (rc->uwb_dev.dev.parent->parent != grandpa_dev)
		return 0;
	uwb_rc_get(rc);
	return 1;
}
/**
 * Locate and refcount a radio controller given a common grand-parent
 *
 * @grandpa_dev Pointer to the 'grandparent' device structure.
 * @returns NULL If the rc does not exist or is quiescing; the ptr to
 * it otherwise, properly referenced.
 *
 * The Radio Control interface (or the UWB Radio Controller) is always
 * an interface of a device. The parent is the interface, the
 * grandparent is the device that encapsulates the interface.
 *
 * The RC reference itself is taken by the find_rc_grandpa() match
 * callback (via uwb_rc_get()); here we only translate the matched
 * device back to its RC.
 */
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
				find_rc_grandpa);
	if (dev) {
		rc = dev_get_drvdata(dev);
		/* drop the struct-device reference class_find_device()
		 * took; the caller keeps only the RC reference acquired
		 * in the match callback. */
		put_device(dev);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
/**
 * Find a radio controller by device address
 *
 * @returns the pointer to the radio controller, properly referenced
 *
 * (This kernel-doc describes uwb_rc_get_by_dev() below; find_rc_dev()
 * is its class_find_device() match callback, which also takes the RC
 * reference via uwb_rc_get() on a match.)
 */
static int find_rc_dev(struct device *dev, const void *data)
{
	const struct uwb_dev_addr *addr = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	if (rc == NULL) {
		WARN_ON(1);
		return 0;
	}
	/* match when the RC's DevAddr equals the one we are looking for */
	if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
		rc = uwb_rc_get(rc);
		return 1;
	}
	return 0;
}
/* Look up (and refcount, via the find_rc_dev() match callback) a radio
 * controller by its device address; returns NULL when none matches. */
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
	if (dev) {
		rc = dev_get_drvdata(dev);
		/* release the struct-device reference taken by
		 * class_find_device(); the RC reference from the match
		 * callback is what the caller keeps. */
		put_device(dev);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
/**
 * Drop a reference on a radio controller
 *
 * This is the version that should be done by entities external to the
 * UWB Radio Control stack (ie: clients of the API).
 *
 * Thin public wrapper around the internal __uwb_rc_put().
 */
void uwb_rc_put(struct uwb_rc *rc)
{
	__uwb_rc_put(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_put);
| gpl-2.0 |
RIP95/android_kernel_zte_msm8960 | sound/pci/ice1712/ice1724.c | 3590 | 78725 | /*
* ALSA driver for VT1724 ICEnsemble ICE1724 / VIA VT1724 (Envy24HT)
* VIA VT1720 (Envy24PT)
*
* Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz>
* 2002 James Stafford <jstafford@ampltd.com>
* 2003 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/rawmidi.h>
#include <sound/initval.h>
#include <sound/asoundef.h>
#include "ice1712.h"
#include "envy24ht.h"
/* lowlevel routines */
#include "amp.h"
#include "revo.h"
#include "aureon.h"
#include "vt1720_mobo.h"
#include "pontis.h"
#include "prodigy192.h"
#include "prodigy_hifi.h"
#include "juli.h"
#include "maya44.h"
#include "phase.h"
#include "wtm.h"
#include "se.h"
#include "quartet.h"
/* Module metadata, per-card module parameters and the PCI ID table. */
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{"
	       REVO_DEVICE_DESC
	       AMP_AUDIO2000_DEVICE_DESC
	       AUREON_DEVICE_DESC
	       VT1720_MOBO_DEVICE_DESC
	       PONTIS_DEVICE_DESC
	       PRODIGY192_DEVICE_DESC
	       PRODIGY_HIFI_DEVICE_DESC
	       JULI_DEVICE_DESC
	       MAYA44_DEVICE_DESC
	       PHASE_DEVICE_DESC
	       WTM_DEVICE_DESC
	       SE_DEVICE_DESC
	       QTET_DEVICE_DESC
		"{VIA,VT1720},"
		"{VIA,VT1724},"
		"{ICEnsemble,Generic ICE1724},"
		"{ICEnsemble,Generic Envy24HT}"
		"{ICEnsemble,Generic Envy24PT}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
static char *model[SNDRV_CARDS];
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for ICE1724 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for ICE1724 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable ICE1724 soundcard.");
module_param_array(model, charp, NULL, 0444);
MODULE_PARM_DESC(model, "Use the given board model.");
/* Both VT1720 and VT1724 have the same PCI IDs */
static DEFINE_PCI_DEVICE_TABLE(snd_vt1724_ids) = {
	{ PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_vt1724_ids);
/* Global rate-lock state shared by all cards (controlled via proc/ioctl
 * elsewhere in the driver). */
static int PRO_RATE_LOCKED;
static int PRO_RATE_RESET = 1;
static unsigned int PRO_RATE_DEFAULT = 44100;
/* names of the selectable external clock sources */
static char *ext_clock_names[1] = { "IEC958 In" };
/*
 * Basic I/O
 */
/*
 * default rates, default clock routines
 */
/* check whether the clock mode is spdif-in */
static inline int stdclock_is_spdif_master(struct snd_ice1712 *ice)
{
	/* the SPDIF-master bit in the MT rate register selects the
	 * external (S/PDIF input) clock */
	return !!(inb(ICEMT1724(ice, RATE)) & VT1724_SPDIF_MASTER);
}
/*
 * locking rate makes sense only for internal clock mode
 */
static inline int is_pro_rate_locked(struct snd_ice1712 *ice)
{
	if (ice->is_spdif_master(ice))
		return 0;	/* external clock: the rate follows the input */
	return PRO_RATE_LOCKED != 0;
}
/*
 * ac97 section
 */
/* Poll the AC97 command register until the codec is idle and ready;
 * returns the last command-register value read (also on timeout, so
 * callers can reuse its ID bits). */
static unsigned char snd_vt1724_ac97_ready(struct snd_ice1712 *ice)
{
	unsigned char old_cmd;
	int tm;
	for (tm = 0; tm < 0x10000; tm++) {
		old_cmd = inb(ICEMT1724(ice, AC97_CMD));
		/* still busy with a previous read/write? */
		if (old_cmd & (VT1724_AC97_WRITE | VT1724_AC97_READ))
			continue;
		if (!(old_cmd & VT1724_AC97_READY))
			continue;
		return old_cmd;
	}
	snd_printd(KERN_ERR "snd_vt1724_ac97_ready: timeout\n");
	return old_cmd;
}
/* Busy-wait until @bit clears in the AC97 command register.
 * Returns 0 on success, -EIO on timeout. */
static int snd_vt1724_ac97_wait_bit(struct snd_ice1712 *ice, unsigned char bit)
{
	int attempt;

	for (attempt = 0; attempt < 0x10000; attempt++) {
		if (!(inb(ICEMT1724(ice, AC97_CMD)) & bit))
			return 0;
	}
	snd_printd(KERN_ERR "snd_vt1724_ac97_wait_bit: timeout\n");
	return -EIO;
}
/* Write @val to AC97 codec register @reg: wait for the codec to be
 * ready, select the codec ID, latch index+data, then kick the write
 * and wait for it to complete. */
static void snd_vt1724_ac97_write(struct snd_ac97 *ac97,
				  unsigned short reg,
				  unsigned short val)
{
	struct snd_ice1712 *ice = ac97->private_data;
	unsigned char old_cmd;
	old_cmd = snd_vt1724_ac97_ready(ice);
	old_cmd &= ~VT1724_AC97_ID_MASK;
	old_cmd |= ac97->num;	/* address this codec */
	outb(reg, ICEMT1724(ice, AC97_INDEX));
	outw(val, ICEMT1724(ice, AC97_DATA));
	outb(old_cmd | VT1724_AC97_WRITE, ICEMT1724(ice, AC97_CMD));
	snd_vt1724_ac97_wait_bit(ice, VT1724_AC97_WRITE);
}
/* Read AC97 codec register @reg; returns the register value, or ~0
 * (all-ones) when the read never completed. */
static unsigned short snd_vt1724_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	struct snd_ice1712 *ice = ac97->private_data;
	unsigned char old_cmd;
	old_cmd = snd_vt1724_ac97_ready(ice);
	old_cmd &= ~VT1724_AC97_ID_MASK;
	old_cmd |= ac97->num;	/* address this codec */
	outb(reg, ICEMT1724(ice, AC97_INDEX));
	outb(old_cmd | VT1724_AC97_READ, ICEMT1724(ice, AC97_CMD));
	if (snd_vt1724_ac97_wait_bit(ice, VT1724_AC97_READ) < 0)
		return ~0;
	return inw(ICEMT1724(ice, AC97_DATA));
}
/*
 * GPIO operations
 */
/* set gpio direction 0 = read, 1 = write */
static void snd_vt1724_set_gpio_dir(struct snd_ice1712 *ice, unsigned int data)
{
	outl(data, ICEREG1724(ice, GPIO_DIRECTION));
	inw(ICEREG1724(ice, GPIO_DIRECTION)); /* dummy read for pci-posting */
}
/* get gpio direction 0 = read, 1 = write */
static unsigned int snd_vt1724_get_gpio_dir(struct snd_ice1712 *ice)
{
	return inl(ICEREG1724(ice, GPIO_DIRECTION));
}
/* set the gpio mask (0 = writable) */
static void snd_vt1724_set_gpio_mask(struct snd_ice1712 *ice, unsigned int data)
{
	outw(data, ICEREG1724(ice, GPIO_WRITE_MASK));
	if (!ice->vt1720) /* VT1720 supports only 16 GPIO bits */
		outb((data >> 16) & 0xff, ICEREG1724(ice, GPIO_WRITE_MASK_22));
	inw(ICEREG1724(ice, GPIO_WRITE_MASK)); /* dummy read for pci-posting */
}
/* Read back the full GPIO write mask; the upper 6 bits exist only on
 * the VT1724 (the VT1720 has just 16 GPIO lines). */
static unsigned int snd_vt1724_get_gpio_mask(struct snd_ice1712 *ice)
{
	unsigned int hi = 0;

	if (!ice->vt1720)
		hi = inb(ICEREG1724(ice, GPIO_WRITE_MASK_22));
	return (hi << 16) | inw(ICEREG1724(ice, GPIO_WRITE_MASK));
}
/* Drive the GPIO data lines; upper bits only on VT1724. */
static void snd_vt1724_set_gpio_data(struct snd_ice1712 *ice, unsigned int data)
{
	outw(data, ICEREG1724(ice, GPIO_DATA));
	if (!ice->vt1720)
		outb(data >> 16, ICEREG1724(ice, GPIO_DATA_22));
	inw(ICEREG1724(ice, GPIO_DATA)); /* dummy read for pci-posting */
}
/* Sample the current GPIO data lines; the upper 6 bits exist only on
 * the VT1724. */
static unsigned int snd_vt1724_get_gpio_data(struct snd_ice1712 *ice)
{
	unsigned int hi = 0;

	if (!ice->vt1720)
		hi = inb(ICEREG1724(ice, GPIO_DATA_22));
	return (hi << 16) | inw(ICEREG1724(ice, GPIO_DATA));
}
/*
 * MIDI
 */
/* Drain and discard every byte currently pending in the MPU RX FIFO. */
static void vt1724_midi_clear_rx(struct snd_ice1712 *ice)
{
	unsigned int pending = inb(ICEREG1724(ice, MPU_RXFIFO));

	while (pending-- > 0)
		inb(ICEREG1724(ice, MPU_DATA));
}
/* Return the single rawmidi substream of the given direction
 * (this device registers exactly one substream per stream). */
static inline struct snd_rawmidi_substream *
get_rawmidi_substream(struct snd_ice1712 *ice, unsigned int stream)
{
	return list_first_entry(&ice->rmidi[0]->streams[stream].substreams,
				struct snd_rawmidi_substream, list);
}
static void enable_midi_irq(struct snd_ice1712 *ice, u8 flag, int enable);
/* Push as many pending MIDI bytes into the TX FIFO as it has room
 * for; called with ice->reg_lock held (from trigger and the IRQ
 * handler). */
static void vt1724_midi_write(struct snd_ice1712 *ice)
{
	struct snd_rawmidi_substream *s;
	int count, i;
	u8 buffer[32];
	s = get_rawmidi_substream(ice, SNDRV_RAWMIDI_STREAM_OUTPUT);
	/* the TX FIFO is 32 bytes deep; the register reports fill level */
	count = 31 - inb(ICEREG1724(ice, MPU_TXFIFO));
	if (count > 0) {
		count = snd_rawmidi_transmit(s, buffer, count);
		for (i = 0; i < count; ++i)
			outb(buffer[i], ICEREG1724(ice, MPU_DATA));
	}
	/* mask irq when all bytes have been transmitted.
	 * enabled again in output_trigger when the new data comes in.
	 */
	enable_midi_irq(ice, VT1724_IRQ_MPU_TX,
			!snd_rawmidi_transmit_empty(s));
}
/* Pull up to 32 received bytes from the RX FIFO and hand them to the
 * rawmidi core; called with ice->reg_lock held. */
static void vt1724_midi_read(struct snd_ice1712 *ice)
{
	struct snd_rawmidi_substream *s;
	int count, i;
	u8 buffer[32];
	s = get_rawmidi_substream(ice, SNDRV_RAWMIDI_STREAM_INPUT);
	count = inb(ICEREG1724(ice, MPU_RXFIFO));
	if (count > 0) {
		count = min(count, 32);	/* bounded by the local buffer */
		for (i = 0; i < count; ++i)
			buffer[i] = inb(ICEREG1724(ice, MPU_DATA));
		snd_rawmidi_receive(s, buffer, count);
	}
}
/* call with ice->reg_lock */
static void enable_midi_irq(struct snd_ice1712 *ice, u8 flag, int enable)
{
	u8 mask = inb(ICEREG1724(ice, IRQMASK));

	/* a set bit in IRQMASK disables the corresponding interrupt */
	mask = enable ? (mask & ~flag) : (mask | flag);
	outb(mask, ICEREG1724(ice, IRQMASK));
}
/* Locked wrapper around enable_midi_irq() for use outside the IRQ path. */
static void vt1724_enable_midi_irq(struct snd_rawmidi_substream *substream,
				   u8 flag, int enable)
{
	struct snd_ice1712 *ice = substream->rmidi->private_data;

	spin_lock_irq(&ice->reg_lock);
	enable_midi_irq(ice, flag, enable);
	spin_unlock_irq(&ice->reg_lock);
}
/* rawmidi output open/close: nothing to set up, the IRQ is armed only
 * from the trigger callback. */
static int vt1724_midi_output_open(struct snd_rawmidi_substream *s)
{
	return 0;
}
static int vt1724_midi_output_close(struct snd_rawmidi_substream *s)
{
	return 0;
}
/* Start/stop MIDI output: on start, send immediately (which also arms
 * the TX interrupt as needed); on stop, mask the TX interrupt. */
static void vt1724_midi_output_trigger(struct snd_rawmidi_substream *s, int up)
{
	struct snd_ice1712 *ice = s->rmidi->private_data;
	unsigned long flags;
	spin_lock_irqsave(&ice->reg_lock, flags);
	if (up) {
		ice->midi_output = 1;
		vt1724_midi_write(ice);
	} else {
		ice->midi_output = 0;
		enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0);
	}
	spin_unlock_irqrestore(&ice->reg_lock, flags);
}
/* Wait (bounded) for the TX FIFO to empty so queued MIDI bytes reach
 * the wire before the stream is closed. */
static void vt1724_midi_output_drain(struct snd_rawmidi_substream *s)
{
	struct snd_ice1712 *ice = s->rmidi->private_data;
	unsigned long timeout;
	vt1724_enable_midi_irq(s, VT1724_IRQ_MPU_TX, 0);
	/* 32 bytes should be transmitted in less than about 12 ms */
	timeout = jiffies + msecs_to_jiffies(15);
	do {
		if (inb(ICEREG1724(ice, MPU_CTRL)) & VT1724_MPU_TX_EMPTY)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_after(timeout, jiffies));
}
/* rawmidi callbacks for the MPU output stream. */
static struct snd_rawmidi_ops vt1724_midi_output_ops = {
	.open = vt1724_midi_output_open,
	.close = vt1724_midi_output_close,
	.trigger = vt1724_midi_output_trigger,
	.drain = vt1724_midi_output_drain,
};
/* rawmidi input open: flush stale RX bytes and unmask the RX interrupt. */
static int vt1724_midi_input_open(struct snd_rawmidi_substream *s)
{
	vt1724_midi_clear_rx(s->rmidi->private_data);
	vt1724_enable_midi_irq(s, VT1724_IRQ_MPU_RX, 1);
	return 0;
}
/* rawmidi input close: mask the RX interrupt again. */
static int vt1724_midi_input_close(struct snd_rawmidi_substream *s)
{
	vt1724_enable_midi_irq(s, VT1724_IRQ_MPU_RX, 0);
	return 0;
}
/* Start/stop MIDI input delivery: on start, drain whatever is already
 * in the RX FIFO; on stop, only clear the flag (the IRQ handler then
 * discards incoming bytes). */
static void vt1724_midi_input_trigger(struct snd_rawmidi_substream *s, int up)
{
	struct snd_ice1712 *ice = s->rmidi->private_data;
	unsigned long flags;
	spin_lock_irqsave(&ice->reg_lock, flags);
	if (up) {
		ice->midi_input = 1;
		vt1724_midi_read(ice);
	} else {
		ice->midi_input = 0;
	}
	spin_unlock_irqrestore(&ice->reg_lock, flags);
}
/* rawmidi callbacks for the MPU input stream. */
static struct snd_rawmidi_ops vt1724_midi_input_ops = {
	.open = vt1724_midi_input_open,
	.close = vt1724_midi_input_close,
	.trigger = vt1724_midi_input_trigger,
};
/*
* Interrupt handler
*/
/*
 * Interrupt handler: services MPU-401 MIDI RX/TX and multi-track PCM
 * interrupts.  Loops until the masked status register reads zero; after
 * ten iterations it assumes a stuck source, complains and bails out.
 */
static irqreturn_t snd_vt1724_interrupt(int irq, void *dev_id)
{
    struct snd_ice1712 *ice = dev_id;
    unsigned char status;
    unsigned char status_mask =
        VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX | VT1724_IRQ_MTPCM;
    int handled = 0;
    int timeout = 0;
    while (1) {
        status = inb(ICEREG1724(ice, IRQSTAT));
        status &= status_mask;
        if (status == 0)
            break;
        spin_lock(&ice->reg_lock);
        if (++timeout > 10) {
            /* irq line appears stuck: report and give up,
             * disabling TX interrupts if they are the culprit */
            status = inb(ICEREG1724(ice, IRQSTAT));
            printk(KERN_ERR "ice1724: Too long irq loop, "
                   "status = 0x%x\n", status);
            if (status & VT1724_IRQ_MPU_TX) {
                printk(KERN_ERR "ice1724: Disabling MPU_TX\n");
                enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0);
            }
            spin_unlock(&ice->reg_lock);
            break;
        }
        handled = 1;
        if (status & VT1724_IRQ_MPU_TX) {
            if (ice->midi_output)
                vt1724_midi_write(ice);
            else
                enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0);
            /* Due to mysterious reasons, MPU_TX is always
             * generated (and can't be cleared) when a PCM
             * playback is going.  So let's ignore it at the
             * next loop iteration.
             */
            status_mask &= ~VT1724_IRQ_MPU_TX;
        }
        if (status & VT1724_IRQ_MPU_RX) {
            if (ice->midi_input)
                vt1724_midi_read(ice);
            else
                vt1724_midi_clear_rx(ice);
        }
        /* ack MPU irq */
        outb(status, ICEREG1724(ice, IRQSTAT));
        spin_unlock(&ice->reg_lock);
        if (status & VT1724_IRQ_MTPCM) {
            /*
             * Multi-track PCM
             * PCM assignments are:
             * Playback DMA0 (M/C) = playback_pro_substream
             * Playback DMA1 = playback_con_substream_ds[0]
             * Playback DMA2 = playback_con_substream_ds[1]
             * Playback DMA3 = playback_con_substream_ds[2]
             * Playback DMA4 (SPDIF) = playback_con_substream
             * Record DMA0 = capture_pro_substream
             * Record DMA1 = capture_con_substream
             */
            unsigned char mtstat = inb(ICEMT1724(ice, IRQ));
            if (mtstat & VT1724_MULTI_PDMA0) {
                if (ice->playback_pro_substream)
                    snd_pcm_period_elapsed(ice->playback_pro_substream);
            }
            if (mtstat & VT1724_MULTI_RDMA0) {
                if (ice->capture_pro_substream)
                    snd_pcm_period_elapsed(ice->capture_pro_substream);
            }
            if (mtstat & VT1724_MULTI_PDMA1) {
                if (ice->playback_con_substream_ds[0])
                    snd_pcm_period_elapsed(ice->playback_con_substream_ds[0]);
            }
            if (mtstat & VT1724_MULTI_PDMA2) {
                if (ice->playback_con_substream_ds[1])
                    snd_pcm_period_elapsed(ice->playback_con_substream_ds[1]);
            }
            if (mtstat & VT1724_MULTI_PDMA3) {
                if (ice->playback_con_substream_ds[2])
                    snd_pcm_period_elapsed(ice->playback_con_substream_ds[2]);
            }
            if (mtstat & VT1724_MULTI_PDMA4) {
                if (ice->playback_con_substream)
                    snd_pcm_period_elapsed(ice->playback_con_substream);
            }
            if (mtstat & VT1724_MULTI_RDMA1) {
                if (ice->capture_con_substream)
                    snd_pcm_period_elapsed(ice->capture_con_substream);
            }
            /* ack anyway to avoid freeze */
            outb(mtstat, ICEMT1724(ice, IRQ));
            /* ought to really handle this properly */
            if (mtstat & VT1724_MULTI_FIFO_ERR) {
                unsigned char fstat = inb(ICEMT1724(ice, DMA_FIFO_ERR));
                outb(fstat, ICEMT1724(ice, DMA_FIFO_ERR));
                outb(VT1724_MULTI_FIFO_ERR | inb(ICEMT1724(ice, DMA_INT_MASK)), ICEMT1724(ice, DMA_INT_MASK));
                /* If I don't do this, I get machine lockup due to continual interrupts */
            }
        }
    }
    return IRQ_RETVAL(handled);
}
/*
* PCM code - professional part (multitrack)
*/
/* supported sample rates in ascending order; the constraint lists below
 * share this table and merely limit the visible count */
static unsigned int rates[] = {
    8000, 9600, 11025, 12000, 16000, 22050, 24000,
    32000, 44100, 48000, 64000, 88200, 96000,
    176400, 192000,
};
static struct snd_pcm_hw_constraint_list hw_constraints_rates_96 = {
    .count = ARRAY_SIZE(rates) - 2, /* up to 96000 */
    .list = rates,
    .mask = 0,
};
static struct snd_pcm_hw_constraint_list hw_constraints_rates_48 = {
    .count = ARRAY_SIZE(rates) - 5, /* up to 48000 */
    .list = rates,
    .mask = 0,
};
static struct snd_pcm_hw_constraint_list hw_constraints_rates_192 = {
    .count = ARRAY_SIZE(rates),     /* full list, up to 192000 */
    .list = rates,
    .mask = 0,
};
/* per-stream DMA register offsets plus the start/pause control bit */
struct vt1724_pcm_reg {
    unsigned int addr;  /* ADDR register offset */
    unsigned int size;  /* SIZE register offset */
    unsigned int count; /* COUNT register offset */
    unsigned int start; /* start & pause bit */
};
/*
 * Common PCM trigger callback.  Collects the start bits of all substreams
 * linked to this one on the same chip, then flips them atomically in
 * either the DMA_PAUSE or DMA_CONTROL register depending on the command.
 */
static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    unsigned char what;
    unsigned char old;
    struct snd_pcm_substream *s;
    what = 0;
    /* gather the start bits of every linked substream on this chip */
    snd_pcm_group_for_each_entry(s, substream) {
        if (snd_pcm_substream_chip(s) == ice) {
            const struct vt1724_pcm_reg *reg;
            reg = s->runtime->private_data;
            what |= reg->start;
            snd_pcm_trigger_done(s, substream);
        }
    }
    switch (cmd) {
    case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
    case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        /* read-modify-write the pause bits under the register lock */
        spin_lock(&ice->reg_lock);
        old = inb(ICEMT1724(ice, DMA_PAUSE));
        if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
            old |= what;
        else
            old &= ~what;
        outb(old, ICEMT1724(ice, DMA_PAUSE));
        spin_unlock(&ice->reg_lock);
        break;
    case SNDRV_PCM_TRIGGER_START:
    case SNDRV_PCM_TRIGGER_STOP:
    case SNDRV_PCM_TRIGGER_SUSPEND:
        /* suspend is handled as a plain stop */
        spin_lock(&ice->reg_lock);
        old = inb(ICEMT1724(ice, DMA_CONTROL));
        if (cmd == SNDRV_PCM_TRIGGER_START)
            old |= what;
        else
            old &= ~what;
        outb(old, ICEMT1724(ice, DMA_CONTROL));
        spin_unlock(&ice->reg_lock);
        break;
    case SNDRV_PCM_TRIGGER_RESUME:
        /* apps will have to restart stream */
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
/*
*/
#define DMA_STARTS (VT1724_RDMA0_START|VT1724_PDMA0_START|VT1724_RDMA1_START|\
VT1724_PDMA1_START|VT1724_PDMA2_START|VT1724_PDMA3_START|VT1724_PDMA4_START)
#define DMA_PAUSES (VT1724_RDMA0_PAUSE|VT1724_PDMA0_PAUSE|VT1724_RDMA1_PAUSE|\
VT1724_PDMA1_PAUSE|VT1724_PDMA2_PAUSE|VT1724_PDMA3_PAUSE|VT1724_PDMA4_PAUSE)
static const unsigned int stdclock_rate_list[16] = {
48000, 24000, 12000, 9600, 32000, 16000, 8000, 96000, 44100,
22050, 11025, 88200, 176400, 0, 192000, 64000
};
/* read the current sample rate back from the MT RATE register */
static unsigned int stdclock_get_rate(struct snd_ice1712 *ice)
{
    return stdclock_rate_list[inb(ICEMT1724(ice, RATE)) & 15];
}
/* program the MT RATE register; rates not in the table are silently
 * ignored (the register keeps its previous value) */
static void stdclock_set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(stdclock_rate_list); idx++) {
        if (stdclock_rate_list[idx] != rate)
            continue;
        outb(idx, ICEMT1724(ice, RATE));
        return;
    }
}
/*
 * Adjust the I2S master-clock multiplier for the requested rate.
 * Returns 1 when the MCLK setting was actually changed, 0 otherwise.
 */
static unsigned char stdclock_set_mclk(struct snd_ice1712 *ice,
                                       unsigned int rate)
{
    unsigned char newval, oldval;

    /* the MCLK ratio is only programmable in I2S (pro) mode; check MT02 */
    if (!(ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S))
        return 0;   /* AC-link mode: nothing to do */

    oldval = inb(ICEMT1724(ice, I2S_FORMAT));
    if (rate > 96000)
        newval = oldval | VT1724_MT_I2S_MCLK_128X;  /* 128x MCLK */
    else
        newval = oldval & ~VT1724_MT_I2S_MCLK_128X; /* 256x MCLK */
    if (newval == oldval)
        return 0;   /* no change in master clock */

    outb(newval, ICEMT1724(ice, I2S_FORMAT));
    return 1;       /* master clock changed */
}
/*
 * Set the professional (multi-track) sample rate.
 *
 * @force: non-zero when the rate change was requested via a control
 *         (ucontrol) rather than by a stream preparing itself.
 *
 * Fails with -EBUSY while any DMA is running or paused, or when the
 * rate is locked and differs from the requested one.  On success the
 * master clock, GPIO hooks, AKM codecs and SPDIF are updated as needed.
 */
static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
                int force)
{
    unsigned long flags;
    unsigned char mclk_change;
    unsigned int i, old_rate;
    /* reject rates above the chip's capability (list is ascending) */
    if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
        return -EINVAL;
    spin_lock_irqsave(&ice->reg_lock, flags);
    if ((inb(ICEMT1724(ice, DMA_CONTROL)) & DMA_STARTS) ||
        (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES) {
        /* running? we cannot change the rate now... */
        spin_unlock_irqrestore(&ice->reg_lock, flags);
        return ((rate == ice->cur_rate) && !force) ? 0 : -EBUSY;
    }
    if (!force && is_pro_rate_locked(ice)) {
        /* comparing required and current rate - makes sense for
         * internal clock only */
        spin_unlock_irqrestore(&ice->reg_lock, flags);
        return (rate == ice->cur_rate) ? 0 : -EBUSY;
    }
    if (force || !ice->is_spdif_master(ice)) {
        /* force means the rate was switched by ucontrol, otherwise
         * setting clock rate for internal clock mode */
        old_rate = ice->get_rate(ice);
        if (force || (old_rate != rate))
            ice->set_rate(ice, rate);
        else if (rate == ice->cur_rate) {
            /* nothing to do at all */
            spin_unlock_irqrestore(&ice->reg_lock, flags);
            return 0;
        }
    }
    ice->cur_rate = rate;
    /* setting master clock */
    mclk_change = ice->set_mclk(ice, rate);
    spin_unlock_irqrestore(&ice->reg_lock, flags);
    /* the remaining hooks may sleep, so run them unlocked */
    if (mclk_change && ice->gpio.i2s_mclk_changed)
        ice->gpio.i2s_mclk_changed(ice);
    if (ice->gpio.set_pro_rate)
        ice->gpio.set_pro_rate(ice, rate);
    /* set up codecs */
    for (i = 0; i < ice->akm_codecs; i++) {
        if (ice->akm[i].ops.set_rate_val)
            ice->akm[i].ops.set_rate_val(&ice->akm[i], rate);
    }
    if (ice->spdif.ops.setup_rate)
        ice->spdif.ops.setup_rate(ice, rate);
    return 0;
}
/*
 * Common hw_params callback.  The multi-channel PDMA0 stream reserves
 * the surround channel pairs it needs (slots 0..2 of pcm_reserved);
 * an independent surround stream reserves only its own slot.  Returns
 * -EBUSY when a needed slot is already owned by another substream.
 */
static int snd_vt1724_pcm_hw_params(struct snd_pcm_substream *substream,
                    struct snd_pcm_hw_params *hw_params)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    int i, chs, err;
    chs = params_channels(hw_params);
    mutex_lock(&ice->open_mutex);
    /* mark surround channels */
    if (substream == ice->playback_pro_substream) {
        /* PDMA0 can be multi-channel up to 8 */
        /* number of extra stereo pairs beyond the first */
        chs = chs / 2 - 1;
        for (i = 0; i < chs; i++) {
            if (ice->pcm_reserved[i] &&
                ice->pcm_reserved[i] != substream) {
                mutex_unlock(&ice->open_mutex);
                return -EBUSY;
            }
            ice->pcm_reserved[i] = substream;
        }
        /* release slots no longer needed at this channel count */
        for (; i < 3; i++) {
            if (ice->pcm_reserved[i] == substream)
                ice->pcm_reserved[i] = NULL;
        }
    } else {
        for (i = 0; i < 3; i++) {
            /* check individual playback stream */
            if (ice->playback_con_substream_ds[i] == substream) {
                if (ice->pcm_reserved[i] &&
                    ice->pcm_reserved[i] != substream) {
                    mutex_unlock(&ice->open_mutex);
                    return -EBUSY;
                }
                ice->pcm_reserved[i] = substream;
                break;
            }
        }
    }
    mutex_unlock(&ice->open_mutex);
    err = snd_vt1724_set_pro_rate(ice, params_rate(hw_params), 0);
    if (err < 0)
        return err;
    return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/*
 * Common hw_free callback: drop any surround-channel reservations held
 * by this substream, then release the preallocated DMA buffer.
 */
static int snd_vt1724_pcm_hw_free(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    int idx;

    mutex_lock(&ice->open_mutex);
    /* unmark surround channels */
    for (idx = 0; idx < 3; idx++) {
        if (ice->pcm_reserved[idx] == substream)
            ice->pcm_reserved[idx] = NULL;
    }
    mutex_unlock(&ice->open_mutex);
    return snd_pcm_lib_free_pages(substream);
}
/*
 * Prepare the multi-track playback (PDMA0) stream: program burst length,
 * DMA base address, buffer size and period size.  The SIZE/COUNT
 * registers are 24 bits wide, hence the outw + outb split writes.
 */
static int snd_vt1724_playback_pro_prepare(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    unsigned char val;
    unsigned int size;
    spin_lock_irq(&ice->reg_lock);
    /* burst value derived from channel count (8 channels -> 0) */
    val = (8 - substream->runtime->channels) >> 1;
    outb(val, ICEMT1724(ice, BURST));
    outl(substream->runtime->dma_addr, ICEMT1724(ice, PLAYBACK_ADDR));
    /* sizes are programmed in dwords, minus one */
    size = (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1;
    /* outl(size, ICEMT1724(ice, PLAYBACK_SIZE)); */
    outw(size, ICEMT1724(ice, PLAYBACK_SIZE));
    outb(size >> 16, ICEMT1724(ice, PLAYBACK_SIZE) + 2);
    size = (snd_pcm_lib_period_bytes(substream) >> 2) - 1;
    /* outl(size, ICEMT1724(ice, PLAYBACK_COUNT)); */
    outw(size, ICEMT1724(ice, PLAYBACK_COUNT));
    outb(size >> 16, ICEMT1724(ice, PLAYBACK_COUNT) + 2);
    spin_unlock_irq(&ice->reg_lock);
    /*
    printk(KERN_DEBUG "pro prepare: ch = %d, addr = 0x%x, "
           "buffer = 0x%x, period = 0x%x\n",
           substream->runtime->channels,
           (unsigned int)substream->runtime->dma_addr,
           snd_pcm_lib_buffer_bytes(substream),
           snd_pcm_lib_period_bytes(substream));
    */
    return 0;
}
/*
 * Pointer callback for the multi-track playback (PDMA0) stream.
 *
 * The hardware SIZE register counts down the remaining dwords (minus
 * one) of the buffer, so the current position is buffer_size minus the
 * remainder.  Returns the position in frames, or 0 when the DMA is not
 * running or the register readback is out of range.
 *
 * (Cleaned up: removed the dead "#if 0 read PLAYBACK_ADDR" variant and
 * the empty-statement if-chain; behavior is unchanged.)
 */
static snd_pcm_uframes_t snd_vt1724_playback_pro_pointer(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    size_t ptr;

    if (!(inl(ICEMT1724(ice, DMA_CONTROL)) & VT1724_PDMA0_START))
        return 0;
    /* 24-bit remaining-size counter, in dwords minus one */
    ptr = inl(ICEMT1724(ice, PLAYBACK_SIZE)) & 0xffffff;
    ptr = (ptr + 1) << 2;
    ptr = bytes_to_frames(substream->runtime, ptr);
    if (!ptr)
        return 0;
    if (ptr > substream->runtime->buffer_size) {
        /* bogus readback; report a safe position instead */
        snd_printd("ice1724: invalid ptr %d (size=%d)\n",
               (int)ptr, (int)substream->runtime->buffer_size);
        return 0;
    }
    return substream->runtime->buffer_size - ptr;
}
/*
 * Generic prepare callback for the single-pair DMA streams: program the
 * address, buffer size and period count registers described by the
 * vt1724_pcm_reg attached to the runtime (sizes in dwords, minus one).
 */
static int snd_vt1724_pcm_prepare(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    const struct vt1724_pcm_reg *reg = substream->runtime->private_data;
    spin_lock_irq(&ice->reg_lock);
    outl(substream->runtime->dma_addr, ice->profi_port + reg->addr);
    outw((snd_pcm_lib_buffer_bytes(substream) >> 2) - 1,
         ice->profi_port + reg->size);
    outw((snd_pcm_lib_period_bytes(substream) >> 2) - 1,
         ice->profi_port + reg->count);
    spin_unlock_irq(&ice->reg_lock);
    return 0;
}
/*
 * Generic pointer callback for the single-pair DMA streams.  Reads the
 * countdown SIZE register (remaining dwords minus one) and converts it
 * to the current position in frames; 0 when the stream is stopped or
 * the readback is out of range.
 *
 * (Cleaned up: removed the dead "#if 0 use ADDR register" variant and
 * the empty-statement if-chain; behavior is unchanged.)
 */
static snd_pcm_uframes_t snd_vt1724_pcm_pointer(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    const struct vt1724_pcm_reg *reg = substream->runtime->private_data;
    size_t ptr;

    if (!(inl(ICEMT1724(ice, DMA_CONTROL)) & reg->start))
        return 0;
    /* SIZE register holds remaining dwords minus one */
    ptr = inw(ice->profi_port + reg->size);
    ptr = (ptr + 1) << 2;
    ptr = bytes_to_frames(substream->runtime, ptr);
    if (!ptr)
        return 0;
    if (ptr > substream->runtime->buffer_size) {
        /* bogus readback; report a safe position instead */
        snd_printd("ice1724: invalid ptr %d (size=%d)\n",
               (int)ptr, (int)substream->runtime->buffer_size);
        return 0;
    }
    return substream->runtime->buffer_size - ptr;
}
/* register descriptors for the four fixed DMA engines */
static const struct vt1724_pcm_reg vt1724_pdma0_reg = {
    .addr = VT1724_MT_PLAYBACK_ADDR,
    .size = VT1724_MT_PLAYBACK_SIZE,
    .count = VT1724_MT_PLAYBACK_COUNT,
    .start = VT1724_PDMA0_START,
};
static const struct vt1724_pcm_reg vt1724_pdma4_reg = {
    .addr = VT1724_MT_PDMA4_ADDR,
    .size = VT1724_MT_PDMA4_SIZE,
    .count = VT1724_MT_PDMA4_COUNT,
    .start = VT1724_PDMA4_START,
};
static const struct vt1724_pcm_reg vt1724_rdma0_reg = {
    .addr = VT1724_MT_CAPTURE_ADDR,
    .size = VT1724_MT_CAPTURE_SIZE,
    .count = VT1724_MT_CAPTURE_COUNT,
    .start = VT1724_RDMA0_START,
};
static const struct vt1724_pcm_reg vt1724_rdma1_reg = {
    .addr = VT1724_MT_RDMA1_ADDR,
    .size = VT1724_MT_RDMA1_SIZE,
    .count = VT1724_MT_RDMA1_COUNT,
    .start = VT1724_RDMA1_START,
};
/* functional aliases: which engine serves which logical stream */
#define vt1724_playback_pro_reg vt1724_pdma0_reg
#define vt1724_playback_spdif_reg vt1724_pdma4_reg
#define vt1724_capture_pro_reg vt1724_rdma0_reg
#define vt1724_capture_spdif_reg vt1724_rdma1_reg
/* hardware capabilities of the multi-channel PDMA0 playback stream */
static const struct snd_pcm_hardware snd_vt1724_playback_pro = {
    .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
         SNDRV_PCM_INFO_BLOCK_TRANSFER |
         SNDRV_PCM_INFO_MMAP_VALID |
         SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
    .formats = SNDRV_PCM_FMTBIT_S32_LE,
    .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_192000,
    .rate_min = 8000,
    .rate_max = 192000,
    .channels_min = 2,
    .channels_max = 8,
    .buffer_bytes_max = (1UL << 21),    /* 19bits dword */
    .period_bytes_min = 8 * 4 * 2,  /* FIXME: constraints needed */
    .period_bytes_max = (1UL << 21),
    .periods_min = 2,
    .periods_max = 1024,
};
/* hardware capabilities of the SPDIF streams (standard SPDIF rates only) */
static const struct snd_pcm_hardware snd_vt1724_spdif = {
    .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
         SNDRV_PCM_INFO_BLOCK_TRANSFER |
         SNDRV_PCM_INFO_MMAP_VALID |
         SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
    .formats = SNDRV_PCM_FMTBIT_S32_LE,
    .rates = (SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100|
          SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_88200|
          SNDRV_PCM_RATE_96000|SNDRV_PCM_RATE_176400|
          SNDRV_PCM_RATE_192000),
    .rate_min = 32000,
    .rate_max = 192000,
    .channels_min = 2,
    .channels_max = 2,
    .buffer_bytes_max = (1UL << 18),    /* 16bits dword */
    .period_bytes_min = 2 * 4 * 2,
    .period_bytes_max = (1UL << 18),
    .periods_min = 2,
    .periods_max = 1024,
};
/* hardware capabilities of the plain stereo streams (capture, surround) */
static const struct snd_pcm_hardware snd_vt1724_2ch_stereo = {
    .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
         SNDRV_PCM_INFO_BLOCK_TRANSFER |
         SNDRV_PCM_INFO_MMAP_VALID |
         SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
    .formats = SNDRV_PCM_FMTBIT_S32_LE,
    .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_192000,
    .rate_min = 8000,
    .rate_max = 192000,
    .channels_min = 2,
    .channels_max = 2,
    .buffer_bytes_max = (1UL << 18),    /* 16bits dword */
    .period_bytes_min = 2 * 4 * 2,
    .period_bytes_max = (1UL << 18),
    .periods_min = 2,
    .periods_max = 1024,
};
/*
* set rate constraints
*/
/*
 * Select the rate-constraint list from the EEPROM configuration:
 * AC-link tops out at 48 kHz, I2S at 96 kHz, and 192 kHz-capable
 * I2S codecs (except the VT1720) get the full list.
 */
static void set_std_hw_rates(struct snd_ice1712 *ice)
{
    if (!(ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S)) {
        /* ACLINK */
        ice->hw_rates = &hw_constraints_rates_48;
        return;
    }
    /* I2S; VT1720 doesn't support more than 96kHz */
    if ((ice->eeprom.data[ICE_EEP2_I2S] & 0x08) && !ice->vt1720)
        ice->hw_rates = &hw_constraints_rates_192;
    else
        ice->hw_rates = &hw_constraints_rates_96;
}
static int set_rate_constraints(struct snd_ice1712 *ice,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->hw.rate_min = ice->hw_rates->list[0];
runtime->hw.rate_max = ice->hw_rates->list[ice->hw_rates->count - 1];
runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
return snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
ice->hw_rates);
}
/* multi-channel playback needs alignment 8x32bit regardless of the channels
* actually used
*/
#define VT1724_BUFFER_ALIGN 0x20
/*
 * Open the multi-track playback (PDMA0) stream.  The maximum channel
 * count is reduced to the pairs not currently reserved by independent
 * surround streams; channel counts above 2 must be even.
 */
static int snd_vt1724_playback_pro_open(struct snd_pcm_substream *substream)
{
    struct snd_pcm_runtime *runtime = substream->runtime;
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    int chs, num_indeps;
    runtime->private_data = (void *)&vt1724_playback_pro_reg;
    ice->playback_pro_substream = substream;
    runtime->hw = snd_vt1724_playback_pro;
    snd_pcm_set_sync(substream);
    snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
    set_rate_constraints(ice, substream);
    mutex_lock(&ice->open_mutex);
    /* calculate the currently available channels */
    num_indeps = ice->num_total_dacs / 2 - 1;
    for (chs = 0; chs < num_indeps; chs++) {
        /* stop at the first pair taken by an independent stream */
        if (ice->pcm_reserved[chs])
            break;
    }
    chs = (chs + 1) * 2;
    runtime->hw.channels_max = chs;
    if (chs > 2) /* channels must be even */
        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
    mutex_unlock(&ice->open_mutex);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                   VT1724_BUFFER_ALIGN);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                   VT1724_BUFFER_ALIGN);
    if (ice->pro_open)
        ice->pro_open(ice, substream);
    return 0;
}
/* Open the professional capture (RDMA0) stream: plain stereo with the
 * chip's rate constraints and 8x32bit buffer alignment. */
static int snd_vt1724_capture_pro_open(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    runtime->private_data = (void *)&vt1724_capture_pro_reg;
    ice->capture_pro_substream = substream;
    runtime->hw = snd_vt1724_2ch_stereo;
    snd_pcm_set_sync(substream);
    snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
    set_rate_constraints(ice, substream);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                   VT1724_BUFFER_ALIGN);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                   VT1724_BUFFER_ALIGN);
    if (ice->pro_open)
        ice->pro_open(ice, substream);
    return 0;
}
/* close PDMA0 playback; optionally restore the default pro rate */
static int snd_vt1724_playback_pro_close(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (PRO_RATE_RESET)
        snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
    ice->playback_pro_substream = NULL;
    return 0;
}
/* close RDMA0 capture; optionally restore the default pro rate */
static int snd_vt1724_capture_pro_close(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (PRO_RATE_RESET)
        snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
    ice->capture_pro_substream = NULL;
    return 0;
}
/* PCM callbacks for the professional playback/capture streams */
static struct snd_pcm_ops snd_vt1724_playback_pro_ops = {
    .open = snd_vt1724_playback_pro_open,
    .close = snd_vt1724_playback_pro_close,
    .ioctl = snd_pcm_lib_ioctl,
    .hw_params = snd_vt1724_pcm_hw_params,
    .hw_free = snd_vt1724_pcm_hw_free,
    .prepare = snd_vt1724_playback_pro_prepare,
    .trigger = snd_vt1724_pcm_trigger,
    .pointer = snd_vt1724_playback_pro_pointer,
};
static struct snd_pcm_ops snd_vt1724_capture_pro_ops = {
    .open = snd_vt1724_capture_pro_open,
    .close = snd_vt1724_capture_pro_close,
    .ioctl = snd_pcm_lib_ioctl,
    .hw_params = snd_vt1724_pcm_hw_params,
    .hw_free = snd_vt1724_pcm_hw_free,
    .prepare = snd_vt1724_pcm_prepare,
    .trigger = snd_vt1724_pcm_trigger,
    .pointer = snd_vt1724_pcm_pointer,
};
/* create the professional PCM device (one playback, one capture
 * substream) and preallocate 256KB DMA buffers */
static int __devinit snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
{
    struct snd_pcm *pcm;
    int err;
    err = snd_pcm_new(ice->card, "ICE1724", device, 1, 1, &pcm);
    if (err < 0)
        return err;
    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_vt1724_playback_pro_ops);
    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_vt1724_capture_pro_ops);
    pcm->private_data = ice;
    pcm->info_flags = 0;
    strcpy(pcm->name, "ICE1724");
    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                          snd_dma_pci_data(ice->pci),
                          256*1024, 256*1024);
    ice->pcm_pro = pcm;
    return 0;
}
/*
* SPDIF PCM
*/
/* update spdif control bits; call with reg_lock held.
 * The SPDIF output is temporarily disabled around the register update so
 * the receiver doesn't latch a partially written status word. */
static void update_spdif_bits(struct snd_ice1712 *ice, unsigned int val)
{
    unsigned char cbit, disabled;
    cbit = inb(ICEREG1724(ice, SPDIF_CFG));
    disabled = cbit & ~VT1724_CFG_SPDIF_OUT_EN;
    if (cbit != disabled)
        outb(disabled, ICEREG1724(ice, SPDIF_CFG));
    outw(val, ICEMT1724(ice, SPDIF_CTRL));
    if (cbit != disabled)
        outb(cbit, ICEREG1724(ice, SPDIF_CFG));
    /* NOTE(review): the value is written a second time after the output
     * is re-enabled — presumably so it also latches with the output
     * active; confirm against the VT1724 datasheet before removing. */
    outw(val, ICEMT1724(ice, SPDIF_CTRL));
}
/* update SPDIF control bits according to the given rate */
static void update_spdif_rate(struct snd_ice1712 *ice, unsigned int rate)
{
    unsigned int val, nval;
    unsigned long flags;
    spin_lock_irqsave(&ice->reg_lock, flags);
    nval = val = inw(ICEMT1724(ice, SPDIF_CTRL));
    /* bits 14:12 carry the sample-rate code; 44.1 kHz is encoding 0,
     * unknown rates fall through and also read as 44.1 kHz */
    nval &= ~(7 << 12);
    switch (rate) {
    case 44100: break;
    case 48000: nval |= 2 << 12; break;
    case 32000: nval |= 3 << 12; break;
    case 88200: nval |= 4 << 12; break;
    case 96000: nval |= 5 << 12; break;
    case 192000: nval |= 6 << 12; break;
    case 176400: nval |= 7 << 12; break;
    }
    if (val != nval)
        update_spdif_bits(ice, nval);
    spin_unlock_irqrestore(&ice->reg_lock, flags);
}
/* prepare the SPDIF playback stream: sync the channel-status rate bits
 * (unless PDMA4 is forced to analog duty), then do the common prepare */
static int snd_vt1724_playback_spdif_prepare(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (!ice->force_pdma4)
        update_spdif_rate(ice, substream->runtime->rate);
    return snd_vt1724_pcm_prepare(substream);
}
/*
 * Open the SPDIF playback (PDMA4) stream.  When force_pdma4 is set the
 * engine is used as a plain stereo analog channel instead, so the
 * stereo hardware description and chip rate constraints apply.
 */
static int snd_vt1724_playback_spdif_open(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    runtime->private_data = (void *)&vt1724_playback_spdif_reg;
    ice->playback_con_substream = substream;
    if (ice->force_pdma4) {
        runtime->hw = snd_vt1724_2ch_stereo;
        set_rate_constraints(ice, substream);
    } else
        runtime->hw = snd_vt1724_spdif;
    snd_pcm_set_sync(substream);
    snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                   VT1724_BUFFER_ALIGN);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                   VT1724_BUFFER_ALIGN);
    if (ice->spdif.ops.open)
        ice->spdif.ops.open(ice, substream);
    return 0;
}
/* close SPDIF playback; optionally restore the default rate and let
 * the card-specific spdif hook clean up */
static int snd_vt1724_playback_spdif_close(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (PRO_RATE_RESET)
        snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
    ice->playback_con_substream = NULL;
    if (ice->spdif.ops.close)
        ice->spdif.ops.close(ice, substream);
    return 0;
}
/*
 * Open the SPDIF capture (RDMA1) stream; with force_rdma1 the engine is
 * used as an extra analog stereo input with the chip rate constraints.
 */
static int snd_vt1724_capture_spdif_open(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    runtime->private_data = (void *)&vt1724_capture_spdif_reg;
    ice->capture_con_substream = substream;
    if (ice->force_rdma1) {
        runtime->hw = snd_vt1724_2ch_stereo;
        set_rate_constraints(ice, substream);
    } else
        runtime->hw = snd_vt1724_spdif;
    snd_pcm_set_sync(substream);
    snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                   VT1724_BUFFER_ALIGN);
    snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                   VT1724_BUFFER_ALIGN);
    if (ice->spdif.ops.open)
        ice->spdif.ops.open(ice, substream);
    return 0;
}
/* close SPDIF capture; optionally restore the default rate and let
 * the card-specific spdif hook clean up */
static int snd_vt1724_capture_spdif_close(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (PRO_RATE_RESET)
        snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
    ice->capture_con_substream = NULL;
    if (ice->spdif.ops.close)
        ice->spdif.ops.close(ice, substream);
    return 0;
}
/* PCM callbacks for the SPDIF playback/capture streams */
static struct snd_pcm_ops snd_vt1724_playback_spdif_ops = {
    .open = snd_vt1724_playback_spdif_open,
    .close = snd_vt1724_playback_spdif_close,
    .ioctl = snd_pcm_lib_ioctl,
    .hw_params = snd_vt1724_pcm_hw_params,
    .hw_free = snd_vt1724_pcm_hw_free,
    .prepare = snd_vt1724_playback_spdif_prepare,
    .trigger = snd_vt1724_pcm_trigger,
    .pointer = snd_vt1724_pcm_pointer,
};
static struct snd_pcm_ops snd_vt1724_capture_spdif_ops = {
    .open = snd_vt1724_capture_spdif_open,
    .close = snd_vt1724_capture_spdif_close,
    .ioctl = snd_pcm_lib_ioctl,
    .hw_params = snd_vt1724_pcm_hw_params,
    .hw_free = snd_vt1724_pcm_hw_free,
    .prepare = snd_vt1724_pcm_prepare,
    .trigger = snd_vt1724_pcm_trigger,
    .pointer = snd_vt1724_pcm_pointer,
};
/*
 * Create the SPDIF PCM device.  Playback/capture substreams are only
 * registered when the EEPROM enables the corresponding SPDIF direction
 * or the board forces PDMA4/RDMA1 into analog duty (then the device is
 * named "Secondary" instead of "IEC958").
 */
static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
{
    char *name;
    struct snd_pcm *pcm;
    int play, capt;
    int err;
    if (ice->force_pdma4 ||
        (ice->eeprom.data[ICE_EEP2_SPDIF] & VT1724_CFG_SPDIF_OUT_INT)) {
        play = 1;
        ice->has_spdif = 1;
    } else
        play = 0;
    if (ice->force_rdma1 ||
        (ice->eeprom.data[ICE_EEP2_SPDIF] & VT1724_CFG_SPDIF_IN)) {
        capt = 1;
        ice->has_spdif = 1;
    } else
        capt = 0;
    if (!play && !capt)
        return 0; /* no spdif device */
    if (ice->force_pdma4 || ice->force_rdma1)
        name = "ICE1724 Secondary";
    else
        name = "ICE1724 IEC958";
    err = snd_pcm_new(ice->card, name, device, play, capt, &pcm);
    if (err < 0)
        return err;
    if (play)
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
                &snd_vt1724_playback_spdif_ops);
    if (capt)
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
                &snd_vt1724_capture_spdif_ops);
    pcm->private_data = ice;
    pcm->info_flags = 0;
    strcpy(pcm->name, name);
    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                          snd_dma_pci_data(ice->pci),
                          256*1024, 256*1024);
    ice->pcm = pcm;
    return 0;
}
/*
* independent surround PCMs
*/
/* register descriptors for the three independent surround engines,
 * indexed by substream->number (PDMA1..PDMA3) */
static const struct vt1724_pcm_reg vt1724_playback_dma_regs[3] = {
    {
        .addr = VT1724_MT_PDMA1_ADDR,
        .size = VT1724_MT_PDMA1_SIZE,
        .count = VT1724_MT_PDMA1_COUNT,
        .start = VT1724_PDMA1_START,
    },
    {
        .addr = VT1724_MT_PDMA2_ADDR,
        .size = VT1724_MT_PDMA2_SIZE,
        .count = VT1724_MT_PDMA2_COUNT,
        .start = VT1724_PDMA2_START,
    },
    {
        .addr = VT1724_MT_PDMA3_ADDR,
        .size = VT1724_MT_PDMA3_SIZE,
        .count = VT1724_MT_PDMA3_COUNT,
        .start = VT1724_PDMA3_START,
    },
};
/* prepare an independent surround stream: raise the shared BURST
 * register if this engine needs a larger value, then common prepare */
static int snd_vt1724_playback_indep_prepare(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    unsigned char val;
    spin_lock_irq(&ice->reg_lock);
    val = 3 - substream->number;
    if (inb(ICEMT1724(ice, BURST)) < val)
        outb(val, ICEMT1724(ice, BURST));
    spin_unlock_irq(&ice->reg_lock);
    return snd_vt1724_pcm_prepare(substream);
}
/*
 * Open one of the independent surround playback streams (PDMA1-3).
 * Fails with -EBUSY when the matching channel pair is already reserved
 * by the multi-channel PDMA0 stream.
 */
static int snd_vt1724_playback_indep_open(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    mutex_lock(&ice->open_mutex);
    /* already used by PDMA0? */
    if (ice->pcm_reserved[substream->number]) {
        mutex_unlock(&ice->open_mutex);
        return -EBUSY; /* FIXME: should handle blocking mode properly */
    }
    /* NOTE(review): the slot is not claimed here before the mutex is
     * dropped — hw_params does the claiming later; verify this window
     * cannot race with a concurrent PDMA0 hw_params. */
    mutex_unlock(&ice->open_mutex);
    runtime->private_data = (void *)&vt1724_playback_dma_regs[substream->number];
    ice->playback_con_substream_ds[substream->number] = substream;
    runtime->hw = snd_vt1724_2ch_stereo;
    snd_pcm_set_sync(substream);
    snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
    set_rate_constraints(ice, substream);
    return 0;
}
/* close an independent surround stream and free its reservation slot */
static int snd_vt1724_playback_indep_close(struct snd_pcm_substream *substream)
{
    struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
    if (PRO_RATE_RESET)
        snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
    ice->playback_con_substream_ds[substream->number] = NULL;
    ice->pcm_reserved[substream->number] = NULL;
    return 0;
}
/* PCM callbacks for the independent surround playback streams */
static struct snd_pcm_ops snd_vt1724_playback_indep_ops = {
    .open = snd_vt1724_playback_indep_open,
    .close = snd_vt1724_playback_indep_close,
    .ioctl = snd_pcm_lib_ioctl,
    .hw_params = snd_vt1724_pcm_hw_params,
    .hw_free = snd_vt1724_pcm_hw_free,
    .prepare = snd_vt1724_playback_indep_prepare,
    .trigger = snd_vt1724_pcm_trigger,
    .pointer = snd_vt1724_pcm_pointer,
};
/* create the surround PCM device with one playback substream per
 * stereo DAC pair beyond the first; skipped for 2-channel boards */
static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
{
    struct snd_pcm *pcm;
    int play;
    int err;
    play = ice->num_total_dacs / 2 - 1;
    if (play <= 0)
        return 0;
    err = snd_pcm_new(ice->card, "ICE1724 Surrounds", device, play, 0, &pcm);
    if (err < 0)
        return err;
    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
            &snd_vt1724_playback_indep_ops);
    pcm->private_data = ice;
    pcm->info_flags = 0;
    strcpy(pcm->name, "ICE1724 Surround PCM");
    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                          snd_dma_pci_data(ice->pci),
                          256*1024, 256*1024);
    ice->pcm_ds = pcm;
    return 0;
}
/*
* Mixer section
*/
/*
 * Create the AC97 mixer when the chip runs in AC-link mode (cold-reset
 * the codec first).  In I2S mode, or when codec probing fails, only the
 * multitrack mixer name is set and no AC97 mixer is attached.
 */
static int __devinit snd_vt1724_ac97_mixer(struct snd_ice1712 *ice)
{
    int err;
    if (!(ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S)) {
        struct snd_ac97_bus *pbus;
        struct snd_ac97_template ac97;
        static struct snd_ac97_bus_ops ops = {
            .write = snd_vt1724_ac97_write,
            .read = snd_vt1724_ac97_read,
        };
        /* cold reset */
        outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD));
        mdelay(5); /* FIXME */
        outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD));
        err = snd_ac97_bus(ice->card, 0, &ops, NULL, &pbus);
        if (err < 0)
            return err;
        memset(&ac97, 0, sizeof(ac97));
        ac97.private_data = ice;
        err = snd_ac97_mixer(pbus, &ac97, &ice->ac97);
        if (err < 0)
            printk(KERN_WARNING "ice1712: cannot initialize pro ac97, skipped\n");
        else
            return 0;
    }
    /* I2S mixer only */
    strcat(ice->card->mixername, "ICE1724 - multitrack");
    return 0;
}
/*
*
*/
/* assemble a little-endian 24-bit value from three EEPROM bytes */
static inline unsigned int eeprom_triple(struct snd_ice1712 *ice, int idx)
{
    unsigned int value = 0;
    int byte;

    for (byte = 2; byte >= 0; byte--)
        value = (value << 8) | ice->eeprom.data[idx + byte];
    return value;
}
/* /proc read callback: dump the parsed EEPROM contents and the raw
 * CCS (0x00-0x1f) and MT (0x00-0x2f) register spaces */
static void snd_vt1724_proc_read(struct snd_info_entry *entry,
                 struct snd_info_buffer *buffer)
{
    struct snd_ice1712 *ice = entry->private_data;
    unsigned int idx;
    snd_iprintf(buffer, "%s\n\n", ice->card->longname);
    snd_iprintf(buffer, "EEPROM:\n");
    snd_iprintf(buffer, "  Subvendor        : 0x%x\n", ice->eeprom.subvendor);
    snd_iprintf(buffer, "  Size             : %i bytes\n", ice->eeprom.size);
    snd_iprintf(buffer, "  Version          : %i\n", ice->eeprom.version);
    snd_iprintf(buffer, "  System Config    : 0x%x\n",
            ice->eeprom.data[ICE_EEP2_SYSCONF]);
    snd_iprintf(buffer, "  ACLink           : 0x%x\n",
            ice->eeprom.data[ICE_EEP2_ACLINK]);
    snd_iprintf(buffer, "  I2S              : 0x%x\n",
            ice->eeprom.data[ICE_EEP2_I2S]);
    snd_iprintf(buffer, "  S/PDIF           : 0x%x\n",
            ice->eeprom.data[ICE_EEP2_SPDIF]);
    snd_iprintf(buffer, "  GPIO direction   : 0x%x\n",
            ice->eeprom.gpiodir);
    snd_iprintf(buffer, "  GPIO mask        : 0x%x\n",
            ice->eeprom.gpiomask);
    snd_iprintf(buffer, "  GPIO state       : 0x%x\n",
            ice->eeprom.gpiostate);
    /* board-specific extra bytes start at offset 0x12 */
    for (idx = 0x12; idx < ice->eeprom.size; idx++)
        snd_iprintf(buffer, "  Extra #%02i       : 0x%x\n",
                idx, ice->eeprom.data[idx]);
    snd_iprintf(buffer, "\nRegisters:\n");
    snd_iprintf(buffer, "  PSDOUT03 : 0x%08x\n",
            (unsigned)inl(ICEMT1724(ice, ROUTE_PLAYBACK)));
    for (idx = 0x0; idx < 0x20 ; idx++)
        snd_iprintf(buffer, "  CCS%02x    : 0x%02x\n",
                idx, inb(ice->port+idx));
    for (idx = 0x0; idx < 0x30 ; idx++)
        snd_iprintf(buffer, "  MT%02x     : 0x%02x\n",
                idx, inb(ice->profi_port+idx));
}
/* register the "ice1724" proc entry (best effort; failures are ignored) */
static void __devinit snd_vt1724_proc_init(struct snd_ice1712 *ice)
{
    struct snd_info_entry *entry;
    if (!snd_card_proc_new(ice->card, "ice1724", &entry))
        snd_info_set_text_ops(entry, ice, snd_vt1724_proc_read);
}
/*
*
*/
/* control info: the EEPROM image is exposed as a raw byte array */
static int snd_vt1724_eeprom_info(struct snd_kcontrol *kcontrol,
                  struct snd_ctl_elem_info *uinfo)
{
    uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
    uinfo->count = sizeof(struct snd_ice1712_eeprom);
    return 0;
}
/* control get: copy the cached EEPROM image out to user space */
static int snd_vt1724_eeprom_get(struct snd_kcontrol *kcontrol,
                 struct snd_ctl_elem_value *ucontrol)
{
    struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
    memcpy(ucontrol->value.bytes.data, &ice->eeprom, sizeof(ice->eeprom));
    return 0;
}
/* read-only card control exposing the EEPROM contents */
static struct snd_kcontrol_new snd_vt1724_eeprom __devinitdata = {
    .iface = SNDRV_CTL_ELEM_IFACE_CARD,
    .name = "ICE1724 EEPROM",
    .access = SNDRV_CTL_ELEM_ACCESS_READ,
    .info = snd_vt1724_eeprom_info,
    .get = snd_vt1724_eeprom_get
};
/*
*/
/* control info for the IEC958 channel-status controls */
static int snd_vt1724_spdif_info(struct snd_kcontrol *kcontrol,
                 struct snd_ctl_elem_info *uinfo)
{
    uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
    uinfo->count = 1;
    return 0;
}
/*
 * Pack an AES/IEC958 channel-status block into the 16-bit layout of the
 * VT1724 SPDIF control register (pro/consumer flag, emphasis, copyright,
 * category and sample-rate code).
 */
static unsigned int encode_spdif_bits(struct snd_aes_iec958 *diga)
{
    unsigned int bits, rate_bits;

    bits = diga->status[0] & 0x03;  /* professional, non-audio */
    if (bits & 0x01) {
        /* professional mode */
        if ((diga->status[0] & IEC958_AES0_PRO_EMPHASIS) ==
            IEC958_AES0_PRO_EMPHASIS_5015)
            bits |= 1U << 3;
        rate_bits = (diga->status[4] >> 3) & 0x0f;
        if (rate_bits == 0) {
            /* no extended rate info: fall back to AES0 fs field */
            switch (diga->status[0] & IEC958_AES0_PRO_FS) {
            case IEC958_AES0_PRO_FS_44100:
                break;
            case IEC958_AES0_PRO_FS_32000:
                bits |= 3U << 12;
                break;
            default:
                bits |= 2U << 12;
                break;
            }
        } else if (rate_bits == 2)
            bits |= 5 << 12;    /* 96k */
        else if (rate_bits == 3)
            bits |= 6 << 12;    /* 192k */
        else if (rate_bits == 10)
            bits |= 4 << 12;    /* 88.2k */
        else if (rate_bits == 11)
            bits |= 7 << 12;    /* 176.4k */
    } else {
        /* consumer mode */
        bits |= diga->status[1] & 0x04; /* copyright */
        if ((diga->status[0] & IEC958_AES0_CON_EMPHASIS) ==
            IEC958_AES0_CON_EMPHASIS_5015)
            bits |= 1U << 3;
        bits |= (unsigned int)(diga->status[1] & 0x3f) << 4; /* category */
        bits |= (unsigned int)(diga->status[3] & IEC958_AES3_CON_FS) << 12; /* fs */
    }
    return bits;
}
/*
 * Unpack a VT1724 SPDIF control register value back into an IEC958
 * status-bit structure (inverse of encode_spdif_bits, lossy: only
 * the bits the hardware stores are reconstructed).
 */
static void decode_spdif_bits(struct snd_aes_iec958 *diga, unsigned int val)
{
	memset(diga->status, 0, sizeof(diga->status));
	diga->status[0] = val & 0x03; /* professional, non-audio */
	if (val & 0x01) {
		/* professional */
		if (val & (1U << 3))
			diga->status[0] |= IEC958_AES0_PRO_EMPHASIS_5015;
		switch ((val >> 12) & 0x7) {
		case 0:
			/* 44.1k is the all-zero encoding */
			break;
		case 2:
			diga->status[0] |= IEC958_AES0_PRO_FS_32000;
			break;
		default:
			diga->status[0] |= IEC958_AES0_PRO_FS_48000;
			break;
		}
	} else {
		/* consumer */
		diga->status[0] |= val & (1U << 2); /* copyright */
		if (val & (1U << 3))
			diga->status[0] |= IEC958_AES0_CON_EMPHASIS_5015;
		diga->status[1] |= (val >> 4) & 0x3f; /* category */
		diga->status[3] |= (val >> 12) & 0x07; /* fs */
	}
}
/* Read the SPDIF control word and report it as IEC958 status bits. */
static int snd_vt1724_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	decode_spdif_bits(&ucontrol->value.iec958,
			  inw(ICEMT1724(ice, SPDIF_CTRL)));
	return 0;
}
/*
 * Write new IEC958 status bits; the hardware is only updated (under
 * the register lock) when the encoded value actually changed.
 * Returns 1 when a change was applied.
 */
static int snd_vt1724_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val, old;

	val = encode_spdif_bits(&ucontrol->value.iec958);
	spin_lock_irq(&ice->reg_lock);
	old = inw(ICEMT1724(ice, SPDIF_CTRL));
	if (val != old)
		update_spdif_bits(ice, val);
	spin_unlock_irq(&ice->reg_lock);
	return val != old;
}
/* "IEC958 Playback Default": read/write SPDIF status bits. */
static struct snd_kcontrol_new snd_vt1724_spdif_default __devinitdata =
{
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
	.info = snd_vt1724_spdif_info,
	.get = snd_vt1724_spdif_default_get,
	.put = snd_vt1724_spdif_default_put
};
static int snd_vt1724_spdif_maskc_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO |
IEC958_AES0_PROFESSIONAL |
IEC958_AES0_CON_NOT_COPYRIGHT |
IEC958_AES0_CON_EMPHASIS;
ucontrol->value.iec958.status[1] = IEC958_AES1_CON_ORIGINAL |
IEC958_AES1_CON_CATEGORY;
ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS;
return 0;
}
/* Report which professional-mode status bits the hardware can store. */
static int snd_vt1724_spdif_maskp_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = IEC958_AES0_PROFESSIONAL |
					   IEC958_AES0_NONAUDIO |
					   IEC958_AES0_PRO_FS |
					   IEC958_AES0_PRO_EMPHASIS;
	return 0;
}
/* Read-only mask of writable consumer-mode status bits. */
static struct snd_kcontrol_new snd_vt1724_spdif_maskc __devinitdata =
{
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
	.info = snd_vt1724_spdif_info,
	.get = snd_vt1724_spdif_maskc_get,
};
/* Read-only mask of writable professional-mode status bits. */
static struct snd_kcontrol_new snd_vt1724_spdif_maskp __devinitdata =
{
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK),
	.info = snd_vt1724_spdif_info,
	.get = snd_vt1724_spdif_maskp_get,
};
/* Boolean info helper for the SPDIF output switch. */
#define snd_vt1724_spdif_sw_info snd_ctl_boolean_mono_info
/* Report whether the SPDIF output is currently enabled. */
static int snd_vt1724_spdif_sw_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char cfg = inb(ICEREG1724(ice, SPDIF_CFG));

	ucontrol->value.integer.value[0] =
		(cfg & VT1724_CFG_SPDIF_OUT_EN) ? 1 : 0;
	return 0;
}
/* Enable/disable the SPDIF output; returns 1 when the bit changed. */
static int snd_vt1724_spdif_sw_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char oval, nval;

	spin_lock_irq(&ice->reg_lock);
	oval = inb(ICEREG1724(ice, SPDIF_CFG));
	nval = oval & ~VT1724_CFG_SPDIF_OUT_EN;
	if (ucontrol->value.integer.value[0])
		nval |= VT1724_CFG_SPDIF_OUT_EN;
	/* touch the hardware only on an actual change */
	if (nval != oval)
		outb(nval, ICEREG1724(ice, SPDIF_CFG));
	spin_unlock_irq(&ice->reg_lock);
	return nval != oval;
}
/* Mixer switch enabling/disabling the SPDIF output. */
static struct snd_kcontrol_new snd_vt1724_spdif_switch __devinitdata =
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	/* FIXME: the following conflict with IEC958 Playback Route */
	/* .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, SWITCH), */
	.name = SNDRV_CTL_NAME_IEC958("Output ", NONE, SWITCH),
	.info = snd_vt1724_spdif_sw_info,
	.get = snd_vt1724_spdif_sw_get,
	.put = snd_vt1724_spdif_sw_put
};
#if 0 /* NOT USED YET */
/*
 * GPIO access from extern
 */
#define snd_vt1724_gpio_info snd_ctl_boolean_mono_info

/* Read one GPIO bit (selected by private_value bits 0-7), optionally
 * inverted (private_value bit 24). */
int snd_vt1724_gpio_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int shift = kcontrol->private_value & 0xff;
	int invert = (kcontrol->private_value & (1<<24)) ? 1 : 0;

	snd_ice1712_save_gpio_status(ice);
	ucontrol->value.integer.value[0] =
		(snd_ice1712_gpio_read(ice) & (1 << shift) ? 1 : 0) ^ invert;
	snd_ice1712_restore_gpio_status(ice);
	return 0;
}

/* Write one GPIO bit; private_value bit 31 marks the control read-only. */
int snd_ice1712_gpio_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int shift = kcontrol->private_value & 0xff;
	/* BUGFIX: the original used an undeclared 'mask' here; the invert
	 * value must be the bit itself, matching the get handler above
	 * (nval ^ invert flips the selected bit when inversion is set). */
	int invert = (kcontrol->private_value & (1<<24)) ? (1 << shift) : 0;
	unsigned int val, nval;

	if (kcontrol->private_value & (1 << 31))
		return -EPERM;
	nval = (ucontrol->value.integer.value[0] ? (1 << shift) : 0) ^ invert;
	snd_ice1712_save_gpio_status(ice);
	val = snd_ice1712_gpio_read(ice);
	nval |= val & ~(1 << shift);
	if (val != nval)
		snd_ice1712_gpio_write(ice, nval);
	snd_ice1712_restore_gpio_status(ice);
	return val != nval;
}
#endif /* NOT USED YET */
/*
* rate
*/
/*
 * Info handler for "Multi Track Internal Clock": the enum lists the
 * internal hardware rates first, followed by the external clock names.
 */
static int snd_vt1724_pro_internal_clock_info(struct snd_kcontrol *kcontrol,
					      struct snd_ctl_elem_info *uinfo)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int hw_rates_count = ice->hw_rates->count;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;

	uinfo->value.enumerated.items = hw_rates_count + ice->ext_clock_count;
	/* upper limit - keep at top */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
	if (uinfo->value.enumerated.item >= hw_rates_count)
		/* ext_clock items */
		strcpy(uinfo->value.enumerated.name,
		       ice->ext_clock_names[
				uinfo->value.enumerated.item - hw_rates_count]);
	else
		/* int clock items */
		sprintf(uinfo->value.enumerated.name, "%d",
			ice->hw_rates->list[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * Get handler for "Multi Track Internal Clock": reports either the
 * external clock item (when SPDIF is master) or the index of the
 * current internal rate.
 */
static int snd_vt1724_pro_internal_clock_get(struct snd_kcontrol *kcontrol,
					     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&ice->reg_lock);
	if (ice->is_spdif_master(ice)) {
		/* external items follow the internal rate list */
		ucontrol->value.enumerated.item[0] = ice->hw_rates->count +
			ice->get_spdif_master_type(ice);
	} else {
		unsigned int idx;
		unsigned int cur_rate = ice->get_rate(ice);

		ucontrol->value.enumerated.item[0] = 0;
		for (idx = 0; idx < ice->hw_rates->count; idx++) {
			if (ice->hw_rates->list[idx] == cur_rate) {
				ucontrol->value.enumerated.item[0] = idx;
				break;
			}
		}
	}
	spin_unlock_irq(&ice->reg_lock);
	return 0;
}
/* Default hook: standard clocking has exactly one external source. */
static int stdclock_get_spdif_master_type(struct snd_ice1712 *ice)
{
	/* standard external clock - only single type - SPDIF IN */
	return 0;
}
/* setting clock to external - SPDIF */
/* Default hook: switch the master clock to the SPDIF receiver. */
static int stdclock_set_spdif_clock(struct snd_ice1712 *ice, int type)
{
	unsigned char rate_reg;
	unsigned char i2s_fmt;

	/* let the SPDIF input drive the master clock */
	rate_reg = inb(ICEMT1724(ice, RATE));
	outb(rate_reg | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
	/* setting 256fs: clear the 128x MCLK bit */
	i2s_fmt = inb(ICEMT1724(ice, I2S_FORMAT));
	outb(i2s_fmt & ~VT1724_MT_I2S_MCLK_128X, ICEMT1724(ice, I2S_FORMAT));
	return 0;
}
/*
 * Put handler for "Multi Track Internal Clock": items below
 * hw_rates->count select an internal rate; items at or above it
 * select an external (SPDIF) clock source.  Returns 1 on change.
 */
static int snd_vt1724_pro_internal_clock_put(struct snd_kcontrol *kcontrol,
					     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old_rate, new_rate;
	unsigned int item = ucontrol->value.enumerated.item[0];
	unsigned int first_ext_clock = ice->hw_rates->count;

	if (item > first_ext_clock + ice->ext_clock_count - 1)
		return -EINVAL;

	/* if rate = 0 => external clock */
	spin_lock_irq(&ice->reg_lock);
	if (ice->is_spdif_master(ice))
		old_rate = 0;
	else
		old_rate = ice->get_rate(ice);
	if (item >= first_ext_clock) {
		/* switching to external clock */
		ice->set_spdif_clock(ice, item - first_ext_clock);
		new_rate = 0;
	} else {
		/* internal on-card clock */
		new_rate = ice->hw_rates->list[item];
		ice->pro_rate_default = new_rate;
		/* NOTE(review): the lock is dropped around
		 * snd_vt1724_set_pro_rate - presumably it takes
		 * reg_lock itself; confirm against its definition */
		spin_unlock_irq(&ice->reg_lock);
		snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
		spin_lock_irq(&ice->reg_lock);
	}
	spin_unlock_irq(&ice->reg_lock);

	/* the first switch to the ext. clock mode? */
	if (old_rate != new_rate && !new_rate) {
		/* notify akm chips as well */
		unsigned int i;
		if (ice->gpio.set_pro_rate)
			ice->gpio.set_pro_rate(ice, 0);
		for (i = 0; i < ice->akm_codecs; i++) {
			if (ice->akm[i].ops.set_rate_val)
				ice->akm[i].ops.set_rate_val(&ice->akm[i], 0);
		}
	}
	return old_rate != new_rate;
}
/* "Multi Track Internal Clock": clock-source enum control. */
static struct snd_kcontrol_new snd_vt1724_pro_internal_clock __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Multi Track Internal Clock",
	.info = snd_vt1724_pro_internal_clock_info,
	.get = snd_vt1724_pro_internal_clock_get,
	.put = snd_vt1724_pro_internal_clock_put
};
/* Boolean info helper for the rate-locking switch. */
#define snd_vt1724_pro_rate_locking_info snd_ctl_boolean_mono_info
/* Report the global rate-lock flag. */
static int snd_vt1724_pro_rate_locking_get(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = PRO_RATE_LOCKED;
	return 0;
}
/* Set the global rate-lock flag; returns 1 when the value changed. */
static int snd_vt1724_pro_rate_locking_put(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int nval = ucontrol->value.integer.value[0] ? 1 : 0;
	int changed;

	spin_lock_irq(&ice->reg_lock);
	changed = (PRO_RATE_LOCKED != nval);
	PRO_RATE_LOCKED = nval;
	spin_unlock_irq(&ice->reg_lock);
	return changed;
}
/* "Multi Track Rate Locking" boolean switch. */
static struct snd_kcontrol_new snd_vt1724_pro_rate_locking __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Multi Track Rate Locking",
	.info = snd_vt1724_pro_rate_locking_info,
	.get = snd_vt1724_pro_rate_locking_get,
	.put = snd_vt1724_pro_rate_locking_put
};
/* Boolean info helper for the rate-reset switch. */
#define snd_vt1724_pro_rate_reset_info snd_ctl_boolean_mono_info
/* Report the global rate-reset flag. */
static int snd_vt1724_pro_rate_reset_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = PRO_RATE_RESET ? 1 : 0;
	return 0;
}
/* Set the global rate-reset flag; returns 1 when the value changed. */
static int snd_vt1724_pro_rate_reset_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int nval = ucontrol->value.integer.value[0] ? 1 : 0;
	int changed;

	spin_lock_irq(&ice->reg_lock);
	changed = (PRO_RATE_RESET != nval);
	PRO_RATE_RESET = nval;
	spin_unlock_irq(&ice->reg_lock);
	return changed;
}
/* "Multi Track Rate Reset" boolean switch. */
static struct snd_kcontrol_new snd_vt1724_pro_rate_reset __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Multi Track Rate Reset",
	.info = snd_vt1724_pro_rate_reset_info,
	.get = snd_vt1724_pro_rate_reset_get,
	.put = snd_vt1724_pro_rate_reset_put
};
/*
* routing
*/
/*
 * Info handler for the playback route controls: five selectable
 * sources per output channel.
 */
static int snd_vt1724_pro_route_info(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_info *uinfo)
{
	static char *texts[] = {
		"PCM Out", /* 0 */
		"H/W In 0", "H/W In 1", /* 1-2 */
		"IEC958 In L", "IEC958 In R", /* 3-4 */
	};

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 5;
	/* clamp the queried item into the valid range */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * Bit position of the 3-bit route selector for analog output idx in
 * the ROUTE_PLAYBACK register: left channels start at bit 8, right
 * channels at bit 20, each stereo pair advancing by 3 bits.
 */
static inline int analog_route_shift(int idx)
{
	return 8 + (idx / 2) * 3 + (idx % 2) * 12;
}
/*
 * Bit position of the 3-bit route selector for SPDIF output idx:
 * selectors are packed from bit 0 upward.
 */
static inline int digital_route_shift(int idx)
{
	return 3 * idx;
}
/*
 * Read the 3-bit source selector at bit position 'shift' from the
 * playback route register and translate it to the enum index used by
 * the route controls (see snd_vt1724_pro_route_info).
 */
int snd_ice1724_get_route_val(struct snd_ice1712 *ice, int shift)
{
	unsigned long val;
	unsigned char eitem;
	/* hardware encoding -> control enum index; 255 marks values the
	 * hardware is not expected to produce */
	static const unsigned char xlate[8] = {
		0, 255, 1, 2, 255, 255, 3, 4,
	};

	val = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
	val >>= shift;
	val &= 7;	/* we now have 3 bits per output */
	eitem = xlate[val];
	if (eitem == 255) {
		snd_BUG();
		return 0;
	}
	return eitem;
}
/*
 * Program the 3-bit source selector at bit position 'shift' in the
 * playback route register.  'val' is the control enum index (0-4).
 * Returns 1 if the register value was changed.
 */
int snd_ice1724_put_route_val(struct snd_ice1712 *ice, unsigned int val,
			      int shift)
{
	unsigned int old_val, nval;
	int change;
	/* control enum index -> hardware encoding */
	static const unsigned char xroute[8] = {
		0, /* PCM */
		2, /* PSDIN0 Left */
		3, /* PSDIN0 Right */
		6, /* SPDIN Left */
		7, /* SPDIN Right */
	};

	nval = xroute[val % 5];
	val = old_val = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
	val &= ~(0x07 << shift);
	val |= nval << shift;
	change = val != old_val;
	if (change)
		outl(val, ICEMT1724(ice, ROUTE_PLAYBACK));
	return change;
}
/* Get the current source for one analog output channel. */
static int snd_vt1724_pro_route_analog_get(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int ch = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);

	ucontrol->value.enumerated.item[0] =
		snd_ice1724_get_route_val(ice, analog_route_shift(ch));
	return 0;
}
/* Set the source for one analog output channel; returns 1 on change. */
static int snd_vt1724_pro_route_analog_put(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int ch = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	unsigned int item = ucontrol->value.enumerated.item[0];

	return snd_ice1724_put_route_val(ice, item, analog_route_shift(ch));
}
/* Get the current source for one SPDIF output channel. */
static int snd_vt1724_pro_route_spdif_get(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int ch = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);

	ucontrol->value.enumerated.item[0] =
		snd_ice1724_get_route_val(ice, digital_route_shift(ch));
	return 0;
}
/* Set the source for one SPDIF output channel; returns 1 on change. */
static int snd_vt1724_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int ch = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	unsigned int item = ucontrol->value.enumerated.item[0];

	return snd_ice1724_put_route_val(ice, item, digital_route_shift(ch));
}
/* Route selector for the analog outputs (count set at build time). */
static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata =
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "H/W Playback Route",
	.info = snd_vt1724_pro_route_info,
	.get = snd_vt1724_pro_route_analog_get,
	.put = snd_vt1724_pro_route_analog_put,
};
/* Route selector for the two SPDIF output channels. */
static struct snd_kcontrol_new snd_vt1724_mixer_pro_spdif_route __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, NONE) "Route",
	.info = snd_vt1724_pro_route_info,
	.get = snd_vt1724_pro_route_spdif_get,
	.put = snd_vt1724_pro_route_spdif_put,
	.count = 2,
};
/* Peak-meter element: 22 integer values in the range 0-255. */
static int snd_vt1724_pro_peak_info(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 255;
	uinfo->count = 22;	/* FIXME: for compatibility with ice1712... */
	return 0;
}
/* Read all 22 peak meters via the index/data register pair. */
static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int ch;

	spin_lock_irq(&ice->reg_lock);
	for (ch = 0; ch < 22; ch++) {
		/* select the meter, then read its level */
		outb(ch, ICEMT1724(ice, MONITOR_PEAKINDEX));
		ucontrol->value.integer.value[ch] =
			inb(ICEMT1724(ice, MONITOR_PEAKDATA));
	}
	spin_unlock_irq(&ice->reg_lock);
	return 0;
}
/* Read-only, volatile peak-meter element ("Multi Track Peak"). */
static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = "Multi Track Peak",
	.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info = snd_vt1724_pro_peak_info,
	.get = snd_vt1724_pro_peak_get
};
/*
*
*/
/* Fallback entry used when no table below matches the EEPROM ID. */
static struct snd_ice1712_card_info no_matched __devinitdata;

/* NULL-terminated list of per-board info tables, searched in order. */
static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
	snd_vt1724_revo_cards,
	snd_vt1724_amp_cards,
	snd_vt1724_aureon_cards,
	snd_vt1720_mobo_cards,
	snd_vt1720_pontis_cards,
	snd_vt1724_prodigy_hifi_cards,
	snd_vt1724_prodigy192_cards,
	snd_vt1724_juli_cards,
	snd_vt1724_maya44_cards,
	snd_vt1724_phase_cards,
	snd_vt1724_wtm_cards,
	snd_vt1724_se_cards,
	snd_vt1724_qtet_cards,
	NULL,
};
/*
*/
/*
 * Busy-wait until the I2C controller clears its BUSY flag.
 * The loop decrements t after testing it, so t == -1 afterwards means
 * all 0x10000 polls elapsed without the flag ever clearing.
 */
static void wait_i2c_busy(struct snd_ice1712 *ice)
{
	int t = 0x10000;
	while ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_BUSY) && t--)
		;
	if (t == -1)
		printk(KERN_ERR "ice1724: i2c busy timeout\n");
}
/* Read one byte from the I2C device 'dev' at register 'addr'. */
unsigned char snd_vt1724_read_i2c(struct snd_ice1712 *ice,
				  unsigned char dev, unsigned char addr)
{
	unsigned char data;

	mutex_lock(&ice->i2c_mutex);
	wait_i2c_busy(ice);
	/* byte address first, then the device address with READ */
	outb(addr, ICEREG1724(ice, I2C_BYTE_ADDR));
	outb(dev & ~VT1724_I2C_WRITE, ICEREG1724(ice, I2C_DEV_ADDR));
	wait_i2c_busy(ice);
	data = inb(ICEREG1724(ice, I2C_DATA));
	mutex_unlock(&ice->i2c_mutex);
	/*
	printk(KERN_DEBUG "i2c_read: [0x%x,0x%x] = 0x%x\n", dev, addr, data);
	*/
	return data;
}
/*
 * Write one byte to the I2C device 'dev' at register 'addr'.
 * Byte address and data are programmed first, then the device address
 * with the WRITE flag; the controller is polled idle before and after.
 */
void snd_vt1724_write_i2c(struct snd_ice1712 *ice,
			  unsigned char dev, unsigned char addr, unsigned char data)
{
	mutex_lock(&ice->i2c_mutex);
	wait_i2c_busy(ice);
	/*
	printk(KERN_DEBUG "i2c_write: [0x%x,0x%x] = 0x%x\n", dev, addr, data);
	*/
	outb(addr, ICEREG1724(ice, I2C_BYTE_ADDR));
	outb(data, ICEREG1724(ice, I2C_DATA));
	outb(dev | VT1724_I2C_WRITE, ICEREG1724(ice, I2C_DEV_ADDR));
	wait_i2c_busy(ice);
	mutex_unlock(&ice->i2c_mutex);
}
/*
 * Identify the board and load its EEPROM image.
 * The subvendor ID is read from the on-board EEPROM (or from the PCI
 * subsystem IDs as fallback), matched against the card tables, and
 * the EEPROM data is then read from hardware unless the matched table
 * entry supplies a built-in image.  Returns 0 or a negative errno.
 */
static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
					    const char *modelname)
{
	const int dev = 0xa0;		/* EEPROM device address */
	unsigned int i, size;
	struct snd_ice1712_card_info * const *tbl, *c;

	if (!modelname || !*modelname) {
		ice->eeprom.subvendor = 0;
		if ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_EEPROM) != 0)
			ice->eeprom.subvendor =
				(snd_vt1724_read_i2c(ice, dev, 0x00) << 0) |
				(snd_vt1724_read_i2c(ice, dev, 0x01) << 8) |
				(snd_vt1724_read_i2c(ice, dev, 0x02) << 16) |
				(snd_vt1724_read_i2c(ice, dev, 0x03) << 24);
		if (ice->eeprom.subvendor == 0 ||
		    ice->eeprom.subvendor == (unsigned int)-1) {
			/* invalid subvendor from EEPROM, try the PCI
			 * subsystem ID instead
			 */
			u16 vendor, device;
			pci_read_config_word(ice->pci, PCI_SUBSYSTEM_VENDOR_ID,
					     &vendor);
			pci_read_config_word(ice->pci, PCI_SUBSYSTEM_ID, &device);
			ice->eeprom.subvendor =
				((unsigned int)swab16(vendor) << 16) | swab16(device);
			if (ice->eeprom.subvendor == 0 ||
			    ice->eeprom.subvendor == (unsigned int)-1) {
				printk(KERN_ERR "ice1724: No valid ID is found\n");
				return -ENXIO;
			}
		}
	}
	for (tbl = card_tables; *tbl; tbl++) {
		for (c = *tbl; c->subvendor; c++) {
			if (modelname && c->model &&
			    !strcmp(modelname, c->model)) {
				/* an explicit model name overrides the ID */
				printk(KERN_INFO "ice1724: Using board model %s\n",
				       c->name);
				ice->eeprom.subvendor = c->subvendor;
			} else if (c->subvendor != ice->eeprom.subvendor)
				continue;
			if (!c->eeprom_size || !c->eeprom_data)
				goto found;
			/* if the EEPROM is given by the driver, use it */
			snd_printdd("using the defined eeprom..\n");
			ice->eeprom.version = 2;
			ice->eeprom.size = c->eeprom_size + 6;
			memcpy(ice->eeprom.data, c->eeprom_data, c->eeprom_size);
			goto read_skipped;
		}
	}
	printk(KERN_WARNING "ice1724: No matching model found for ID 0x%x\n",
	       ice->eeprom.subvendor);

 found:
	/* read the image from the hardware EEPROM */
	ice->eeprom.size = snd_vt1724_read_i2c(ice, dev, 0x04);
	if (ice->eeprom.size < 6)
		ice->eeprom.size = 32;
	else if (ice->eeprom.size > 32) {
		printk(KERN_ERR "ice1724: Invalid EEPROM (size = %i)\n",
		       ice->eeprom.size);
		return -EIO;
	}
	ice->eeprom.version = snd_vt1724_read_i2c(ice, dev, 0x05);
	if (ice->eeprom.version != 2)
		printk(KERN_WARNING "ice1724: Invalid EEPROM version %i\n",
		       ice->eeprom.version);
	size = ice->eeprom.size - 6;
	for (i = 0; i < size; i++)
		ice->eeprom.data[i] = snd_vt1724_read_i2c(ice, dev, i + 6);

 read_skipped:
	/* cache the GPIO mask/state/direction values from the image */
	ice->eeprom.gpiomask = eeprom_triple(ice, ICE_EEP2_GPIO_MASK);
	ice->eeprom.gpiostate = eeprom_triple(ice, ICE_EEP2_GPIO_STATE);
	ice->eeprom.gpiodir = eeprom_triple(ice, ICE_EEP2_GPIO_DIR);
	return 0;
}
/* Pulse the chip reset bit, flushing PCI posting and settling 10ms
 * after each edge. */
static void snd_vt1724_chip_reset(struct snd_ice1712 *ice)
{
	outb(VT1724_RESET , ICEREG1724(ice, CONTROL));
	inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
	msleep(10);
	outb(0, ICEREG1724(ice, CONTROL));
	inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
	msleep(10);
}
/*
 * Program the chip from the cached EEPROM image: config bytes, GPIO
 * setup, power-up and interrupt masking.  Also used on resume.
 */
static int snd_vt1724_chip_init(struct snd_ice1712 *ice)
{
	outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG));
	outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG));
	outb(ice->eeprom.data[ICE_EEP2_I2S], ICEREG1724(ice, I2S_FEATURES));
	outb(ice->eeprom.data[ICE_EEP2_SPDIF], ICEREG1724(ice, SPDIF_CFG));

	ice->gpio.write_mask = ice->eeprom.gpiomask;
	ice->gpio.direction = ice->eeprom.gpiodir;
	snd_vt1724_set_gpio_mask(ice, ice->eeprom.gpiomask);
	snd_vt1724_set_gpio_dir(ice, ice->eeprom.gpiodir);
	snd_vt1724_set_gpio_data(ice, ice->eeprom.gpiostate);

	outb(0, ICEREG1724(ice, POWERDOWN));

	/* MPU_RX and TX irq masks are cleared later dynamically */
	outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));

	/* don't handle FIFO overrun/underruns (just yet),
	 * since they cause machine lockups
	 */
	outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));

	return 0;
}
/*
 * Register the SPDIF-related controls (route, switch, default and
 * mask elements).  Requires ice->pcm to exist, since the IEC958
 * elements are attached to the SPDIF PCM device number.
 */
static int __devinit snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
{
	int err;
	struct snd_kcontrol *kctl;

	if (snd_BUG_ON(!ice->pcm))
		return -EIO;

	/* board-specific code may provide its own routing controls */
	if (!ice->own_routing) {
		err = snd_ctl_add(ice->card,
			snd_ctl_new1(&snd_vt1724_mixer_pro_spdif_route, ice));
		if (err < 0)
			return err;
	}

	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_spdif_switch, ice));
	if (err < 0)
		return err;

	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice));
	if (err < 0)
		return err;
	kctl->id.device = ice->pcm->device;
	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice));
	if (err < 0)
		return err;
	kctl->id.device = ice->pcm->device;
	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice));
	if (err < 0)
		return err;
	kctl->id.device = ice->pcm->device;
#if 0 /* use default only */
	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice));
	if (err < 0)
		return err;
	kctl->id.device = ice->pcm->device;
	ice->spdif.stream_ctl = kctl;
#endif
	return 0;
}
/*
 * Register the generic (non-SPDIF) controls: EEPROM dump, clock
 * selection, rate locking/reset, analog routing and peak meters.
 */
static int __devinit snd_vt1724_build_controls(struct snd_ice1712 *ice)
{
	int err;

	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_eeprom, ice));
	if (err < 0)
		return err;
	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_internal_clock, ice));
	if (err < 0)
		return err;
	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_rate_locking, ice));
	if (err < 0)
		return err;
	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_rate_reset, ice));
	if (err < 0)
		return err;

	if (!ice->own_routing && ice->num_total_dacs > 0) {
		struct snd_kcontrol_new tmp = snd_vt1724_mixer_pro_analog_route;
		tmp.count = ice->num_total_dacs;
		/* VT1720 exposes at most one stereo pair */
		if (ice->vt1720 && tmp.count > 2)
			tmp.count = 2;
		err = snd_ctl_add(ice->card, snd_ctl_new1(&tmp, ice));
		if (err < 0)
			return err;
	}

	err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_mixer_pro_peak, ice));
	if (err < 0)
		return err;

	return 0;
}
/*
 * Release all chip resources: mask interrupts (if the hardware was
 * mapped), free the IRQ, PCI regions and the instance itself.
 */
static int snd_vt1724_free(struct snd_ice1712 *ice)
{
	if (!ice->port)
		goto __hw_end;	/* hardware never set up */
	/* mask all interrupts */
	outb(0xff, ICEMT1724(ice, DMA_INT_MASK));
	outb(0xff, ICEREG1724(ice, IRQMASK));
	/* --- */
 __hw_end:
	if (ice->irq >= 0)
		free_irq(ice->irq, ice);
	pci_release_regions(ice->pci);
	snd_ice1712_akm4xxx_free(ice);
	pci_disable_device(ice->pci);
	kfree(ice->spec);
	kfree(ice);
	return 0;
}
/* snd_device dev_free callback: tear down the chip instance. */
static int snd_vt1724_dev_free(struct snd_device *device)
{
	return snd_vt1724_free(device->device_data);
}
/*
 * Allocate and initialize a snd_ice1712 instance for a VT1724 chip:
 * enable the PCI device, map the I/O regions, grab the IRQ, reset the
 * chip, read the EEPROM and run the base chip init.
 * On success *r_ice1712 points at the new instance; on failure all
 * partially acquired resources are released.
 */
static int __devinit snd_vt1724_create(struct snd_card *card,
				       struct pci_dev *pci,
				       const char *modelname,
				       struct snd_ice1712 **r_ice1712)
{
	struct snd_ice1712 *ice;
	int err;
	static struct snd_device_ops ops = {
		.dev_free =	snd_vt1724_dev_free,
	};

	*r_ice1712 = NULL;

	/* enable PCI device */
	err = pci_enable_device(pci);
	if (err < 0)
		return err;

	ice = kzalloc(sizeof(*ice), GFP_KERNEL);
	if (ice == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}
	ice->vt1724 = 1;
	spin_lock_init(&ice->reg_lock);
	mutex_init(&ice->gpio_mutex);
	mutex_init(&ice->open_mutex);
	mutex_init(&ice->i2c_mutex);
	/* default GPIO accessors; board code may override them */
	ice->gpio.set_mask = snd_vt1724_set_gpio_mask;
	ice->gpio.get_mask = snd_vt1724_get_gpio_mask;
	ice->gpio.set_dir = snd_vt1724_set_gpio_dir;
	ice->gpio.get_dir = snd_vt1724_get_gpio_dir;
	ice->gpio.set_data = snd_vt1724_set_gpio_data;
	ice->gpio.get_data = snd_vt1724_get_gpio_data;
	ice->card = card;
	ice->pci = pci;
	ice->irq = -1;
	pci_set_master(pci);
	snd_vt1724_proc_init(ice);
	synchronize_irq(pci->irq);

	card->private_data = ice;

	err = pci_request_regions(pci, "ICE1724");
	if (err < 0) {
		kfree(ice);
		pci_disable_device(pci);
		return err;
	}
	/* BAR0: CCS registers, BAR1: professional (multi-track) registers */
	ice->port = pci_resource_start(pci, 0);
	ice->profi_port = pci_resource_start(pci, 1);

	if (request_irq(pci->irq, snd_vt1724_interrupt,
			IRQF_SHARED, "ICE1724", ice)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_vt1724_free(ice);
		return -EIO;
	}

	ice->irq = pci->irq;

	snd_vt1724_chip_reset(ice);
	if (snd_vt1724_read_eeprom(ice, modelname) < 0) {
		snd_vt1724_free(ice);
		return -EIO;
	}
	if (snd_vt1724_chip_init(ice) < 0) {
		snd_vt1724_free(ice);
		return -EIO;
	}

	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops);
	if (err < 0) {
		snd_vt1724_free(ice);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*r_ice1712 = ice;
	return 0;
}
/*
*
* Registration
*
*/
/*
 * PCI probe: create the card instance, run the board-specific init,
 * install default clock hooks, build PCM devices and mixer controls,
 * optionally set up the MPU401 MIDI port, and register the card.
 */
static int __devinit snd_vt1724_probe(struct pci_dev *pci,
				      const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct snd_ice1712 *ice;
	int pcm_dev = 0, err;
	struct snd_ice1712_card_info * const *tbl, *c;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	strcpy(card->driver, "ICE1724");
	strcpy(card->shortname, "ICEnsemble ICE1724");

	err = snd_vt1724_create(card, pci, model[dev], &ice);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	/* field init before calling chip_init */
	ice->ext_clock_count = 0;

	/* run the board-specific initialization, if any */
	for (tbl = card_tables; *tbl; tbl++) {
		for (c = *tbl; c->subvendor; c++) {
			if (c->subvendor == ice->eeprom.subvendor) {
				strcpy(card->shortname, c->name);
				if (c->driver) /* specific driver? */
					strcpy(card->driver, c->driver);
				if (c->chip_init) {
					err = c->chip_init(ice);
					if (err < 0) {
						snd_card_free(card);
						return err;
					}
				}
				goto __found;
			}
		}
	}
	c = &no_matched;
 __found:
	/*
	 * VT1724 has separate DMAs for the analog and the SPDIF streams while
	 * ICE1712 has only one for both (mixed up).
	 *
	 * Confusingly the analog PCM is named "professional" here because it
	 * was called so in ice1712 driver, and vt1724 driver is derived from
	 * ice1712 driver.
	 */
	ice->pro_rate_default = PRO_RATE_DEFAULT;
	/* fill in standard clock hooks for those not set by board code */
	if (!ice->is_spdif_master)
		ice->is_spdif_master = stdclock_is_spdif_master;
	if (!ice->get_rate)
		ice->get_rate = stdclock_get_rate;
	if (!ice->set_rate)
		ice->set_rate = stdclock_set_rate;
	if (!ice->set_mclk)
		ice->set_mclk = stdclock_set_mclk;
	if (!ice->set_spdif_clock)
		ice->set_spdif_clock = stdclock_set_spdif_clock;
	if (!ice->get_spdif_master_type)
		ice->get_spdif_master_type = stdclock_get_spdif_master_type;
	if (!ice->ext_clock_names)
		ice->ext_clock_names = ext_clock_names;
	if (!ice->ext_clock_count)
		ice->ext_clock_count = ARRAY_SIZE(ext_clock_names);

	if (!ice->hw_rates)
		set_std_hw_rates(ice);

	/* PCM devices: analog ("professional"), SPDIF, independent */
	err = snd_vt1724_pcm_profi(ice, pcm_dev++);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	err = snd_vt1724_pcm_spdif(ice, pcm_dev++);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	err = snd_vt1724_pcm_indep(ice, pcm_dev++);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	err = snd_vt1724_ac97_mixer(ice);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	err = snd_vt1724_build_controls(ice);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	if (ice->pcm && ice->has_spdif) { /* has SPDIF I/O */
		err = snd_vt1724_spdif_build_controls(ice);
		if (err < 0) {
			snd_card_free(card);
			return err;
		}
	}

	if (c->build_controls) {
		err = c->build_controls(ice);
		if (err < 0) {
			snd_card_free(card);
			return err;
		}
	}

	if (!c->no_mpu401) {
		if (ice->eeprom.data[ICE_EEP2_SYSCONF] & VT1724_CFG_MPU401) {
			struct snd_rawmidi *rmidi;

			err = snd_rawmidi_new(card, "MIDI", 0, 1, 1, &rmidi);
			if (err < 0) {
				snd_card_free(card);
				return err;
			}
			ice->rmidi[0] = rmidi;
			rmidi->private_data = ice;
			strcpy(rmidi->name, "ICE1724 MIDI");
			rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
					    SNDRV_RAWMIDI_INFO_INPUT |
					    SNDRV_RAWMIDI_INFO_DUPLEX;
			snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
					    &vt1724_midi_output_ops);
			snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
					    &vt1724_midi_input_ops);

			/* set watermarks */
			outb(VT1724_MPU_RX_FIFO | 0x1,
			     ICEREG1724(ice, MPU_FIFO_WM));
			outb(0x1, ICEREG1724(ice, MPU_FIFO_WM));
			/* set UART mode */
			outb(VT1724_MPU_UART, ICEREG1724(ice, MPU_CTRL));
		}
	}

	sprintf(card->longname, "%s at 0x%lx, irq %i",
		card->shortname, ice->port, ice->irq);

	err = snd_card_register(card);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}
	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}
/* PCI remove: free the card and clear the driver data pointer. */
static void __devexit snd_vt1724_remove(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);

	snd_card_free(card);
	pci_set_drvdata(pci, NULL);
}
#ifdef CONFIG_PM
/*
 * PM suspend: stop all PCM streams, save the SPDIF/route register
 * state for resume, run the board hook, then power down the device.
 * No-op for boards that did not enable pm_suspend_enabled.
 */
static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ice1712 *ice = card->private_data;

	if (!ice->pm_suspend_enabled)
		return 0;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);

	snd_pcm_suspend_all(ice->pcm);
	snd_pcm_suspend_all(ice->pcm_pro);
	snd_pcm_suspend_all(ice->pcm_ds);
	snd_ac97_suspend(ice->ac97);

	/* snapshot the registers restored in snd_vt1724_resume() */
	spin_lock_irq(&ice->reg_lock);
	ice->pm_saved_is_spdif_master = ice->is_spdif_master(ice);
	ice->pm_saved_spdif_ctrl = inw(ICEMT1724(ice, SPDIF_CTRL));
	ice->pm_saved_spdif_cfg = inb(ICEREG1724(ice, SPDIF_CFG));
	ice->pm_saved_route = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
	spin_unlock_irq(&ice->reg_lock);

	if (ice->pm_suspend)
		ice->pm_suspend(ice);

	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}
/*
 * PM resume: re-enable the PCI device, re-run the chip init, restore
 * the clock mode and the registers saved at suspend time.
 * On unrecoverable failure the card is disconnected.
 */
static int snd_vt1724_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ice1712 *ice = card->private_data;

	if (!ice->pm_suspend_enabled)
		return 0;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);

	if (pci_enable_device(pci) < 0) {
		snd_card_disconnect(card);
		return -EIO;
	}

	pci_set_master(pci);

	snd_vt1724_chip_reset(ice);

	if (snd_vt1724_chip_init(ice) < 0) {
		snd_card_disconnect(card);
		return -EIO;
	}

	if (ice->pm_resume)
		ice->pm_resume(ice);

	if (ice->pm_saved_is_spdif_master) {
		/* switching to external clock via SPDIF */
		ice->set_spdif_clock(ice, 0);
	} else {
		/* internal on-card clock */
		snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
	}

	update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);

	outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG));
	outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK));

	if (ice->ac97)
		snd_ac97_resume(ice->ac97);

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif
/* PCI driver glue; PM hooks only when CONFIG_PM is enabled. */
static struct pci_driver driver = {
	.name = "ICE1724",
	.id_table = snd_vt1724_ids,
	.probe = snd_vt1724_probe,
	.remove = __devexit_p(snd_vt1724_remove),
#ifdef CONFIG_PM
	.suspend = snd_vt1724_suspend,
	.resume = snd_vt1724_resume,
#endif
};
/* Module entry point: register the ICE1724 PCI driver. */
static int __init alsa_card_ice1724_init(void)
{
	return pci_register_driver(&driver);
}

/* Module exit point: unregister the driver. */
static void __exit alsa_card_ice1724_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(alsa_card_ice1724_init)
module_exit(alsa_card_ice1724_exit)
| gpl-2.0 |
mautz-kernel/SFOS-hammerhead-custom-kernel | arch/tile/lib/cacheflush.c | 4614 | 5545 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <arch/icache.h>
#include <arch/spr_def.h>
/*
 * Invalidate the instruction cache for the VA range [start, end),
 * delegating to invalidate_icache() with a PAGE_SIZE step.
 */
void __flush_icache_range(unsigned long start, unsigned long end)
{
	invalidate_icache((const void *)start, end - start, PAGE_SIZE);
}
/* Force a load instruction to issue. */
static inline void force_load(char *p)
{
	/* The volatile read cannot be optimized away, so the cache line
	 * containing *p is actually fetched; the loaded value is unused. */
	*(volatile char *)p;
}
/*
* Flush and invalidate a VA range that is homed remotely on a single
* core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
* until the memory controller holds the flushed values.
*/
/**
 * finv_buffer_remote() - flush and invalidate a remotely-homed buffer
 * @buffer: start of the virtual address range to flush
 * @size: length of the range in bytes
 * @hfh: nonzero if the range is homed hash-for-home, zero if it is
 *       homed on a single remote core
 *
 * After the flush-and-invalidate request, walks backwards through the
 * buffer issuing loads (then inv's) spaced so that every memory
 * controller involved is forced to drain the flushed data before we
 * return.
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh)
{
	char *p, *base;
	size_t step_size, load_count;

	/*
	 * On TILEPro the striping granularity is a fixed 8KB; on
	 * TILE-Gx it is configurable, and we rely on the fact that
	 * the hypervisor always configures maximum striping, so that
	 * bits 9 and 10 of the PA are part of the stripe function, so
	 * every 512 bytes we hit a striping boundary.
	 *
	 */
#ifdef __tilegx__
	const unsigned long STRIPE_WIDTH = 512;
#else
	const unsigned long STRIPE_WIDTH = 8192;
#endif

#ifdef __tilegx__
	/*
	 * On TILE-Gx, we must disable the dstream prefetcher before doing
	 * a cache flush; otherwise, we could end up with data in the cache
	 * that we don't want there. Note that normally we'd do an mf
	 * after the SPR write to disabling the prefetcher, but we do one
	 * below, before any further loads, so there's no need to do it
	 * here.
	 */
	uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
	__insn_mtspr(SPR_DSTREAM_PF, 0);
#endif

	/*
	 * Flush and invalidate the buffer out of the local L1/L2
	 * and request the home cache to flush and invalidate as well.
	 */
	__finv_buffer(buffer, size);

	/*
	 * Wait for the home cache to acknowledge that it has processed
	 * all the flush-and-invalidate requests. This does not mean
	 * that the flushed data has reached the memory controller yet,
	 * but it does mean the home cache is processing the flushes.
	 */
	__insn_mf();

	/*
	 * Issue a load to the last cache line, which can't complete
	 * until all the previously-issued flushes to the same memory
	 * controller have also completed. If we weren't striping
	 * memory, that one load would be sufficient, but since we may
	 * be, we also need to back up to the last load issued to
	 * another memory controller, which would be the point where
	 * we crossed a "striping" boundary (the granularity of striping
	 * across memory controllers). Keep backing up and doing this
	 * until we are before the beginning of the buffer, or have
	 * hit all the controllers.
	 *
	 * If we are flushing a hash-for-home buffer, it's even worse.
	 * Each line may be homed on a different tile, and each tile
	 * may have up to four lines that are on different
	 * controllers. So as we walk backwards, we have to touch
	 * enough cache lines to satisfy these constraints. In
	 * practice this ends up being close enough to "load from
	 * every cache line on a full memory stripe on each
	 * controller" that we simply do that, to simplify the logic.
	 *
	 * On TILE-Gx the hash-for-home function is much more complex,
	 * with the upshot being we can't readily guarantee we have
	 * hit both entries in the 128-entry AMT that were hit by any
	 * load in the entire range, so we just re-load them all.
	 * With larger buffers, we may want to consider using a hypervisor
	 * trap to issue loads directly to each hash-for-home tile for
	 * each controller (doing it from Linux would trash the TLB).
	 */
	if (hfh) {
		step_size = L2_CACHE_BYTES;
#ifdef __tilegx__
		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
#else
		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
			      (1 << CHIP_LOG_NUM_MSHIMS());
#endif
	} else {
		step_size = STRIPE_WIDTH;
		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
	}

	/* Load the last byte of the buffer. */
	p = (char *)buffer + size - 1;
	force_load(p);

	/* Bump down to the end of the previous stripe or cache line. */
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));

	/* Figure out how far back we need to go. */
	/* (load_count - 2: one load was already issued above, and the
	 * loop below is inclusive of the line at base.) */
	base = p - (step_size * (load_count - 2));
	if ((unsigned long)base < (unsigned long)buffer)
		base = buffer;

	/*
	 * Fire all the loads we need. The MAF only has eight entries
	 * so we can have at most eight outstanding loads, so we
	 * unroll by that amount.
	 */
#pragma unroll 8
	for (; p >= base; p -= step_size)
		force_load(p);

	/*
	 * Repeat, but with inv's instead of loads, to get rid of the
	 * data we just loaded into our own cache and the old home L3.
	 * No need to unroll since inv's don't target a register.
	 */
	p = (char *)buffer + size - 1;
	__insn_inv(p);
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
	for (; p >= base; p -= step_size)
		__insn_inv(p);

	/* Wait for the load+inv's (and thus finvs) to have completed. */
	__insn_mf();

#ifdef __tilegx__
	/* Reenable the prefetcher. */
	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}
| gpl-2.0 |
daedae1112/kernel | drivers/ide/ide-pnp.c | 4614 | 2678 | /*
* This file provides autodetection for ISA PnP IDE interfaces.
* It was tested with "ESS ES1868 Plug and Play AudioDrive" IDE interface.
*
* Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* (for example /usr/src/linux/COPYING); if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/ide.h>
#define DRV_NAME "ide-pnp"
/* Add your devices here :)) */
static struct pnp_device_id idepnp_devices[] = {
	/* Generic ESDI/IDE/ATA compatible hard disk controller */
	{.id = "PNP0600", .driver_data = 0},
	{.id = ""}	/* terminating entry */
};

/* PIO-only port description: ISA PnP IDE interfaces have no DMA engine. */
static const struct ide_port_info ide_pnp_port_info = {
	.host_flags = IDE_HFLAG_NO_DMA,
	.chipset = ide_generic,
};
/*
 * Probe one PnP-reported IDE interface: validate its two I/O ranges and
 * IRQ, reserve the I/O regions, and register an IDE host on them.
 *
 * Returns 0 on success, -1 if the PnP resources are unusable, -EBUSY if
 * an I/O region is taken, or a negative errno from ide_host_add().
 */
static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
	struct ide_host *host;
	unsigned long base, ctl;
	int rc;
	struct ide_hw hw, *hws[] = { &hw };

	printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");

	/* need a command block, a control block and an IRQ */
	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
		return -1;

	base = pnp_port_start(dev, 0);	/* command block: 8 ports */
	ctl = pnp_port_start(dev, 1);	/* control block: 1 port */

	if (!request_region(base, 8, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
				DRV_NAME, base, base + 7);
		return -EBUSY;
	}

	if (!request_region(ctl, 1, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
				DRV_NAME, ctl);
		release_region(base, 8);
		return -EBUSY;
	}

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = pnp_irq(dev, 0);

	rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
	if (rc)
		goto out;

	pnp_set_drvdata(dev, host);

	return 0;
out:
	/* release both regions in reverse order of acquisition */
	release_region(ctl, 1);
	release_region(base, 8);
	return rc;
}
/* Unregister the IDE host and release both I/O regions claimed at probe. */
static void idepnp_remove(struct pnp_dev *dev)
{
	struct ide_host *host = pnp_get_drvdata(dev);

	ide_host_remove(host);

	release_region(pnp_port_start(dev, 1), 1);
	release_region(pnp_port_start(dev, 0), 8);
}
/* PnP driver glue. */
static struct pnp_driver idepnp_driver = {
	.name = "ide",
	.id_table = idepnp_devices,
	.probe = idepnp_probe,
	.remove = idepnp_remove,
};

/* Module entry point: register the PnP driver. */
static int __init pnpide_init(void)
{
	return pnp_register_driver(&idepnp_driver);
}

/* Module exit point: unregister the PnP driver. */
static void __exit pnpide_exit(void)
{
	pnp_unregister_driver(&idepnp_driver);
}

module_init(pnpide_init);
module_exit(pnpide_exit);

MODULE_LICENSE("GPL");
| gpl-2.0 |
ibazzi/rk3288-kernel | drivers/media/video/rk_camsys/ext_flashled_drv/leds-rt8547.c | 7 | 23156 | /*
* drivers/leds/leds-rt8547.c
* Driver for Richtek RT8547 LED Flash IC
*
* Copyright (C) 2014 Richtek Technology Corp.
* cy_huang <cy_huang@richtek.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
#ifdef CONFIG_OF
#include <linux/of.h>
#endif /* #ifdef CONFIG_OF */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#endif /* #ifdef CONFIG_DEBUG_FS */
#include "rtfled.h"
#include "leds-rt8547.h"
/* Per-device driver state. */
struct rt8547_chip {
	rt_fled_info_t base;			/* rtfled framework state (must be first) */
	struct device *dev;
	struct rt8547_platform_data *pdata;	/* GPIO numbers and polarities */
	spinlock_t io_lock;			/* serializes one-wire transfers */
	unsigned char suspend:1;		/* set while suspended */
	int in_use_mode;			/* current FLASHLIGHT_MODE_* */
#ifdef CONFIG_DEBUG_FS
	struct flashlight_device *fled_dev;	/* cached lookup for "fled" node */
	unsigned char reg_addr;			/* register selected via "reg" node */
	unsigned char reg_data;
#endif /* #ifdef CONFIG_DEBUG_FS */
};

#ifdef CONFIG_DEBUG_FS
/* Context handed to each debugfs file: chip pointer + node id. */
struct rt_debug_st {
	void *info;
	int id;
};

/* debugfs node ids; also index debugfs_file[] and rtdbg_data[]. */
enum {
	RT8547_DBG_REG,
	RT8547_DBG_DATA,
	RT8547_DBG_REGS,
	RT8547_DBG_FLED,
	RT8547_DBG_MAX
};

static struct dentry *debugfs_rt_dent;
static struct dentry *debugfs_file[RT8547_DBG_MAX];
static struct rt_debug_st rtdbg_data[RT8547_DBG_MAX];
#endif /* #ifdef CONFIG_DEBUG_FS */

/* Software shadow of the chip's write-only registers; index is reg - 1.
 * Kept in sync by rt8547_send_data(). */
static unsigned char rt8547_reg_initval[] = {
	0x06, /* REG 0x01 */
	0x12, /* REG 0x02 */
	0x02, /* REG 0x03 */
	0x0F, /* REG 0x04 */
};
/*
 * Clock one bit out on the one-wire FLSET line. A logical one is a
 * short inactive pulse followed by a long active period; a logical
 * zero is the inverse. Always returns 0.
 */
static inline int rt8547_send_bit(struct rt8547_platform_data *pdata,
				  unsigned char bit)
{
	unsigned int inactive_us = bit ? RT8547_SHORT_DELAY : RT8547_LONG_DELAY;
	unsigned int active_us = bit ? RT8547_LONG_DELAY : RT8547_SHORT_DELAY;

	gpio_set_value(pdata->flset_gpio, (~(pdata->flset_active) & 0x1));
	udelay(inactive_us);
	gpio_set_value(pdata->flset_gpio, ((pdata->flset_active) & 0x1));
	udelay(active_us);
	return 0;
}

/* Shift a full byte out, most significant bit first. Always returns 0. */
static inline int rt8547_send_byte(struct rt8547_platform_data *pdata,
				   unsigned char byte)
{
	int pos = 8;

	while (pos-- > 0)
		rt8547_send_bit(pdata, byte & (0x1 << pos));
	return 0;
}

/* Shift out only the low three bits (the register address), MSB first. */
static inline int rt8547_send_special_byte(struct rt8547_platform_data *pdata,
					   unsigned char byte)
{
	int pos = 3;

	while (pos-- > 0)
		rt8547_send_bit(pdata, byte & (0x1 << pos));
	return 0;
}
/* Begin a one-wire transfer: drive FLSET active and hold for the start
 * setup time. Always returns 0. */
static inline int rt8547_start_xfer(struct rt8547_platform_data *pdata)
{
	gpio_set_value(pdata->flset_gpio, ((pdata->flset_active) & 0x1));
	udelay(RT8547_START_DELAY);
	return 0;
}

/* End a one-wire transfer. Always returns 0. */
static inline int rt8547_stop_xfer(struct rt8547_platform_data *pdata)
{
	/*Redundant one bit as the stop condition */
	rt8547_send_bit(pdata, 1);
	return 0;
}
/*
 * Write one register over the bit-banged one-wire interface and mirror
 * the new value into the rt8547_reg_initval[] shadow array. Interrupts
 * are disabled for the whole transfer because the protocol timing must
 * not be disturbed. Always returns 0.
 */
static int rt8547_send_data(struct rt8547_chip *chip, unsigned char reg,
			    unsigned char data)
{
	struct rt8547_platform_data *pdata = chip->pdata;
	unsigned long flags;
	/* one transfer = slave address, 3-bit register address, data byte */
	unsigned char packet[3] = { RT8547_ONEWIRE_ADDR, reg, data };

	RT_DBG("rt8547-> 0: 0x%02x, 1: 0x%02x, 2: 0x%02x\n", packet[0],
	       packet[1], packet[2]);
	spin_lock_irqsave(&chip->io_lock, flags);
	rt8547_start_xfer(pdata);
	rt8547_send_byte(pdata, packet[0]);
	rt8547_send_special_byte(pdata, packet[1]);
	rt8547_send_byte(pdata, packet[2]);
	rt8547_stop_xfer(pdata);
	spin_unlock_irqrestore(&chip->io_lock, flags);
	/* keep the software shadow of the register in sync */
	rt8547_reg_initval[reg - 1] = data;
	return 0;
}
#ifdef CONFIG_DEBUG_FS
/* debugfs open: stash the per-node rt_debug_st for later read/write. */
static int reg_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * Parse num_of_par whitespace-separated integers from buf into param1.
 * A "0x"/"0X" prefix selects base 16, otherwise base 10 is used.
 * Returns 0 on success or -EINVAL on missing/malformed tokens.
 *
 * NOTE(review): param1 is long* while kstrtoul() takes unsigned long*;
 * kept as-is to preserve the interface — confirm callers expect
 * unsigned parsing.
 */
static int get_parameters(char *buf, long int *param1, int num_of_par)
{
	char *token;
	int base, cnt;

	token = strsep(&buf, " ");
	for (cnt = 0; cnt < num_of_par; cnt++) {
		if (token == NULL)
			return -EINVAL;
		/*
		 * Bug fix: guard the token[1] access. strsep() can yield
		 * an empty token (consecutive separators), and indexing
		 * past its terminator read out of bounds.
		 */
		if (token[0] != '\0' &&
		    ((token[1] == 'x') || (token[1] == 'X')))
			base = 16;
		else
			base = 10;
		if (kstrtoul(token, base, &param1[cnt]) != 0)
			return -EINVAL;
		token = strsep(&buf, " ");
	}
	return 0;
}
/*
 * debugfs read handler, shared by all four nodes:
 *  reg  — currently selected register address
 *  data — shadow value of the selected register
 *  regs — dump of the whole shadow register array
 *  fled — current flashlight mode
 */
static ssize_t reg_debug_read(struct file *filp, char __user *ubuf,
			      size_t count, loff_t *ppos)
{
	struct rt_debug_st *st = filp->private_data;
	struct rt8547_chip *di = st->info;
	char lbuf[1000];
	int i = 0, j = 0;

	lbuf[0] = '\0';
	switch (st->id) {
	case RT8547_DBG_REG:
		snprintf(lbuf, sizeof(lbuf), "0x%x\n", di->reg_addr);
		break;
	case RT8547_DBG_DATA:
		/* registers are write-only: report the shadow copy */
		di->reg_data = rt8547_reg_initval[di->reg_addr - 1];
		snprintf(lbuf, sizeof(lbuf), "0x%x\n", di->reg_data);
		break;
	case RT8547_DBG_REGS:
		for (i = RT8547_FLED_REG0; i < RT8547_FLED_REGMAX; i++)
			j += snprintf(lbuf + j, 20, "0x%02x:%02x\n", i,
				      rt8547_reg_initval[i - 1]);
		break;
	case RT8547_DBG_FLED:
		snprintf(lbuf, sizeof(lbuf), "%d\n", di->in_use_mode);
		break;
	default:
		return -EINVAL;
	}
	return simple_read_from_buffer(ubuf, count, ppos, lbuf, strlen(lbuf));
}
/*
 * debugfs write handler:
 *  reg  — select a register address (must be REG0..REG3)
 *  data — write a byte to the selected register via the one-wire bus
 *  fled — drive the flashlight through the rtfled framework
 *         (0=off, torch, flash — values follow flashlight_mode_t)
 * Returns the consumed byte count on success or a negative errno.
 */
static ssize_t reg_debug_write(struct file *filp,
			       const char __user *ubuf, size_t cnt,
			       loff_t *ppos)
{
	struct rt_debug_st *st = filp->private_data;
	struct rt8547_chip *di = st->info;
	char lbuf[32];
	int rc;
	long int param[5];

	if (cnt > sizeof(lbuf) - 1)
		return -EINVAL;
	rc = copy_from_user(lbuf, ubuf, cnt);
	if (rc)
		return -EFAULT;
	lbuf[cnt] = '\0';

	switch (st->id) {
	case RT8547_DBG_REG:
		rc = get_parameters(lbuf, param, 1);
		if ((param[0] < RT8547_FLED_REGMAX) && (rc == 0)) {
			if ((param[0] >= RT8547_FLED_REG0
			     && param[0] <= RT8547_FLED_REG3))
				di->reg_addr = (unsigned char)param[0];
			else
				rc = -EINVAL;
		} else
			rc = -EINVAL;
		break;
	case RT8547_DBG_DATA:
		rc = get_parameters(lbuf, param, 1);
		if ((param[0] <= 0xff) && (rc == 0)) {
			rt8547_send_data(di, di->reg_addr,
					 (unsigned char)param[0]);
		} else
			rc = -EINVAL;
		break;
	case RT8547_DBG_FLED:
		/* lazily look up the rtfled device registered at probe */
		if (!di->fled_dev)
			di->fled_dev = find_flashlight_by_name("rt-flash-led");
		rc = get_parameters(lbuf, param, 1);
		if ((param[0] <= FLASHLIGHT_MODE_FLASH) && (rc == 0)
		    && di->fled_dev) {
			switch (param[0]) {
			case FLASHLIGHT_MODE_TORCH:
				flashlight_set_torch_brightness(di->fled_dev,
								2);
				flashlight_set_mode(di->fled_dev,
						    FLASHLIGHT_MODE_TORCH);
				break;
			case FLASHLIGHT_MODE_FLASH:
				flashlight_set_strobe_timeout(di->fled_dev,
							      256, 256);
				flashlight_set_strobe_brightness(di->fled_dev,
								 18);
				flashlight_set_mode(di->fled_dev,
						    FLASHLIGHT_MODE_FLASH);
				flashlight_strobe(di->fled_dev);
				break;
			case FLASHLIGHT_MODE_OFF:
				flashlight_set_mode(di->fled_dev,
						    FLASHLIGHT_MODE_OFF);
				break;
			}
		} else
			rc = -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	/* report full consumption on success */
	if (rc == 0)
		rc = cnt;
	return rc;
}
/* Shared file operations for all rt8547 debugfs nodes. */
static const struct file_operations reg_debug_ops = {
	.open = reg_debug_open,
	.write = reg_debug_write,
	.read = reg_debug_read
};
static void rt8547_create_debugfs(struct rt8547_chip *chip)
{
RT_DBG("add debugfs for RT8547\n");
debugfs_rt_dent = debugfs_create_dir("rt8547_dbg", 0);
if (!IS_ERR(debugfs_rt_dent)) {
rtdbg_data[0].info = chip;
rtdbg_data[0].id = RT8547_DBG_REG;
debugfs_file[0] = debugfs_create_file("reg",
S_IFREG | S_IRUGO,
debugfs_rt_dent,
(void *)&rtdbg_data[0],
®_debug_ops);
rtdbg_data[1].info = chip;
rtdbg_data[1].id = RT8547_DBG_DATA;
debugfs_file[1] = debugfs_create_file("data",
S_IFREG | S_IRUGO,
debugfs_rt_dent,
(void *)&rtdbg_data[1],
®_debug_ops);
rtdbg_data[2].info = chip;
rtdbg_data[2].id = RT8547_DBG_REGS;
debugfs_file[2] = debugfs_create_file("regs",
S_IFREG | S_IRUGO,
debugfs_rt_dent,
(void *)&rtdbg_data[2],
®_debug_ops);
rtdbg_data[3].info = chip;
rtdbg_data[3].id = RT8547_DBG_FLED;
debugfs_file[3] = debugfs_create_file("fled",
S_IFREG | S_IRUGO,
debugfs_rt_dent,
(void *)&rtdbg_data[3],
®_debug_ops);
} else {
dev_err(chip->dev, "create debugfs failed\n");
}
}
/* Remove the whole rt8547 debugfs directory tree, if it was created. */
static void rt8547_remove_debugfs(void)
{
	if (!IS_ERR(debugfs_rt_dent))
		debugfs_remove_recursive(debugfs_rt_dent);
}
#endif /* #ifdef CONFIG_DEBUG_FS */
/* Drive FLSET to its active level to power the part's interface up. */
static inline void rt8547_fled_power_on(struct rt8547_platform_data *pdata)
{
	if (gpio_is_valid(pdata->flset_gpio))
		gpio_set_value(pdata->flset_gpio, ((pdata->flset_active) & 0x1));
}

/* Drive FLSET inactive and wait RT8547_STOP_DELAY for the shutdown.
 * NOTE(review): the udelay() is outside the if, so it also runs when
 * the GPIO is invalid — presumably harmless, confirm intent. */
static inline void rt8547_fled_power_off(struct rt8547_platform_data *pdata)
{
	if (gpio_is_valid(pdata->flset_gpio))
		gpio_set_value(pdata->flset_gpio, (~(pdata->flset_active) & 0x1));
	udelay(RT8547_STOP_DELAY);
}
/* Assert or deassert the CTL (strobe control) GPIO, if one is wired. */
static inline void rt8547_fled_ctrl_en(struct rt8547_platform_data *pdata,
				       int en)
{
	if (gpio_is_valid(pdata->ctl_gpio)) {
		int level = en ? ((pdata->ctl_active) & 0x1)
			       : (~(pdata->ctl_active) & 0x1);

		gpio_set_value(pdata->ctl_gpio, level);
	}
	RT_DBG("en %d\n", en);
}

/* Assert or deassert the FLEN (flash enable) GPIO, if one is wired. */
static inline void rt8547_fled_flash_en(struct rt8547_platform_data *pdata,
					int en)
{
	if (gpio_is_valid(pdata->flen_gpio)) {
		int level = en ? ((pdata->flen_active) & 0x1)
			       : (~(pdata->flen_active) & 0x1);

		gpio_set_value(pdata->flen_gpio, level);
	}
	RT_DBG("en %d\n", en);
}
/* rtfled hook: nothing to initialize beyond what probe already did. */
static int rt8547_fled_init(struct rt_fled_info *info)
{
	RT_DBG("\n");
	return 0;
}

/* rtfled hook: clear the suspended flag on resume. */
static int rt8547_fled_resume(struct rt_fled_info *info)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;

	RT_DBG("\n");
	fi->suspend = 0;
	return 0;
}

/* rtfled hook: mark the device suspended; hardware state is untouched. */
static int rt8547_fled_suspend(struct rt_fled_info *info, pm_message_t state)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;

	RT_DBG("\n");
	fi->suspend = 1;
	return 0;
}
/*
 * Switch the flash LED between off / torch / flash modes.
 *
 * The interface is powered up on the first transition away from OFF
 * and powered back down when returning to OFF. MIXED mode is not
 * supported by this part.
 *
 * Returns 0 on success or -EINVAL for an unsupported mode.
 */
static int rt8547_fled_set_mode(struct rt_fled_info *info,
				flashlight_mode_t mode)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;
	unsigned char tmp = 0;
	int ret = 0;

	RT_DBG("mode=%d\n", mode);
	switch (mode) {
	case FLASHLIGHT_MODE_TORCH:
		if (fi->in_use_mode == FLASHLIGHT_MODE_OFF)
			rt8547_fled_power_on(fi->pdata);
		/* MODESEL set selects torch */
		tmp = rt8547_reg_initval[RT8547_FLED_REG2 - 1];
		tmp |= RT8547_MODESEL_MASK;
		rt8547_send_data(fi, RT8547_FLED_REG2, tmp);
		rt8547_fled_ctrl_en(fi->pdata, 1);
		rt8547_fled_flash_en(fi->pdata, 1);
		fi->in_use_mode = mode;
		break;
	case FLASHLIGHT_MODE_FLASH:
		if (fi->in_use_mode == FLASHLIGHT_MODE_OFF)
			rt8547_fled_power_on(fi->pdata);
		/* MODESEL clear selects flash; the pulse itself is
		 * triggered later via rt8547_fled_strobe() */
		tmp = rt8547_reg_initval[RT8547_FLED_REG2 - 1];
		tmp &= ~RT8547_MODESEL_MASK;
		rt8547_send_data(fi, RT8547_FLED_REG2, tmp);
		fi->in_use_mode = mode;
		break;
	case FLASHLIGHT_MODE_OFF:
		rt8547_fled_flash_en(fi->pdata, 0);
		rt8547_fled_ctrl_en(fi->pdata, 0);
		if (fi->in_use_mode != FLASHLIGHT_MODE_OFF)
			rt8547_fled_power_off(fi->pdata);
		fi->in_use_mode = mode;
		break;
	case FLASHLIGHT_MODE_MIXED:
	default:
		ret = -EINVAL;
	}
	/* Bug fix: previously returned 0 unconditionally, silently
	 * discarding the -EINVAL set for unsupported modes. */
	return ret;
}
/* Report the mode most recently committed by rt8547_fled_set_mode(). */
static int rt8547_fled_get_mode(struct rt_fled_info *info)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;

	RT_DBG("\n");
	return fi->in_use_mode;
}

/* Fire one flash pulse: take FLEN/CTL low, then raise them again. */
static int rt8547_fled_strobe(struct rt_fled_info *info)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;

	RT_DBG("\n");
	rt8547_fled_flash_en(fi->pdata, 0);
	rt8547_fled_ctrl_en(fi->pdata, 0);
	rt8547_fled_ctrl_en(fi->pdata, 1);
	rt8547_fled_flash_en(fi->pdata, 1);
	return 0;
}
/* Torch current table: 25mA steps starting at 25mA. */
static int rt8547_fled_torch_current_list(struct rt_fled_info *info,
					  int selector)
{
	RT_DBG("selector=%d\n", selector);
	return (selector + 1) * 25000; /* unit: uA */
}

/* Strobe current table: 50mA steps starting at 100mA. */
static int rt8547_fled_strobe_current_list(struct rt_fled_info *info,
					   int selector)
{
	RT_DBG("selector=%d\n", selector);
	return 50000 * selector + 100000; /* unit: uA */
}

/* Timeout current level table: 50mA steps starting at 100mA. */
static int rt8547_fled_timeout_level_list(struct rt_fled_info *info,
					  int selector)
{
	RT_DBG("selector=%d\n", selector);
	return 50000 * selector + 100000; /* unit: uA */
}

/* Low-voltage protection threshold table: 100mV steps from 3000mV. */
static int rt8547_fled_lv_protection_list(struct rt_fled_info *info,
					  int selector)
{
	RT_DBG("selector=%d\n", selector);
	return 100 * selector + 3000; /* unit: mV */
}

/* Strobe timeout table: 32ms steps starting at 64ms. */
static int rt8547_fled_strobe_timeout_list(struct rt_fled_info *info,
					   int selector)
{
	RT_DBG("selector=%d\n", selector);
	return (selector + 2) * 32; /* unit: mS */
}
static int rt8547_fled_set_torch_current_sel(struct rt_fled_info *info,
int selector)
{
struct rt8547_chip *fi = (struct rt8547_chip *)info;
unsigned char tmp = 0;
RT_DBG("selector=%d\n", selector);
tmp = rt8547_reg_initval[RT8547_FLED_REG2 - 1];
tmp &= ~RT8547_TCLEVEL_MASK;
tmp |= selector;
rt8547_send_data(fi, RT8547_FLED_REG2, tmp);
return 0;
}
static int rt8547_fled_set_strobe_current_sel(struct rt_fled_info *info,
int selector)
{
struct rt8547_chip *fi = (struct rt8547_chip *)info;
unsigned char tmp = 0;
RT_DBG("selector=%d\n", selector);
tmp = rt8547_reg_initval[RT8547_FLED_REG1 - 1];
tmp &= ~RT8547_SCLEVEL_MASK;
tmp |= selector;
rt8547_send_data(fi, RT8547_FLED_REG1, tmp);
return 0;
}
/* Program the timeout current-level selector into REG1's TOCLEVEL field. */
static int rt8547_fled_set_timeout_level_sel(struct rt_fled_info *info,
					     int selector)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;
	unsigned char regval;

	RT_DBG("selector=%d\n", selector);
	if (selector > RT8547_TOL_MAX)
		return -EINVAL;
	regval = rt8547_reg_initval[RT8547_FLED_REG1 - 1] &
		 ~RT8547_TOCLEVEL_MASK;
	regval |= (selector << RT8547_TOCLEVEL_SHFT);
	rt8547_send_data(fi, RT8547_FLED_REG1, regval);
	return 0;
}

/* Program the low-voltage protection selector into REG0. */
static int rt8547_fled_set_lv_protection_sel(struct rt_fled_info *info,
					     int selector)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;
	unsigned char regval;

	RT_DBG("selector=%d\n", selector);
	if (selector > RT8547_LVP_MAX)
		return -EINVAL;
	regval = rt8547_reg_initval[RT8547_FLED_REG0 - 1] & ~RT8547_LVP_MASK;
	regval |= selector;
	rt8547_send_data(fi, RT8547_FLED_REG0, regval);
	return 0;
}

/* Program the strobe timeout selector into REG3. */
static int rt8547_fled_set_strobe_timeout_sel(struct rt_fled_info *info,
					      int selector)
{
	struct rt8547_chip *fi = (struct rt8547_chip *)info;
	unsigned char regval;

	RT_DBG("selector=%d\n", selector);
	if (selector > RT8547_STO_MAX)
		return -EINVAL;
	regval = rt8547_reg_initval[RT8547_FLED_REG3 - 1] & ~RT8547_STO_MASK;
	regval |= selector;
	rt8547_send_data(fi, RT8547_FLED_REG3, regval);
	return 0;
}
/* Current torch-current selector, read from the REG2 shadow copy. */
static int rt8547_fled_get_torch_current_sel(struct rt_fled_info *info)
{
	return rt8547_reg_initval[RT8547_FLED_REG2 - 1] & RT8547_TCLEVEL_MASK;
}

/* Current strobe-current selector, read from the REG1 shadow copy. */
static int rt8547_fled_get_strobe_current_sel(struct rt_fled_info *info)
{
	return rt8547_reg_initval[RT8547_FLED_REG1 - 1] & RT8547_SCLEVEL_MASK;
}

/* Current timeout-level selector, extracted from REG1's TOCLEVEL field. */
static int rt8547_fled_get_timeout_level_sel(struct rt_fled_info *info)
{
	return (rt8547_reg_initval[RT8547_FLED_REG1 - 1] &
		RT8547_TOCLEVEL_MASK) >> RT8547_TOCLEVEL_SHFT;
}

/* Current low-voltage protection selector, from the REG0 shadow copy. */
static int rt8547_fled_get_lv_protection_sel(struct rt_fled_info *info)
{
	return rt8547_reg_initval[RT8547_FLED_REG0 - 1] & RT8547_LVP_MASK;
}

/* Current strobe-timeout selector, from the REG3 shadow copy. */
static int rt8547_fled_get_strobe_timeout_sel(struct rt_fled_info *info)
{
	return rt8547_reg_initval[RT8547_FLED_REG3 - 1] & RT8547_STO_MASK;
}
/* HAL callbacks exposed to the generic rtfled framework. */
static struct rt_fled_hal rt8547_fled_hal = {
	.fled_init = rt8547_fled_init,
	.fled_suspend = rt8547_fled_suspend,
	.fled_resume = rt8547_fled_resume,
	.fled_set_mode = rt8547_fled_set_mode,
	.fled_get_mode = rt8547_fled_get_mode,
	.fled_strobe = rt8547_fled_strobe,
	.fled_torch_current_list = rt8547_fled_torch_current_list,
	.fled_strobe_current_list = rt8547_fled_strobe_current_list,
	.fled_timeout_level_list = rt8547_fled_timeout_level_list,
	.fled_lv_protection_list = rt8547_fled_lv_protection_list,
	.fled_strobe_timeout_list = rt8547_fled_strobe_timeout_list,
	/* method to set */
	.fled_set_torch_current_sel = rt8547_fled_set_torch_current_sel,
	.fled_set_strobe_current_sel = rt8547_fled_set_strobe_current_sel,
	.fled_set_timeout_level_sel = rt8547_fled_set_timeout_level_sel,
	.fled_set_lv_protection_sel = rt8547_fled_set_lv_protection_sel,
	.fled_set_strobe_timeout_sel = rt8547_fled_set_strobe_timeout_sel,
	/* method to get */
	.fled_get_torch_current_sel = rt8547_fled_get_torch_current_sel,
	.fled_get_strobe_current_sel = rt8547_fled_get_strobe_current_sel,
	.fled_get_timeout_level_sel = rt8547_fled_get_timeout_level_sel,
	.fled_get_lv_protection_sel = rt8547_fled_get_lv_protection_sel,
	.fled_get_strobe_timeout_sel = rt8547_fled_get_strobe_timeout_sel,
};

/* Default flashlight properties handed to the rtfled core at probe. */
static struct flashlight_properties rt8547_fled_props = {
	.type = FLASHLIGHT_TYPE_LED,
	.torch_brightness = 2,
	.torch_max_brightness = 15,
	.strobe_brightness = 18,
	.strobe_max_brightness = 30,
	.strobe_delay = 2,
	.strobe_timeout = 544,
	.alias_name = "rt8547-fled",
};
/*
 * Fill pdata from the devicetree node: optional default low-voltage
 * protection ("rt,def_lvp") and timeout level ("rt,def_tol") are
 * clamped to the hardware maxima and folded into the register shadow
 * array; the three control GPIOs are looked up by name. Compiled out
 * entirely without CONFIG_OF.
 */
static void rt8547_parse_dt(struct rt8547_platform_data *pdata,
			    struct device *dev)
{
#ifdef CONFIG_OF
	struct device_node *np = dev->of_node;
	u32 tmp;

	if (of_property_read_u32(np, "rt,def_lvp", &tmp) < 0) {
		dev_warn(dev, "use 3V as the default lvp\n");
	} else {
		if (tmp > RT8547_LVP_MAX)
			tmp = RT8547_LVP_MAX;
		rt8547_reg_initval[RT8547_FLED_REG0 - 1] &= ~RT8547_LVP_MASK;
		rt8547_reg_initval[RT8547_FLED_REG0 - 1] |= tmp;
	}
	if (of_property_read_u32(np, "rt,def_tol", &tmp) < 0) {
		dev_warn(dev, "use 100mA as the default timeout level\n");
	} else {
		if (tmp > RT8547_TOL_MAX)
			tmp = RT8547_TOL_MAX;
		tmp <<= RT8547_TOCLEVEL_SHFT;
		rt8547_reg_initval[RT8547_FLED_REG1 - 1] &=
			~RT8547_TOCLEVEL_MASK;
		rt8547_reg_initval[RT8547_FLED_REG1 - 1] |= tmp;
	}
	pdata->flen_gpio = of_get_named_gpio(np, "rt,flen_gpio", 0);
	pdata->ctl_gpio = of_get_named_gpio(np, "rt,ctl_gpio", 0);
	pdata->flset_gpio = of_get_named_gpio(np, "rt,flset_gpio", 0);
#endif /* #ifdef CONFIG_OF */
}
static void rt8547_parse_pdata(struct rt8547_platform_data *pdata,
struct device *dev)
{
u32 tmp;
tmp = pdata->def_lvp;
rt8547_reg_initval[RT8547_FLED_REG0 - 1] &= ~RT8547_LVP_MASK;
rt8547_reg_initval[RT8547_FLED_REG0 - 1] |= tmp;
tmp = pdata->def_tol;
tmp <<= RT8547_TOCLEVEL_SHFT;
rt8547_reg_initval[RT8547_FLED_REG1 - 1] &= ~RT8547_TOCLEVEL_MASK;
rt8547_reg_initval[RT8547_FLED_REG1 - 1] |= tmp;
}
/*
 * Claim the three control GPIOs, each initialized to its inactive
 * level. Returns 0 on success or a negative errno; GPIOs claimed so
 * far are released on failure. A failure to claim the FLSET pin is
 * deliberately tolerated (see the inline note below).
 */
static int rt8547_io_init(struct rt8547_platform_data *pdata,
			  struct device *dev)
{
	int rc = 0;

	if (gpio_is_valid(pdata->flen_gpio)) {
		rc = gpio_request_one(pdata->flen_gpio, ((~(pdata->flen_active) & 0x1) ? GPIOF_OUT_INIT_HIGH:GPIOF_OUT_INIT_LOW),
				      "rt8547_flen");
		if (rc < 0) {
			dev_err(dev, "request rt8547 flash en pin fail\n");
			goto gpio_request1;
		}
	}
	if (gpio_is_valid(pdata->ctl_gpio)) {
		rc = gpio_request_one(pdata->ctl_gpio, ((~(pdata->ctl_active) & 0x1) ? GPIOF_OUT_INIT_HIGH:GPIOF_OUT_INIT_LOW),
				      "rt8547_ctl");
		if (rc < 0) {
			dev_err(dev, "request rt8547 ctl pin fail\n");
			goto gpio_request2;
		}
	}
	if (gpio_is_valid(pdata->flset_gpio)) {
		rc = gpio_request_one(pdata->flset_gpio, ((~(pdata->flset_active) & 0x1) ? GPIOF_OUT_INIT_HIGH:GPIOF_OUT_INIT_LOW),
				      "rt8547_flset");
		if (rc < 0) {
			dev_err(dev, "request rt8547 flash set pin fail\n");
			/*GPIO(gpio7 GPIO_B5) is EBUSY when register after factory data reset, but after power on ,it's normal*/
			/*goto gpio_request3;*/
		}
	}
	return 0;
/*
gpio_request3:
	if(gpio_is_valid(pdata->ctl_gpio))
		gpio_free(pdata->ctl_gpio);
*/
gpio_request2:
	if (gpio_is_valid(pdata->flen_gpio))
		gpio_free(pdata->flen_gpio);
gpio_request1:
	return rc;
}
/* Park every claimed GPIO as an input and release it. Always returns 0. */
static int rt8547_io_deinit(struct rt8547_platform_data *pdata)
{
	const int gpios[] = {
		pdata->flen_gpio,	/* flash enable */
		pdata->ctl_gpio,	/* strobe control */
		pdata->flset_gpio,	/* one-wire data/power */
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gpios); i++) {
		if (gpio_is_valid(gpios[i])) {
			gpio_direction_input(gpios[i]);
			gpio_free(gpios[i]);
		}
	}
	return 0;
}
/* Push the whole shadow register array (REG0..REG3, contiguous
 * addresses — see rt8547_reg_initval[]) into the chip. */
static void rt8547_reg_init(struct rt8547_chip *chip)
{
	int reg;

	RT_DBG("\n");
	for (reg = RT8547_FLED_REG0; reg <= RT8547_FLED_REG3; reg++)
		rt8547_send_data(chip, reg, rt8547_reg_initval[reg - 1]);
}
/* Child device consumed by the generic rtfled framework; its parent is
 * filled in during probe. */
static struct platform_device rt_fled_pdev = {
	.name = "rt-flash-led",
	.id = -1,
};
static int rt8547_led_probe(struct platform_device *pdev)
{
struct rt8547_platform_data *pdata = pdev->dev.platform_data;
struct rt8547_chip *chip;
bool use_dt = pdev->dev.of_node;
int ret = 0;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
if (use_dt) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto err_probe;
rt8547_parse_dt(pdata, &pdev->dev);
} else {
if (!pdata)
goto err_probe;
rt8547_parse_pdata(pdata, &pdev->dev);
}
ret = rt8547_io_init(pdata, &pdev->dev);
if (ret < 0)
goto err_io;
chip->dev = &pdev->dev;
chip->pdata = pdata;
spin_lock_init(&chip->io_lock);
chip->in_use_mode = FLASHLIGHT_MODE_OFF;
platform_set_drvdata(pdev, chip);
rt8547_fled_power_on(pdata);
rt8547_reg_init(chip);
rt8547_fled_power_off(pdata);
chip->base.hal = &rt8547_fled_hal;
chip->base.init_props = &rt8547_fled_props;
rt_fled_pdev.dev.parent = &pdev->dev;
ret = platform_device_register(&rt_fled_pdev);
if (ret < 0) {
dev_err(&pdev->dev, "register rtfled fail\n");
goto err_io;
}
#ifdef CONFIG_DEBUG_FS
rt8547_create_debugfs(chip);
#endif /* #ifdef CONFIG_DEBUG_FS */
dev_info(&pdev->dev, "driver successfully registered\n");
return 0;
err_io:
if (use_dt)
devm_kfree(&pdev->dev, pdata);
err_probe:
devm_kfree(&pdev->dev, chip);
return ret;
}
/* Tear down in reverse order of probe: debugfs, the rtfled child
 * device, then the GPIOs. devm frees chip/pdata automatically. */
static int rt8547_led_remove(struct platform_device *pdev)
{
	struct rt8547_chip *chip = platform_get_drvdata(pdev);

#ifdef CONFIG_DEBUG_FS
	rt8547_remove_debugfs();
#endif /* #ifdef CONFIG_DEBUG_FS */
	platform_device_unregister(&rt_fled_pdev);
	rt8547_io_deinit(chip->pdata);
	return 0;
}
static const struct of_device_id rt_match_table[] = {
{.compatible = "rt,rt8547",},
{},
};
static struct platform_driver rt8547_led_driver = {
.driver = {
.name = "rt8547",
.owner = THIS_MODULE,
.of_match_table = rt_match_table,
},
.probe = rt8547_led_probe,
.remove = rt8547_led_remove,
};
static int rt8547_led_init(void)
{
return platform_driver_register(&rt8547_led_driver);
}
module_init(rt8547_led_init);
static void rt8547_led_exit(void)
{
platform_driver_unregister(&rt8547_led_driver);
}
module_exit(rt8547_led_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("CY Huang <cy_huang@richtek.com>");
MODULE_DESCRIPTION("LED Flash Driver for RT8547");
MODULE_VERSION(RT8547_DRV_VER);
| gpl-2.0 |
lightstudio/uve | madplays-0.15.1b/madmix.c | 7 | 7433 | /*
* madplay - MPEG audio decoder and player
* Copyright (C) 2000-2004 Robert Leslie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Id: madmix.c,v 1.24 2004/01/23 09:41:31 rob Exp $
*/
# ifdef HAVE_CONFIG_H
# include "config.h"
# endif
# include "global.h"
# include <stdio.h>
# include <stdarg.h>
# include <stdlib.h>
# include <string.h>
# include <unistd.h>
# ifdef HAVE_ERRNO_H
# include <errno.h>
# endif
# include <mad.h>
# include "getopt.h"
# include "gettext.h"
# include "audio.h"
/* Per-input mixing state: one entry per input file on the command line. */
struct audio {
  char const *fname;		/* input file name (for error messages) */
  FILE *file;			/* open stream of raw mad_frame records */
  int active;			/* nonzero while frames remain to mix */
  mad_fixed_t scale;		/* per-input mixing gain */
  struct mad_frame frame;	/* most recently read frame */
};

/* Program name, used as the label in error messages. */
char const *argv0;
/*
 * NAME:	error()
 * DESCRIPTION:	show a labeled error message
 *
 * If the format begins with ':', the message is produced with perror()
 * using the errno value saved on entry; a format of exactly ":" takes
 * the perror() label from the first variadic argument instead.
 */
static
void error(char const *id, char const *format, ...)
{
  int err;
  va_list args;

  /* save errno before any stdio call can clobber it */
  err = errno;

  if (id)
    fprintf(stderr, "%s: ", id);

  va_start(args, format);

  if (*format == ':') {
    if (format[1] == 0) {
      /* ":" — label comes from the variadic arguments */
      format = va_arg(args, char const *);
      errno = err;
      perror(format);
    }
    else {
      /* ":label" — label follows the colon */
      errno = err;
      perror(format + 1);
    }
  }
  else {
    vfprintf(stderr, format, args);
    fputc('\n', stderr);
  }

  va_end(args);
}
/*
 * NAME:	do_output()
 * DESCRIPTION:	play mixed output
 *
 * Reconfigures the audio module whenever the channel count or sample
 * rate of the synthesized PCM changes, then submits one frame's worth
 * of samples for dithered playback. Returns 0 on success, -1 on error.
 */
static
int do_output(int (*audio)(union audio_control *),
	      struct mad_frame *frame, struct mad_synth *synth)
{
  union audio_control control;

  /* cached device configuration; CONFIG is only re-issued on change */
  static unsigned int channels;
  static unsigned long speed;

  if (channels != synth->pcm.channels ||
      speed != synth->pcm.samplerate) {
    audio_control_init(&control, AUDIO_COMMAND_CONFIG);
    control.config.channels = synth->pcm.channels;
    control.config.speed = synth->pcm.samplerate;

    if (audio(&control) == -1) {
      error("output", audio_error);
      return -1;
    }

    channels = synth->pcm.channels;
    speed = synth->pcm.samplerate;
  }

  audio_control_init(&control, AUDIO_COMMAND_PLAY);
  control.play.nsamples = synth->pcm.length;
  control.play.samples[0] = synth->pcm.samples[0];
  control.play.samples[1] = synth->pcm.samples[1];
  control.play.mode = AUDIO_MODE_DITHER;

  if (audio(&control) == -1) {
    error("output", audio_error);
    return -1;
  }

  return 0;
}
/*
 * NAME:	do_mix()
 * DESCRIPTION:	perform mixing and audio output
 *
 * Repeatedly reads one raw mad_frame record from every still-active
 * input, accumulates the scaled subband samples into a single muted
 * frame, synthesizes it to PCM and plays it. An input that hits EOF or
 * a read error is deactivated; the loop ends once no input remains.
 * Always returns 0.
 */
static
int do_mix(struct audio *mix, int ninputs, int (*audio)(union audio_control *))
{
  struct mad_frame frame;
  struct mad_synth synth;
  int i, count;

  mad_frame_init(&frame);
  mad_synth_init(&synth);

  count = ninputs;

  while (1) {
    int ch, s, sb;

    mad_frame_mute(&frame);

    for (i = 0; i < ninputs; ++i) {
      if (!mix[i].active)
	continue;

      /* NOTE(review): inputs are raw struct mad_frame images, so the
       * file format is libmad-version dependent — confirm producer */
      if (fread(&mix[i].frame, sizeof(mix[i].frame), 1, mix[i].file) != 1) {
	if (ferror(mix[i].file))
	  error("fread", ":", mix[i].fname);

	mix[i].active = 0;
	--count;
	continue;
      }

      mix[i].frame.overlap = 0;

      /* adopt the header of the first contributing input */
      if (frame.header.layer == 0) {
	frame.header.layer          = mix[i].frame.header.layer;
	frame.header.mode           = mix[i].frame.header.mode;
	frame.header.mode_extension = mix[i].frame.header.mode_extension;
	frame.header.emphasis       = mix[i].frame.header.emphasis;
	frame.header.bitrate        = mix[i].frame.header.bitrate;
	frame.header.samplerate     = mix[i].frame.header.samplerate;
	frame.header.flags          = mix[i].frame.header.flags;
	frame.header.private_bits   = mix[i].frame.header.private_bits;
	frame.header.duration       = mix[i].frame.header.duration;
      }

      /* accumulate scaled subband samples: 2 channels x 36 samples x
       * 32 subbands */
      for (ch = 0; ch < 2; ++ch) {
	for (s = 0; s < 36; ++s) {
	  for (sb = 0; sb < 32; ++sb) {
	    frame.sbsample[ch][s][sb] +=
	      mad_f_mul(mix[i].frame.sbsample[ch][s][sb], mix[i].scale);
	  }
	}
      }
    }

    /* all inputs exhausted: the frame mixed this pass is empty */
    if (count == 0)
      break;

    mad_synth_frame(&synth, &frame);
    do_output(audio, &frame, &synth);
  }

  mad_synth_finish(&synth);
  mad_frame_finish(&frame);

  return 0;
}
/*
* NAME: audio->init()
* DESCRIPTION: initialize the audio output module
*/
static
int audio_init(int (*audio)(union audio_control *), char const *path)
{
union audio_control control;
audio_control_init(&control, AUDIO_COMMAND_INIT);
control.init.path = path;
if (audio(&control) == -1) {
error("audio", audio_error, control.init.path);
return -1;
}
return 0;
}
/*
* NAME: audio->finish()
* DESCRIPTION: terminate the audio output module
*/
static
int audio_finish(int (*audio)(union audio_control *))
{
union audio_control control;
audio_control_init(&control, AUDIO_COMMAND_FINISH);
if (audio(&control) == -1) {
error("audio", audio_error);
return -1;
}
return 0;
}
/*
 * NAME:	usage()
 * DESCRIPTION:	display usage message and exit
 *
 * Note: despite the comment above, this only prints; callers perform
 * the actual return/exit themselves.
 */
static
void usage(char const *argv0)
{
  fprintf(stderr, _("Usage: %s input1 [input2 ...]\n"), argv0);
}
/*
 * NAME:	main()
 * DESCRIPTION:	program entry point
 *
 * Mixes N raw-frame input files (or stdin for "-") into one audio
 * output chosen with -o.  Exit codes: 0 ok, 1 usage error, 2 output
 * init error, 3 out of memory, 5 mixing failed, 6 close failed,
 * 7 audio shutdown failed.
 */
int main(int argc, char *argv[])
{
  int opt, ninputs, i, result = 0;
  int (*output)(union audio_control *) = 0;
  char const *fname, *opath = 0;
  FILE *file;
  struct audio *mix;

  argv0 = argv[0];

  /* --version / --help are only recognized as the first argument */
  if (argc > 1) {
    if (strcmp(argv[1], "--version") == 0) {
      printf("%s - %s\n", mad_version, mad_copyright);
      printf(_("Build options: %s\n"), mad_build);
      return 0;
    }
    if (strcmp(argv[1], "--help") == 0) {
      usage(argv[0]);
      return 0;
    }
  }

  while ((opt = getopt(argc, argv, "o:")) != -1) {
    switch (opt) {
    case 'o':
      /* audio_output() may rewrite opath to strip a "type:" prefix */
      opath = optarg;
      output = audio_output(&opath);
      if (output == 0) {
	error(0, _("%s: unknown output format type"), opath);
	return 2;
      }
      break;

    default:
      usage(argv[0]);
      return 1;
    }
  }

  if (optind == argc) {
    usage(argv[0]);
    return 1;
  }

  /* no -o given: fall back to the default output module */
  if (output == 0)
    output = audio_output(0);

  if (audio_init(output, opath) == -1)
    return 2;

  ninputs = argc - optind;

  mix = malloc(ninputs * sizeof(*mix));
  if (mix == 0) {
    error(0, _("not enough memory to allocate mixing buffers"));
    return 3;
  }

  printf(_("mixing %d streams\n"), ninputs);

  for (i = 0; i < ninputs; ++i) {
    if (strcmp(argv[optind + i], "-") == 0) {
      fname = "stdin";
      file  = stdin;
    }
    else {
      fname = argv[optind + i];
      file  = fopen(fname, "rb");
      /* NOTE(review): early return leaks previously opened files;
	 the OS reclaims them on exit, so left as-is */
      if (file == 0) {
	error(0, ":", fname);
	return 4;
      }
    }

    mix[i].fname  = fname;
    mix[i].file   = file;
    mix[i].active = 1;
    /* full gain per input; summed output may clip (the commented-out
       "/ ninputs" would normalize) */
    mix[i].scale  = mad_f_tofixed(1.0);  /* / ninputs); */
  }

  if (do_mix(mix, ninputs, output) == -1)
    result = 5;

  for (i = 0; i < ninputs; ++i) {
    file = mix[i].file;
    if (file != stdin) {
      if (fclose(file) == EOF) {
	error(0, ":", mix[i].fname);
	result = 6;
      }
    }
  }

  free(mix);

  if (audio_finish(output) == -1)
    result = 7;

  return result;
}
| gpl-2.0 |
spinlockirqsave/linux_kernels | drivers/s390/block/dasd_profile.c | 7 | 6051 | #include <linux/mm.h>
#include <linux/dasd.h>
#include "dasd_types.h"
#define PRINTK_HEADER "dasd_profile:"
/* Global I/O profiling counters; the histograms are (re)zeroed by
 * dasd_profile_add() when dasd_io_reqs is 0.  Not locked: updates may
 * race on SMP — acceptable for statistics. */
static long dasd_io_reqs=0; /* number of requests processed at all */
static long dasd_io_secs[16]; /* histogram of request's sizes */
static long dasd_io_times[16]; /* histogram of requests' total times */
static long dasd_io_timps[16]; /* histogram of requests' times per sector */
static long dasd_io_time1[16]; /* histogram of time from build to start */
static long dasd_io_time2[16]; /* histogram of time from start to irq */
static long dasd_io_time2ps[16]; /* histogram of time from start to irq, per sector */
static long dasd_io_time3[16]; /* histogram of time from irq to end */
/*
 * Add @value to a 16-bucket logarithmic histogram: bucket 0 holds
 * values < 8, each subsequent bucket doubles the range, bucket 15
 * collects everything else.
 */
static void
dasd_profile_histo ( long *histo, long value )
{
	int ind;
	long help;

	for ( ind = 0, help = value >> 3;
	      ind < 15 && help;
	      help = help >> 1, ind ++ );
	histo[ind] ++;
}

/*
 * Record profiling data for a completed request: request size and the
 * build->start, start->irq and irq->end phase durations (TOD-clock
 * deltas shifted to microsecond granularity).  The histograms are
 * zeroed lazily on the first call.
 */
void
dasd_profile_add ( cqr_t *cqr )
{
	int ind;
	long strtime, irqtime, endtime, tottime;
	long tottimeps, sectors;

	if ( ! cqr -> req )
		return;

	sectors = cqr -> req -> nr_sectors;
	if ( sectors <= 0 )	/* guard the per-sector divisions below */
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (! dasd_io_reqs ++){
		for ( ind = 0; ind < 16; ind ++) {
			dasd_io_secs[ind] = 0;
			dasd_io_times[ind] = 0;
			dasd_io_timps[ind] = 0;
			dasd_io_time1[ind] = 0;
			dasd_io_time2[ind] = 0;
			dasd_io_time2ps[ind] = 0;
			dasd_io_time3[ind] = 0;
		}
	};

	dasd_profile_histo (dasd_io_secs, sectors);
	dasd_profile_histo (dasd_io_times, tottime);
	dasd_profile_histo (dasd_io_timps, tottimeps);
	dasd_profile_histo (dasd_io_time1, strtime);
	dasd_profile_histo (dasd_io_time2, irqtime);
	dasd_profile_histo (dasd_io_time2ps, irqtime / sectors);
	dasd_profile_histo (dasd_io_time3, endtime);
}
/* Append one titled, 16-bucket histogram row (each count scaled down
 * by 2^shift so it fits the 4-digit columns) and return the new length. */
static int
dasd_statistics_histo ( char *buf, int len, char *title,
			long *histo, int shift )
{
	int i;

	len += sprintf ( buf+len, "%s\n", title );
	for ( i = 0; i < 16; i ++) {
		len += sprintf ( buf+len, "%4ld ", histo[i] >> shift );
	}
	len += sprintf ( buf+len, "\n");
	return len;
}

/*
 * /proc read handler: dump all profiling histograms.  Output is
 * identical to the previous open-coded version; the seven duplicated
 * print loops are factored into dasd_statistics_histo().
 */
int
dasd_proc_read_statistics ( char * buf, char **start,
			    off_t off, int len, int d)
{
	int shift, help;

	/* scale every bucket down so the largest count fits 4 digits */
	for ( shift = 0, help = dasd_io_reqs;
	      help > 8192;
	      help = help >> 1,shift ++);
	len = sprintf ( buf, "%ld dasd I/O requests\n", dasd_io_reqs);
	len += sprintf ( buf+len, "__<4 ___8 __16 __32 __64 _128 _256 _512 __1k __2k __4k __8k _16k _32k _64k >64k\n");
	len = dasd_statistics_histo ( buf, len,
		"Histogram of sizes (512B secs)", dasd_io_secs, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O times", dasd_io_times, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O times per sector", dasd_io_timps, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O time till ssch", dasd_io_time1, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O time between ssch and irq", dasd_io_time2, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O time between ssch and irq per sector", dasd_io_time2ps, shift );
	len = dasd_statistics_histo ( buf, len,
		"Histogram of I/O time between irq and end", dasd_io_time3, shift );
	return len;
}
/*
 * One trace record: an s390 STCK timestamp (the union lets the low
 * bits be reused — cpu number overlays the least significant nibble),
 * the caller's return address and a user-supplied tag.
 */
typedef
struct {
	union {
		unsigned long long clock;
		struct {
			unsigned int ts1;
			unsigned int ts2 : 20;
			unsigned int unused : 8;
			unsigned int cpu : 4;
		} __attribute__ ((packed)) s;
	} __attribute__ ((packed)) u;
	unsigned long caller_address;
	unsigned long tag;
} __attribute__ ((packed)) dasd_debug_entry;
/* One page of trace records, allocated lazily on first dasd_debug() call. */
static dasd_debug_entry *dasd_debug_area = NULL;
static dasd_debug_entry *dasd_debug_actual;	/* next slot to fill */
static spinlock_t debug_lock = SPIN_LOCK_UNLOCKED;
/*
 * Append one entry (STCK timestamp + CPU, caller address, @tag) to the
 * page-sized circular debug trace.  Safe from interrupt context
 * (GFP_ATOMIC allocation, irq-saving spinlock around slot claiming).
 *
 * NOTE(review): the lazy first-call allocation is checked outside the
 * lock, so two concurrent first calls could race — verify callers.
 * NOTE(review): the modulo wrap assumes sizeof(dasd_debug_entry)
 * divides PAGE_SIZE; if it doesn't, the last claimed entry can extend
 * past the page — confirm for the target ABI.
 */
void
dasd_debug ( unsigned long tag )
{
	/* NOTE(review): spin_lock_irqsave conventionally takes
	   unsigned long flags; plain long works here but is unusual */
	long flags;
	dasd_debug_entry *d;
	/* initialize in first call ... */
	if ( ! dasd_debug_area ) {
		dasd_debug_actual = dasd_debug_area =
			(dasd_debug_entry *) get_free_page (GFP_ATOMIC);
		if ( ! dasd_debug_area ) {
			PRINT_WARN("No debug area allocated\n");
			return;
		}
		memset (dasd_debug_area,0,PAGE_SIZE);
	}
	/* renormalize to page: fold the cursor back into [area, area+PAGE_SIZE) */
	spin_lock_irqsave(&debug_lock,flags);
	dasd_debug_actual = (dasd_debug_entry *)
		( (unsigned long) dasd_debug_area +
		  ( ( (unsigned long)dasd_debug_actual -
		      (unsigned long)dasd_debug_area ) % PAGE_SIZE ) );
	/* claim a slot under the lock; fill it outside */
	d = dasd_debug_actual ++;
	spin_unlock_irqrestore(&debug_lock,flags);
	/* write CPUID to lowest 12 bits of clock... */
	__asm__ __volatile__ ( "STCK %0"
			       :"=m" (d->u.clock));
	d->tag = tag;
	d -> caller_address = (unsigned long) __builtin_return_address(0);
	d->u.s.cpu = smp_processor_id();
}
/*
 * /proc read handler: format debug-trace entries, one line each, until
 * roughly a page of text (4068 bytes) is produced.  Tags whose first
 * byte is 'D' are printed as 4-char ASCII, others as hex.
 *
 * NOTE(review): dereferences dasd_debug_area without a NULL check — if
 * this is read before the first dasd_debug() call it will oops; the
 * loop also isn't bounded by the area size, only by output length.
 */
int
dasd_proc_read_debug ( char * buf, char **start,
		       off_t off, int len, int dd)
{
	dasd_debug_entry *d;
	char tag[9] = { 0, };
	long flags;	/* NOTE(review): conventionally unsigned long */
	spin_lock_irqsave(&debug_lock,flags);
	len = 0;
	for( d = dasd_debug_area;
	     len < 4068 ;
	     d ++ ) {
		if ( *(char*)(&d->tag) == 'D' ) {
			/* ASCII tag: show first 4 bytes verbatim */
			memcpy(tag,&(d->tag),4);
			tag[4]=0;
		}
		else {
			sprintf(tag,"%08lx", d->tag);
			tag[8]=0;
		}
		len += sprintf ( buf+len,
				 "%x %08x%05x %08lx (%8s)\n",
				 d->u.s.cpu, d->u.s.ts1, d->u.s.ts2,
				 d->caller_address,tag);
	}
	spin_unlock_irqrestore(&debug_lock,flags);
	return len;
}
| gpl-2.0 |
william-wfei/linux | drivers/scsi/scsi_lib.c | 7 | 87914 | /*
* Copyright (C) 1999 Eric Youngdale
* Copyright (C) 2014 Christoph Hellwig
*
* SCSI queueing library.
* Initial versions: Eric Youngdale (eric@andante.org).
* Based upon conversations with large numbers
* of people at Linux Expo.
*/
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>
#include <trace/events/scsi.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
/* Slab caches: bidi scatter-gather descriptor blocks, and sense buffers
 * (a separate DMA-capable cache serves unchecked_isa_dma hosts). */
static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
/* serializes lazy creation of the sense caches in scsi_init_sense_cache() */
static DEFINE_MUTEX(scsi_sense_cache_mutex);
/* Return the sense-buffer cache matching @shost's DMA constraints. */
static inline struct kmem_cache *
scsi_select_sense_cache(struct Scsi_Host *shost)
{
	if (shost->unchecked_isa_dma)
		return scsi_sense_isadma_cache;
	return scsi_sense_cache;
}
/* Release @sense_buffer back to the cache it was allocated from. */
static void scsi_free_sense_buffer(struct Scsi_Host *shost,
				   unsigned char *sense_buffer)
{
	struct kmem_cache *cache = scsi_select_sense_cache(shost);

	kmem_cache_free(cache, sense_buffer);
}
/* Allocate a sense buffer on @numa_node from @shost's matching cache. */
static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
		gfp_t gfp_mask, int numa_node)
{
	struct kmem_cache *cache = scsi_select_sense_cache(shost);

	return kmem_cache_alloc_node(cache, gfp_mask, numa_node);
}
/*
 * Lazily create the sense-buffer slab cache appropriate for @shost.
 * Returns 0 on success (including when the cache already exists) or
 * -ENOMEM.
 *
 * Fix: the existence check used to happen before taking
 * scsi_sense_cache_mutex, so two hosts racing here could each create a
 * cache and one would be leaked/overwritten.  Re-check under the mutex.
 */
int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	cache = scsi_select_sense_cache(shost);
	if (cache)
		goto exit;

	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
			SCSI_SENSE_BUFFERSIZE, 0,
			SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create("scsi_sense_cache",
			SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
 exit:
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
* not change behaviour from the previous unplug mechanism, experimentation
* may prove this needs changing.
*/
#define SCSI_QUEUE_DELAY 3
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct scsi_target *starget = scsi_target(device);
/*
* Set the appropriate busy bit for the device/host.
*
* If the host/device isn't busy, assume that something actually
* completed, and that we should be able to queue a command now.
*
* Note that the prior mid-layer assumption that any host could
* always queue at least one command is now broken. The mid-layer
* will implement a user specifiable stall (see
* scsi_host.max_host_blocked and scsi_device.max_device_blocked)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY:
atomic_set(&host->host_blocked, host->max_host_blocked);
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
case SCSI_MLQUEUE_EH_RETRY:
atomic_set(&device->device_blocked,
device->max_device_blocked);
break;
case SCSI_MLQUEUE_TARGET_BUSY:
atomic_set(&starget->target_blocked,
starget->max_target_blocked);
break;
}
}
/* Requeue an mq command and drop the device reference held for it. */
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct device *dev = &cmd->device->sdev_gendev;

	blk_mq_requeue_request(cmd->request, true);
	put_device(dev);
}
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	/* arm the appropriate blocked counter for @reason */
	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	/* clear any stale completion status before the retry */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	/* public wrapper: always unbusies the device (unbusy=1) */
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Synchronous: blocks until the command completes.  May sleep, so must
 * not be called from atomic context.
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);
	scsi_req_init(req);

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_RECLAIM))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       Called once per command before queueing: clears the sense
 *              buffer and residual, resets the serial number, and derives
 *              the CDB length from the opcode when not already set.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_set_resid(cmd, 0);
	cmd->serial_number = 0;
	if (!cmd->cmd_len)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
/*
 * Mark @sdev as no longer busy with one command: drop the host, target
 * and device busy counts, and wake the error handler if recovery is
 * pending and the host has quiesced.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	/* can_queue <= 0 means per-target accounting is disabled */
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		/* host_lock protects the EH wakeup check-and-signal */
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}
/* Restart @q using whichever dispatch model (mq or legacy) it uses. */
static void scsi_kick_queue(struct request_queue *q)
{
	if (!q->mq_ops) {
		blk_run_queue(q);
		return;
	}
	blk_mq_start_hw_queues(q);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	/* release the target for whichever LUN claims it next */
	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	/* someone else claimed the target meanwhile: stop iterating */
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		/* drop host_lock while kicking; sdev is pinned by the get */
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);
	
		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/* True if @sdev is at its queue depth or is currently blocked. */
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	return atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
	       atomic_read(&sdev->device_blocked) > 0;
}
/* True if @starget cannot take more commands (only when it limits
 * queueing via can_queue > 0). */
static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue <= 0)
		return false;

	return atomic_read(&starget->target_busy) >= starget->can_queue ||
	       atomic_read(&starget->target_blocked) > 0;
}
/* True if @shost is saturated, blocked, or has blocked itself. */
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;

	return atomic_read(&shost->host_blocked) > 0 ||
	       shost->host_self_blocked;
}
/*
 * Kick the queues of devices on @shost's starved list while the host
 * can still accept commands.  Devices whose target is busy are kept on
 * the list; unprocessed entries are spliced back at the end.
 */
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	/* work on a private copy so scsi_request_fn can re-add entries */
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
* Function: scsi_run_queue()
*
* Purpose: Select a proper request queue to serve next
*
* Arguments: q - last request's queue
*
* Returns: Nothing
*
* Notes: The previous command was completely finished, start
* a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
blk_mq_start_stopped_hw_queues(q, false);
else
blk_run_queue(q);
}
/* Work handler: re-run a device's request queue after a deferred requeue. */
void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev =
		container_of(work, struct scsi_device, requeue_work);

	scsi_run_queue(sdev->request_queue);
}
/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* unprepare and detach the command before putting req back */
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	/* drop the device reference held for the (now stale) cmd */
	put_device(&sdev->sdev_gendev);
}
/* Run the request queue of every device attached to @shost. */
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
/* Give the upper-level driver a chance to clean up a non-passthrough cmd. */
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_driver *drv;

	if (blk_rq_is_passthrough(cmd->request))
		return;

	drv = scsi_cmd_to_driver(cmd);
	if (drv->uninit_command)
		drv->uninit_command(cmd);
}
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *sdb;
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, true);
if (cmd->request->next_rq) {
sdb = cmd->request->next_rq->special;
if (sdb)
sg_free_table_chained(&sdb->table, true);
}
if (scsi_prot_sg_count(cmd))
sg_free_table_chained(&cmd->prot_sdb->table, true);
}
/*
 * Tear down an mq command before __blk_mq_end_request() frees it:
 * release its scatterlists, let the ULD clean up, and unlink it from
 * the per-device command list when the host uses one.
 */
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocate for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, false);

	/* reset the sdb so it is clean for potential reuse */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, false);
}
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
sg_free_table_chained(&bidi_sdb->table, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
/*
 * Complete @bytes (and, for bidi, @bidi_bytes) of @req.  Returns true
 * when the request still has unfinished work and must not be freed;
 * returns false after the request has been fully ended, the command's
 * resources released and the queue re-run.
 */
static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_run_queue(q);
	}

	/* drop the device reference taken when the command was dispatched */
	put_device(&sdev->sdev_gendev);
	return false;
}
/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd: SCSI command (unused)
 * @result: scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.  For the
 * target/nexus/alloc/medium failure cases the host byte of @cmd is
 * rewritten to DID_OK as a side effect.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		return -ENOLINK;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return -EREMOTEIO;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		return -EBADE;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return -ENOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return -ENODATA;
	default:
		return -EIO;
	}
}
/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
*
* Arguments: cmd - command that is finished.
*
* Lock status: Assumed that no lock is held upon entry.
*
* Returns: Nothing
*
* Notes: We will finish off the specified number of sectors. If we
* are done, the command block will be released and the queue
* function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
* will be unprepared and put back on the queue. Then
* a new command will be created for it. This should
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
* b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
* c) We can call scsi_end_request() with -EIO to fail
* the remainder of the request.
*/
/*
 * scsi_io_completion - midlayer completion processing for block I/O.
 * @cmd:	the finished command
 * @good_bytes:	number of bytes transferred successfully
 *
 * Completes the good part of the request and decides what to do with any
 * remainder: fail it, re-prepare it, or retry (immediately or delayed).
 * See the block comment above this function for the full contract.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	/* total time budget for this command across all retries */
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_rq_is_passthrough(req)) {
		if (result) {
			if (sense_valid) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				scsi_req(req)->sense_len =
					min(8 + cmd->sense_buffer[7],
					    SCSI_SENSE_BUFFERSIZE);
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;
		scsi_req(req)->resid_len = scsi_get_resid(cmd);
		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !blk_rq_is_passthrough yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->rq_flags & RQF_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough error may be set */
		error = 0;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just requeue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		/* Decide the action from the sense key of a current error */
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	/* Never retry past the command's total time budget. */
	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->rq_flags &= ~RQF_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
/*
 * Build the scatter/gather table for @req into @sdb.
 *
 * Returns BLKPREP_OK on success, or BLKPREP_DEFER when the sg table
 * could not be allocated so that the request is retried later.
 */
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int mapped;

	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
		return BLKPREP_DEFER;

	/*
	 * Walk the request and fill in the address/length of each
	 * segment; blk_rq_map_sg() may merge, so trim nents afterwards.
	 */
	mapped = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(mapped > sdb->table.nents);
	sdb->table.nents = mapped;
	sdb->length = blk_rq_payload_bytes(req);
	return BLKPREP_OK;
}
/*
* Function: scsi_init_io()
*
* Purpose: SCSI I/O initialize function.
*
* Arguments: cmd - Command descriptor we wish to initialize
*
* Returns: 0 on success
* BLKPREP_DEFER if the failure is retryable
* BLKPREP_KILL if the failure is fatal
*/
/*
 * scsi_init_io - set up data and integrity scatterlists for @cmd.
 * @cmd: command whose request carries the data to map
 *
 * Maps the main data buffer, the bidi counterpart (legacy path only
 * allocates a separate sdb; blk-mq embeds it in the request), and the
 * DIX integrity scatterlist if the request carries protection data.
 *
 * Returns BLKPREP_OK (0), BLKPREP_DEFER for retryable failures,
 * BLKPREP_KILL for fatal ones, or -EINVAL for a request without any
 * physical segments.  On error all buffers acquired so far are released;
 * on the legacy path the command itself is also torn down.
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		return -EINVAL;

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			/* legacy path: the bidi sdb lives in a slab object */
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		/* blk-mq: only the sg tables were taken from the pool */
		scsi_mq_free_sgtables(cmd);
	} else {
		/* legacy: unwind the whole command and its device ref */
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
/*
 * Reset @cmd for (re)use on @dev: zero everything except the embedded
 * scsi_request, restore the externally owned buffers, and link the
 * command onto the device's command list.
 */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *sense = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	unsigned long irqflags;

	/* Wipe the command and driver payload, keeping cmd->req intact. */
	memset((char *)cmd + sizeof(cmd->req), 0,
	       sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = sense;
	cmd->prot_sdb = prot;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	spin_lock_irqsave(&dev->list_lock, irqflags);
	list_add_tail(&cmd->list, &dev->cmd_list);
	spin_unlock_irqrestore(&dev->list_lock, irqflags);
}
/*
 * Prepare a passthrough (SG_IO style) request: map its data if a bio is
 * attached, then copy the caller-provided CDB and limits into @cmd.
 */
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * A passthrough request either has a bio with the data to
	 * transfer, or carries no data and therefore no bio at all.
	 */
	if (!req->bio) {
		BUG_ON(blk_rq_bytes(req));
		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	} else {
		int ret = scsi_init_io(cmd);

		if (unlikely(ret))
			return ret;
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
/*
* Setup a normal block command. These are simple request from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
/*
 * Prepare a filesystem request: give any attached device handler first
 * shot, then hand the request to the ULD to build the CDB.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int rc = sdev->handler->prep_fn(sdev, req);

		if (rc != BLKPREP_OK)
			return rc;
	}

	/* Point the command at the request's inline CDB storage. */
	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
/*
 * Common request preparation: derive the DMA direction from the request
 * and dispatch to the passthrough or filesystem setup path.
 */
static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else
		cmd->sc_data_direction = rq_data_dir(req) == WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return blk_rq_is_scsi(req) ? scsi_setup_scsi_cmnd(sdev, req) :
				     scsi_setup_fs_cmnd(sdev, req);
}
/*
 * scsi_prep_state_check - decide whether @sdev may accept @req right now.
 *
 * Returns BLKPREP_OK to proceed, BLKPREP_DEFER to try again later
 * (device temporarily blocked or quiesced), or BLKPREP_KILL to fail the
 * request outright (device offline or deleted).
 */
static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is quiesced we defer normal commands;
			 * only RQF_PREEMPT requests (e.g. power management)
			 * are let through.
			 */
			if (!(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
/*
 * scsi_prep_return - translate a prep result into queue bookkeeping.
 *
 * BLKPREP_KILL/BLKPREP_INVALID: fail the request with DID_NO_CONNECT
 * and release the command.  BLKPREP_DEFER: make sure the queue will be
 * rerun.  Anything else marks the request as prepared (RQF_DONTPREP).
 * Always returns @ret unchanged.
 */
static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
	case BLKPREP_INVALID:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->rq_flags |= RQF_DONTPREP;
	}

	return ret;
}
/*
 * scsi_prep_fn - legacy (non-mq) request preparation hook.
 *
 * Checks device state, lazily binds a scsi_cmnd to the request on first
 * prep (taking a reference on the device), and builds the CDB via
 * scsi_setup_cmnd().  Result handling is delegated to scsi_prep_return().
 */
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (unlikely(!get_device(&sdev->sdev_gendev))) {
			ret = BLKPREP_DEFER;
			goto out;
		}

		scsi_init_command(sdev, cmd);
		req->special = cmd;
	}

	cmd->tag = req->tag;
	cmd->request = req;
	cmd->prot_op = SCSI_PROT_NORMAL;

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}
/* Legacy unprep hook: tear down the command bound to @req. */
static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	scsi_uninit_cmd(cmd);
}
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
* return 0.
*
* Called with the queue_lock held.
*/
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	/* Optimistically claim a slot; back out via out_dec on failure. */
	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		/* Only the command that finds the device idle may unblock. */
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	/* Respect the device's queue depth. */
	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}
/*
* scsi_target_queue_ready: checks if there we can send commands to target
* @sdev: scsi device on starget to check.
*/
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		/* Only one LUN of this target may have commands in flight. */
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	/* can_queue <= 0 means the target imposes no limit. */
	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	/* Park the device on the host's starved list to be rerun later. */
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
* returned, else IO can hang.
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	/* Optimistically claim a host slot; back out on failure. */
	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}
/*
* Busy state exporting function for request stacking drivers.
*
* For efficiency, no lock is taken to check the busy state of
* shost/starget/sdev, since the returned value is not guaranteed and
* may be changed after request stacking drivers call the function,
* regardless of taking lock or not.
*
* When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
* needs to return 'not busy'. Otherwise, request stacking drivers
* may hold requests forever.
*/
/* lld_busy hook for request-stacking drivers; 1 = busy, 0 = not busy. */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	/* A dying queue must report "not busy" so its I/O can be killed. */
	if (blk_queue_dying(q))
		return 0;

	/*
	 * Host/starget congestion is deliberately ignored here: the block
	 * layer has no fairness across multiple queues, so that congestion
	 * is handled inside the SCSI layer instead.
	 */
	return scsi_host_in_recovery(sdev->host) || scsi_device_is_busy(sdev);
}
/*
* Kill a request for a dead device
*/
/*
 * scsi_kill_request - fail a request destined for a dead device.
 *
 * Completes the request with DID_NO_CONNECT through the normal softirq
 * completion path, pre-bumping the busy counters that path will drop.
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}
/*
 * scsi_softirq_done - softirq half of command completion.
 *
 * Runs error-handling disposition for the finished command and either
 * completes it, requeues it for retry, or escalates to the EH thread.
 * Commands that have exhausted their total time budget are forced to
 * SUCCESS so they complete with whatever result they have.
 */
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		/* hand off to error handling; complete now if that fails */
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
/**
* scsi_dispatch_command - Dispatch a command to the low-level driver.
* @cmd: command block we are dispatching.
*
* Return: nonzero return request was rejected and device's queue needs to be
* plugged.
*/
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		/* normalize driver-specific rejections to HOST_BUSY */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	/* immediate failure: complete the command without dispatching */
	cmd->scsi_done(cmd);
	return 0;
}
/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: Mid-level interrupt routine for the legacy (non-mq) path.
 * Regains ownership of the command from the LLDD and hands the backing
 * request to the block layer's softirq completion machinery.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(req);
}
/*
* Function: scsi_request_fn()
*
* Purpose: Main strategy routine for SCSI.
*
* Arguments: q - Pointer to actual queue.
*
* Returns: Nothing
*
* Lock status: IO request lock assumed to be held when called.
*/
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		/* dispatch runs without the queue lock; re-taken below */
		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map.  For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag.  Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	/* undo the target_busy increment taken by scsi_target_queue_ready() */
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}
/* Translate a BLKPREP_* status into its blk-mq queueing result. */
static inline int prep_to_mq(int ret)
{
	if (ret == BLKPREP_OK)
		return BLK_MQ_RQ_QUEUE_OK;
	if (ret == BLKPREP_DEFER)
		return BLK_MQ_RQ_QUEUE_BUSY;
	return BLK_MQ_RQ_QUEUE_ERROR;
}
/*
 * scsi_mq_prep_fn - prepare a blk-mq request for dispatch.
 *
 * Resets the command embedded in the request PDU (keeping the embedded
 * scsi_request and preallocated sense buffer), wires up the inline
 * scatterlists that live directly behind the command in the PDU, and
 * finally builds the CDB via scsi_setup_cmnd().
 */
static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	/* zero out the cmd, except for the embedded scsi_request */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req));

	req->special = cmd;
	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;
	cmd->tag = req->tag;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	/* inline data sg table sits right after the command + driver data */
	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		/* protection sdb and its sg table follow the data sg table */
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SG_CHUNK_SIZE) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		/* bidi sdb is embedded in the paired request's PDU */
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
blk_mq_complete_request(cmd->request, cmd->request->errors);
}
/*
 * scsi_queue_rq - blk-mq queue_rq hook: dispatch one request.
 *
 * Acquires device/target/host readiness in order, prepares the command
 * on first dispatch, and hands it to the LLDD.  Each failure exit
 * unwinds exactly the state acquired so far (note the fall-through
 * ordering of the out_* labels).
 */
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret != BLK_MQ_RQ_QUEUE_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		goto out_dec_host_busy;
	}

	return BLK_MQ_RQ_QUEUE_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
	atomic_dec(&sdev->device_busy);
out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	switch (ret) {
	case BLK_MQ_RQ_QUEUE_BUSY:
		/* nothing in flight to rerun the queue -> delayed kick */
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	case BLK_MQ_RQ_QUEUE_ERROR:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		break;
	}
	return ret;
}
/* blk-mq timeout hook; reserved commands just get their timer re-armed. */
static enum blk_eh_timer_return scsi_timeout(struct request *req,
		bool reserved)
{
	return reserved ? BLK_EH_RESET_TIMER : scsi_times_out(req);
}
/* blk-mq init_request: allocate the per-command sense buffer. */
static int scsi_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct Scsi_Host *shost = data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned char *sense;

	sense = scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
	if (!sense)
		return -ENOMEM;

	cmd->sense_buffer = sense;
	cmd->req.sense = sense;
	return 0;
}
/* blk-mq exit_request: free the sense buffer from scsi_init_request(). */
static void scsi_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx)
{
	struct Scsi_Host *host = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	scsi_free_sense_buffer(host, scmd->sense_buffer);
}
/* Prefer the host driver's queue-mapping hook; fall back to the default. */
static int scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (!shost->hostt->map_queues)
		return blk_mq_map_queues(set);
	return shost->hostt->map_queues(shost);
}
/*
 * Work out the highest address this host can DMA to, used as the block
 * layer bounce limit for the host's queues.
 */
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;

	/* ISA DMA engines can only reach the low 16MB. */
	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;

	/*
	 * Platforms with virtual-DMA translation hardware have no
	 * practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		return (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

	/* No DMA mask available: assume 32-bit addressing. */
	return 0xffffffff;
}
/*
 * __scsi_init_queue - apply host hardware limits to a request queue.
 * @shost: host whose limits to apply
 * @q:     queue being configured
 *
 * Shared by the legacy and blk-mq queue setup paths.
 */
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		/* protection sg table must cover at least the data table */
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
/*
 * Legacy-queue init_rq_fn: allocate the sense buffer and, for hosts with
 * DIX support, the protection data buffer for an embedded command.
 */
static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	struct Scsi_Host *shost = q->rq_alloc_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd, 0, sizeof(*cmd));

	cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
		if (!cmd->prot_sdb) {
			scsi_free_sense_buffer(shost, cmd->sense_buffer);
			return -ENOMEM;
		}
	}

	return 0;
}
/* Legacy-queue exit_rq_fn: undo the allocations made by scsi_init_rq(). */
static void scsi_exit_rq(struct request_queue *q, struct request *rq)
{
	struct Scsi_Host *host = q->rq_alloc_data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	if (scmd->prot_sdb)
		kmem_cache_free(scsi_sdb_cache, scmd->prot_sdb);
	scsi_free_sense_buffer(host, scmd->sense_buffer);
}
/*
 * scsi_alloc_queue - allocate and set up a legacy (non-mq) request queue
 * @sdev: device the queue will serve
 *
 * Configures the per-request PDU size, the init/exit request hooks and
 * the classic request_fn dispatch path, then applies the host limits.
 *
 * Returns the new queue or NULL on failure.
 */
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;
	/* room for the scsi_cmnd plus LLD per-command private data */
	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	q->rq_alloc_data = shost;
	q->request_fn = scsi_request_fn;
	q->init_rq_fn = scsi_init_rq;
	q->exit_rq_fn = scsi_exit_rq;

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	__scsi_init_queue(shost, q);
	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_unprep_rq(q, scsi_unprep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}
/* blk-mq operations shared by every SCSI host using the mq path */
static struct blk_mq_ops scsi_mq_ops = {
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
	.init_request	= scsi_init_request,
	.exit_request	= scsi_exit_request,
	.map_queues	= scsi_map_queues,
};
/*
 * scsi_mq_alloc_queue - allocate a blk-mq request queue for a SCSI device
 * @sdev: device the queue will serve
 *
 * Creates the queue from the host's shared tag set and applies the host
 * limits.  Returns the queue or NULL on failure.
 */
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue)) {
		/*
		 * Don't leave an ERR_PTR stored in sdev->request_queue:
		 * teardown paths only test the pointer against NULL and
		 * would otherwise dereference the error value.
		 */
		sdev->request_queue = NULL;
		return NULL;
	}

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	return sdev->request_queue;
}
/*
 * scsi_mq_setup_tags - allocate the blk-mq tag set for a host
 * @shost: host to set up
 *
 * Computes the per-command PDU size (scsi_cmnd + LLD private data +
 * inline scatterlist, doubled for protection information) and allocates
 * the shared tag set.  Returns 0 on success or a negative errno.
 */
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size, tbl_size;

	/* inline SG table is capped at one chunk; larger tables chain */
	tbl_size = shost->sg_tablesize;
	if (tbl_size > SG_CHUNK_SIZE)
		tbl_size = SG_CHUNK_SIZE;
	sgl_size = tbl_size * sizeof(struct scatterlist);
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		/* extra descriptor + scatterlist for integrity data */
		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}
/* scsi_mq_destroy_tags - free the tag set allocated by scsi_mq_setup_tags() */
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}
/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.  On success a
 * reference to the sdev's embedded device is taken; the caller must
 * drop it with put_device().
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	/* verify the queue really belongs to the SCSI midlayer before
	 * trusting queuedata */
	if (q->mq_ops) {
		if (q->mq_ops == &scsi_mq_ops)
			sdev = q->queuedata;
	} else if (q->request_fn == scsi_request_fn)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   shost - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
 *
 * Arguments:   shost - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 *
 *              This is done as an API function so that changes to the
 *              internals of the scsi mid-layer won't require wholesale
 *              changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	/* restart dispatch for anything that queued up while blocked */
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
/*
 * scsi_init_queue - boot-time initialization of midlayer queue resources
 *
 * Creates the slab cache used for scsi_data_buffer allocations
 * (protection SDBs).  Returns 0 on success, -ENOMEM on failure.
 */
int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * scsi_exit_queue - tear down the caches created during midlayer init
 *
 * Destroys the sense-buffer caches (created elsewhere) and the SDB
 * cache created by scsi_init_queue().
 */
void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
	kmem_cache_destroy(scsi_sdb_cache);
}
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		/*
		 * An 8 byte MODE SELECT(10) parameter list header is
		 * prepended below, so the CDB transfer length must hold
		 * len + 8.  Reject lengths that would overflow it.
		 */
		if (len > 65535 - 8)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		/* build the 8 byte mode parameter header (SPC) */
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		/*
		 * MODE SELECT(6) prepends a 4 byte header, so the one
		 * byte transfer length must hold len + 4.  LONGLBA is
		 * only expressible with the 10 byte variant.
		 */
		if (len > 255 - 4 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		/* build the 4 byte mode parameter header (SPC) */
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	/* re-read each iteration: the ILLEGAL REQUEST handler below may
	 * clear the flag and retry with the 6 byte variant */
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code: the
				 * device doesn't support MODE SENSE(10),
				 * so fall back to the 6 byte variant.
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			/* decode the 8 byte MODE SENSE(10) header */
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			/* decode the 4 byte MODE SENSE(6) header */
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		/* eat UNIT ATTENTIONs up to the retry budget */
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
/**
* scsi_test_unit_ready - test if unit is ready
* @sdev: scsi device to change the state of.
* @timeout: command timeout
* @retries: number of retries before failing
* @sshdr: outpout pointer for decoded sense information.
*
* Returns zero if unsuccessful or an error if TUR failed. For
* removable media, UNIT_ATTENTION sets ->changed flag.
**/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
struct scsi_sense_hdr *sshdr)
{
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
int result;
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
timeout, retries, NULL);
if (sdev->removable && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
} while (scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION && --retries);
return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	/* each target state enumerates the source states it may legally
	 * be entered from; anything else is rejected */
	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 * 	sdev_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	/* map the event type to its uevent environment string */
	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		/* re-read the device's INQUIRY data before announcing */
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
/**
 * 	sdev_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	/* first queue any events asserted via the pending_events bitmap */
	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	/* then drain the event list; loop because emitting events may
	 * queue new ones */
	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.  Takes ownership of @evt;
 *	it is freed by scsi_evt_thread() after emission.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);
/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event, or NULL if the
 *	allocation fails.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *new_evt;

	new_evt = kzalloc(sizeof(*new_evt), gfpflags);
	if (!new_evt)
		return NULL;

	new_evt->evt_type = evt_type;
	INIT_LIST_HEAD(&new_evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	default:
		/* no current event type needs extra setup */
		break;
	}

	return new_evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocate an event of the given type and queue it for delivery;
 *	the event is dropped (with a log message) if allocation fails.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *new_evt = sdev_evt_alloc(evt_type, gfpflags);

	if (new_evt == NULL) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, new_evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
 *
 * Only meaningful on the legacy (non-blk-mq) path; the queue lock is
 * taken to read a consistent snapshot of the counter.
 */
static int scsi_request_fn_active(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int request_fn_active;

	WARN_ON_ONCE(sdev->host->use_blk_mq);

	spin_lock_irq(q->queue_lock);
	request_fn_active = q->request_fn_active;
	spin_unlock_irq(q->queue_lock);

	return request_fn_active;
}
/**
 * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
 * @sdev: SCSI device pointer.
 *
 * Wait until the ongoing shost->hostt->queuecommand() calls that are
 * invoked from scsi_request_fn() have finished.  Polls because there is
 * no completion to wait on; legacy path only.
 */
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
	WARN_ON_ONCE(sdev->host->use_blk_mq);

	while (scsi_request_fn_active(sdev))
		msleep(20);
}
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	/* drain commands that were already dispatched before the state
	 * change took effect */
	scsi_run_queue(sdev->request_queue);
	while (atomic_read(&sdev->device_busy)) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
/* per-device callback used by scsi_target_quiesce() */
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

/*
 * scsi_target_quiesce - quiesce every device attached to a target
 * @starget: target to quiesce
 */
void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);
/* per-device callback used by scsi_target_resume() */
static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

/*
 * scsi_target_resume - resume every device attached to a target
 * @starget: target to resume
 */
void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev: device to block
 * @wait: Whether or not to wait until ongoing .queuecommand() /
 *	.queue_rq() calls have finished.
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device. May sleep.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *
 * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
 * scsi_internal_device_block() has blocked a SCSI device and also
 * remove the rport mutex lock and unlock calls from srp_queuecommand().
 */
int
scsi_internal_device_block(struct scsi_device *sdev, bool wait)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		/* a not-yet-running device blocks via SDEV_CREATED_BLOCK */
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		if (wait)
			blk_mq_quiesce_queue(q);
		else
			blk_mq_stop_hw_queues(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		if (wait)
			scsi_wait_for_queuecommand(sdev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set devices to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition)
 *	allowing the midlayer to goose the queue for this device.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		/* a device blocked before running can only go offline or
		 * back to SDEV_CREATED */
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	if (q->mq_ops) {
		blk_mq_start_stopped_hw_queues(q, false);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
/* per-device callback: block one device, waiting for in-flight commands */
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev, true);
}

/* per-child callback: block all devices of a child that is a target */
static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

/*
 * scsi_target_block - block every device below @dev
 * @dev: either a scsi_target or a parent whose children are targets
 */
void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);
/* per-device callback: unblock one device into the requested new state */
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

/* per-child callback: unblock all devices of a child that is a target */
static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

/*
 * scsi_target_unblock - unblock every device below @dev
 * @dev: either a scsi_target or a parent whose children are targets
 * @new_state: device state to transition the devices to
 */
void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page.  The mapping
 * never crosses a page boundary, so *len may be reduced; unmap with
 * scsi_kunmap_atomic_sg().
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	/* find the sg entry containing *offset */
	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
/* sdev_disable_disk_events - increment the disk-event disable depth */
void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

/*
 * sdev_enable_disk_events - decrement the disk-event disable depth
 *
 * Warns (and does nothing) on an unbalanced enable without a matching
 * prior disable.
 */
void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len:  length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	unsigned char *d, *cur_id_str;
	unsigned char __rcu *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelyhood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				/* unknown EUI-64 size: discard */
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				/* unknown NAA size: discard */
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		/* advance past this descriptor (4 byte header + payload) */
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
/*
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return the relative target port in (may be NULL)
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	unsigned char *d;
	unsigned char __rcu *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Walk the designation descriptors through the RCU-protected
	 * snapshot taken above; re-reading sdev->vpd_pg83 directly would
	 * bypass rcu_dereference() and could race with a page update.
	 */
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		/* advance past this descriptor (4 byte header + payload) */
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
| gpl-2.0 |
uclaros/QGIS | src/core/raster/qgsmultibandcolorrenderer.cpp | 7 | 20680 | /***************************************************************************
qgsmultibandcolorrenderer.cpp
-----------------------------
begin : December 2011
copyright : (C) 2011 by Marco Hugentobler
email : marco at sourcepole dot ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgsmultibandcolorrenderer.h"
#include "qgscontrastenhancement.h"
#include "qgsrastertransparency.h"
#include "qgsrasterviewport.h"
#include "qgslayertreemodellegendnode.h"
#include "qgssymbol.h"
#include <QDomDocument>
#include <QDomElement>
#include <QImage>
#include <QSet>
/**
 * Constructs a multiband color renderer over \a input, mapping
 * \a redBand, \a greenBand and \a blueBand to the RGB output channels.
 * Takes ownership of the optional per-channel contrast enhancements
 * (deleted in the destructor).
 */
QgsMultiBandColorRenderer::QgsMultiBandColorRenderer( QgsRasterInterface *input, int redBand, int greenBand, int blueBand,
    QgsContrastEnhancement *redEnhancement,
    QgsContrastEnhancement *greenEnhancement,
    QgsContrastEnhancement *blueEnhancement )
  : QgsRasterRenderer( input, QStringLiteral( "multibandcolor" ) )
  , mRedBand( redBand )
  , mGreenBand( greenBand )
  , mBlueBand( blueBand )
  , mRedContrastEnhancement( redEnhancement )
  , mGreenContrastEnhancement( greenEnhancement )
  , mBlueContrastEnhancement( blueEnhancement )
{
}
// Releases the owned per-channel contrast enhancement objects.
QgsMultiBandColorRenderer::~QgsMultiBandColorRenderer()
{
  delete mRedContrastEnhancement;
  delete mGreenContrastEnhancement;
  delete mBlueContrastEnhancement;
}
/**
 * Returns a deep copy of this renderer.  The clone has no input set and
 * owns fresh copies of any contrast enhancements; common renderer
 * properties are copied via copyCommonProperties().
 */
QgsMultiBandColorRenderer *QgsMultiBandColorRenderer::clone() const
{
  QgsMultiBandColorRenderer *renderer = new QgsMultiBandColorRenderer( nullptr, mRedBand, mGreenBand, mBlueBand );
  renderer->copyCommonProperties( this );

  if ( mRedContrastEnhancement )
  {
    renderer->setRedContrastEnhancement( new QgsContrastEnhancement( *mRedContrastEnhancement ) );
  }
  if ( mGreenContrastEnhancement )
  {
    renderer->setGreenContrastEnhancement( new QgsContrastEnhancement( *mGreenContrastEnhancement ) );
  }
  if ( mBlueContrastEnhancement )
  {
    renderer->setBlueContrastEnhancement( new QgsContrastEnhancement( *mBlueContrastEnhancement ) );
  }
  return renderer;
}
// This renderer applies layer opacity itself rather than relying on the
// generic compositing step.
Qgis::RasterRendererFlags QgsMultiBandColorRenderer::flags() const
{
  return Qgis::RasterRendererFlag::InternalLayerOpacityHandling;
}
// Replace the red-channel contrast enhancement; takes ownership of ce
// (may be null to disable enhancement) and frees the previous one.
void QgsMultiBandColorRenderer::setRedContrastEnhancement( QgsContrastEnhancement *ce )
{
  delete mRedContrastEnhancement;
  mRedContrastEnhancement = ce;
}
// Replace the green-channel contrast enhancement; takes ownership of ce
// (may be null to disable enhancement) and frees the previous one.
void QgsMultiBandColorRenderer::setGreenContrastEnhancement( QgsContrastEnhancement *ce )
{
  delete mGreenContrastEnhancement;
  mGreenContrastEnhancement = ce;
}
// Replace the blue-channel contrast enhancement; takes ownership of ce
// (may be null to disable enhancement) and frees the previous one.
void QgsMultiBandColorRenderer::setBlueContrastEnhancement( QgsContrastEnhancement *ce )
{
  delete mBlueContrastEnhancement;
  mBlueContrastEnhancement = ce;
}
// Factory: rebuild a renderer from its serialized DOM element (the
// counterpart of writeXml()). Returns nullptr for a null element;
// otherwise the caller owns the returned renderer.
// NOTE(review): `input` is dereferenced (input->dataType) whenever a
// contrast-enhancement child element is present — presumably callers
// guarantee a non-null input in that case; confirm against call sites.
QgsRasterRenderer *QgsMultiBandColorRenderer::create( const QDomElement &elem, QgsRasterInterface *input )
{
  if ( elem.isNull() )
  {
    return nullptr;
  }

  // red band, green band, blue band (-1 = unassigned)
  const int redBand = elem.attribute( QStringLiteral( "redBand" ), QStringLiteral( "-1" ) ).toInt();
  const int greenBand = elem.attribute( QStringLiteral( "greenBand" ), QStringLiteral( "-1" ) ).toInt();
  const int blueBand = elem.attribute( QStringLiteral( "blueBand" ), QStringLiteral( "-1" ) ).toInt();

  // Per-channel contrast enhancements are optional child elements; each
  // is created with the data type of the band it applies to.
  QgsContrastEnhancement *redContrastEnhancement = nullptr;
  const QDomElement redContrastElem = elem.firstChildElement( QStringLiteral( "redContrastEnhancement" ) );
  if ( !redContrastElem.isNull() )
  {
    redContrastEnhancement = new QgsContrastEnhancement( ( Qgis::DataType )(
          input->dataType( redBand ) ) );
    redContrastEnhancement->readXml( redContrastElem );
  }

  QgsContrastEnhancement *greenContrastEnhancement = nullptr;
  const QDomElement greenContrastElem = elem.firstChildElement( QStringLiteral( "greenContrastEnhancement" ) );
  if ( !greenContrastElem.isNull() )
  {
    greenContrastEnhancement = new QgsContrastEnhancement( ( Qgis::DataType )(
          input->dataType( greenBand ) ) );
    greenContrastEnhancement->readXml( greenContrastElem );
  }

  QgsContrastEnhancement *blueContrastEnhancement = nullptr;
  const QDomElement blueContrastElem = elem.firstChildElement( QStringLiteral( "blueContrastEnhancement" ) );
  if ( !blueContrastElem.isNull() )
  {
    blueContrastEnhancement = new QgsContrastEnhancement( ( Qgis::DataType )(
          input->dataType( blueBand ) ) );
    blueContrastEnhancement->readXml( blueContrastElem );
  }

  // The renderer takes ownership of the enhancements; readXml restores
  // the common renderer properties (opacity, transparency, ...).
  QgsRasterRenderer *r = new QgsMultiBandColorRenderer( input, redBand, greenBand, blueBand, redContrastEnhancement,
      greenContrastEnhancement, blueContrastEnhancement );
  r->readXml( elem );
  return r;
}
// Render one tile: fetch the red/green/blue (and optional alpha) input
// bands for `extent` at width x height, apply contrast enhancement and
// transparency, and return an ARGB32_Premultiplied block owned by the
// caller.
//
// Fixes relative to the previous revision:
//  * the displayable-range check tested redVal against the green and
//    blue enhancements instead of greenVal/blueVal;
//  * the cleanup loops indexed bandBlocks by 0..size-1 although its
//    keys are 1-based band numbers, leaking the fetched blocks;
//  * the error-path cleanup never freed the block at constBegin().
QgsRasterBlock *QgsMultiBandColorRenderer::block( int bandNo, QgsRectangle const &extent, int width, int height, QgsRasterBlockFeedback *feedback )
{
  Q_UNUSED( bandNo )
  std::unique_ptr< QgsRasterBlock > outputBlock( new QgsRasterBlock() );
  if ( !mInput )
  {
    return outputBlock.release();
  }

  // In some (common) cases, we can simplify the drawing loop considerably and save render time
  bool fastDraw = ( !usesTransparency()
                    && mRedBand > 0 && mGreenBand > 0 && mBlueBand > 0
                    && mAlphaBand < 1 );

  QList<int> bands;
  if ( mRedBand > 0 )
  {
    bands << mRedBand;
  }
  if ( mGreenBand > 0 )
  {
    bands << mGreenBand;
  }
  if ( mBlueBand > 0 )
  {
    bands << mBlueBand;
  }
  if ( bands.empty() )
  {
    // no need to draw anything if no band is set
    // TODO:: we should probably return default color block
    return outputBlock.release();
  }
  if ( mAlphaBand > 0 )
  {
    bands << mAlphaBand;
  }

  // Map band number -> fetched block (initially null).
  QMap<int, QgsRasterBlock *> bandBlocks;
  QgsRasterBlock *defaultPointer = nullptr;
  QList<int>::const_iterator bandIt = bands.constBegin();
  for ( ; bandIt != bands.constEnd(); ++bandIt )
  {
    bandBlocks.insert( *bandIt, defaultPointer );
  }

  QgsRasterBlock *redBlock = nullptr;
  QgsRasterBlock *greenBlock = nullptr;
  QgsRasterBlock *blueBlock = nullptr;
  QgsRasterBlock *alphaBlock = nullptr;

  bandIt = bands.constBegin();
  for ( ; bandIt != bands.constEnd(); ++bandIt )
  {
    bandBlocks[*bandIt] = mInput->block( *bandIt, extent, width, height, feedback );
    if ( !bandBlocks[*bandIt] )
    {
      // Fetch failed: free every block fetched so far (all entries
      // strictly before bandIt, including the first one).
      QgsDebugMsg( QStringLiteral( "No input band" ) );
      for ( QList<int>::const_iterator cleanupIt = bands.constBegin(); cleanupIt != bandIt; ++cleanupIt )
      {
        delete bandBlocks[*cleanupIt];
      }
      return outputBlock.release();
    }
  }

  if ( mRedBand > 0 )
  {
    redBlock = bandBlocks[mRedBand];
  }
  if ( mGreenBand > 0 )
  {
    greenBlock = bandBlocks[mGreenBand];
  }
  if ( mBlueBand > 0 )
  {
    blueBlock = bandBlocks[mBlueBand];
  }
  if ( mAlphaBand > 0 )
  {
    alphaBlock = bandBlocks[mAlphaBand];
  }

  if ( !outputBlock->reset( Qgis::DataType::ARGB32_Premultiplied, width, height ) )
  {
    // Free fetched input blocks by value — the map keys are band
    // numbers, not 0-based indices.
    QMap<int, QgsRasterBlock *>::const_iterator bandDelIt = bandBlocks.constBegin();
    for ( ; bandDelIt != bandBlocks.constEnd(); ++bandDelIt )
    {
      delete bandDelIt.value();
    }
    return outputBlock.release();
  }

  QRgb *outputBlockColorData = outputBlock->colorData();

  // faster data access to data for the common case that input data are coming from RGB image with 8-bit bands
  const bool hasByteRgb = ( redBlock && greenBlock && blueBlock && redBlock->dataType() == Qgis::DataType::Byte && greenBlock->dataType() == Qgis::DataType::Byte && blueBlock->dataType() == Qgis::DataType::Byte );
  const quint8 *redData = nullptr, *greenData = nullptr, *blueData = nullptr;
  if ( hasByteRgb )
  {
    redData = redBlock->byteData();
    greenData = greenBlock->byteData();
    blueData = blueBlock->byteData();
  }

  const QRgb myDefaultColor = renderColorForNodataPixel();

  if ( fastDraw )
  {
    // By default RGB raster layers have contrast enhancement assigned and normally that requires us to take the slow
    // route that applies the enhancement. However if the algorithm type is "no enhancement" and all input bands are byte-sized,
    // no transform would be applied to the input values and we can take the fast route.
    bool hasEnhancement;
    if ( hasByteRgb )
    {
      hasEnhancement =
        ( mRedContrastEnhancement && mRedContrastEnhancement->contrastEnhancementAlgorithm() != QgsContrastEnhancement::NoEnhancement ) ||
        ( mGreenContrastEnhancement && mGreenContrastEnhancement->contrastEnhancementAlgorithm() != QgsContrastEnhancement::NoEnhancement ) ||
        ( mBlueContrastEnhancement && mBlueContrastEnhancement->contrastEnhancementAlgorithm() != QgsContrastEnhancement::NoEnhancement );
    }
    else
    {
      hasEnhancement = mRedContrastEnhancement || mGreenContrastEnhancement || mBlueContrastEnhancement;
    }
    if ( hasEnhancement )
      fastDraw = false;
  }

  const qgssize count = ( qgssize )width * height;
  for ( qgssize i = 0; i < count; i++ )
  {
    if ( fastDraw ) //fast rendering if no transparency, stretching, color inversion, etc.
    {
      if ( hasByteRgb )
      {
        if ( redBlock->isNoData( i ) ||
             greenBlock->isNoData( i ) ||
             blueBlock->isNoData( i ) )
        {
          outputBlock->setColor( i, myDefaultColor );
        }
        else
        {
          outputBlockColorData[i] = qRgb( redData[i], greenData[i], blueData[i] );
        }
      }
      else
      {
        bool redIsNoData = false;
        bool greenIsNoData = false;
        bool blueIsNoData = false;
        int redVal = 0;
        int greenVal = 0;
        int blueVal = 0;

        redVal = redBlock->valueAndNoData( i, redIsNoData );
        // as soon as any channel has a no data value, don't do any more work -- the result will
        // always be the nodata color!
        if ( !redIsNoData )
          greenVal = greenBlock->valueAndNoData( i, greenIsNoData );
        if ( !redIsNoData && !greenIsNoData )
          blueVal = blueBlock->valueAndNoData( i, blueIsNoData );

        if ( redIsNoData ||
             greenIsNoData ||
             blueIsNoData )
        {
          outputBlock->setColor( i, myDefaultColor );
        }
        else
        {
          outputBlockColorData[i] = qRgb( redVal, greenVal, blueVal );
        }
      }
      continue;
    }

    bool isNoData = false;
    double redVal = 0;
    double greenVal = 0;
    double blueVal = 0;
    if ( mRedBand > 0 )
    {
      redVal = redBlock->valueAndNoData( i, isNoData );
    }
    if ( !isNoData && mGreenBand > 0 )
    {
      greenVal = greenBlock->valueAndNoData( i, isNoData );
    }
    if ( !isNoData && mBlueBand > 0 )
    {
      blueVal = blueBlock->valueAndNoData( i, isNoData );
    }
    if ( isNoData )
    {
      outputBlock->setColor( i, myDefaultColor );
      continue;
    }

    // apply default color if red, green or blue not in displayable range
    // (each channel's value is tested against its own enhancement)
    if ( ( mRedContrastEnhancement && !mRedContrastEnhancement->isValueInDisplayableRange( redVal ) )
         || ( mGreenContrastEnhancement && !mGreenContrastEnhancement->isValueInDisplayableRange( greenVal ) )
         || ( mBlueContrastEnhancement && !mBlueContrastEnhancement->isValueInDisplayableRange( blueVal ) ) )
    {
      outputBlock->setColor( i, myDefaultColor );
      continue;
    }

    //stretch color values
    if ( mRedContrastEnhancement )
    {
      redVal = mRedContrastEnhancement->enhanceContrast( redVal );
    }
    if ( mGreenContrastEnhancement )
    {
      greenVal = mGreenContrastEnhancement->enhanceContrast( greenVal );
    }
    if ( mBlueContrastEnhancement )
    {
      blueVal = mBlueContrastEnhancement->enhanceContrast( blueVal );
    }

    //opacity
    double currentOpacity = mOpacity;
    if ( mRasterTransparency )
    {
      currentOpacity = mRasterTransparency->alphaValue( redVal, greenVal, blueVal, mOpacity * 255 ) / 255.0;
    }
    if ( mAlphaBand > 0 )
    {
      currentOpacity *= alphaBlock->value( i ) / 255.0;
    }

    if ( qgsDoubleNear( currentOpacity, 1.0 ) )
    {
      outputBlock->setColor( i, qRgba( redVal, greenVal, blueVal, 255 ) );
    }
    else
    {
      outputBlock->setColor( i, qRgba( currentOpacity * redVal, currentOpacity * greenVal, currentOpacity * blueVal, currentOpacity * 255 ) );
    }
  }

  //delete input blocks
  QMap<int, QgsRasterBlock *>::const_iterator bandDelIt = bandBlocks.constBegin();
  for ( ; bandDelIt != bandBlocks.constEnd(); ++bandDelIt )
  {
    delete bandDelIt.value();
  }

  return outputBlock.release();
}
// Serialize renderer state (band mapping plus the optional per-channel
// contrast enhancements) as a <rasterrenderer> child of parentElem.
// Counterpart of create().
void QgsMultiBandColorRenderer::writeXml( QDomDocument &doc, QDomElement &parentElem ) const
{
  if ( parentElem.isNull() )
  {
    return;
  }

  QDomElement rendererElem = doc.createElement( QStringLiteral( "rasterrenderer" ) );
  _writeXml( doc, rendererElem );

  rendererElem.setAttribute( QStringLiteral( "redBand" ), mRedBand );
  rendererElem.setAttribute( QStringLiteral( "greenBand" ), mGreenBand );
  rendererElem.setAttribute( QStringLiteral( "blueBand" ), mBlueBand );

  // Write one child element per configured contrast enhancement.
  const auto appendEnhancement = [&doc, &rendererElem]( const QgsContrastEnhancement *ce, const QString & tag )
  {
    if ( !ce )
      return;
    QDomElement ceElem = doc.createElement( tag );
    ce->writeXml( doc, ceElem );
    rendererElem.appendChild( ceElem );
  };
  appendEnhancement( mRedContrastEnhancement, QStringLiteral( "redContrastEnhancement" ) );
  appendEnhancement( mGreenContrastEnhancement, QStringLiteral( "greenContrastEnhancement" ) );
  appendEnhancement( mBlueContrastEnhancement, QStringLiteral( "blueContrastEnhancement" ) );

  parentElem.appendChild( rendererElem );
}
// Return the configured band numbers in red, green, blue order;
// channels set to -1 (unassigned) are skipped.
QList<int> QgsMultiBandColorRenderer::usesBands() const
{
  QList<int> bandList;
  const int channels[] = { mRedBand, mGreenBand, mBlueBand };
  for ( const int band : channels )
  {
    if ( band != -1 )
    {
      bandList << band;
    }
  }
  return bandList;
}
// Build legend entries: one pure-color swatch (red/green/blue) per
// assigned channel, labeled with the band's display name. Caller owns
// the returned nodes.
QList<QgsLayerTreeModelLegendNode *> QgsMultiBandColorRenderer::createLegendNodes( QgsLayerTreeLayer *nodeLayer )
{
  QList<QgsLayerTreeModelLegendNode *> res;
  if ( mRedBand != -1 )
  {
    res << new QgsRasterSymbolLegendNode( nodeLayer, QColor( 255, 0, 0 ), displayBandName( mRedBand ) );
  }
  if ( mGreenBand != -1 )
  {
    res << new QgsRasterSymbolLegendNode( nodeLayer, QColor( 0, 255, 0 ), displayBandName( mGreenBand ) );
  }
  if ( mBlueBand != -1 )
  {
    res << new QgsRasterSymbolLegendNode( nodeLayer, QColor( 0, 0, 255 ), displayBandName( mBlueBand ) );
  }
  return res;
}
// Export the renderer as an SLD sld:ChannelSelection inside the
// sld:RasterSymbolizer produced by the base class, inserting it at the
// position the SLD schema requires (after Opacity, else after Geometry,
// else first). Each assigned band gets a channel element with its
// source band number and, if configured, its contrast enhancement.
void QgsMultiBandColorRenderer::toSld( QDomDocument &doc, QDomElement &element, const QVariantMap &props ) const
{
  // create base structure
  QgsRasterRenderer::toSld( doc, element, props );

#if 0
  // TODO: the following jumped code is necessary to avoid to export channelSelection in
  // case it's set as default value. The drawback is that it's necessary to calc band
  // statistics that can be really slow depending on dataProvider and rastr location.
  // this is the reason this part of code is commented and the channelSelection is
  // always exported.
  //
  // before to export check if the band combination and contrast setting are the
  // default ones to avoid to export this tags
  bool isDefaultCombination = true;
  QList<int> defaultBandCombination( { 1, 2, 3 } );

  isDefaultCombination = isDefaultCombination && ( usesBands() == defaultBandCombination );
  isDefaultCombination = isDefaultCombination && (
                           mRedContrastEnhancement->contrastEnhancementAlgorithm() == QgsContrastEnhancement::StretchToMinimumMaximum &&
                           mGreenContrastEnhancement->contrastEnhancementAlgorithm() == QgsContrastEnhancement::StretchToMinimumMaximum &&
                           mBlueContrastEnhancement->contrastEnhancementAlgorithm() == QgsContrastEnhancement::StretchToMinimumMaximum
                         );
  // compute raster statistics (slow) only if true the previous conditions
  if ( isDefaultCombination )
  {
    QgsRasterBandStats statRed = bandStatistics( 1, QgsRasterBandStats::Min | QgsRasterBandStats::Max );
    isDefaultCombination = isDefaultCombination && (
                             ( mRedContrastEnhancement->minimumValue() == statRed.minimumValue &&
                               mRedContrastEnhancement->maximumValue() == statRed.maximumValue )
                           );
  }
  if ( isDefaultCombination )
  {
    QgsRasterBandStats statGreen = bandStatistics( 2, QgsRasterBandStats::Min | QgsRasterBandStats::Max );
    isDefaultCombination = isDefaultCombination && (
                             ( mGreenContrastEnhancement->minimumValue() == statGreen.minimumValue &&
                               mGreenContrastEnhancement->maximumValue() == statGreen.maximumValue )
                           );
  }
  if ( isDefaultCombination )
  {
    QgsRasterBandStats statBlue = bandStatistics( 3, QgsRasterBandStats::Min | QgsRasterBandStats::Max );
    isDefaultCombination = isDefaultCombination && (
                             ( mBlueContrastEnhancement->minimumValue() == statBlue.minimumValue &&
                               mBlueContrastEnhancement->maximumValue() == statBlue.maximumValue )
                           );
  }
  if ( isDefaultCombination )
    return;
#endif

  // look for RasterSymbolizer tag
  QDomNodeList elements = element.elementsByTagName( QStringLiteral( "sld:RasterSymbolizer" ) );
  if ( elements.size() == 0 )
    return;

  // there SHOULD be only one
  QDomElement rasterSymbolizerElem = elements.at( 0 ).toElement();

  // add Channel Selection tags
  // Need to insert channelSelection in the correct sequence as in SLD standard e.g.
  // after opacity or geometry or as first element after sld:RasterSymbolizer
  QDomElement channelSelectionElem = doc.createElement( QStringLiteral( "sld:ChannelSelection" ) );
  elements = rasterSymbolizerElem.elementsByTagName( QStringLiteral( "sld:Opacity" ) );
  if ( elements.size() != 0 )
  {
    rasterSymbolizerElem.insertAfter( channelSelectionElem, elements.at( 0 ) );
  }
  else
  {
    elements = rasterSymbolizerElem.elementsByTagName( QStringLiteral( "sld:Geometry" ) );
    if ( elements.size() != 0 )
    {
      rasterSymbolizerElem.insertAfter( channelSelectionElem, elements.at( 0 ) );
    }
    else
    {
      rasterSymbolizerElem.insertBefore( channelSelectionElem, rasterSymbolizerElem.firstChild() );
    }
  }

  // for each mapped band: tags[tagCounter] pairs positionally with the
  // bands returned by usesBands() (red, green, blue order)
  static QStringList tags { QStringLiteral( "sld:RedChannel" ), QStringLiteral( "sld:GreenChannel" ), QStringLiteral( "sld:BlueChannel" ) };

  QList<QgsContrastEnhancement *> contrastEnhancements;
  contrastEnhancements.append( mRedContrastEnhancement );
  contrastEnhancements.append( mGreenContrastEnhancement );
  contrastEnhancements.append( mBlueContrastEnhancement );

  const QList<int> bands = usesBands();
  QList<int>::const_iterator bandIt = bands.constBegin();
  for ( int tagCounter = 0 ; bandIt != bands.constEnd(); ++bandIt, ++tagCounter )
  {
    if ( *bandIt < 0 )
      continue;

    QDomElement channelElem = doc.createElement( tags[ tagCounter ] );
    channelSelectionElem.appendChild( channelElem );

    // set band
    QDomElement sourceChannelNameElem = doc.createElement( QStringLiteral( "sld:SourceChannelName" ) );
    sourceChannelNameElem.appendChild( doc.createTextNode( QString::number( *bandIt ) ) );
    channelElem.appendChild( sourceChannelNameElem );

    // set ContrastEnhancement for each band
    // NO ContrastEnhancement parameter for the entire bands is managed e.g.
    // because min/max values can vary depending on band.
    if ( contrastEnhancements[ tagCounter ] )
    {
      QDomElement contrastEnhancementElem = doc.createElement( QStringLiteral( "sld:ContrastEnhancement" ) );
      contrastEnhancements[ tagCounter ]->toSld( doc, contrastEnhancementElem );
      channelElem.appendChild( contrastEnhancementElem );
    }
  }
}
| gpl-2.0 |
kidmaple/CoolWall | linux-2.6.x/arch/powerpc/sysdev/cpm2_common.c | 7 | 7554 | /*
* General Purpose functions for the global management of the
* 8260 Communication Processor Module.
* Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
* Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
* 2.3.99 Updates
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
* Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/*
*
* In addition to the individual control of the communication
* channels, there are a few functions that globally affect the
* communication processor.
*
* Buffer descriptors must be allocated from the dual ported memory
* space. The allocator for that is here. When the communication
* process is reset, we reclaim the memory available. There is
* currently no deallocator for this memory.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc8260.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cpm2.h>
#include <asm/rheap.h>
#include <asm/fs_pd.h>
#include <sysdev/fsl_soc.h>
static void cpm2_dpinit(void);
cpm_cpm2_t *cpmp; /* Pointer to comm processor space */
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
cpm2_map_t *cpm2_immr;
intctl_cpm2_t *cpm2_intctl;
#define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserve this amount
of space for CPM as it is larger
than on PQ2 */
/* Map the CPM register space, reclaim the dual-ported RAM allocator,
 * and publish the global cpmp pointer used by CPM device drivers.
 * Called once during platform setup.
 */
void
cpm2_reset(void)
{
	cpm2_immr = (cpm2_map_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
	cpm2_intctl = cpm2_map(im_intctl);

	/* Reclaim the DP memory for our use.
	 */
	cpm2_dpinit();

	/* Tell everyone where the comm processor resides.
	 */
	cpmp = &cpm2_immr->im_cpm;
}
/* Set a baud rate generator. This needs lots of work. There are
* eight BRGs, which can be connected to the CPM channels or output
* as clocks. The BRGs are in two different block of internal
* memory mapped space.
* The baud rate clock is the system clock divided by something.
* It was set up long ago during the initial boot phase and is
* is given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
#define BRG_INT_CLK (get_brgfreq())
#define BRG_UART_CLK (BRG_INT_CLK/16)
/* This function is used by UARTS, or anything else that uses a 16x
* oversampled clock.
*/
/* Program baud rate generator `brg` (0-based) for `rate` baud assuming a
 * 16x oversampled clock (UART/SMC use). BRGs 0-3 live at im_brgc1,
 * BRGs 4-7 at im_brgc5, hence the two mappings.
 */
void
cpm_setbrg(uint brg, uint rate)
{
	volatile uint *bp;

	/* This is good enough to get SMCs running.....
	 */
	if (brg < 4) {
		bp = cpm2_map_size(im_brgc1, 16);
	} else {
		bp = cpm2_map_size(im_brgc5, 16);
		brg -= 4;
	}
	bp += brg;
	/* divisor = BRG_UART_CLK / rate; the register stores divisor-1
	 * shifted left one bit, plus the enable flag. */
	out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);

	cpm2_unmap(bp);
}
/* This function is used to set high speed synchronous baud rate
* clocks.
*/
/* Program baud rate generator `brg` (0-based) for a high-speed
 * synchronous clock at `rate`, optionally with the divide-by-16
 * prescaler. BRGs 0-3 live at im_brgc1, BRGs 4-7 at im_brgc5.
 *
 * Fix: build the register value in a local and write it with a single
 * out_be32(), instead of two raw big-endian-assuming pointer stores —
 * consistent with cpm_setbrg() and correct through the I/O accessors.
 */
void
cpm2_fastbrg(uint brg, uint rate, int div16)
{
	volatile uint *bp;
	u32 val;

	if (brg < 4) {
		bp = cpm2_map_size(im_brgc1, 16);
	} else {
		bp = cpm2_map_size(im_brgc5, 16);
		brg -= 4;
	}
	bp += brg;
	val = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
	if (div16)
		val |= CPM_BRG_DIV16;
	out_be32(bp, val);
	cpm2_unmap(bp);
}
/* Route clock source `clock` to SCC/FCC `target` for the given
 * direction (`mode`: CPM_CLK_RX or CPM_CLK_TX) by updating the CMX
 * SCC/FCC route registers. Returns 0 on success, -EINVAL for an
 * unknown target; an unknown target/clock pairing also yields -EINVAL
 * (NOTE(review): in that case bits stays 0 and the register is still
 * rewritten with the masked field cleared, as before).
 *
 * Fix: CPM_CLK_SCC1 was missing its `break` and fell through into
 * CPM_CLK_SCC2, clobbering shift (24 -> 16) and programming the wrong
 * field.
 */
int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
	int ret = 0;
	int shift;
	int i, bits = 0;
	cpmux_t *im_cpmux;
	u32 *reg;
	u32 mask = 7;
	/* {target, clock source, 3-bit field value} routing table */
	u8 clk_map [24][3] = {
		{CPM_CLK_FCC1, CPM_BRG5, 0},
		{CPM_CLK_FCC1, CPM_BRG6, 1},
		{CPM_CLK_FCC1, CPM_BRG7, 2},
		{CPM_CLK_FCC1, CPM_BRG8, 3},
		{CPM_CLK_FCC1, CPM_CLK9, 4},
		{CPM_CLK_FCC1, CPM_CLK10, 5},
		{CPM_CLK_FCC1, CPM_CLK11, 6},
		{CPM_CLK_FCC1, CPM_CLK12, 7},
		{CPM_CLK_FCC2, CPM_BRG5, 0},
		{CPM_CLK_FCC2, CPM_BRG6, 1},
		{CPM_CLK_FCC2, CPM_BRG7, 2},
		{CPM_CLK_FCC2, CPM_BRG8, 3},
		{CPM_CLK_FCC2, CPM_CLK13, 4},
		{CPM_CLK_FCC2, CPM_CLK14, 5},
		{CPM_CLK_FCC2, CPM_CLK15, 6},
		{CPM_CLK_FCC2, CPM_CLK16, 7},
		{CPM_CLK_FCC3, CPM_BRG5, 0},
		{CPM_CLK_FCC3, CPM_BRG6, 1},
		{CPM_CLK_FCC3, CPM_BRG7, 2},
		{CPM_CLK_FCC3, CPM_BRG8, 3},
		{CPM_CLK_FCC3, CPM_CLK13, 4},
		{CPM_CLK_FCC3, CPM_CLK14, 5},
		{CPM_CLK_FCC3, CPM_CLK15, 6},
		{CPM_CLK_FCC3, CPM_CLK16, 7}
		};

	im_cpmux = cpm2_map(im_cpmux);

	switch (target) {
	case CPM_CLK_SCC1:
		reg = &im_cpmux->cmx_scr;
		shift = 24;
		break;
	case CPM_CLK_SCC2:
		reg = &im_cpmux->cmx_scr;
		shift = 16;
		break;
	case CPM_CLK_SCC3:
		reg = &im_cpmux->cmx_scr;
		shift = 8;
		break;
	case CPM_CLK_SCC4:
		reg = &im_cpmux->cmx_scr;
		shift = 0;
		break;
	case CPM_CLK_FCC1:
		reg = &im_cpmux->cmx_fcr;
		shift = 24;
		break;
	case CPM_CLK_FCC2:
		reg = &im_cpmux->cmx_fcr;
		shift = 16;
		break;
	case CPM_CLK_FCC3:
		reg = &im_cpmux->cmx_fcr;
		shift = 8;
		break;
	default:
		printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n");
		return -EINVAL;
	}

	/* The RX field sits 3 bits above the TX field in each byte. */
	if (mode == CPM_CLK_RX)
		shift += 3;

	for (i = 0; i < 24; i++) {
		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
			bits = clk_map[i][2];
			break;
		}
	}
	if (i == sizeof(clk_map) / 3)
		ret = -EINVAL;

	bits <<= shift;
	mask <<= shift;
	out_be32(reg, (in_be32(reg) & ~mask) | bits);

	cpm2_unmap(im_cpmux);
	return ret;
}
/*
* dpalloc / dpfree bits.
*/
static spinlock_t cpm_dpmem_lock;
/* 16 blocks should be enough to satisfy all requests
* until the memory subsystem goes up... */
static rh_block_t cpm_boot_dpmem_rh_block[16];
static rh_info_t cpm_dpmem_info;
static u8* im_dprambase;
/* Initialize the dual-ported RAM allocator: map the DPRAM, seed the
 * rheap with a static block array (usable before the memory subsystem
 * is up), and attach the safe data-only region.
 */
static void cpm2_dpinit(void)
{
	spin_lock_init(&cpm_dpmem_lock);

	im_dprambase = ioremap(CPM_MAP_ADDR, CPM_DATAONLY_BASE + CPM_DATAONLY_SIZE);

	/* initialize the info header */
	rh_init(&cpm_dpmem_info, 1,
			sizeof(cpm_boot_dpmem_rh_block) /
			sizeof(cpm_boot_dpmem_rh_block[0]),
			cpm_boot_dpmem_rh_block);

	/* Attach the usable dpmem area */
	/* XXX: This is actually crap.  CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE is only a subset of the available dpram.  It
	 * varies with the processor and the microcode patches activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
/* This function returns an index into the DPRAM area.
*/
/* Allocate `size` bytes of dual-ported RAM with the given alignment.
 * Returns an offset (index) into the DPRAM area, usable with
 * cpm_dpram_addr(); rheap error codes come back on failure.
 * Takes cpm_dpmem_lock because the allocator's alignment field is
 * shared mutable state.
 */
unsigned long cpm_dpalloc(uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc);
/* Free a DPRAM region previously returned by cpm_dpalloc() /
 * cpm_dpalloc_fixed(). Returns the rheap result code.
 */
int cpm_dpfree(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	ret = rh_free(&cpm_dpmem_info, offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_dpfree);
/* not sure if this is ever needed */
/* Allocate DPRAM at a caller-chosen fixed offset (some CPM features
 * require specific DPRAM locations). Same locking/return convention
 * as cpm_dpalloc().
 */
unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);
/* Debug helper: dump the DPRAM allocator state to the kernel log. */
void cpm_dpdump(void)
{
	rh_dump(&cpm_dpmem_info);
}
EXPORT_SYMBOL(cpm_dpdump);
/* Translate a DPRAM offset (from cpm_dpalloc*) into a CPU-visible
 * virtual address within the mapped DPRAM region.
 */
void *cpm_dpram_addr(unsigned long offset)
{
	return (void *)(im_dprambase + offset);
}
EXPORT_SYMBOL(cpm_dpram_addr);
| gpl-2.0 |
0-wiz-0/audacity | src/prefs/DevicePrefs.cpp | 7 | 10430 | /**********************************************************************
Audacity: A Digital Audio Editor
DevicePrefs.cpp
Joshua Haberman
Dominic Mazzoni
James Crook
*******************************************************************//**
\class DevicePrefs
\brief A PrefsPanel used to select recording and playback devices and
other settings.
Presents interface for user to select the recording device and
playback device, from the list of choices that PortAudio
makes available.
Also lets user decide how many channels to record.
*//********************************************************************/
#include "../Audacity.h"
#include <wx/defs.h>
#include <wx/choice.h>
#include <wx/intl.h>
#include <wx/log.h>
#include "portaudio.h"
#include "../AudioIO.h"
#include "../Internat.h"
#include "../Prefs.h"
#include "../ShuttleGui.h"
#include "../DeviceManager.h"
#include "DevicePrefs.h"
enum {
HostID = 10000,
PlayID,
RecordID,
ChannelsID
};
BEGIN_EVENT_TABLE(DevicePrefs, PrefsPanel)
EVT_CHOICE(HostID, DevicePrefs::OnHost)
EVT_CHOICE(RecordID, DevicePrefs::OnDevice)
END_EVENT_TABLE()
// Construct the Devices preferences page and build its GUI from the
// stored preference values.
DevicePrefs::DevicePrefs(wxWindow * parent)
:  PrefsPanel(parent, _("Devices"))
{
   Populate();
}
// Nothing to release explicitly; child controls are owned by wx.
DevicePrefs::~DevicePrefs()
{
}
// Read current device settings from gPrefs, build the GUI via
// ShuttleGui, then fire OnHost once to fill the device/channel choices
// for the initially selected host.
void DevicePrefs::Populate()
{
   // First any pre-processing for constructing the GUI.
   GetNamesAndLabels();

   // Get current setting for devices
   mPlayDevice = gPrefs->Read(wxT("/AudioIO/PlaybackDevice"), wxT(""));
   mRecordDevice = gPrefs->Read(wxT("/AudioIO/RecordingDevice"), wxT(""));
   mRecordSource = gPrefs->Read(wxT("/AudioIO/RecordingSource"), wxT(""));
   mRecordChannels = gPrefs->Read(wxT("/AudioIO/RecordChannels"), 2L);

   //------------------------- Main section --------------------
   // Now construct the GUI itself.
   // Use 'eIsCreatingFromPrefs' so that the GUI is
   // initialised with values from gPrefs.
   ShuttleGui S(this, eIsCreatingFromPrefs);
   PopulateOrExchange(S);
   // ----------------------- End of main section --------------

   // Populate the per-host device lists for the restored host choice.
   wxCommandEvent e;
   OnHost(e);
}
// Fill mHostNames/mHostLabels with the PortAudio host APIs that have at
// least one input or output device attached (hosts with no devices are
// omitted from the choice).
void DevicePrefs::GetNamesAndLabels()
{
   // Gather list of hosts.  Only added hosts that have devices attached.
   int nDevices = Pa_GetDeviceCount();
   for (int i = 0; i < nDevices; i++) {
      const PaDeviceInfo *info = Pa_GetDeviceInfo(i);
      if (info->maxOutputChannels > 0 || info->maxInputChannels > 0) {
         wxString name = wxSafeConvertMB2WX(Pa_GetHostApiInfo(info->hostApi)->name);
         if (mHostNames.Index(name) == wxNOT_FOUND) {
            mHostNames.Add(name);
            mHostLabels.Add(name);
         }
      }
   }
}
// Lay out the panel (or exchange values with prefs, depending on the
// ShuttleGui mode): host choice + PortAudio version in "Interface",
// playback device choice, and recording device/channel choices. The
// device/channel choices start empty; OnHost()/OnDevice() fill them.
void DevicePrefs::PopulateOrExchange(ShuttleGui & S)
{
   wxArrayString empty;

   S.SetBorder(2);

   S.StartStatic(_("Interface"));
   {
      S.StartMultiColumn(2);
      {
         S.Id(HostID);
         // TieChoice binds the selection to the /AudioIO/Host pref.
         mHost = S.TieChoice(_("&Host:"),
                             wxT("/AudioIO/Host"),
                             wxT(""),
                             mHostNames,
                             mHostLabels);
         S.SetSizeHints(mHostNames);

         S.AddPrompt(_("Using:"));
         S.AddFixedText(wxString(wxSafeConvertMB2WX(Pa_GetVersionText())));
      }
      S.EndMultiColumn();
   }
   S.EndStatic();

   S.StartStatic(_("Playback"));
   {
      S.StartMultiColumn(2);
      {
         S.Id(PlayID);
         mPlay = S.AddChoice(_("&Device:"),
                             wxEmptyString,
                             &empty);
      }
      S.EndMultiColumn();
   }
   S.EndStatic();

   S.StartStatic(_("Recording"));
   {
      S.StartMultiColumn(2);
      {
         S.Id(RecordID);
         mRecord = S.AddChoice(_("De&vice:"),
                               wxEmptyString,
                               &empty);

         S.Id(ChannelsID);
         mChannels = S.AddChoice(_("Cha&nnels:"),
                                 wxEmptyString,
                                 &empty);
      }
      S.EndMultiColumn();
   }
   S.EndStatic();
}
// Repopulate the playback and recording device choices for the host
// API currently selected in mHost, restoring the previously saved
// device selections where possible, then cascade to OnDevice() to
// refresh the channel choice.
//
// Fix: the fallback that selects PortAudio's default *input* device
// checked and set mPlay instead of mRecord (copy-paste from the
// playback branch above), leaving the recording choice unselected.
void DevicePrefs::OnHost(wxCommandEvent & e)
{
   // Bail if we have no hosts
   if (mHostNames.size() < 1)
      return;

   // Find the index for the host API selected
   int index = -1;
   wxString apiName = mHostNames[mHost->GetCurrentSelection()];
   int nHosts = Pa_GetHostApiCount();
   for (int i = 0; i < nHosts; ++i) {
      wxString name = wxSafeConvertMB2WX(Pa_GetHostApiInfo(i)->name);
      if (name == apiName) {
         index = i;
         break;
      }
   }

   // We should always find the host!
   if (index < 0) {
      wxLogDebug(wxT("DevicePrefs::OnHost(): API index not found"));
      return;
   }

   int nDevices = Pa_GetDeviceCount();
   if (nDevices == 0) {
      mHost->Clear();
      mHost->Append(_("No audio interfaces"), (void *) NULL);
      mHost->SetSelection(0);
   }

   const std::vector<DeviceSourceMap> &inMaps  = DeviceManager::Instance()->GetInputDeviceMaps();
   const std::vector<DeviceSourceMap> &outMaps = DeviceManager::Instance()->GetOutputDeviceMaps();

   wxArrayString playnames;
   wxArrayString recordnames;
   size_t i;
   int devindex;  /* temp variable to hold the numeric ID of each device in turn */
   wxString device;
   wxString recDevice;

   // Saved recording device may include a ": source" suffix.
   recDevice = mRecordDevice;
   if (this->mRecordSource != wxT(""))
      recDevice += wxT(": ") + mRecordSource;

   mRecord->Clear();
   for (i = 0; i < inMaps.size(); i++) {
      if (index == inMaps[i].hostIndex) {
         device = MakeDeviceSourceString(&inMaps[i]);
         devindex = mRecord->Append(device);
         // We need to const cast here because SetClientData is a wx function
         // It is okay beause the original variable is non-const.
         mRecord->SetClientData(devindex, const_cast<DeviceSourceMap *>(&inMaps[i]));
         if (device == recDevice) {  /* if this is the default device, select it */
            mRecord->SetSelection(devindex);
         }
      }
   }

   mPlay->Clear();
   for (i = 0; i < outMaps.size(); i++) {
      if (index == outMaps[i].hostIndex) {
         device = MakeDeviceSourceString(&outMaps[i]);
         devindex = mPlay->Append(device);
         mPlay->SetClientData(devindex, const_cast<DeviceSourceMap *>(&outMaps[i]));
         if (device == mPlayDevice) {  /* if this is the default device, select it */
            mPlay->SetSelection(devindex);
         }
      }
   }

   /* deal with not having any devices at all */
   if (mPlay->GetCount() == 0) {
      playnames.Add(_("No devices found"));
      mPlay->Append(playnames[0], (void *) NULL);
      mPlay->SetSelection(0);
   }
   if (mRecord->GetCount() == 0) {
      recordnames.Add(_("No devices found"));
      mRecord->Append(recordnames[0], (void *) NULL);
      mRecord->SetSelection(0);
   }

   /* what if we have no device selected? we should choose the default on
    * this API, as defined by PortAudio. We then fall back to using 0 only if
    * that fails */
   if (mPlay->GetCount() && mPlay->GetSelection() == wxNOT_FOUND) {
      DeviceSourceMap *defaultMap = DeviceManager::Instance()->GetDefaultOutputDevice(index);
      if (defaultMap)
         mPlay->SetStringSelection(MakeDeviceSourceString(defaultMap));

      if (mPlay->GetSelection() == wxNOT_FOUND) {
         mPlay->SetSelection(0);
      }
   }

   if (mRecord->GetCount() && mRecord->GetSelection() == wxNOT_FOUND) {
      DeviceSourceMap *defaultMap = DeviceManager::Instance()->GetDefaultInputDevice(index);
      if (defaultMap)
         mRecord->SetStringSelection(MakeDeviceSourceString(defaultMap));

      if (mRecord->GetSelection() == wxNOT_FOUND) {
         mRecord->SetSelection(0);
      }
   }

   ShuttleGui S(this, eIsCreating);
   S.SetSizeHints(mPlay, mPlay->GetStrings());
   S.SetSizeHints(mRecord, mRecord->GetStrings());
   OnDevice(e);
}
// Rebuild the channel-count choice for the currently selected recording
// device, preserving the user's channel selection when possible.
void DevicePrefs::OnDevice(wxCommandEvent & WXUNUSED(event))
{
   int ndx = mRecord->GetCurrentSelection();
   if (ndx == wxNOT_FOUND) {
      ndx = 0;
   }

   int sel = mChannels->GetSelection();
   int cnt = 0;

   // Client data is null for the "No devices found" placeholder entry.
   DeviceSourceMap *inMap = (DeviceSourceMap *) mRecord->GetClientData(ndx);
   if (inMap != NULL) {
      cnt = inMap->numChannels;
   }

   // Remember the channel count the user had selected (1-based).
   if (sel != wxNOT_FOUND) {
      mRecordChannels = sel + 1;
   }

   mChannels->Clear();

   // Mimic old behavior
   if (cnt <= 0) {
      cnt = 16;
   }

   // Place an artifical limit on the number of channels to prevent an
   // outrageous number.  I don't know if this is really necessary, but
   // it doesn't hurt.
   if (cnt > 256) {
      cnt = 256;
   }

   wxArrayString channelnames;

   // Channel counts, mono, stereo etc...
   for (int i = 0; i < cnt; i++) {
      wxString name;

      if (i == 0) {
         name = _("1 (Mono)");
      }
      else if (i == 1) {
         name = _("2 (Stereo)");
      }
      else {
         name = wxString::Format(wxT("%d"), i + 1);
      }

      channelnames.Add(name);
      int index = mChannels->Append(name);
      if (i == mRecordChannels - 1) {
         mChannels->SetSelection(index);
      }
   }

   if (mChannels->GetCount() && mChannels->GetCurrentSelection() == wxNOT_FOUND) {
      mChannels->SetSelection(0);
   }

   ShuttleGui S(this, eIsCreating);
   S.SetSizeHints(mChannels, channelnames);
   Layout();
}
// Persist the panel's selections to gPrefs: playback device, recording
// device/source, and recording channel count. Returns the result of the
// final flush (true if nothing needed saving).
bool DevicePrefs::Apply()
{
   // Save the ShuttleGui-bound values (the host choice) first.
   ShuttleGui S(this, eIsSavingToPrefs);
   PopulateOrExchange(S);
   DeviceSourceMap *map = NULL;

   if (mPlay->GetCount() > 0) {
      map = (DeviceSourceMap *) mPlay->GetClientData(
            mPlay->GetSelection());
   }
   if (map) {
      gPrefs->Write(wxT("/AudioIO/PlaybackDevice"), map->deviceString);
      gPrefs->Flush();
   }

   map = NULL;
   if (mRecord->GetCount() > 0) {
      map = (DeviceSourceMap *) mRecord->GetClientData(mRecord->GetSelection());
   }
   if (map) {
      gPrefs->Write(wxT("/AudioIO/RecordingDevice"),
                    map->deviceString);
      gPrefs->Write(wxT("/AudioIO/RecordingSourceIndex"),
                    map->sourceIndex);
      // Only store a source name when the device actually exposes sources.
      if (map->totalSources >= 1) {
         gPrefs->Write(wxT("/AudioIO/RecordingSource"),
                       map->sourceString);
      } else {
         gPrefs->Write(wxT("/AudioIO/RecordingSource"),
                       wxT(""));
      }
      gPrefs->Write(wxT("/AudioIO/RecordChannels"),
                    mChannels->GetSelection() + 1);

      return gPrefs->Flush();
   }

   return true;
}
// Factory hook: create a Devices preferences panel; caller owns it.
PrefsPanel *DevicePrefsFactory::Create(wxWindow *parent)
{
   return new DevicePrefs(parent);
}
| gpl-2.0 |
rsalveti/dpdk | drivers/net/i40e/i40e_fdir.c | 7 | 45169 | /*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN 16
#endif
#define I40E_FDIR_PKT_LEN 512
#define I40E_FDIR_IP_DEFAULT_LEN 420
#define I40E_FDIR_IP_DEFAULT_TTL 0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
#define I40E_FDIR_IPv6_TC_OFFSET 20
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
/* Wait count and interval for fdir filter programming */
#define I40E_FDIR_WAIT_COUNT 10
#define I40E_FDIR_WAIT_INTERVAL_US 1000
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
#define I40E_FDIR_FLUSH_INTERVAL_MS 5
#define I40E_COUNTER_PF 2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
#define I40E_MAX_FLX_SOURCE_OFF 480
#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
#define NONUSE_FLX_PIT_DEST_OFF 63
#define NONUSE_FLX_PIT_FSIZE 1
#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
(((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
(((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \
I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
#define I40E_FDIR_FLOWS ( \
(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
(1 << RTE_ETH_FLOW_L2_PAYLOAD))
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
static int i40e_check_fdir_flex_conf(
const struct rte_eth_fdir_flex_conf *conf);
static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
const struct rte_eth_flex_payload_cfg *cfg);
static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_flex_mask *mask_cfg);
static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
const struct rte_eth_fdir_input *fdir_input,
unsigned char *raw_pkt);
static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add);
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
static void i40e_fdir_info_get(struct rte_eth_dev *dev,
struct rte_eth_fdir_info *fdir);
static void i40e_fdir_stats_get(struct rte_eth_dev *dev,
struct rte_eth_fdir_stats *stat);
/*
 * i40e_fdir_rx_queue_init - program the FDIR RX queue context in hardware
 * @rxq: RX queue that receives FDIR programming status reports
 *
 * Clears and re-writes the LAN RX queue context, then arms the queue by
 * bumping its tail register to the last descriptor.
 * Returns I40E_SUCCESS or the error code from the context operations.
 */
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;	/* no header buffer: header split disabled below */
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;	/* 32-byte descriptors when not built for 16B */
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	/* Make descriptor-ring writes visible before arming the queue. */
	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 *
 * Creates a dedicated FDIR VSI with one TX and one RX queue used only for
 * filter programming, switches both queues on, and reserves a DMA-capable
 * memzone holding the programming packet.  Idempotent: returns success
 * immediately if the FDIR VSI already exists.  On failure, resources are
 * unwound in reverse order via the goto-cleanup labels.
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		/* Already initialized by a previous call; nothing to do. */
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* Fdir tx queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* Fdir rx queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->driver->pci_drv.name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				 "flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	/* Physical (bus) address the TX descriptor will point at. */
	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
/*
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 *
 * Reverse of i40e_fdir_setup(): switches the programming queues off,
 * releases the queue resources and the FDIR VSI.  Safe to call when
 * setup never ran (returns immediately if there is no FDIR VSI).
 */
void
i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;
	/* Quiesce the queues before freeing their resources. */
	i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
	i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
}
/* Check whether the flow director table is empty.
 * Returns 0 when no guaranteed or best-effort filters are programmed,
 * -1 otherwise.
 */
static inline int
i40e_fdir_empty(struct i40e_hw *hw)
{
	uint32_t fdstat, guarant_cnt, best_cnt;

	/* Both counters live in PFQF_FDSTAT; read the register once so
	 * the two fields come from a single consistent snapshot (the
	 * original read it twice).
	 */
	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	guarant_cnt = (fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT;
	best_cnt = (fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;

	if (best_cnt + guarant_cnt > 0)
		return -1;
	return 0;
}
/*
 * Initialize the configuration about bytes stream extracted as flexible payload
 * and mask setting.
 *
 * Resets the flexible-payload extraction (FLX_PIT) registers of every
 * payload layer to their defaults and clears all flex masks for every
 * valid packet classification type.
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		/* NOTE(review): raw register values below are taken as-is;
		 * the two 0xFC2x entries are placeholders for the unused
		 * second and third PIT slots of this layer.
		 */
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			/* clear flex bit mask of this word */
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
/* Combine two bytes into a 16-bit word (hi byte first). */
#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))

/* Reject overlapping flex PIT entries: the second entry must start at or
 * after the end (src_offset + size) of the first.  NOTE: expands to a
 * 'return -EINVAL' inside the calling function on violation.
 */
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should not be" \
			" less than the previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
 * structures.  Consecutive offsets are coalesced into runs, and the
 * resulting entries are kept sorted by src_offset (insertion sort).
 * Returns the number of flex_pit entries produced.
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
		       struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t run_start, run_len, cnt = 0;
	uint16_t ins, pos, idx = 0;

	while (idx < I40E_FDIR_MAX_FLEX_LEN) {
		/* Measure the run of consecutive source offsets that
		 * starts at idx.
		 */
		run_len = 1;
		while (idx < I40E_FDIR_MAX_FLEX_LEN - 1 &&
		       src_offset[idx + 1] == src_offset[idx] + 1) {
			run_len++;
			idx++;
		}
		run_start = src_offset[idx] + 1 - run_len;

		/* Locate where this run belongs in src_offset order. */
		for (ins = 0; ins < cnt; ins++) {
			if (run_start < flex_pit[ins].src_offset)
				break;
		}
		/* Shift larger entries up to open the insertion slot. */
		for (pos = cnt; pos > ins; pos--)
			flex_pit[pos] = flex_pit[pos - 1];

		flex_pit[ins].dst_offset = idx + 1 - run_len;
		flex_pit[ins].src_offset = run_start;
		flex_pit[ins].size = run_len;
		idx++;
		cnt++;
	}
	return cnt;
}
/* i40e_check_fdir_flex_payload - check flex payload configuration arguments.
 *
 * Validates that every source offset is within the supported payload
 * range, that no more than I40E_MAX_FLXPLD_FIED runs result, that each
 * run is word-aligned in offset/size/destination, and that runs do not
 * overlap.  Returns 0 on success, -EINVAL otherwise.
 */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t num, i;

	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
			/* fixed typo: "maxmial" -> "maximal" */
			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
			return -EINVAL;
		}
	}

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
	if (num > I40E_MAX_FLXPLD_FIED) {
		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
		return -EINVAL;
	}
	for (i = 0; i < num; i++) {
		/* Hardware extracts flex payload in 16-bit words, so all
		 * offsets and sizes must be even.
		 */
		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
			flex_pit[i].src_offset & 0x01) {
			PMD_DRV_LOG(ERR, "flexpayload should be measured"
				" in word");
			return -EINVAL;
		}
		if (i != num - 1)
			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
	}
	return 0;
}
/*
 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
 * arguments are valid.
 *
 * Validates the payload-extraction rules via i40e_check_fdir_flex_payload()
 * and verifies each flex mask references a supported flow type and uses at
 * most I40E_FDIR_BITMASK_NUM_WORD partial (bit-level) word masks.
 * Returns 0 on success, -EINVAL on any invalid argument.
 */
static int
i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		/* Walk the mask two bytes at a time: each pair forms one
		 * 16-bit word mask.  A word that is neither all-zero nor
		 * all-one needs one of the limited bit-mask registers.
		 */
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
					PMD_DRV_LOG(ERR, " exceed maximal"
						" number of bitmasks.");
					return -EINVAL;
				}
			}
		}
	}
	return 0;
}
/*
 * i40e_set_flx_pld_cfg - configure the rule how bytes stream is extracted as
 * flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 *
 * Converts the byte-based src_offset configuration into word-based flex PIT
 * entries, records them in pf->fdir.flex_set, and programs the FLX_PIT
 * registers of the selected payload layer.  Unused PIT slots are programmed
 * with strictly increasing offsets to satisfy the register constraint.
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
			 const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	/* map the payload type onto the hardware layer index */
	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);

	for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure; byte offsets from the
		 * config become word offsets in flex_set
		 */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	/* NOTE(review): when num == 0, field_idx is still 0 and this reads
	 * the untouched flex_set[layer 0][0] entry -- presumably the default
	 * from i40e_init_flx_pld; verify against callers.
	 */
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			    NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload
 *
 * Splits the byte mask into 16-bit words.  Fully-masked words go into the
 * word mask (FLXINSET); partially-masked words each consume one of the
 * per-pctype bit-mask (FD_MSK) registers.  Results are cached in
 * pf->fdir.flex_mask and written to hardware.
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
		enum i40e_filter_pctype pctype,
		const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask; hardware stores the inverse */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
/*
 * Configure flow director related setting.
 *
 * Must run before any filters are added: flushes existing filters if the
 * table is non-empty, enables the FDIR engine, resets the flex payload
 * configuration to defaults, then applies the user's flex payload and
 * flex mask configuration from dev_conf.fdir_conf.flex_conf.
 * Returns 0 on success or a negative error code.
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	* configuration need to be done before
	* flow director filters are added
	* If filters exist, flush them.
	*/
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
/*
 * i40e_fdir_fill_eth_ip_head - fill the Ethernet (optionally VLAN-tagged)
 * and IPv4/IPv6 headers of the FDIR programming packet.
 * @fdir_input: filter input set the packet must match
 * @raw_pkt: destination buffer (start of the Ethernet header)
 * @vlan: whether to insert an 802.1Q tag
 *
 * Returns the total header length written (Ethernet [+ VLAN] + L3), or
 * -1 for an unknown flow type.
 */
static inline int
i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
			   unsigned char *raw_pkt,
			   bool vlan)
{
	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};	/* TPID 0x8100 + empty TCI */
	uint16_t *ether_type;
	uint8_t len = 2 * sizeof(struct ether_addr);
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	/* default L4 protocol number per flow type, used when the filter
	 * does not specify one explicitly
	 */
	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
		[RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
	};

	/* skip over destination + source MAC (left zeroed by the caller) */
	raw_pkt += 2 * sizeof(struct ether_addr);
	if (vlan && fdir_input->flow_ext.vlan_tci) {
		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
		rte_memcpy(raw_pkt + sizeof(uint16_t),
			   &fdir_input->flow_ext.vlan_tci,
			   sizeof(uint16_t));
		raw_pkt += sizeof(vlan_frame);
		len += sizeof(vlan_frame);
	}
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_L2_PAYLOAD:
		*ether_type = fdir_input->flow.l2_flow.ether_type;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		ip = (struct ipv4_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
		/* set len to by default */
		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
					fdir_input->flow.ip4_flow.proto :
					next_proto[fdir_input->flow_type];
		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
					fdir_input->flow.ip4_flow.ttl :
					I40E_FDIR_IP_DEFAULT_TTL;
		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
		len += sizeof(struct ipv4_hdr);
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		ip6 = (struct ipv6_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->vtc_flow =
			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
					 (fdir_input->flow.ipv6_flow.tc <<
					  I40E_FDIR_IPv6_TC_OFFSET));
		ip6->payload_len =
			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
					fdir_input->flow.ipv6_flow.proto :
					next_proto[fdir_input->flow_type];
		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
					fdir_input->flow.ipv6_flow.hop_limits :
					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		rte_memcpy(&(ip6->src_addr),
			   &(fdir_input->flow.ipv6_flow.dst_ip),
			   IPV6_ADDR_LEN);
		rte_memcpy(&(ip6->dst_addr),
			   &(fdir_input->flow.ipv6_flow.src_ip),
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.",
			    fdir_input->flow_type);
		return -1;
	}
	return len;
}
/*
* i40e_fdir_construct_pkt - construct packet based on fields in input
* @pf: board private structure
* @fdir_input: input set of the flow director entry
* @raw_pkt: a packet to be constructed
*/
static int
i40e_fdir_construct_pkt(struct i40e_pf *pf,
const struct rte_eth_fdir_input *fdir_input,
unsigned char *raw_pkt)
{
unsigned char *payload, *ptr;
struct udp_hdr *udp;
struct tcp_hdr *tcp;
struct sctp_hdr *sctp;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
int len;
/* fill the ethernet and IP head */
len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
!!fdir_input->flow_ext.vlan_tci);
if (len < 0)
return -EINVAL;
/* fill the L4 head */
switch (fdir_input->flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
udp->src_port = fdir_input->flow.udp4_flow.dst_port;
udp->dst_port = fdir_input->flow.udp4_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
sctp = (struct sctp_hdr *)(raw_pkt + len);
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
case RTE_ETH_FLOW_FRAG_IPV4:
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
udp->src_port = fdir_input->flow.udp6_flow.dst_port;
udp->dst_port = fdir_input->flow.udp6_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
tcp = (struct tcp_hdr *)(raw_pkt + len);
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
sctp = (struct sctp_hdr *)(raw_pkt + len);
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
* to the expected received packets.
*/
sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
case RTE_ETH_FLOW_FRAG_IPV6:
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
break;
case RTE_ETH_FLOW_L2_PAYLOAD:
payload = raw_pkt + len;
/*
* ARP packet is a special case on which the payload
* starts after the whole ARP header
*/
if (fdir_input->flow.l2_flow.ether_type ==
rte_cpu_to_be_16(ETHER_TYPE_ARP))
payload += sizeof(struct arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
break;
default:
PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
return -EINVAL;
}
/* fill the flexbytes to payload */
for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
size = pf->fdir.flex_set[pit_idx].size;
if (size == 0)
continue;
dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
ptr = payload +
pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
(void)rte_memcpy(ptr,
&fdir_input->flow_ext.flexbytes[dst],
size * sizeof(uint16_t));
}
return 0;
}
/* Construct the TX descriptor quad-word: pack the command, offset,
 * buffer size and L2 tag fields around the DATA descriptor type, and
 * convert the result to little-endian.
 */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
		uint32_t td_offset,
		unsigned int size,
		uint32_t td_tag)
{
	uint64_t qword = I40E_TX_DESC_DTYPE_DATA;

	qword |= (uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT;
	qword |= (uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT;
	qword |= (uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
	qword |= (uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT;

	return rte_cpu_to_le_64(qword);
}
/*
 * Check the programming status descriptor in the RX queue.
 * Done after a Programming Flow Director descriptor is submitted on the
 * TX queue: hardware reports the outcome as a special RX descriptor.
 *
 * Returns 0 when no completed status descriptor is pending or the filter
 * was programmed successfully, -1 when hardware reported a programming
 * error (table full on add, no such entry on delete).
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	/* DD bit set => hardware has written back this descriptor */
	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(ERR, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		/* consume the descriptor: clear it and advance the tail */
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
	}
	return ret;
}
/*
 * i40e_add_del_fdir_filter - add or remove a flow director filter.
 * @dev: ethernet device
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 *
 * Validates the filter (mode, flow type, destination queue, VF id),
 * builds the programming packet in the reserved memzone, and submits
 * the programming descriptor.  Returns 0 on success or a negative
 * errno-style code.
 */
static int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_eth_fdir_filter *filter,
			    bool add)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	enum i40e_filter_pctype pctype;
	int ret = 0;

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
			" check the mode in fdir_conf.");
		return -ENOTSUP;
	}

	if (!I40E_VALID_FLOW(filter->input.flow_type)) {
		PMD_DRV_LOG(ERR, "invalid flow_type input.");
		return -EINVAL;
	}
	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
		filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}

	/* rebuild the programming packet in the shared memzone buffer */
	memset(pkt, 0, I40E_FDIR_PKT_LEN);

	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		return ret;
	}
	pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		return ret;
	}
	return ret;
}
/*
* i40e_fdir_filter_programming - Program a flow director filter rule.
* Is done by Flow Director Programming Descriptor followed by packet
* structure that contains the filter fields need to match.
* @pf: board private structure
* @pctype: pctype
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	/* The programming descriptor occupies the current tx_tail slot. */
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	/* QW0: destination queue index, flex payload offset, PCTYPE and
	 * destination VSI. */
	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32(((uint32_t)vsi_id <<
					  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
					  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	/* QW1: ADD vs REMOVE command, drop/forward behavior, FD status
	 * reporting and the match counter index. */
	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status<<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32((pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	/* The data descriptor in the next slot points at the raw packet
	 * previously built into pf->fdir.prg_pkt (dma_addr). */
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	/* Poll for the hardware to mark the data descriptor done (DD). */
	for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
		rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
	}
	if (i >= I40E_FDIR_WAIT_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status*/
	rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " programming status reported.");
		return -ENOSYS;
	}

	return 0;
}
/*
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
static int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t guarant_cnt, best_cnt;
	uint16_t retry;

	/* Kick off the table clear, then poll until the bit deasserts. */
	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);

	for (retry = 0; retry < I40E_FDIR_FLUSH_RETRY; retry++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		if (!(I40E_READ_REG(hw, I40E_PFQF_CTL_1) &
		      I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (retry >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}

	/* Both filter counters must read back as zero after a full flush. */
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				  I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			       I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	}
	PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
/* Report the configured flexible payload layout for the L2/L3/L4 payload
 * layers into FLEX_SET, incrementing *NUM once per layer reported.
 * Word-based hardware offsets are converted back to byte offsets. */
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			struct rte_eth_flex_payload_cfg *flex_set,
			uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			/* Unused field slots have zero size. */
			if (flex_pit->size == 0)
				continue;
			/* Hardware stores offsets/sizes in 16-bit words;
			 * report them to the application in bytes. */
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		/* One entry is emitted per layer, even when no fields are
		 * configured for it. */
		(*num)++;
		ptr++;
	}
}
/* Report the configured flex masks into FLEX_MASK, one entry per valid
 * PCTYPE, counting entries in *NUM.  Full-word masks are expanded to
 * per-byte 0xFF/0x00, then the bitmask entries are ANDed on top. */
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			struct rte_eth_fdir_flex_mask *flex_mask,
			uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		/* Skip PCTYPE values this driver does not expose. */
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
			continue;
		flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
		/* Expand each 16-bit word-mask bit into two mask bytes. */
		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		/* Apply the finer-grained bit masks (stored inverted in HW
		 * form) at their word offsets. */
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}
/*
 * i40e_fdir_info_get - get information of Flow Director
 * @dev: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
 *   the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t nb_flex_set = 0;
	uint16_t nb_flex_mask = 0;

	/* Only perfect-match mode is reported as enabled. */
	fdir->mode = (dev->data->dev_conf.fdir_conf.mode ==
		      RTE_FDIR_MODE_PERFECT) ?
		     RTE_FDIR_MODE_PERFECT : RTE_FDIR_MODE_NONE;

	/* Static capabilities from the function capability block. */
	fdir->guarant_spc = (uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc = (uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;

	/* Current flexible payload / mask configuration. */
	i40e_fdir_info_get_flex_set(pf, fdir->flex_conf.flex_set,
				    &nb_flex_set);
	i40e_fdir_info_get_flex_mask(pf, fdir->flex_conf.flex_mask,
				     &nb_flex_mask);
	fdir->flex_conf.nb_payloads = nb_flex_set;
	fdir->flex_conf.nb_flexmasks = nb_flex_mask;
}
/*
 * i40e_fdir_stats_get - get statistics of Flow Director
 * @dev: ethernet device to get info from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
 *   the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg_val;

	/* One register read yields both filter-usage counters. */
	reg_val = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((reg_val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((reg_val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
/* Apply an FDIR configuration item; only input-set selection is supported. */
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	if (info->info_type != RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT) {
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return i40e_fdir_filter_inset_select(pf,
					     &(info->info.input_set_conf));
}
/*
 * i40e_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: ethernet device structure.
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int err = 0;

	/* Flow director must have been enabled at configure time. */
	if (!(pf->flags & I40E_FLAG_FDIR))
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	/* Every operation except FLUSH carries an argument structure. */
	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		err = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		err = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		err = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		err = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		err = -EINVAL;
		break;
	}
	return err;
}
| gpl-2.0 |
rhuitl/uClinux | user/gdb/sim/common/callback.c | 7 | 25245 | /* Remote target callback routines.
Copyright 1995, 1996, 1997, 2000, 2002, 2003, 2004, 2007, 2008
Free Software Foundation, Inc.
Contributed by Cygnus Solutions.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* This file provides a standard way for targets to talk to the host OS
level. */
#ifdef HAVE_CONFIG_H
#include "cconfig.h"
#endif
#include "ansidecl.h"
#include <stdarg.h>
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#else
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
#endif
#ifdef HAVE_LIMITS_H
/* For PIPE_BUF. */
#include <limits.h>
#endif
#include <errno.h>
#include <fcntl.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "gdb/callback.h"
#include "targ-vals.h"
/* For xmalloc. */
#include "libiberty.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifndef PIPE_BUF
#define PIPE_BUF 512
#endif
/* ??? sim_cb_printf should be cb_printf, but until the callback support is
broken out of the simulator directory, these are here to not require
sim-utils.h. */
void sim_cb_printf PARAMS ((host_callback *, const char *, ...));
void sim_cb_eprintf PARAMS ((host_callback *, const char *, ...));
extern CB_TARGET_DEFS_MAP cb_init_syscall_map[];
extern CB_TARGET_DEFS_MAP cb_init_errno_map[];
extern CB_TARGET_DEFS_MAP cb_init_open_map[];
extern int system PARAMS ((const char *));
static int os_init PARAMS ((host_callback *));
static int os_shutdown PARAMS ((host_callback *));
static int os_unlink PARAMS ((host_callback *, const char *));
static long os_time PARAMS ((host_callback *, long *));
static int os_system PARAMS ((host_callback *, const char *));
static int os_rename PARAMS ((host_callback *, const char *, const char *));
static int os_write_stdout PARAMS ((host_callback *, const char *, int));
static void os_flush_stdout PARAMS ((host_callback *));
static int os_write_stderr PARAMS ((host_callback *, const char *, int));
static void os_flush_stderr PARAMS ((host_callback *));
static int os_write PARAMS ((host_callback *, int, const char *, int));
static int os_read_stdin PARAMS ((host_callback *, char *, int));
static int os_read PARAMS ((host_callback *, int, char *, int));
static int os_open PARAMS ((host_callback *, const char *, int));
static int os_lseek PARAMS ((host_callback *, int, long, int));
static int os_isatty PARAMS ((host_callback *, int));
static int os_get_errno PARAMS ((host_callback *));
static int os_close PARAMS ((host_callback *, int));
static void os_vprintf_filtered PARAMS ((host_callback *, const char *, va_list));
static void os_evprintf_filtered PARAMS ((host_callback *, const char *, va_list));
static void os_error PARAMS ((host_callback *, const char *, ...));
static int fdmap PARAMS ((host_callback *, int));
static int fdbad PARAMS ((host_callback *, int));
static int wrap PARAMS ((host_callback *, int));
/* Set the callback copy of errno from what we see now. */
static int
wrap (host_callback *p, int val)
{
  /* Record the host errno for later translation, pass VAL through.  */
  p->last_errno = errno;
  return val;
}
/* Make sure the FD provided is ok. If not, return non-zero
and set errno. */
static int
fdbad (host_callback *p, int fd)
{
  /* FD indexes fdmap[], ispipe[] and pipe_buffer[], which all have
     MAX_CALLBACK_FDS entries, so MAX_CALLBACK_FDS itself is already out
     of range.  The previous "fd > MAX_CALLBACK_FDS" test let that value
     through, allowing a one-past-the-end read in callers; only the
     fd_buddy[] array has the extra sentinel slot.  */
  if (fd < 0 || fd >= MAX_CALLBACK_FDS || p->fd_buddy[fd] < 0)
    {
      p->last_errno = EINVAL;
      return -1;
    }
  return 0;
}
/* Translate target fd FD to its underlying host descriptor.  */
static int
fdmap (host_callback *p, int fd)
{
  return p->fdmap[fd];
}
/* Close target fd FD.  Simulated pipe ends tear down the in-memory pipe
   state instead of touching a host descriptor.  Returns 0 on success or
   -1 with p->last_errno set.  */
static int
os_close (p, fd)
     host_callback *p;
     int fd;
{
  int result;
  int i, next;

  result = fdbad (p, fd);
  if (result)
    return result;
  /* If this file descripter has one or more buddies (originals /
     duplicates from a dup), just remove it from the circular list.  */
  for (i = fd; (next = p->fd_buddy[i]) != fd; )
    i = next;
  if (fd != i)
    p->fd_buddy[i] = p->fd_buddy[fd];
  else
    {
      /* Last member of its buddy ring: really release the resource.  */
      if (p->ispipe[fd])
	{
	  /* ispipe[] stores the write fd (positive) on the read side and
	     the negated read fd on the write side.  */
	  int other = p->ispipe[fd];
	  int reader, writer;

	  if (other > 0)
	    {
	      /* Closing the read side.  */
	      reader = fd;
	      writer = other;
	    }
	  else
	    {
	      /* Closing the write side.  */
	      writer = fd;
	      reader = -other;
	    }

	  /* If there was data in the buffer, make a last "now empty"
	     call, then deallocate data.  */
	  if (p->pipe_buffer[writer].buffer != NULL)
	    {
	      (*p->pipe_empty) (p, reader, writer);
	      free (p->pipe_buffer[writer].buffer);
	      p->pipe_buffer[writer].buffer = NULL;
	    }

	  /* Clear pipe data for this side.  */
	  p->pipe_buffer[fd].size = 0;
	  p->ispipe[fd] = 0;

	  /* If this was the first close, mark the other side as the
	     only remaining side.  */
	  if (fd != abs (other))
	    p->ispipe[abs (other)] = -other;
	  p->fd_buddy[fd] = -1;
	  return 0;
	}
      result = wrap (p, close (fdmap (p, fd)));
    }
  p->fd_buddy[fd] = -1;

  return result;
}
/* taken from gdb/util.c:notice_quit() - should be in a library */

/* Poll the host keyboard for a user interrupt request; nonzero means the
   simulation should stop.  Only implemented for DJGPP (__GO32__) and MSVC
   hosts; elsewhere the callback slot is a null pointer via the #define
   below.  */
#if defined(__GO32__) || defined (_MSC_VER)
static int
os_poll_quit (p)
     host_callback *p;
{
#if defined(__GO32__)
  int kbhit ();
  int getkey ();
  if (kbhit ())
    {
      int k = getkey ();
      /* k == 1 is CTRL-A, k == 2 is CTRL-B (see the message below).  */
      if (k == 1)
	{
	  return 1;
	}
      else if (k == 2)
	{
	  return 1;
	}
      else
	{
	  sim_cb_eprintf (p, "CTRL-A to quit, CTRL-B to quit harder\n");
	}
    }
#endif
#if defined (_MSC_VER)
  /* NB - this will not compile! */
  int k = win32pollquit();
  if (k == 1)
    return 1;
  else if (k == 2)
    return 1;
#endif
  return 0;
}
#else
#define os_poll_quit 0
#endif /* defined(__GO32__) || defined(_MSC_VER) */
/* Return the last saved host errno, translated to the target's numbering.  */
static int
os_get_errno (host_callback *p)
{
  return cb_host_to_target_errno (p, p->last_errno);
}
static int
os_isatty (host_callback *p, int fd)
{
  int result = fdbad (p, fd);

  if (result == 0)
    result = wrap (p, isatty (fdmap (p, fd)));
  return result;
}
static int
os_lseek (p, fd, off, way)
     host_callback *p;
     int fd;
     long off;
     int way;
{
  int result;

  result = fdbad (p, fd);
  if (result)
    return result;
  /* Route through wrap() so a failing lseek records the host errno in
     p->last_errno, consistent with every other file operation here; the
     old code called lseek directly, leaving last_errno stale on error.  */
  result = wrap (p, lseek (fdmap (p, fd), off, way));
  return result;
}
/* Open NAME on the host, allocating the first free target fd slot.  */
static int
os_open (host_callback *p, const char *name, int flags)
{
  int i;

  for (i = 0; i < MAX_CALLBACK_FDS; i++)
    if (p->fd_buddy[i] < 0)
      {
	int f = open (name, cb_target_to_host_open (p, flags), 0644);

	if (f < 0)
	  {
	    p->last_errno = errno;
	    return f;
	  }
	/* Slot allocated: a single-member buddy ring mapping to F.  */
	p->fd_buddy[i] = i;
	p->fdmap[i] = f;
	return i;
      }

  p->last_errno = EMFILE;
  return -1;
}
/* Read up to LEN bytes from target fd FD into BUF.  A simulated pipe
   read consumes the write side's in-memory buffer; anything else maps
   to a host read().  Returns the byte count, 0 at EOF, or -1.  */
static int
os_read (p, fd, buf, len)
     host_callback *p;
     int fd;
     char *buf;
     int len;
{
  int result;

  result = fdbad (p, fd);
  if (result)
    return result;
  if (p->ispipe[fd])
    {
      /* On the read side, ispipe[fd] holds the write-end fd.  */
      int writer = p->ispipe[fd];

      /* Can't read from the write-end.  */
      if (writer < 0)
	{
	  p->last_errno = EBADF;
	  return -1;
	}

      /* Nothing to read if nothing is written.  */
      if (p->pipe_buffer[writer].size == 0)
	return 0;

      /* Truncate read request size to buffer size minus what's already
	 read.  */
      if (len > p->pipe_buffer[writer].size - p->pipe_buffer[fd].size)
	len = p->pipe_buffer[writer].size - p->pipe_buffer[fd].size;

      /* pipe_buffer[fd].size on the read side tracks bytes consumed.  */
      memcpy (buf, p->pipe_buffer[writer].buffer + p->pipe_buffer[fd].size,
	      len);

      /* Account for what we just read.  */
      p->pipe_buffer[fd].size += len;

      /* If we've read everything, empty and deallocate the buffer and
	 signal buffer-empty to client.  (This isn't expected to be a
	 hot path in the simulator, so we don't hold on to the buffer.)  */
      if (p->pipe_buffer[fd].size == p->pipe_buffer[writer].size)
	{
	  free (p->pipe_buffer[writer].buffer);
	  p->pipe_buffer[writer].buffer = NULL;
	  p->pipe_buffer[fd].size = 0;
	  p->pipe_buffer[writer].size = 0;
	  (*p->pipe_empty) (p, fd, writer);
	}

      return len;
    }

  result = wrap (p, read (fdmap (p, fd), buf, len));
  return result;
}
/* Read LEN bytes of target stdin straight from host fd 0.  */
static int
os_read_stdin (host_callback *p, char *buf, int len)
{
  return wrap (p, read (0, buf, len));
}
/* Write LEN bytes from BUF to target fd FD.  Pipe writes are buffered
   in memory; fds mapped to host 1/2 are routed through the
   write_stdout/write_stderr callbacks.  Returns bytes written or -1.  */
static int
os_write (p, fd, buf, len)
     host_callback *p;
     int fd;
     const char *buf;
     int len;
{
  int result;
  int real_fd;

  result = fdbad (p, fd);
  if (result)
    return result;
  if (p->ispipe[fd])
    {
      /* On the write side, ispipe[fd] holds the negated read-end fd.  */
      int reader = -p->ispipe[fd];

      /* Can't write to the read-end.  */
      if (reader < 0)
	{
	  p->last_errno = EBADF;
	  return -1;
	}

      /* Can't write to pipe with closed read end.
	 FIXME: We should send a SIGPIPE.  */
      if (reader == fd)
	{
	  p->last_errno = EPIPE;
	  return -1;
	}

      /* As a sanity-check, we bail out it the buffered contents is much
	 larger than the size of the buffer on the host.  We don't want
	 to run out of memory in the simulator due to a target program
	 bug if we can help it.  Unfortunately, regarding the value that
	 reaches the simulated program, it's no use returning *less*
	 than the requested amount, because cb_syscall loops calling
	 this function until the whole amount is done.  */
      if (p->pipe_buffer[fd].size + len > 10 * PIPE_BUF)
	{
	  p->last_errno = EFBIG;
	  return -1;
	}

      /* Grow the buffer and append the new data.  */
      p->pipe_buffer[fd].buffer
	= xrealloc (p->pipe_buffer[fd].buffer, p->pipe_buffer[fd].size + len);
      memcpy (p->pipe_buffer[fd].buffer + p->pipe_buffer[fd].size,
	      buf, len);
      p->pipe_buffer[fd].size += len;

      (*p->pipe_nonempty) (p, reader, fd);
      return len;
    }

  real_fd = fdmap (p, fd);
  switch (real_fd)
    {
    default:
      result = wrap (p, write (real_fd, buf, len));
      break;
    case 1:
      result = p->write_stdout (p, buf, len);
      break;
    case 2:
      result = p->write_stderr (p, buf, len);
      break;
    }
  return result;
}
/* Default stdout sink: write through the buffered stdio stream.  */
static int
os_write_stdout (host_callback *p ATTRIBUTE_UNUSED, const char *buf, int len)
{
  return fwrite (buf, 1, len, stdout);
}
/* Flush the buffered stdout stream.  */
static void
os_flush_stdout (host_callback *p ATTRIBUTE_UNUSED)
{
  fflush (stdout);
}
/* Default stderr sink: write through the buffered stdio stream.  */
static int
os_write_stderr (host_callback *p ATTRIBUTE_UNUSED, const char *buf, int len)
{
  return fwrite (buf, 1, len, stderr);
}
/* Flush the stderr stream.  */
static void
os_flush_stderr (host_callback *p ATTRIBUTE_UNUSED)
{
  fflush (stderr);
}
/* Rename F1 to F2 on the host filesystem.  */
static int
os_rename (host_callback *p, const char *f1, const char *f2)
{
  return wrap (p, rename (f1, f2));
}
/* Run command S via the host shell; the caller vouches for S.  */
static int
os_system (host_callback *p, const char *s)
{
  return wrap (p, system (s));
}
/* Return the host's current time, optionally storing it through T.  */
static long
os_time (p, t)
     host_callback *p;
     long *t;
{
  /* NOTE(review): passes a long* where time() expects time_t*; this
     assumes the two types are layout-compatible on the host — TODO
     confirm on hosts where time_t is not long.  */
  return wrap (p, time (t));
}
/* Remove file F1 from the host filesystem.  */
static int
os_unlink (host_callback *p, const char *f1)
{
  return wrap (p, unlink (f1));
}
static int
os_stat (host_callback *p, const char *file, struct stat *buf)
{
  /* ??? There is an issue of when to translate to the target layout.
     One could do that inside this function, or one could have the
     caller do it.  It's more flexible to let the caller do it, though
     I'm not sure the flexibility will ever be useful.  */
  return wrap (p, stat (file, buf));
}
/* fstat() for target fd FD.  Simulated pipe fds get a synthesized
   struct stat (S_IFIFO, current-time stamps); real fds go to the host.  */
static int
os_fstat (p, fd, buf)
     host_callback *p;
     int fd;
     struct stat *buf;
{
  if (fdbad (p, fd))
    return -1;

  if (p->ispipe[fd])
    {
#if defined (HAVE_STRUCT_STAT_ST_ATIME) || defined (HAVE_STRUCT_STAT_ST_CTIME) || defined (HAVE_STRUCT_STAT_ST_MTIME)
      time_t t = (*p->time) (p, NULL);
#endif

      /* We have to fake the struct stat contents, since the pipe is
	 made up in the simulator.  */
      memset (buf, 0, sizeof (*buf));

#ifdef HAVE_STRUCT_STAT_ST_MODE
      buf->st_mode = S_IFIFO;
#endif

      /* If more accurate tracking than current-time is needed (for
	 example, on GNU/Linux we get accurate numbers), the p->time
	 callback (which may be something other than os_time) should
	 happen for each read and write, and we'd need to keep track of
	 atime, ctime and mtime.  */
#ifdef HAVE_STRUCT_STAT_ST_ATIME
      buf->st_atime = t;
#endif
#ifdef HAVE_STRUCT_STAT_ST_CTIME
      buf->st_ctime = t;
#endif
#ifdef HAVE_STRUCT_STAT_ST_MTIME
      buf->st_mtime = t;
#endif
      return 0;
    }

  /* ??? There is an issue of when to translate to the target layout.
     One could do that inside this function, or one could have the
     caller do it.  It's more flexible to let the caller do it, though
     I'm not sure the flexibility will ever be useful.  */
  return wrap (p, fstat (fdmap (p, fd), buf));
}
/* lstat() FILE on the host.  Hosts without lstat() fall back to stat(),
   so symlinks are then followed rather than reported.  */
static int
os_lstat (p, file, buf)
     host_callback *p;
     const char *file;
     struct stat *buf;
{
  /* NOTE: hpn/2004-12-12: Same issue here as with os_fstat.  */
#ifdef HAVE_LSTAT
  return wrap (p, lstat (file, buf));
#else
  return wrap (p, stat (file, buf));
#endif
}
/* ftruncate() for target fd FD; pipes (and hosts without ftruncate)
   report EINVAL.  */
static int
os_ftruncate (p, fd, len)
     host_callback *p;
     int fd;
     long len;
{
  int result;

  /* Validate FD before indexing ispipe[]: the old code read
     p->ispipe[fd] first, an out-of-bounds access when FD was invalid.  */
  result = fdbad (p, fd);
  if (result)
    return result;
  if (p->ispipe[fd])
    {
      p->last_errno = EINVAL;
      return -1;
    }
#ifdef HAVE_FTRUNCATE
  result = wrap (p, ftruncate (fdmap (p, fd), len));
#else
  p->last_errno = EINVAL;
  result = -1;
#endif
  return result;
}
/* truncate() FILE to LEN bytes; EINVAL when the host lacks truncate().  */
static int
os_truncate (p, file, len)
     host_callback *p;
     const char *file;
     long len;
{
#ifdef HAVE_TRUNCATE
  return wrap (p, truncate (file, len));
#else
  p->last_errno = EINVAL;
  return -1;
#endif
}
/* Create a simulated pipe: FILEDES[0] gets the read end, FILEDES[1] the
   write end.  Both are target fds with no backing host descriptor.
   Returns 0, or -1 with last_errno = EMFILE when no slots are free.  */
static int
os_pipe (p, filedes)
     host_callback *p;
     int *filedes;
{
  int i;

  /* We deliberately don't use fd 0.  It's probably stdin anyway.  */
  for (i = 1; i < MAX_CALLBACK_FDS; i++)
    {
      int j;

      if (p->fd_buddy[i] < 0)
	for (j = i + 1; j < MAX_CALLBACK_FDS; j++)
	  if (p->fd_buddy[j] < 0)
	    {
	      /* Found two free fd:s.  Set stat to allocated and mark
		 pipeness.  */
	      p->fd_buddy[i] = i;
	      p->fd_buddy[j] = j;
	      /* Read side stores the write fd; write side stores the
		 negated read fd.  */
	      p->ispipe[i] = j;
	      p->ispipe[j] = -i;
	      filedes[0] = i;
	      filedes[1] = j;

	      /* Poison the FD map to make bugs apparent.  */
	      p->fdmap[i] = -1;
	      p->fdmap[j] = -1;
	      return 0;
	    }
    }

  p->last_errno = EMFILE;
  return -1;
}
/* Stub functions for pipe support. They should always be overridden in
targets using the pipe support, but that's up to the target. */
/* Called when the simulator says that the pipe at (reader, writer) is
now empty (so the writer should leave its waiting state). */
static void
os_pipe_empty (host_callback *p, int reader, int writer)
{
  /* Default no-op stub; targets using pipe support override this.  */
}
/* Called when the simulator says the pipe at (reader, writer) is now
non-empty (so the writer should wait). */
static void
os_pipe_nonempty (host_callback *p, int reader, int writer)
{
  /* Default no-op stub; targets using pipe support override this.  */
}
/* Reset all target fd state: clear pipe bookkeeping and close open host
   descriptors, except the standard ones.  Always returns 1.  */
static int
os_shutdown (p)
     host_callback *p;
{
  int i, next, j;

  for (i = 0; i < MAX_CALLBACK_FDS; i++)
    {
      int do_close = 1;

      /* Zero out all pipe state.  Don't call callbacks for non-empty
	 pipes; the target program has likely terminated at this point
	 or we're called at initialization time.  */
      p->ispipe[i] = 0;
      p->pipe_buffer[i].size = 0;
      p->pipe_buffer[i].buffer = NULL;

      next = p->fd_buddy[i];
      if (next < 0)
	continue;
      /* Walk the dup() buddy ring unlinking every member; the ring that
	 passes through the MAX_CALLBACK_FDS sentinel slot holds the
	 standard descriptors, which must not be closed.  */
      do
	{
	  j = next;
	  if (j == MAX_CALLBACK_FDS)
	    do_close = 0;
	  next = p->fd_buddy[j];
	  p->fd_buddy[j] = -1;
	  /* At the initial call of os_init, we got -1, 0, 0, 0, ...  */
	  if (next < 0)
	    {
	      p->fd_buddy[i] = -1;
	      do_close = 0;
	      break;
	    }
	}
      while (j != i);
      if (do_close)
	close (p->fdmap[i]);
    }

  return 1;
}
/* (Re-)initialize callback state: map target fds 0-2 onto the host's
   stdin/stdout/stderr, link them into one buddy ring closed through the
   MAX_CALLBACK_FDS sentinel slot, and install the default syscall,
   errno and open-flag translation tables.  Always returns 1.  */
static int
os_init (p)
     host_callback *p;
{
  int i;

  os_shutdown (p);
  for (i = 0; i < 3; i++)
    {
      p->fdmap[i] = i;
      p->fd_buddy[i] = i - 1;
    }
  /* Close the ring: 0 -> sentinel -> 2 -> 1 -> 0.  */
  p->fd_buddy[0] = MAX_CALLBACK_FDS;
  p->fd_buddy[MAX_CALLBACK_FDS] = 2;

  p->syscall_map = cb_init_syscall_map;
  p->errno_map = cb_init_errno_map;
  p->open_map = cb_init_open_map;

  return 1;
}
/* DEPRECATED */
/* VARARGS */
static void
os_printf_filtered (host_callback *p ATTRIBUTE_UNUSED, const char *format, ...)
{
  va_list args;

  /* Forward the variadic arguments to stdout.  */
  va_start (args, format);
  vprintf (format, args);
  va_end (args);
}
/* VARARGS */
static void
os_vprintf_filtered (host_callback *p ATTRIBUTE_UNUSED, const char *format, va_list args)
{
  /* Print to stdout with a caller-supplied va_list.  */
  vfprintf (stdout, format, args);
}
/* VARARGS */
/* Print to stderr with a caller-supplied va_list.  */
static void
os_evprintf_filtered (host_callback *p ATTRIBUTE_UNUSED, const char *format, va_list args)
{
  vfprintf (stderr, format, args);
}
/* VARARGS */
/* Report a fatal error on stderr and terminate the process.  */
static void
os_error (host_callback *p ATTRIBUTE_UNUSED, const char *format, ...)
{
  va_list args;

  va_start (args, format);
  vfprintf (stderr, format, args);
  fputc ('\n', stderr);
  va_end (args);

  exit (1);
}
/* Default callback vector.  NOTE: this is a positional initializer, so
   the order below must match the field order of the host_callback
   struct in gdb/callback.h exactly.  */
host_callback default_callback =
{
  os_close,
  os_get_errno,
  os_isatty,
  os_lseek,
  os_open,
  os_read,
  os_read_stdin,
  os_rename,
  os_system,
  os_time,
  os_unlink,
  os_write,
  os_write_stdout,
  os_flush_stdout,
  os_write_stderr,
  os_flush_stderr,
  os_stat,
  os_fstat,
  os_lstat,
  os_ftruncate,
  os_truncate,
  os_pipe,
  os_pipe_empty,
  os_pipe_nonempty,
  os_poll_quit,
  os_shutdown,
  os_init,
  os_printf_filtered, /* deprecated */
  os_vprintf_filtered,
  os_evprintf_filtered,
  os_error,
  0, /* last errno */
  { 0, }, /* fdmap */
  /* NB: only fd_buddy[0] is -1 here, the rest are zero-filled by C;
     os_init/os_shutdown normalize the whole array before use.  */
  { -1, }, /* fd_buddy */
  { 0, }, /* ispipe */
  { { 0, 0 }, }, /* pipe_buffer */
  0, /* syscall_map */
  0, /* errno_map */
  0, /* open_map */
  0, /* signal_map */
  0, /* stat_map */
  /* Defaults expected to be overridden at initialization, where needed.  */
  BFD_ENDIAN_UNKNOWN, /* target_endian */
  4, /* target_sizeof_int */
  HOST_CALLBACK_MAGIC,
};
/* Read in a file describing the target's system call values.
E.g. maybe someone will want to use something other than newlib.
This assumes that the basic system call recognition and value passing/
returning is supported. So maybe some coding/recompilation will be
necessary, but not as much.
If an error occurs, the existing mapping is not changed. */
CB_RC
cb_read_target_syscall_maps (cb, file)
     host_callback *cb;
     const char *file;
{
  CB_TARGET_DEFS_MAP *syscall_map, *errno_map, *open_map, *signal_map;
  const char *stat_map;
  FILE *f;

  if ((f = fopen (file, "r")) == NULL)
    return CB_RC_ACCESS;

  /* ... read in and parse file ... */

  fclose (f);
  return CB_RC_NO_MEM; /* FIXME:wip */

  /* NOTE(review): everything below is unreachable until the parser above
     is written — at that point the local *_map variables are still
     uninitialized, so the tail must not be enabled as-is.  */

  /* Free storage allocated for any existing maps.  */
  if (cb->syscall_map)
    free (cb->syscall_map);
  if (cb->errno_map)
    free (cb->errno_map);
  if (cb->open_map)
    free (cb->open_map);
  if (cb->signal_map)
    free (cb->signal_map);
  if (cb->stat_map)
    free ((PTR) cb->stat_map);

  cb->syscall_map = syscall_map;
  cb->errno_map = errno_map;
  cb->open_map = open_map;
  cb->signal_map = signal_map;
  cb->stat_map = stat_map;

  return CB_RC_OK;
}
/* Translate the target's version of a syscall number to the host's.
This isn't actually the host's version, rather a canonical form.
??? Perhaps this should be renamed to ..._canon_syscall. */
int
cb_target_to_host_syscall (host_callback *cb, int target_val)
{
  CB_TARGET_DEFS_MAP *m;

  /* The map is terminated by a -1 target value.  */
  for (m = cb->syscall_map; m->target_val != -1; m++)
    if (m->target_val == target_val)
      return m->host_val;

  return -1;
}
/* FIXME: sort tables if large.
Alternatively, an obvious improvement for errno conversion is
to machine generate a function with a large switch(). */
/* Translate the host's version of errno to the target's. */
int
cb_host_to_target_errno (host_callback *cb, int host_val)
{
  CB_TARGET_DEFS_MAP *m;

  /* The map is terminated by a zero host value.  */
  for (m = cb->errno_map; m->host_val; m++)
    if (m->host_val == host_val)
      return m->target_val;

  /* ??? Which error to return in this case is up for grabs.
     Note that some missing values may have standard alternatives.
     For now return 0 and require caller to deal with it.  */
  return 0;
}
/* Given a set of target bitmasks for the open system call,
return the host equivalent.
Mapping open flag values is best done by looping so there's no need
to machine generate this function. */
int
cb_target_to_host_open (cb, target_val)
     host_callback *cb;
     int target_val;
{
  int host_val = 0;
  CB_TARGET_DEFS_MAP *m;

  /* OR in the host flag for every target flag present in TARGET_VAL.  */
  for (m = &cb->open_map[0]; m->host_val != -1; ++m)
    {
      switch (m->target_val)
	{
	  /* O_RDONLY can be (and usually is) 0 which needs to be treated
	     specially.  */
	case TARGET_O_RDONLY :
	case TARGET_O_WRONLY :
	case TARGET_O_RDWR :
	  /* The access mode is a 2-bit field, so it is compared against
	     the whole field rather than bit-tested.  */
	  if ((target_val & (TARGET_O_RDONLY | TARGET_O_WRONLY | TARGET_O_RDWR))
	      == m->target_val)
	    host_val |= m->host_val;
	  /* Handle the host/target differentiating between binary and
	     text mode.  Only one case is of importance.  */
#if ! defined (TARGET_O_BINARY) && defined (O_BINARY)
	  host_val |= O_BINARY;
#endif
	  break;
	default :
	  if ((m->target_val & target_val) == m->target_val)
	    host_val |= m->host_val;
	  break;
	}
    }

  return host_val;
}
/* Utility for e.g. cb_host_to_target_stat to store values in the target's
stat struct. */
void
cb_store_target_endian (host_callback *cb, char *p, int size,
			long val /* ??? must be as big as target word size */)
{
  if (cb->target_endian == BFD_ENDIAN_BIG)
    {
      /* Most significant byte first: fill from the end backwards.  */
      char *q = p + size;

      while (size-- > 0)
	{
	  *--q = val;
	  val >>= 8;
	}
    }
  else
    {
      /* Least significant byte first.  */
      while (size-- > 0)
	{
	  *p++ = val;
	  val >>= 8;
	}
    }
}
/* Translate a host's stat struct into a target's.
If HS is NULL, just compute the length of the buffer required,
TS is ignored.
The result is the size of the target's stat struct,
or zero if an error occurred during the translation. */
int
cb_host_to_target_stat (cb, hs, ts)
     host_callback *cb;
     const struct stat *hs;
     PTR ts;
{
  /* stat_map is a string of "field,size" entries separated by ':',
     e.g. "st_dev,2:st_ino,2:..."; walk it entry by entry.  */
  const char *m = cb->stat_map;
  char *p;

  if (hs == NULL)
    ts = NULL;
  p = ts;

  while (m)
    {
      char *q = strchr (m, ',');
      int size;

      /* FIXME: Use sscanf? */
      if (q == NULL)
	{
	  /* FIXME: print error message */
	  return 0;
	}
      size = atoi (q + 1);
      if (size == 0)
	{
	  /* FIXME: print error message */
	  return 0;
	}

      if (hs != NULL)
	{
	  if (0)
	    ;
	  /* Defined here to avoid emacs indigestion on a lone "else".  */
#undef ST_x
#define ST_x(FLD) \
	  else if (strncmp (m, #FLD, q - m) == 0) \
	    cb_store_target_endian (cb, p, size, hs->FLD)

#ifdef HAVE_STRUCT_STAT_ST_DEV
	  ST_x (st_dev);
#endif
#ifdef HAVE_STRUCT_STAT_ST_INO
	  ST_x (st_ino);
#endif
#ifdef HAVE_STRUCT_STAT_ST_MODE
	  ST_x (st_mode);
#endif
#ifdef HAVE_STRUCT_STAT_ST_NLINK
	  ST_x (st_nlink);
#endif
#ifdef HAVE_STRUCT_STAT_ST_UID
	  ST_x (st_uid);
#endif
#ifdef HAVE_STRUCT_STAT_ST_GID
	  ST_x (st_gid);
#endif
#ifdef HAVE_STRUCT_STAT_ST_RDEV
	  ST_x (st_rdev);
#endif
#ifdef HAVE_STRUCT_STAT_ST_SIZE
	  ST_x (st_size);
#endif
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
	  ST_x (st_blksize);
#endif
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
	  ST_x (st_blocks);
#endif
#ifdef HAVE_STRUCT_STAT_ST_ATIME
	  ST_x (st_atime);
#endif
#ifdef HAVE_STRUCT_STAT_ST_MTIME
	  ST_x (st_mtime);
#endif
#ifdef HAVE_STRUCT_STAT_ST_CTIME
	  ST_x (st_ctime);
#endif
#undef ST_x
	  /* FIXME:wip */
	  else
	    /* Unsupported field, store 0.  */
	    cb_store_target_endian (cb, p, size, 0);
	}

      p += size;
      m = strchr (q, ':');
      if (m)
	++m;
    }

  return p - (char *) ts;
}
/* Cover functions to the vfprintf callbacks.
   ??? If one thinks of the callbacks as a subsystem onto itself [or part of
   a larger "remote target subsystem"] with a well defined interface, then
   one would think that the subsystem would provide these.  However, until
   one is allowed to create such a subsystem (with its own source tree
   independent of any particular user), such a critter can't exist.  Thus
   these functions are here for the time being.  */

/* printf-style output routed through callback P's vprintf_filtered hook.  */

void
sim_cb_printf (host_callback *p, const char *fmt, ...)
{
  va_list args;

  va_start (args, fmt);
  p->vprintf_filtered (p, fmt, args);
  va_end (args);
}
/* printf-style error output routed through callback P's
   evprintf_filtered hook.  */

void
sim_cb_eprintf (host_callback *p, const char *fmt, ...)
{
  va_list args;

  va_start (args, fmt);
  p->evprintf_filtered (p, fmt, args);
  va_end (args);
}
/* Return non-zero iff target descriptor FD is valid and maps to the
   simulator's standard input.  */

int
cb_is_stdin (host_callback *cb, int fd)
{
  if (fdbad (cb, fd))
    return 0;

  return fdmap (cb, fd) == 0;
}
/* Return non-zero iff target descriptor FD is valid and maps to the
   simulator's standard output.  */

int
cb_is_stdout (host_callback *cb, int fd)
{
  if (fdbad (cb, fd))
    return 0;

  return fdmap (cb, fd) == 1;
}
/* Return non-zero iff target descriptor FD is valid and maps to the
   simulator's standard error.  */

int
cb_is_stderr (host_callback *cb, int fd)
{
  if (fdbad (cb, fd))
    return 0;

  return fdmap (cb, fd) == 2;
}
| gpl-2.0 |
rdanbrook/nestopia | source/core/board/NstBoardBmcResetBased4in1.cpp | 7 | 2239 | ////////////////////////////////////////////////////////////////////////////////////////
//
// Nestopia - NES/Famicom emulator written in C++
//
// Copyright (C) 2003-2008 Martin Freij
//
// This file is part of Nestopia.
//
// Nestopia is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// Nestopia is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Nestopia; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
////////////////////////////////////////////////////////////////////////////////////////
#include "NstBoard.hpp"
#include "NstBoardBmcResetBased4in1.hpp"
namespace Nes
{
namespace Core
{
namespace Boards
{
namespace Bmc
{
#ifdef NST_MSVC_OPTIMIZE
#pragma optimize("s", on)
#endif
void ResetBased4in1::SubReset(const bool hard)
{
	// Hard reset returns to the first game; each soft reset advances
	// the 2-bit selector, cycling through the four games.
	resetSwitch = hard ? 0 : (resetSwitch + 1) & 0x3;

	chr.SwapBank<SIZE_8K,0x0000>( resetSwitch );
	prg.SwapBanks<SIZE_16K,0x0000>( resetSwitch, resetSwitch );
}
// Restore the reset-driven bank selector from a saved state.  Only
// chunks under this board's 'BR4' base chunk are consumed.
void ResetBased4in1::SubLoad(State::Loader& state,const dword baseChunk)
{
	NST_VERIFY( baseChunk == (AsciiId<'B','R','4'>::V) );

	if (baseChunk == AsciiId<'B','R','4'>::V)
	{
		while (const dword chunk = state.Begin())
		{
			// 'REG' carries the 2-bit reset counter; mask defensively.
			if (chunk == AsciiId<'R','E','G'>::V)
				resetSwitch = state.Read8() & 0x3;

			state.End();
		}
	}
}
// Persist the reset counter as 'BR4' -> 'REG' in the save state.
void ResetBased4in1::SubSave(State::Saver& state) const
{
	state.Begin( AsciiId<'B','R','4'>::V ).Begin( AsciiId<'R','E','G'>::V ).Write8( resetSwitch ).End().End();
}
#ifdef NST_MSVC_OPTIMIZE
#pragma optimize("", on)
#endif
}
}
}
}
| gpl-2.0 |
atsidaev/sdcc-z80-gas | device/lib/pic16/libc/stdlib/x_ftoa.c | 7 | 4286 | /*-------------------------------------------------------------------------
x_ftoa.c - wrapper function to use _convert_float
Copyright (C) 2004, Vangelis Rokas <vrokas at otenet.gr>
This library is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this library; see the file COPYING. If not, write to the
Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
As a special exception, if you link this library with other files,
some of which are compiled with SDCC, to produce an executable,
this library does not by itself cause the resulting executable to
be covered by the GNU General Public License. This exception does
not however invalidate any other reasons why the executable file
might be covered by the GNU General Public License.
-------------------------------------------------------------------------*/
#include <float.h>
extern convert_frac;
extern convert_int;
/* char x_ftoa(float, __data char *, unsigned char, unsigned char); */
extern POSTDEC1;
extern PLUSW2;
extern FSR0L;
extern FSR0H;
extern PREINC1;
extern PREINC2;
extern FSR2L;
extern FSR2H;
#define _vv0x00 0x00
#define _vv0x01 0x01
#define _vv0x02 0x02
#define _vv0x03 0x03
#define _vv0x04 0x04
/* do not warn about unreferenced arguments/missing return values */
#pragma save
#pragma disable_warning 59
#pragma disable_warning 85
/* Wrapper around the assembly routine _convert_int.
   Converts NUM to its decimal digit string at BUFFER and returns the
   character count (left in WREG by _convert_int).
   The asm saves the compiler's virtual registers 0x00-0x03 on the
   hardware stack, copies the C arguments from the caller's frame
   (offsets 2..7 off FSR2) into the registers/FSR0 that _convert_int
   expects, makes the call, then restores the virtual registers.
   NOTE(review): frame offsets assume SDCC's PIC16 stack layout for
   these parameter types — confirm against the calling convention.  */
char x_cnvint_wrap(unsigned long num, __data char *buffer)
{
  __asm
    /* Save virtual registers used for argument passing.  */
    movff _vv0x00, _POSTDEC1
    movff _vv0x01, _POSTDEC1
    movff _vv0x02, _POSTDEC1
    movff _vv0x03, _POSTDEC1

    /* Load the 4 bytes of NUM into vv0x00..vv0x03.  */
    movlw 2
    movff _PLUSW2, _vv0x00
    movlw 3
    movff _PLUSW2, _vv0x01
    movlw 4
    movff _PLUSW2, _vv0x02
    movlw 5
    movff _PLUSW2, _vv0x03

    /* Load BUFFER's address into FSR0.  */
    movlw 6
    movff _PLUSW2, _FSR0L
    movlw 7
    movff _PLUSW2, _FSR0H

    call _convert_int

    /* return value is already in WREG */

    /* Restore virtual registers in reverse order.  */
    movff _PREINC1, _vv0x03
    movff _PREINC1, _vv0x02
    movff _PREINC1, _vv0x01
    movff _PREINC1, _vv0x00
  __endasm ;
}
/* Wrapper around the assembly routine _convert_frac.
   Converts the fractional mantissa NUM to digits at BUFFER with PREC
   controlling precision, returning the character count from WREG.
   Same register-marshalling scheme as x_cnvint_wrap, with the extra
   PREC byte passed in vv0x04 (frame offset 8).  */
char x_cnvfrac_wrap(unsigned long num, __data char *buffer, unsigned char prec)
{
  /* Referenced only to silence "unused argument" diagnostics; the asm
     below reads them directly from the stack frame.  */
  num;
  buffer;
  prec;

  __asm
    /* Save virtual registers used for argument passing.  */
    movff _vv0x00, _POSTDEC1
    movff _vv0x01, _POSTDEC1
    movff _vv0x02, _POSTDEC1
    movff _vv0x03, _POSTDEC1
    movff _vv0x04, _POSTDEC1

    /* Load the 4 bytes of NUM into vv0x00..vv0x03.  */
    movlw 2
    movff _PLUSW2, _vv0x00
    movlw 3
    movff _PLUSW2, _vv0x01
    movlw 4
    movff _PLUSW2, _vv0x02
    movlw 5
    movff _PLUSW2, _vv0x03

    /* Load BUFFER's address into FSR0.  */
    movlw 6
    movff _PLUSW2, _FSR0L
    movlw 7
    movff _PLUSW2, _FSR0H

    /* PREC goes in vv0x04.  */
    movlw 8
    movff _PLUSW2, _vv0x04

    call _convert_frac

    /* return value is already in WREG */

    /* Restore virtual registers in reverse order.  */
    movff _PREINC1, _vv0x04
    movff _PREINC1, _vv0x03
    movff _PREINC1, _vv0x02
    movff _PREINC1, _vv0x01
    movff _PREINC1, _vv0x00
  __endasm ;
}
#pragma restore
/* Type-punning view of a float's raw bit pattern, used by x_ftoa to
   pick apart sign, exponent and mantissa.  */
union float_long {
  unsigned long l;
  float f;
};
/* Format NUM as a decimal string in BUFFER (BUFLEN bytes) with PREC
   fractional digits; returns the number of characters produced.
   Works by splitting the float into an integer part (li) and a
   fractional mantissa part (ll), then converting each with the
   assembly helpers above.  SIGNBIT/EXP/MANT/EXCESS come from the
   SDCC <float.h> description of the target float format.  */
char x_ftoa(float num, __data char *buffer, unsigned char buflen, unsigned char prec)
{
  char len;
  char expn;
  unsigned long ll;
  unsigned long li;

//	volatile
  union float_long f_l;

  /* Clear the output buffer so the result is always terminated.  */
  len = buflen;
  while(len--)buffer[len] = 0;

  f_l.f = num;

  /* Emit a leading '-' and continue with the absolute value.  */
  if((f_l.l & SIGNBIT) == SIGNBIT) {
    f_l.l &= ~SIGNBIT;
    *buffer = '-';
    buffer++;
  }

  expn = EXCESS - EXP(f_l.l);	// - 24;
  ll = MANT(f_l.l);
  li = 0;

  /* Shift mantissa bits into the integer accumulator (li) until the
     binary point sits between li and ll.  */
  while( expn ) {
    if(expn < 0) {
      li <<= 1;
      if(ll & 0x00800000UL)li |= 1;
      ll <<= 1;
      expn++;
    } else {
      ll >>= 1;
      expn--;
    }
  }

  /* Integer part; "0" when the value is purely fractional.  */
  if(li)
    len = x_cnvint_wrap(li, buffer);
  else {
    *buffer = '0'; len = 1;
  }

  buffer += len;

  if(prec) {
    *buffer = '.'; len++;
    buffer++;
    /* NOTE(review): 24-prec scales the 24-bit mantissa to the
       requested digit count — confirm against _convert_frac.  */
    len += x_cnvfrac_wrap(ll, buffer, 24-prec);
    buffer[ prec ] = '\0';
  }

  return (len);
}
| gpl-2.0 |
SebDieBln/QGIS | src/gui/qgsextentgroupbox.cpp | 7 | 3831 | #include "qgsextentgroupbox.h"
#include "qgscoordinatetransform.h"
#include "qgsrasterblock.h"
// Collapsible group box exposing a rectangular extent (xmin/ymin/xmax/ymax)
// with shortcut buttons to copy in the current map-view or the layer's
// original extent.
QgsExtentGroupBox::QgsExtentGroupBox( QWidget* parent )
    : QgsCollapsibleGroupBox( parent )
    , mTitleBase( tr( "Extent" ) )
    , mExtentState( OriginalExtent )
{
  setupUi( this );

  // Only numeric input makes sense for coordinates.
  mXMinLineEdit->setValidator( new QDoubleValidator( this ) );
  mXMaxLineEdit->setValidator( new QDoubleValidator( this ) );
  mYMinLineEdit->setValidator( new QDoubleValidator( this ) );
  mYMaxLineEdit->setValidator( new QDoubleValidator( this ) );

  // Buttons load the map-view / original extent; toggling the checkable
  // box itself switches between "no extent" and the entered one.
  connect( mCurrentExtentButton, SIGNAL( clicked() ), this, SLOT( setOutputExtentFromCurrent() ) );
  connect( mOriginalExtentButton, SIGNAL( clicked() ), this, SLOT( setOutputExtentFromOriginal() ) );
  connect( this, SIGNAL( clicked( bool ) ), this, SLOT( groupBoxClicked() ) );
}
// Remember the layer's native extent and CRS; applied when the user
// presses the "original extent" button.
void QgsExtentGroupBox::setOriginalExtent( const QgsRectangle& originalExtent, const QgsCoordinateReferenceSystem& originalCrs )
{
  mOriginalExtent = originalExtent;
  mOriginalCrs = originalCrs;
}
// Remember the current map-view extent and CRS; applied when the user
// presses the "current extent" button.
void QgsExtentGroupBox::setCurrentExtent( const QgsRectangle& currentExtent, const QgsCoordinateReferenceSystem& currentCrs )
{
  mCurrentExtent = currentExtent;
  mCurrentCrs = currentCrs;
}
// CRS in which the widget displays and reports extents; incoming
// extents in other CRSes are reprojected to it.
void QgsExtentGroupBox::setOutputCrs( const QgsCoordinateReferenceSystem& outputCrs )
{
  mOutputCrs = outputCrs;
}
void QgsExtentGroupBox::setOutputExtent( const QgsRectangle& r, const QgsCoordinateReferenceSystem& srcCrs, ExtentState state )
{
QgsRectangle extent;
if ( mOutputCrs == srcCrs )
{
extent = r;
}
else
{
QgsCoordinateTransform ct( srcCrs, mOutputCrs );
extent = ct.transformBoundingBox( r );
}
mXMinLineEdit->setText( QgsRasterBlock::printValue( extent.xMinimum() ) );
mXMaxLineEdit->setText( QgsRasterBlock::printValue( extent.xMaximum() ) );
mYMinLineEdit->setText( QgsRasterBlock::printValue( extent.yMinimum() ) );
mYMaxLineEdit->setText( QgsRasterBlock::printValue( extent.yMaximum() ) );
mExtentState = state;
if ( isCheckable() && !isChecked() )
setChecked( true );
updateTitle();
emit extentChanged( extent );
}
// Mark the extent as user-entered and notify listeners.
// NOTE(review): presumably connected to the line edits' editing
// signals in the .ui file — confirm against the designer form.
void QgsExtentGroupBox::setOutputExtentFromLineEdit()
{
  mExtentState = UserExtent;

  updateTitle();

  emit extentChanged( outputExtent() );
}
void QgsExtentGroupBox::updateTitle()
{
QString msg;
switch ( mExtentState )
{
case OriginalExtent:
msg = tr( "layer" );
break;
case CurrentExtent:
msg = tr( "map view" );
break;
case UserExtent:
msg = tr( "user defined" );
break;
default:
break;
}
if ( isCheckable() && !isChecked() )
msg = tr( "none" );
msg = tr( "%1 (current: %2)" ).arg( mTitleBase, msg );
setTitle( msg );
}
// Slot: copy the stored map-view extent into the widget.
void QgsExtentGroupBox::setOutputExtentFromCurrent()
{
  setOutputExtent( mCurrentExtent, mCurrentCrs, CurrentExtent );
}
// Slot: copy the stored original layer extent into the widget.
void QgsExtentGroupBox::setOutputExtentFromOriginal()
{
  setOutputExtent( mOriginalExtent, mOriginalCrs, OriginalExtent );
}
// Programmatically set a user-defined extent (given in CRS).
void QgsExtentGroupBox::setOutputExtentFromUser( const QgsRectangle& extent, const QgsCoordinateReferenceSystem& crs )
{
  setOutputExtent( extent, crs, UserExtent );
}
// Slot: react to the checkable box being toggled by the user.
void QgsExtentGroupBox::groupBoxClicked()
{
  if ( !isCheckable() )
    return;

  updateTitle();

  // output extent just went from null to something (or vice versa)
  emit extentChanged( outputExtent() );
}
// Extent currently entered in the widget, or a null rectangle when the
// (checkable) box is unchecked.
QgsRectangle QgsExtentGroupBox::outputExtent() const
{
  if ( isCheckable() && !isChecked() )
    return QgsRectangle();

  const double xMin = mXMinLineEdit->text().toDouble();
  const double yMin = mYMinLineEdit->text().toDouble();
  const double xMax = mXMaxLineEdit->text().toDouble();
  const double yMax = mYMaxLineEdit->text().toDouble();
  return QgsRectangle( xMin, yMin, xMax, yMax );
}
// Set the fixed part of the title (default "Extent") and refresh it.
void QgsExtentGroupBox::setTitleBase( const QString& title )
{
  mTitleBase = title;
  updateTitle();
}
// Fixed part of the title as set by setTitleBase().
QString QgsExtentGroupBox::titleBase() const
{
  return mTitleBase;
}
| gpl-2.0 |
lostemp/android-kernel-v3.10 | kernel/cgroup.c | 7 | 147503 | /*
* Generic process-grouping system.
*
* Based originally on the cpuset system, extracted by Paul Menage
* Copyright (C) 2006 Google, Inc
*
* Notifications support
* Copyright (C) 2009 Nokia Corporation
* Author: Kirill A. Shutemov
*
* Copyright notices from the original cpuset code:
* --------------------------------------------------
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* ---------------------------------------------------
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>
#include <linux/atomic.h>
/* css deactivation bias, makes css->refcnt negative to deny new trygets */
#define CSS_DEACT_BIAS INT_MIN
/*
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
*
* cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
* cgroupfs_root of any cgroup hierarchy - subsys list, flags,
* release_agent_path and so on. Modifying requires both cgroup_mutex and
* cgroup_root_mutex. Readers can acquire either of the two. This is to
* break the following locking order cycle.
*
* A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
* B. namespace_sem -> cgroup_mutex
*
* B happens only through cgroup_show_options() and using cgroup_root_mutex
* breaks it.
*/
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for task_subsys_state_check() */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif
static DEFINE_MUTEX(cgroup_root_mutex);
/*
* Generate an array of cgroup subsystem pointers. At boot time, this is
* populated with the built in subsystems, and modular subsystems are
* registered after that. The mutable section of this array is protected by
* cgroup_mutex.
*/
#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};
/*
* The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
* subsystems that are otherwise unattached - it never has more than a
* single cgroup, and all tasks are part of that cgroup.
*/
static struct cgroupfs_root rootnode;
/*
* cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
*/
struct cfent {
struct list_head node;
struct dentry *dentry;
struct cftype *type;
/* file xattrs */
struct simple_xattrs xattrs;
};
/*
* CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
* cgroup_subsys->use_id != 0.
*/
#define CSS_ID_MAX (65535)
struct css_id {
/*
* The css to which this ID points. This pointer is set to valid value
* after cgroup is populated. If cgroup is removed, this will be NULL.
* This pointer is expected to be RCU-safe because destroy()
* is called after synchronize_rcu(). But for safe use, css_tryget()
* should be used for avoiding race.
*/
struct cgroup_subsys_state __rcu *css;
/*
* ID of this css.
*/
unsigned short id;
/*
* Depth in hierarchy which this ID belongs to.
*/
unsigned short depth;
/*
* ID is freed by RCU. (and lookup routine is RCU safe.)
*/
struct rcu_head rcu_head;
/*
* Hierarchy of CSS ID belongs to.
*/
unsigned short stack[0]; /* Array of Length (depth+1) */
};
/*
* cgroup_event represents events which userspace want to receive.
*/
struct cgroup_event {
/*
* Cgroup which the event belongs to.
*/
struct cgroup *cgrp;
/*
* Control file which the event associated.
*/
struct cftype *cft;
/*
* eventfd to signal userspace about the event.
*/
struct eventfd_ctx *eventfd;
/*
* Each of these stored in a list by the cgroup.
*/
struct list_head list;
/*
* All fields below needed to unregister event when
* userspace closes eventfd.
*/
poll_table pt;
wait_queue_head_t *wqh;
wait_queue_t wait;
struct work_struct remove;
};
/* The list of hierarchy roots */
static LIST_HEAD(roots);
static int root_count;
static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);
/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)
static struct cgroup_name root_cgroup_name = { .name = "/" };
/* This flag indicates whether tasks in the fork and exit paths should
* check for fork/exit handlers to call. This avoids us having to do
* extra work in the fork/exit path if none of the subsystems need to
* be called.
*/
static int need_forkexit_callback __read_mostly;
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
struct cftype cfts[], bool is_add);
/* Map a possibly deactivation-biased refcount back to the real number
 * of references (always >= 0).  Deactivated css's carry CSS_DEACT_BIAS
 * in their stored count.
 */
static int css_unbias_refcnt(int refcnt)
{
	if (refcnt >= 0)
		return refcnt;
	return refcnt - CSS_DEACT_BIAS;
}
/* the current nr of refs, always >= 0 whether @css is deactivated or not
 * (the stored atomic may carry CSS_DEACT_BIAS; strip it before returning) */
static int css_refcnt(struct cgroup_subsys_state *css)
{
	int v = atomic_read(&css->refcnt);

	return css_unbias_refcnt(v);
}
/* convenient tests for these bits */

/* true once cgroup_destroy_locked() has marked @cgrp removed */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Walk up the parent chain from @cgrp looking for @ancestor.  Returns
 * %true if found, including when @cgrp == @ancestor.  This function is
 * safe to call as long as @cgrp and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	for (; cgrp; cgrp = cgrp->parent)
		if (cgrp == ancestor)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(cgroup_is_descendant);
/* a cgroup may be released only when it has both become releasable
 * (lost its last task/child reference) and requested notification */
static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

/* did userspace request release-agent notification for @cgrp? */
static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}
/*
* for_each_subsys() allows you to iterate on each subsystem attached to
* an active hierarchy
*/
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)
/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)
/* directory dentry -> its cgroup (stored in d_fsdata) */
static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/* file dentry -> its cfent (stored in d_fsdata) */
static inline struct cfent *__d_cfe(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/* file dentry -> the cftype describing that control file */
static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return __d_cfe(dentry)->type;
}
/**
* cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
* @cgrp: the cgroup to be checked for liveness
*
* On success, returns true; the mutex should be later unlocked. On
* failure returns false with no lock held.
*/
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	bool alive;

	mutex_lock(&cgroup_mutex);
	alive = !cgroup_is_removed(cgrp);
	if (!alive)
		mutex_unlock(&cgroup_mutex);

	/* on success the caller owns cgroup_mutex and must unlock it */
	return alive;
}
/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
/*
* List running through cg_cgroup_links associated with a
* cgroup, anchored on cgroup->css_sets
*/
struct list_head cgrp_link_list;
struct cgroup *cgrp;
/*
* List running through cg_cgroup_links pointing at a
* single css_set object, anchored on css_set->cg_links
*/
struct list_head cg_link_list;
struct css_set *cg;
};
/* The default css_set - used by init and its children prior to any
* hierarchies being mounted. It contains a pointer to the root state
* for each subsystem. Also used to anchor the list of css_sets. Not
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;
static int cgroup_init_idr(struct cgroup_subsys *ss,
struct cgroup_subsys_state *css);
/* css_set_lock protects the list of css_set objects, and the
* chain of tasks off each css_set. Nests outside task->alloc_lock
* due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;
/*
* hash table for cgroup groups. This improves the performance to find
* an existing css_set. This hash doesn't (currently) take into
* account cgroups in empty hierarchies.
*/
#define CSS_SET_HASH_BITS 7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
/* hash a css_set by the pointer values of its per-subsystem css's;
 * used as the css_set_table key */
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	unsigned long key = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		key += (unsigned long)css[i];
	/* fold the high bits in to spread pointer values across buckets */
	key = (key >> 16) ^ key;

	return key;
}
/* We don't maintain the lists running through each css_set to its
* task until after the first call to cgroup_iter_start(). This
* reduces the fork()/exit() overhead for people who have cgroups
* compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;
/*
 * Drop a reference on @cg.  When the last reference goes away the
 * css_set is unhashed, its cgroup links are torn down and, if
 * @taskexit, emptied notify-on-release cgroups are flagged releasable.
 */
static void __put_css_set(struct css_set *cg, int taskexit)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		/* someone re-got a reference in the window; nothing to do */
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hash_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		struct cgroup *cgrp = link->cgrp;
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);

		/*
		 * We may not be holding cgroup_mutex, and if cgrp->count is
		 * dropped to 0 the cgroup can be destroyed at any time, hence
		 * rcu_read_lock is used to keep it alive.
		 */
		rcu_read_lock();
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		rcu_read_unlock();

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cg, rcu_head);
}
/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

/* drop a reference obtained via get_css_set()/find_css_set() */
static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

/* as put_css_set(), but from task-exit context so emptied cgroups are
 * additionally marked CGRP_RELEASABLE */
static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}
/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
			     struct css_set *old_cg,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in heirarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	/* Walk both link lists in lockstep; they are kept sorted by
	 * hierarchy creation order, so positions correspond. */
	l1 = &cg->cg_links;
	l2 = &old_cg->cg_links;
	while (1) {
		struct cg_cgroup_link *cgl1, *cgl2;
		struct cgroup *cg1, *cg2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cg->cg_links) {
			BUG_ON(l2 != &old_cg->cg_links);
			break;
		} else {
			BUG_ON(l2 == &old_cg->cg_links);
		}
		/* Locate the cgroups associated with these links. */
		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
		cg1 = cgl1->cgrp;
		cg2 = cgl2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cg1->root != cg2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cg1->root == new_cgrp->root) {
			if (cg1 != new_cgrp)
				return false;
		} else {
			if (cg1 != cg2)
				return false;
		}
	}
	return true;
}
/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 *
 * Returns the matching css_set WITHOUT taking a reference; the caller
 * holds css_set_lock and takes its own reference.  NULL if no match.
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct css_set *cg;
	unsigned long key;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. while subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cg, hlist, key) {
		if (!compare_css_sets(cg, oldcg, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cg;
	}

	/* No existing cgroup group matched */
	return NULL;
}
/* free every cg_cgroup_link chained on @tmp (via cgrp_link_list),
 * e.g. the unused remainder from allocate_cg_links() */
static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}
/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error.  On failure nothing is left allocated.
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;

	INIT_LIST_HEAD(tmp);

	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			/* undo the partial allocation */
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}
/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 *
 * Consumes one pre-allocated link from @tmp_cg_links and takes a
 * reference on @cgrp.  Caller holds css_set_lock for writing.
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	link->cgrp = cgrp;
	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cg_link_list, &cg->cg_links);
}
/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held.
 *
 * Returns a referenced css_set (new or pre-existing), or NULL on
 * allocation failure.
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];

	struct list_head tmp_cg_links;

	struct cg_cgroup_link *link;
	unsigned long key;

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		/* substitute the new cgroup for the one in its hierarchy */
		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_cg_links, res, c);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	key = css_set_hash(res->subsys);
	hash_add(css_set_table, &res->hlist, key);

	write_unlock(&css_set_lock);

	return res;
}
/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.  Never returns NULL: every task has
 * a cgroup in every hierarchy (BUG otherwise).
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *css;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	css = task->cgroups;
	if (css == &init_css_set) {
		/* default set: the task is in every hierarchy's root */
		res = &root->top_cgroup;
	} else {
		struct cg_cgroup_link *link;
		list_for_each_entry(link, &css->cg_links, cg_link_list) {
			struct cgroup *c = link->cgrp;
			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}
/*
* There is one global cgroup mutex. We also require taking
* task_lock() when dereferencing a task's cgroup subsys pointers.
* See "The task_lock() exception", at the end of this comment.
*
* A task must hold cgroup_mutex to modify cgroups.
*
* Any task can increment and decrement the count field without lock.
* So in general, code holding cgroup_mutex can't rely on the count
* field not changing. However, if the count goes to zero, then only
* cgroup_attach_task() can increment it again. Because a count of zero
* means that no tasks are currently attached, therefore there is no
* way a task attached to that cgroup can fork (the other way to
* increment the count). So code holding cgroup_mutex can safely
* assume that if the count is zero, it will stay zero. Similarly, if
* a task holds cgroup_mutex on a cgroup with zero count, it
* knows that the cgroup won't be removed, as cgroup_rmdir()
* needs that mutex.
*
* The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
* (usually) take cgroup_mutex. These are the two most performance
* critical pieces of code here. The exception occurs on cgroup_exit(),
* when a task in a notify_on_release cgroup exits. Then cgroup_mutex
* is taken, and if the cgroup count is zero, a usermode call made
* to the release agent with the name of the cgroup (path relative to
* the root of cgroup file system) as the argument.
*
* A cgroup can only be deleted if both its 'count' of using tasks
* is zero, and its list of 'children' cgroups is empty. Since all
* tasks in the system use _some_ cgroup, and since there is always at
* least one task in the system (init, pid == 1), therefore, top_cgroup
* always has either children cgroups and/or using tasks. So we don't
* need a special hack to ensure that top_cgroup cannot be deleted.
*
* The task_lock() exception
*
* The need for this exception arises from the action of
* cgroup_attach_task(), which overwrites one task's cgroup pointer with
* another. It does so using cgroup_mutex, however there are
* several performance critical places that need to reference
* task->cgroup without the expense of grabbing a system global
* mutex. Therefore except as noted below, when dereferencing or, as
* in cgroup_attach_task(), modifying a task's cgroup pointer we use
* task_lock(), which acts on a spinlock (task->alloc_lock) already in
* the task_struct routinely used for such matters.
*
* P.S. One more locking exception. RCU is used to guard the
* update of a tasks cgroup pointer by cgroup_attach_task()
*/
/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
			       unsigned long subsys_mask);

static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

/* BDI shared by all cgroupfs inodes: no dirty accounting or writeback */
static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);
/*
 * Allocate an inode on @sb for a cgroupfs object and fill in the
 * standard fields (mode, fs uid/gid, timestamps, shared BDI).
 * Returns NULL if the inode could not be allocated.
 */
static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
	struct inode *new = new_inode(sb);

	if (!new)
		return NULL;

	new->i_ino = get_next_ino();
	new->i_mode = mode;
	new->i_uid = current_fsuid();
	new->i_gid = current_fsgid();
	new->i_atime = new->i_mtime = new->i_ctime = CURRENT_TIME;
	new->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	return new;
}
static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
{
struct cgroup_name *name;
name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
if (!name)
return NULL;
strcpy(name->name, dentry->d_name.name);
return name;
}
/*
 * Deferred destruction of a cgroup, run from process context via the
 * workqueue (scheduled by cgroup_free_rcu()) because the teardown below
 * takes mutexes and may sleep.
 */
static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, free_work);
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	/*
	 * Release the subsystem state objects.
	 */
	for_each_subsys(cgrp->root, ss)
		ss->css_free(cgrp);

	cgrp->root->number_of_cgroups--;
	mutex_unlock(&cgroup_mutex);

	/*
	 * We get a ref to the parent's dentry, and put the ref when
	 * this cgroup is being freed, so it's guaranteed that the
	 * parent won't be destroyed before its children.
	 */
	dput(cgrp->parent->dentry);

	/* return this cgroup's id to the root's ida */
	ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);

	/*
	 * Drop the active superblock reference that we took when we
	 * created the cgroup. This will free cgrp->root, if we are
	 * holding the last reference to @sb.
	 */
	deactivate_super(cgrp->root->sb);

	/*
	 * if we're getting rid of the cgroup, refcount should ensure
	 * that there are no pidlists left.
	 */
	BUG_ON(!list_empty(&cgrp->pidlists));

	simple_xattrs_free(&cgrp->xattrs);

	kfree(rcu_dereference_raw(cgrp->name));
	kfree(cgrp);
}
/*
 * RCU callback for cgroup destruction.  The real teardown sleeps, so
 * punt it to the workqueue (cgroup_free_fn) instead of doing it here.
 */
static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *victim;

	victim = container_of(head, struct cgroup, rcu_head);
	schedule_work(&victim->free_work);
}
/*
 * d_iput() hook for cgroupfs dentries: free the filesystem-private data
 * hanging off @dentry before dropping @inode.
 */
static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;

		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* free via RCU; ->name etc. may still be read under RCU */
		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
	} else {
		struct cfent *cfe = __d_cfe(dentry);
		struct cgroup *cgrp = dentry->d_parent->d_fsdata;

		/*
		 * Warn if a file outside the top cgroup is still linked
		 * into its cgroup's ->files list at final iput time.
		 */
		WARN_ONCE(!list_empty(&cfe->node) &&
			  cgrp != &cgrp->root->top_cgroup,
			  "cfe still linked for %s\n", cfe->type->name);
		simple_xattrs_free(&cfe->xattrs);
		kfree(cfe);
	}
	iput(inode);
}
/*
 * d_delete() hook: always tell the dcache to drop cgroupfs dentries
 * once their last reference is gone instead of caching them.
 */
static int cgroup_delete(const struct dentry *d)
{
	return 1;
}
/* Unhash directory dentry @d and remove it from its parent directory. */
static void remove_dir(struct dentry *d)
{
	struct dentry *parent;

	parent = dget(d->d_parent);
	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}
/*
 * cgroup_rm_file - unlink a control file from @cgrp
 * @cgrp: target cgroup
 * @cft: type of the file to remove; NULL removes the first file on the list
 *
 * Caller must hold cgroup_mutex and the directory inode's i_mutex.
 */
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	struct cfent *cfe;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * If we're doing cleanup due to failure of cgroup_create(),
	 * the corresponding @cfe may not exist.
	 */
	list_for_each_entry(cfe, &cgrp->files, node) {
		struct dentry *d = cfe->dentry;

		if (cft && cfe->type != cft)
			continue;

		dget(d);
		d_delete(d);
		simple_unlink(cgrp->dentry->d_inode, d);
		list_del_init(&cfe->node);
		dput(d);

		/* at most one entry is removed per call */
		break;
	}
}
/**
 * cgroup_clear_directory - selective removal of base and subsystem files
 * @dir: directory containing the files
 * @base_files: true if the base files should be removed
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_directory(struct dentry *dir, bool base_files,
				   unsigned long subsys_mask)
{
	struct cgroup *cgrp = __d_cgrp(dir);
	struct cgroup_subsys *ss;

	/* remove every cftype set of each subsystem in @subsys_mask */
	for_each_subsys(cgrp->root, ss) {
		struct cftype_set *set;
		if (!test_bit(ss->subsys_id, &subsys_mask))
			continue;
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, NULL, set->cfts, false);
	}
	if (base_files) {
		/* drain whatever remains on ->files, one entry at a time */
		while (!list_empty(&cgrp->files))
			cgroup_rm_file(cgrp, NULL);
	}
}
/*
 * Remove a cgroup directory dentry: clear all of its files, unhash it
 * from its parent and remove the directory itself.
 *
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;

	/* remove base and all subsystem files first */
	cgroup_clear_directory(dentry, true, root->subsys_mask);

	/*
	 * Detach from the parent's child list under both d_locks,
	 * parent first, child nested (DENTRY_D_LOCK_NESTED).
	 */
	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}
/*
 * Change the set of subsystems bound to @root so that exactly
 * @final_subsys_mask remains bound.  Newly added subsystems are moved
 * over from the dummy hierarchy (rootnode); removed ones are returned
 * to it.
 *
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long final_subsys_mask)
{
	unsigned long added_mask, removed_mask;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	BUG_ON(!mutex_is_locked(&cgroup_root_mutex));

	removed_mask = root->actual_subsys_mask & ~final_subsys_mask;
	added_mask = final_subsys_mask & ~root->actual_subsys_mask;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_mask))
			continue;
		/*
		 * Nobody should tell us to do a subsys that doesn't exist:
		 * parse_cgroupfs_options should catch that case and refcounts
		 * ensure that subsystems won't disappear once selected.
		 */
		BUG_ON(ss == NULL);
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_mask) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(cgrp);
			/* refcount was already taken, and we're keeping it */
		} else if (bit & removed_mask) {
			/* We're removing this subsystem */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			if (ss->bind)
				ss->bind(dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
		} else if (bit & final_subsys_mask) {
			/* Subsystem state should already exist */
			BUG_ON(ss == NULL);
			BUG_ON(!cgrp->subsys[i]);
			/*
			 * a refcount was taken, but we already had one, so
			 * drop the extra reference.
			 */
			module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
			BUG_ON(ss->module && !module_refcount(ss->module));
#endif
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;

	return 0;
}
/*
 * Show this hierarchy's mount options (super_operations->show_options,
 * e.g. for /proc/mounts).  cgroup_root_mutex is held so the option
 * fields are read consistently against a concurrent remount.
 */
static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_root_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_root_mutex);
	return 0;
}
/*
 * Parsed mount options, filled in by parse_cgroupfs_options().
 * The release_agent and name strings are kmalloc'd; the caller of the
 * parser frees them on all exit paths.
 */
struct cgroup_sb_opts {
	unsigned long subsys_mask;	/* bitmask of subsystems to bind */
	unsigned long flags;		/* CGRP_ROOT_* behavior flags */
	char *release_agent;		/* "release_agent=" path, or NULL */
	bool cpuset_clone_children;	/* "clone_children" was given */
	char *name;			/* "name=" hierarchy name, or NULL */
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;	/* set by cgroup_mount() before sget() */
};
/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 *
 * On failure any strings already duplicated into @opts (release_agent,
 * name) are left allocated; callers free them on every exit path.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	/* used below: "noprefix" is only valid together with cpuset */
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		/* otherwise the token must name a single enabled subsystem */
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options
	 * were not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_mask);
		}
	}

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if (opts->flags & CGRP_ROOT_NOPREFIX) {
			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
			return -EINVAL;
		}

		if (opts->cpuset_clone_children) {
			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
			return -EINVAL;
		}
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_mask))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		/* drop refcounts only on the ones we took */
		for (i--; i >= 0; i--) {
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_mask))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}
/*
 * Drop one module reference for every subsystem whose bit is set in
 * @subsys_mask — the counterpart of the try_module_get() calls made by
 * parse_cgroupfs_options().
 */
static void drop_parsed_module_refcounts(unsigned long subsys_mask)
{
	int i;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (subsys_mask & (1UL << i))
			module_put(subsys[i]->module);
	}
}
/*
 * Remount handler (super_operations->remount_fs).  Only the bound
 * subsystem set and the release agent may change; flag or name changes
 * are rejected.  Takes the top cgroup's i_mutex, cgroup_mutex and
 * cgroup_root_mutex, in that order.
 */
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		/* undo the module refs taken by the parser */
		drop_parsed_module_refcounts(opts.subsys_mask);
		goto out_unlock;
	}

	/*
	 * Clear out the files of subsystems that should be removed, do
	 * this before rebind_subsystems, since rebind_subsystems may
	 * change this hierarchy's subsys_list.
	 */
	cgroup_clear_directory(cgrp->dentry, false, removed_mask);

	ret = rebind_subsystems(root, opts.subsys_mask);
	if (ret) {
		/* rebind_subsystems failed, re-populate the removed files */
		cgroup_populate_dir(cgrp, false, removed_mask);
		drop_parsed_module_refcounts(opts.subsys_mask);
		goto out_unlock;
	}

	/* re-populate subsystem files */
	cgroup_populate_dir(cgrp, false, added_mask);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}
/* Superblock operations for cgroupfs. */
static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};
/*
 * Initialize the list heads, locks, xattrs and deferred-free work
 * embedded in a newly allocated cgroup.
 */
static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->files);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->allcg_node);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	INIT_WORK(&cgrp->free_work, cgroup_free_fn);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
	simple_xattrs_init(&cgrp->xattrs);
}
/*
 * Initialize a freshly allocated hierarchy root.  The embedded top
 * cgroup is wired back to the root, uses the shared root_cgroup_name
 * and counts as the hierarchy's first cgroup.
 */
static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->allcg_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->name = &root_cgroup_name;
	init_cgroup_housekeeping(cgrp);
	list_add_tail(&cgrp->allcg_node, &root->allcg_list);
}
/*
 * Allocate a unique hierarchy id for @root from hierarchy_ida.
 * Returns true on success, false if ida memory could not be
 * preallocated.  Allocation prefers ids above next_hierarchy_id and
 * wraps to 0 when that range is exhausted.
 */
static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		/* preallocate outside the spinlock; this may sleep */
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret); /* -EAGAIN: preallocation was consumed, retry */
	return true;
}
static int cgroup_test_super(struct super_block *sb, void *data)
{
struct cgroup_sb_opts *opts = data;
struct cgroupfs_root *root = sb->s_fs_info;
/* If we asked for a name then it must match */
if (opts->name && strcmp(opts->name, root->name))
return 0;
/*
* If we asked for subsystems (or explicitly for no
* subsystems) then they must match
*/
if ((opts->subsys_mask || opts->none)
&& (opts->subsys_mask != root->subsys_mask))
return 0;
return 1;
}
/*
 * Allocate and initialize a new hierarchy root from parsed mount
 * options.  Returns NULL when @opts requests neither subsystems nor an
 * explicitly empty ("none") hierarchy, or an ERR_PTR on failure.
 */
static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_mask && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	/* copy the parsed options into the root */
	root->subsys_mask = opts->subsys_mask;
	root->flags = opts->flags;
	ida_init(&root->cgroup_ida);
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}
/*
 * Free a cgroupfs_root created by cgroup_root_from_opts(): return its
 * hierarchy id, destroy the per-root cgroup ida and free the structure.
 * A NULL @root is a no-op.
 */
static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (root) {
		BUG_ON(!root->hierarchy_id);

		spin_lock(&hierarchy_id_lock);
		ida_remove(&hierarchy_ida, root->hierarchy_id);
		spin_unlock(&hierarchy_id_lock);

		ida_destroy(&root->cgroup_ida);
		kfree(root);
	}
}
/*
 * sget() fill callback: attach the pre-allocated root carried in @data
 * to a newly created superblock and set the standard cgroupfs
 * parameters (blocksize, magic, super_operations).
 */
static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_mask && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	/* tie the superblock and the root to each other */
	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}
/*
 * Create the root directory inode and dentry for a new cgroupfs
 * superblock and install the cgroup dentry operations.
 */
static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}
/*
 * Mount entry point for cgroupfs.  Either creates a brand new hierarchy
 * bound to the requested subsystems or reuses an existing superblock
 * whose name/subsystem set matches (see cgroup_test_super()).
 */
static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;
	struct inode *inode;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct cgroupfs_root *existing_root;
		const struct cred *cred;
		int i;
		struct css_set *cg;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		mutex_lock(&cgroup_root_mutex);

		/* Check for name clashes with existing mounts */
		ret = -EBUSY;
		if (strlen(root->name))
			for_each_active_root(existing_root)
				if (!strcmp(existing_root->name, root->name))
					goto unlock_drop;

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret)
			goto unlock_drop;

		ret = rebind_subsystems(root, root->subsys_mask);
		if (ret == -EBUSY) {
			free_cg_links(&tmp_cg_links);
			goto unlock_drop;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		/* publish the new hierarchy */
		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		hash_for_each(css_set_table, i, cg, hlist)
			link_css_set(&tmp_cg_links, cg, root_cgrp);
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		/* create the base and subsystem files with init's creds */
		cred = override_creds(&init_cred);
		cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
		revert_creds(cred);
		mutex_unlock(&cgroup_root_mutex);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);

		if (root->flags != opts.flags) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto drop_new_super;
			} else {
				pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
			}
		}

		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_mask);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 unlock_drop:
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&inode->i_mutex);
 drop_new_super:
	deactivate_locked_super(sb);
 drop_modules:
	drop_parsed_module_refcounts(opts.subsys_mask);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}
/*
 * Tear down a hierarchy when its superblock is unmounted: rebind all
 * subsystems back to the default hierarchy, release the css_set links
 * to the root cgroup, unregister the root and free it.  The BUG_ONs
 * assert that only the top cgroup remains at this point.
 */
static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));

	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	/* unpublish the hierarchy if it was on the active roots list */
	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);

	simple_xattrs_free(&cgrp->xattrs);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}
/* The "cgroup" filesystem type. */
static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};
static struct kobject *cgroup_kobj;
/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Writes path of cgroup into buf. Returns 0 on success, -errno on error.
 *
 * We can't generate cgroup path using dentry->d_name, as accessing
 * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
 * inode's i_mutex, while on the other hand cgroup_path() can be called
 * with some irq-safe spinlocks held.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	int ret = -ENAMETOOLONG;
	char *start;

	/* the root cgroup's path is just "/" */
	if (!cgrp->parent) {
		if (strlcpy(buf, "/", buflen) >= buflen)
			return -ENAMETOOLONG;
		return 0;
	}

	/*
	 * Build the path back-to-front: start at the end of the buffer
	 * and prepend "/<name>" for each ancestor up to (but excluding)
	 * the root.  The names are dereferenced under rcu_read_lock().
	 */
	start = buf + buflen - 1;
	*start = '\0';

	rcu_read_lock();
	do {
		const char *name = cgroup_name(cgrp);
		int len;

		len = strlen(name);
		if ((start -= len) < buf)
			goto out;	/* buffer too small */
		memcpy(start, name, len);

		if (--start < buf)
			goto out;
		*start = '/';

		cgrp = cgrp->parent;
	} while (cgrp->parent);
	ret = 0;
	/* shift the assembled path to the front of the buffer */
	memmove(buf, start, buf + buflen - start);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);
/*
 * Control Group taskset
 */

/* one task being migrated, together with its source and destination */
struct task_and_cgroup {
	struct task_struct	*task;	/* task being migrated */
	struct cgroup		*cgrp;	/* its current cgroup in the target root */
	struct css_set		*cg;	/* destination css_set (from find_css_set()) */
};

/*
 * Set of tasks handed to the ->can_attach/->attach callbacks.  A
 * single-task set uses @single directly; multi-task sets carry their
 * entries in @tc_array.
 */
struct cgroup_taskset {
	struct task_and_cgroup	single;		/* used when tc_array is NULL */
	struct flex_array	*tc_array;	/* entries for the multi-task case */
	int			tc_array_len;	/* number of valid entries */
	int			idx;		/* iteration cursor */
	struct cgroup		*cur_cgrp;	/* cgroup of the last returned task */
};
/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	/* single-task tasksets have no flex array backing them */
	if (!tset->tc_array) {
		tset->cur_cgrp = tset->single.cgrp;
		return tset->single.task;
	}

	tset->idx = 0;
	return cgroup_taskset_next(tset);
}
EXPORT_SYMBOL_GPL(cgroup_taskset_first);
/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset. Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct task_and_cgroup *tc;

	/* nothing to walk for a single-task set; stop when exhausted */
	if (!tset->tc_array)
		return NULL;
	if (tset->idx >= tset->tc_array_len)
		return NULL;

	tc = flex_array_get(tset->tc_array, tset->idx);
	tset->idx++;
	tset->cur_cgrp = tc->cgrp;
	return tc->task;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
 * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
 * @tset: taskset of interest
 *
 * Return the cgroup for the current (last returned) task of @tset. This
 * function must be preceded by either cgroup_taskset_first() or
 * cgroup_taskset_next().
 */
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
{
	/* cur_cgrp is updated by the iteration functions above */
	return tset->cur_cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
/**
 * cgroup_taskset_size - return the number of tasks in taskset
 * @tset: taskset of interest
 */
int cgroup_taskset_size(struct cgroup_taskset *tset)
{
	/* a taskset without a flex array holds exactly one task */
	if (tset->tc_array)
		return tset->tc_array_len;
	return 1;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_size);
/*
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @oldcgrp: the cgroup @tsk is leaving
 * @tsk: the task being moved
 * @newcg: css_set for the destination (obtained via find_css_set())
 *
 * Must be called with cgroup_mutex and threadgroup locked.
 */
static void cgroup_task_migrate(struct cgroup *oldcgrp,
				struct task_struct *tsk, struct css_set *newcg)
{
	struct css_set *oldcg;

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	oldcg = tsk->cgroups;

	/* task_lock() guards the cgroup-pointer switch (see top of file) */
	task_lock(tsk);
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list))
		list_move(&tsk->cg_list, &newcg->tasks);
	write_unlock(&css_set_lock);

	/*
	 * We just gained a reference on oldcg by taking it from the task. As
	 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
	 * it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	put_css_set(oldcg);
}
/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @cgrp: the cgroup to attach to
 * @tsk: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
 * task_lock of @tsk or each thread in the threadgroup individually in turn.
 */
static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
			      bool threadgroup)
{
	int retval, i, group_size;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroupfs_root *root = cgrp->root;
	/* threadgroup list cursor and array */
	struct task_struct *leader = tsk;
	struct task_and_cgroup *tc;
	struct flex_array *group;
	struct cgroup_taskset tset = { };

	/*
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it needs
	 * rcu or tasklist locked. instead, build an array of all threads in the
	 * group - group_rwsem prevents new threads from appearing, and if
	 * threads exit, this will just be an over-estimate.
	 */
	if (threadgroup)
		group_size = get_nr_threads(tsk);
	else
		group_size = 1;
	/* flex_array supports very large thread-groups better than kmalloc. */
	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
	if (!group)
		return -ENOMEM;
	/* pre-allocate to guarantee space while iterating in rcu read-side. */
	retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
	if (retval)
		goto out_free_group_list;

	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;

		/* @tsk either already exited or can't exit until the end */
		if (tsk->flags & PF_EXITING)
			continue;

		/* as per above, nr_threads may decrease, but not increase. */
		BUG_ON(i >= group_size);
		ent.task = tsk;
		ent.cgrp = task_cgroup_from_root(tsk, root);
		/* nothing to do if this task is already in the cgroup */
		if (ent.cgrp == cgrp)
			continue;
		/*
		 * saying GFP_ATOMIC has no effect here because we did prealloc
		 * earlier, but it's good form to communicate our expectations.
		 */
		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
		BUG_ON(retval != 0);
		i++;

		if (!threadgroup)
			break;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;

	/* methods shouldn't be called if no task is actually migrating */
	retval = 0;
	if (!group_size)
		goto out_free_group_list;

	/*
	 * step 1: check that we can legitimately attach to the cgroup.
	 */
	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(cgrp, &tset);
			if (retval) {
				failed_ss = ss;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * step 2: make sure css_sets exist for all threads to be migrated.
	 * we use find_css_set, which allocates a new one if necessary.
	 */
	for (i = 0; i < group_size; i++) {
		tc = flex_array_get(group, i);
		tc->cg = find_css_set(tc->task->cgroups, cgrp);
		if (!tc->cg) {
			retval = -ENOMEM;
			goto out_put_css_set_refs;
		}
	}

	/*
	 * step 3: now that we're guaranteed success wrt the css_sets,
	 * proceed to move all tasks to the new cgroup. There are no
	 * failure cases after here, so this is the commit point.
	 */
	for (i = 0; i < group_size; i++) {
		tc = flex_array_get(group, i);
		cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
	}
	/* nothing is sensitive to fork() after this point. */

	/*
	 * step 4: do subsystem attach callbacks.
	 */
	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(cgrp, &tset);
	}

	/*
	 * step 5: success! and cleanup
	 */
	retval = 0;
out_put_css_set_refs:
	if (retval) {
		/* drop the css_set refs taken in step 2 (stop at first NULL) */
		for (i = 0; i < group_size; i++) {
			tc = flex_array_get(group, i);
			if (!tc->cg)
				break;
			put_css_set(tc->cg);
		}
	}
out_cancel_attach:
	if (retval) {
		/* undo can_attach for subsystems that already agreed */
		for_each_subsys(root, ss) {
			if (ss == failed_ss)
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(cgrp, &tset);
		}
	}
out_free_group_list:
	flex_array_free(group);
	return retval;
}
/*
 * Ask each subsystem on @cgrp's hierarchy whether the attach described by
 * @tset may proceed.  Every subsystem must provide ->allow_attach and each
 * must approve; a missing handler denies with -EACCES, a handler failure
 * returns that handler's error.
 */
static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct cgroup_subsys *ss;
	int ret;

	for_each_subsys(cgrp->root, ss) {
		if (!ss->allow_attach)
			return -EACCES;
		ret = ss->allow_attach(cgrp, tset);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Default ->allow_attach helper for subsystems: permit the move when the
 * caller holds CAP_SYS_NICE, is moving itself, or when the caller's
 * effective uid matches the target's real or saved uid.
 *
 * Fix: compare uids with uid_eq() instead of raw !=/== so the check stays
 * correct (and compiles) when kuid_t is a distinct struct type, matching
 * the identical permission check in attach_task_by_pid().
 */
int subsys_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;

	/* privileged callers may move any task */
	if (capable(CAP_SYS_NICE))
		return 0;

	cgroup_taskset_for_each(task, cgrp, tset) {
		tcred = __task_cred(task);
		if (current != task && !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid))
			return -EACCES;
	}
	return 0;
}
/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup; may take task_lock of task.
 *
 * @cgrp: destination cgroup (must still be alive)
 * @pid: vpid of the target task, or 0 to attach current
 * @threadgroup: attach the whole thread group of the target
 *
 * Returns 0 on success or a negative errno.
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	/* takes cgroup_mutex; fails if @cgrp is already being removed */
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			/*
			 * if the default permission check fails, give each
			 * cgroup a chance to extend the permission check
			 */
			struct cgroup_taskset tset = { };
			tset.single.task = tsk;
			tset.single.cgrp = cgrp;
			ret = cgroup_allow_attach(cgrp, &tset);
			if (ret) {
				rcu_read_unlock();
				goto out_unlock_cgroup;
			}
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated. Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	/* pin the task before dropping the RCU read lock */
	get_task_struct(tsk);
	rcu_read_unlock();

	/* block fork/exec in the group while we attach */
	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Walks every active hierarchy and moves @tsk into the cgroup @from
 * occupies there.  Stops at the first failure and returns its error;
 * returns 0 when every hierarchy succeeded.
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int ret = 0;

	mutex_lock(&cgroup_mutex);
	for_each_active_root(root) {
		struct cgroup *dst = task_cgroup_from_root(from, root);

		ret = cgroup_attach_task(dst, tsk, false);
		if (ret)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/* "tasks" file write handler: move the single thread @pid into @cgrp. */
static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	return attach_task_by_pid(cgrp, pid, false);
}
/* "cgroup.procs" write handler: move @tgid's whole thread group into @cgrp. */
static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
{
	return attach_task_by_pid(cgrp, tgid, true);
}
/*
 * Write handler for "release_agent": record the userspace helper path for
 * this hierarchy.  Rejects paths that would not fit the fixed-size buffer;
 * the copy below is therefore bounded.
 */
static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	/* release_agent_path is read under cgroup_root_mutex */
	mutex_lock(&cgroup_root_mutex);
	strcpy(cgrp->root->release_agent_path, buffer);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	return 0;
}
/* Read handler for "release_agent": emit the helper path plus a newline. */
static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	mutex_unlock(&cgroup_mutex);
	return 0;
}
static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *seq)
{
seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
return 0;
}
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64
/*
 * Write handler for numeric control files.  Parses the user buffer as a
 * u64 or s64, depending on which handler the cftype provides, and hands
 * the value to the subsystem.  Returns bytes consumed or a negative errno.
 */
static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buf[CGROUP_LOCAL_BUFFER_SIZE];
	char *end;
	int ret;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buf))
		return -E2BIG;
	if (copy_from_user(buf, userbuf, nbytes))
		return -EFAULT;
	buf[nbytes] = 0;	/* nul-terminate */

	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buf), &end, 0);

		if (*end)
			return -EINVAL;
		ret = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buf), &end, 0);

		if (*end)
			return -EINVAL;
		ret = cft->write_s64(cgrp, cft, val);
	}
	return ret ? ret : nbytes;
}
/*
 * Write handler for string control files.  Copies the user data into a
 * stack buffer (or a kmalloc'd one if it doesn't fit), strips surrounding
 * whitespace and passes the result to ->write_string().
 */
static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	char *buffer = local_buffer;
	size_t max_bytes = cft->max_write_len;
	int ret = 0;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;

	/* fall back to the heap when the write doesn't fit on the stack */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	}

	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		ret = -EFAULT;
		goto out;
	}
	buffer[nbytes] = 0;	/* nul-terminate */
	ret = cft->write_string(cgrp, cft, strstrip(buffer));
	if (!ret)
		ret = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return ret;
}
/*
 * Top-level ->write() for cgroup control files.  Routes the write to the
 * appropriate cftype handler: raw write, numeric, string or trigger.
 * Fails with -ENODEV once the cgroup has been removed.
 */
static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		/* trigger files consume the whole write on success */
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}
/* Format a ->read_u64() value as decimal text for userspace. */
static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	int len = snprintf(tmp, sizeof(tmp), "%llu\n",
			   (unsigned long long)cft->read_u64(cgrp, cft));

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
/* Format a ->read_s64() value as signed decimal text for userspace. */
static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	int len = snprintf(tmp, sizeof(tmp), "%lld\n",
			   (long long)cft->read_s64(cgrp, cft));

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
/*
 * Top-level ->read() for cgroup control files.  Routes to the cftype's raw
 * ->read() or to the numeric formatters.  Files using read_map or
 * read_seq_string never reach here: their f_op is swapped to the seq_file
 * operations in cgroup_file_open().
 */
static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}
/*
* seqfile ops/methods for returning structured data. Currently just
* supports string->u64 maps, but can be extended in future.
*/
/* Private state attached to a seq_file opened on a map/seq control file. */
struct cgroup_seqfile_state {
	struct cftype *cft;	/* control file being read */
	struct cgroup *cgroup;	/* cgroup the file belongs to */
};
/*
 * cgroup_map_cb callback: emit one "key value" line into the seq_file
 * stashed in cb->state.  Propagates seq_printf()'s return value.
 */
static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}
/*
 * seq_file ->show() for control files using read_map or read_seq_string.
 * read_map gets a callback structure that prints "key value" pairs via
 * cgroup_map_add(); otherwise read_seq_string writes directly to @m.
 */
static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;
	struct cgroup_map_cb cb = {
		.fill = cgroup_map_add,
		.state = m,
	};

	if (cft->read_map)
		return cft->read_map(state->cgroup, cft, &cb);
	return cft->read_seq_string(state->cgroup, cft, m);
}
/* Release a map/seq control file: free our private state, then the seq_file. */
static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	/* frees the cgroup_seqfile_state allocated in cgroup_file_open() */
	kfree(seq->private);
	return single_release(inode, file);
}
/* f_op installed at open time for control files served through seq_file. */
static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};
/*
 * ->open() for cgroup control files.  Files backed by read_map or
 * read_seq_string are rerouted to the seq_file machinery: allocate a
 * cgroup_seqfile_state, swap in cgroup_seqfile_operations and let
 * single_open() take over.  Other files fall through to the cftype's own
 * ->open(), if any.
 */
static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		/* serve reads through seq_file from now on */
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}
/* ->release() for control files: defer to the cftype's hook, if present. */
static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);

	return cft->release ? cft->release(inode, file) : 0;
}
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	int ret;
	struct cgroup_name *name, *old_name;
	struct cgroup *cgrp;

	/*
	 * It's convenient to use parent dir's i_mutex to protect
	 * cgrp->name.
	 */
	lockdep_assert_held(&old_dir->i_mutex);

	/* only in-place directory renames are supported */
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;

	cgrp = __d_cgrp(old_dentry);

	name = cgroup_alloc_name(new_dentry);
	if (!name)
		return -ENOMEM;

	ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (ret) {
		kfree(name);
		return ret;
	}

	/* publish the new name; RCU readers may still hold the old one */
	old_name = cgrp->name;
	rcu_assign_pointer(cgrp->name, name);

	kfree_rcu(old_name, rcu_head);
	return 0;
}
/*
 * Return the xattr store for @dentry: the cgroup's for directories,
 * the cfent's for regular control files.
 */
static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
{
	if (S_ISDIR(dentry->d_inode->i_mode))
		return &__d_cgrp(dentry)->xattrs;
	return &__d_cfe(dentry)->xattrs;
}
/* Nonzero when this hierarchy was mounted with the xattr option. */
static inline int xattr_enabled(struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	return root->flags & CGRP_ROOT_XATTR;
}
/* Only the trusted.* and security.* xattr namespaces are supported here. */
static bool is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
}
/* inode op: set an extended attribute on a cgroup file or directory. */
static int cgroup_setxattr(struct dentry *dentry, const char *name,
			   const void *val, size_t size, int flags)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
}
/* inode op: remove an extended attribute from a cgroup file or directory. */
static int cgroup_removexattr(struct dentry *dentry, const char *name)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_remove(__d_xattrs(dentry), name);
}
/* inode op: read an extended attribute of a cgroup file or directory. */
static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
			       void *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
}
/* inode op: list the extended attributes of a cgroup file or directory. */
static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	return simple_xattr_list(__d_xattrs(dentry), buf, size);
}
/* Default f_op for cgroup control files (non-seq_file path). */
static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};
/* inode ops for regular control files: xattr support only. */
static const struct inode_operations cgroup_file_inode_operations = {
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};
/* inode ops for cgroup directories: lookup/mkdir/rmdir/rename plus xattrs. */
static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};
/*
 * ->lookup() for cgroup directories: control files are created eagerly,
 * so an unmatched name is simply instantiated as a negative dentry.
 */
static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}
/*
 * Check if a file is a control file; returns its cftype or -EINVAL if
 * the file does not belong to cgroupfs.
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file_inode(file)->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}
/*
 * Allocate and instantiate an inode for @dentry.  Directories get the
 * cgroup dir ops and are returned with their i_mutex held (see comment
 * below); regular files get the control-file ops.
 */
static int cgroup_create_file(struct dentry *dentry, umode_t mode,
			      struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		inc_nlink(dentry->d_parent->d_inode);

		/*
		 * Control reaches here with cgroup_mutex held.
		 * @inode->i_mutex should nest outside cgroup_mutex but we
		 * want to populate it immediately without releasing
		 * cgroup_mutex.  As @inode isn't visible to anyone else
		 * yet, trylock will always succeed without affecting
		 * lockdep checks.
		 */
		WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
		inode->i_op = &cgroup_file_inode_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	/* an explicit mode always wins */
	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
/*
 * Create the directory entry for one control file of @cgrp.  The name is
 * prefixed with "<subsys>." unless the hierarchy was mounted with
 * noprefix.  Caller must hold the parent directory's i_mutex.
 */
static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
			   struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct cgroup *parent = __d_cgrp(dir);
	struct dentry *dentry;
	struct cfent *cfe;
	int error;
	umode_t mode;
	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };

	if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);

	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));

	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
	if (!cfe)
		return -ENOMEM;

	dentry = lookup_one_len(name, dir, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out;
	}

	cfe->type = (void *)cft;
	cfe->dentry = dentry;
	dentry->d_fsdata = cfe;
	simple_xattrs_init(&cfe->xattrs);

	mode = cgroup_file_mode(cft);
	error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
	if (!error) {
		list_add_tail(&cfe->node, &parent->files);
		/* ownership transferred to parent->files; don't free below */
		cfe = NULL;
	}
	dput(dentry);
out:
	/* no-op on success (cfe is NULL), frees the cfent on failure */
	kfree(cfe);
	return error;
}
/*
 * Add or remove (@is_add) the files described by @cfts on @cgrp, honoring
 * the CFTYPE_* visibility flags.  Continues past individual add failures;
 * the returned value is the last add error seen (0 if all succeeded).
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
			      struct cftype cfts[], bool is_add)
{
	struct cftype *cft;
	int err, ret = 0;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

		if (is_add) {
			err = cgroup_add_file(cgrp, subsys, cft);
			if (err)
				pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
					cft->name, err);
			ret = err;
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return ret;
}
static DEFINE_MUTEX(cgroup_cft_mutex);
/* Pairs with cgroup_cfts_commit(), which drops both mutexes. */
static void cgroup_cfts_prepare(void)
	__acquires(&cgroup_cft_mutex) __acquires(&cgroup_mutex)
{
	/*
	 * Thanks to the entanglement with vfs inode locking, we can't walk
	 * the existing cgroups under cgroup_mutex and create files.
	 * Instead, we increment reference on all cgroups and build list of
	 * them using @cgrp->cft_q_node. Grab cgroup_cft_mutex to ensure
	 * exclusive access to the field.
	 */
	mutex_lock(&cgroup_cft_mutex);
	mutex_lock(&cgroup_mutex);
}
/*
 * Second half of a cftype update started by cgroup_cfts_prepare().
 * Collects all existing cgroups of @ss's hierarchy under cgroup_mutex,
 * then — after dropping it, so i_mutex can be taken first — adds or
 * removes (@is_add) the @cfts files on each.  A %NULL @cfts aborts the
 * update and just releases the locks.
 */
static void cgroup_cfts_commit(struct cgroup_subsys *ss,
			       struct cftype *cfts, bool is_add)
	__releases(&cgroup_mutex) __releases(&cgroup_cft_mutex)
{
	LIST_HEAD(pending);
	struct cgroup *cgrp, *n;

	/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
	if (cfts && ss->root != &rootnode) {
		list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
			/* pin each cgroup's dentry while it sits on @pending */
			dget(cgrp->dentry);
			list_add_tail(&cgrp->cft_q_node, &pending);
		}
	}

	mutex_unlock(&cgroup_mutex);

	/*
	 * All new cgroups will see @cfts update on @ss->cftsets. Add/rm
	 * files for all cgroups which were created before.
	 */
	list_for_each_entry_safe(cgrp, n, &pending, cft_q_node) {
		struct inode *inode = cgrp->dentry->d_inode;

		/* i_mutex nests outside cgroup_mutex */
		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		if (!cgroup_is_removed(cgrp))
			cgroup_addrm_files(cgrp, ss, cfts, is_add);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);

		list_del_init(&cgrp->cft_q_node);
		dput(cgrp->dentry);
	}

	mutex_unlock(&cgroup_cft_mutex);
}
/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype_set *set;

	/* allocate before taking the locks so failure needs no unwinding */
	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return -ENOMEM;

	cgroup_cfts_prepare();
	set->cfts = cfts;
	list_add_tail(&set->node, &ss->cftsets);
	cgroup_cfts_commit(ss, cfts, true);

	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
/**
* cgroup_rm_cftypes - remove an array of cftypes from a subsystem
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Unregister @cfts from @ss. Files described by @cfts are removed from
* all existing cgroups to which @ss is attached and all future cgroups
* won't have them either. This function can be called anytime whether @ss
* is attached or not.
*
* Returns 0 on successful unregistration, -ENOENT if @cfts is not
* registered with @ss.
*/
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype_set *set;
cgroup_cfts_prepare();
list_for_each_entry(set, &ss->cftsets, node) {
if (set->cfts == cfts) {
list_del_init(&set->node);
cgroup_cfts_commit(ss, cfts, false);
return 0;
}
}
cgroup_cfts_commit(ss, NULL, false);
return -ENOENT;
}
/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
*
* Return the number of tasks in the cgroup.
*/
int cgroup_task_count(const struct cgroup *cgrp)
{
int count = 0;
struct cg_cgroup_link *link;
read_lock(&css_set_lock);
list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
count += atomic_read(&link->cg->refcount);
}
read_unlock(&css_set_lock);
return count;
}
/*
 * Advance a list_head iterator.  The iterator should be positioned at
 * the start of a css_set.  Skips css_sets with no tasks; clears
 * it->cg_link when the end of @cgrp's css_set list is reached.
 */
static void cgroup_advance_iter(struct cgroup *cgrp,
				struct cgroup_iter *it)
{
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
	struct css_set *cg;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &cgrp->css_sets) {
			/* wrapped around: iteration is complete */
			it->cg_link = NULL;
			return;
		}
		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
		cg = link->cg;
	} while (list_empty(&cg->tasks));
	it->cg_link = l;
	it->task = cg->tasks.next;
}
/*
 * To reduce the fork() overhead for systems that are not actually
 * using their cgroups capability, we don't maintain the lists running
 * through each css_set to its tasks until we see the list actually
 * used - in other words after the first call to cgroup_iter_start().
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &p->cgroups->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	write_unlock(&css_set_lock);
}
/**
 * cgroup_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @cgroup: cgroup whose descendants to walk
 *
 * To be used by cgroup_for_each_descendant_pre().  Find the next
 * descendant to visit for pre-order traversal of @cgroup's descendants.
 * Must be called under rcu_read_lock().
 */
struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
					  struct cgroup *cgroup)
{
	struct cgroup *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, pretend we just visited @cgroup */
	if (!pos)
		pos = cgroup;

	/* visit the first child if exists */
	next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != cgroup) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup,
				      sibling);
		if (&next->sibling != &pos->parent->children)
			return next;
		/* this subtree exhausted; pop up one level */
		pos = pos->parent;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
/**
 * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
 * @pos: cgroup of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant,
 * @pos is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.  Must be called under rcu_read_lock().
 */
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
{
	struct cgroup *last, *tmp;

	WARN_ON_ONCE(!rcu_read_lock_held());

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		list_for_each_entry_rcu(tmp, &last->children, sibling)
			pos = tmp;
	} while (pos);

	return last;
}
EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
/* Follow first-children from @pos down to its deepest leftmost descendant. */
static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
{
	struct cgroup *last = pos;

	while (pos) {
		last = pos;
		pos = list_first_or_null_rcu(&pos->children, struct cgroup,
					     sibling);
	}
	return last;
}
/**
 * cgroup_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @cgroup: cgroup whose descendants to walk
 *
 * To be used by cgroup_for_each_descendant_post().  Find the next
 * descendant to visit for post-order traversal of @cgroup's descendants.
 * Must be called under rcu_read_lock().
 */
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup)
{
	struct cgroup *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, visit the leftmost descendant */
	if (!pos) {
		next = cgroup_leftmost_descendant(cgroup);
		return next != cgroup ? next : NULL;
	}

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
	if (&next->sibling != &pos->parent->children)
		return cgroup_leftmost_descendant(next);

	/* no sibling left, visit parent */
	next = pos->parent;
	return next != cgroup ? next : NULL;
}
EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
/*
 * Begin iterating the tasks of @cgrp.  Returns with css_set_lock held
 * (released by cgroup_iter_end()) and @it positioned at the first task.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
	__acquires(css_set_lock)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);
	it->cg_link = &cgrp->css_sets;
	cgroup_advance_iter(cgrp, it);
}
/*
 * Return the task @it currently points at and advance the iterator,
 * hopping to the next non-empty css_set when the current task list is
 * exhausted.  Returns NULL when iteration is complete.
 */
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cg_cgroup_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cg_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
	if (l == &link->cg->tasks) {
		/* We reached the end of this task list - move on to
		 * the next cg_cgroup_link */
		cgroup_advance_iter(cgrp, it);
	} else {
		it->task = l;
	}
	return res;
}
/* Finish an iteration started by cgroup_iter_start(); drops css_set_lock. */
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
	__releases(css_set_lock)
{
	read_unlock(&css_set_lock);
}
/*
 * Order helper for the scan heap: did @t1 start strictly after @time?
 * Ties on start time are broken by pointer value so the ordering is
 * total; @t2 is used only for that tie-break.
 */
static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int cmp = timespec_compare(&t1->start_time, time);

	if (cmp != 0)
		return cmp > 0;
	/*
	 * Arbitrarily, of two processes started at the same time, the one
	 * with the higher pointer value started "later".  t2 may have
	 * exited by now so its pointer may be stale, but that's fine -
	 * it is only compared, never dereferenced, and still serves to
	 * distinguish two tasks started (effectively) simultaneously.
	 */
	return t1 > t2;
}
/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}
/**
 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
 * @scan: struct cgroup_scanner containing arguments for the scan
 *
 * Arguments include pointers to callback functions test_task() and
 * process_task().
 * Iterate through all the tasks in a cgroup, calling test_task() for each,
 * and if it returns true, call process_task() for it also.
 * The test_task pointer may be NULL, meaning always true (select all tasks).
 * Effectively duplicates cgroup_iter_{start,next,end}()
 * but does not lock css_set_lock for the call to process_task().
 * The struct cgroup_scanner may be embedded in any structure of the caller's
 * creation.
 * It is guaranteed that process_task() will act on every task that
 * is a member of the cgroup for the duration of this call. This
 * function may or may not call process_task() for tasks that exit
 * or move to a different cgroup during the call, or are forked or
 * move into the cgroup during the call.
 *
 * Note that test_task() may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should
 * be cheap.
 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
 * pre-allocated and will be used for heap operations (and its "gt" member will
 * be overwritten), else a temporary heap will be used (allocation of which
 * may cause this function to fail).
 */
int cgroup_scan_tasks(struct cgroup_scanner *scan)
{
	int retval, i;
	struct cgroup_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct ptr_heap *heap;
	struct timespec latest_time = { 0, 0 };

	if (scan->heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap = scan->heap;
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
	 * to determine which are of interest, and using the scanner's
	 * "process_task" callback to process any of them that need an update.
	 * Since we don't want to hold any locks during the task updates,
	 * gather tasks to be processed in a heap structure.
	 * The heap is sorted by descending task start time.
	 * If the statically-sized heap fills up, we overflow tasks that
	 * started later, and in future iterations only consider tasks that
	 * started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_iter_start(scan->cg, &it);
	while ((p = cgroup_iter_next(scan->cg, &it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (scan->test_task && !scan->test_task(p, scan))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	cgroup_iter_end(scan->cg, &it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				/* ptrs[0] is the oldest task in this batch;
				 * remember it as the resume point */
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			scan->process_task(q, scan);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
/* cgroup_scanner callback: move one task into the destination cgroup. */
static void cgroup_transfer_one_task(struct task_struct *task,
				     struct cgroup_scanner *scan)
{
	struct cgroup *new_cgroup = scan->data;

	/* cgroup_attach_task() must be called under cgroup_mutex */
	mutex_lock(&cgroup_mutex);
	cgroup_attach_task(new_cgroup, task, false);
	mutex_unlock(&cgroup_mutex);
}
/**
* cgroup_trasnsfer_tasks - move tasks from one cgroup to another
* @to: cgroup to which the tasks will be moved
* @from: cgroup in which the tasks currently reside
*/
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
struct cgroup_scanner scan;
scan.cg = from;
scan.test_task = NULL; /* select all tasks in cgroup */
scan.process_task = cgroup_transfer_one_task;
scan.heap = NULL;
scan.data = to;
return cgroup_scan_tasks(&scan);
}
/*
* Stuff for reading the 'tasks'/'procs' files.
*
* Reading this file can return large amounts of data if a cgroup has
* *lots* of attached tasks. So it may need several calls to read(),
* but we cannot guarantee that the information we produce is correct
* unless we produce it entirely atomically.
*
*/
/* which pidlist file are we talking about? */
enum cgroup_filetype {
CGROUP_FILE_PROCS,
CGROUP_FILE_TASKS,
};
/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* how many files are using the current array */
	int use_count;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* protects the other fields */
	struct rw_semaphore mutex;
};
/*
* The following two functions "fix" the issue where there are more pids
* than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
* TODO: replace with a kernel-wide solution to this problem
*/
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
if (PIDLIST_TOO_LARGE(count))
return vmalloc(count * sizeof(pid_t));
else
return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
/* release a buffer via whichever allocator pidlist_allocate() picked */
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p)) {
		vfree(p);
		return;
	}
	kfree(p);
}
/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 *
 * The list must already be sorted (see cmppid); duplicates are squeezed
 * out in place, so on return the first N slots hold the unique values.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int read, write = 1;

	/* zero- and one-element lists are trivially duplicate-free */
	if (length < 2)
		return length;

	/*
	 * Classic in-place dedupe of a sorted array: an element is copied
	 * forward only when it differs from its predecessor; 'write' trails
	 * 'read' and counts the unique elements kept so far.
	 */
	for (read = 1; read < length; read++) {
		if (list[read] != list[read - 1])
			list[write++] = list[read];
	}
	return write;
}
/* sort() comparator: ascending pid order */
static int cmppid(const void *a, const void *b)
{
	pid_t lhs = *(const pid_t *)a;
	pid_t rhs = *(const pid_t *)b;

	/* pids are small non-negative ints, so the difference cannot overflow */
	return lhs - rhs;
}
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 *
 * On success the returned pidlist's ->mutex is held for WRITE; callers
 * (pidlist_array_load) drop it with up_write() after filling the array.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);
	/*
	 * We can't drop the pidlist_mutex before taking the l->mutex in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->mutex);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->mutex);
	/* lock it before publishing so nobody sees a half-built pidlist */
	down_write(&l->mutex);
	l->key.type = type;
	/* the pidlist keeps its own reference on the namespace */
	l->key.ns = get_pid_ns(ns);
	l->use_count = 0; /* don't increment here */
	l->list = NULL;
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 *
 * On success, stores the (write-unlocked) pidlist in *@lp with its
 * use_count elevated; the matching put is cgroup_release_pid_array().
 * Returns 0 or -ENOMEM.
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;
	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		/* tasks may have been added since we sized the array */
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		/* 0 means the task isn't visible in our pid namespace */
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);
	/* returns with l->mutex write-held on success */
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->mutex);
	*lp = l;
	return 0;
}
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Returns 0 on success, -EINVAL if @dentry is not a cgroupfs directory.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	/* walk every task attached to the cgroup, bucketing by state */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			/* other states only interest us if waiting on I/O */
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_iter_end(cgrp, &it);

err:
	return ret;
}
/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	/* released in cgroup_pidlist_stop(), which seq_file always calls */
	down_read(&l->mutex);
	if (pid) {
		int end = l->length;

		/*
		 * Binary search for the first entry >= pid.  On an exact
		 * match we land on it; otherwise 'index' ends up at the
		 * first pid greater than the requested one.
		 */
		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
/* seq_file ->stop(): pairs with the down_read() in cgroup_pidlist_start() */
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	up_read(&((struct cgroup_pidlist *)s->private)->mutex);
}
/* seq_file ->next(): step to the following pid, NULL past the last one */
static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *next = (pid_t *)v + 1;

	/* stop iterating once we step off the end of the array */
	if (next >= l->list + l->length)
		return NULL;

	*pos = *next;
	return next;
}
/* seq_file ->show(): emit one pid per line */
static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	int pid = *(int *)v;

	return seq_printf(s, "%d\n", pid);
}
/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,	/* takes l->mutex for read */
	.stop = cgroup_pidlist_stop,	/* drops l->mutex */
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};
/*
 * Drop one use_count on @l; frees and unlinks the pidlist when the count
 * reaches zero.  Counterpart of the use_count++ in pidlist_array_load().
 */
static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
	 * pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->mutex);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		/* drop the namespace ref taken in cgroup_pidlist_find() */
		put_pid_ns(l->key.ns);
		up_write(&l->mutex);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->mutex);
}
static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
struct cgroup_pidlist *l;
if (!(file->f_mode & FMODE_READ))
return 0;
/*
* the seq_file will only be initialized if the file was opened for
* reading; hence we check if it's not null only in that case.
*/
l = ((struct seq_file *)file->private_data)->private;
cgroup_release_pid_array(l);
return seq_release(inode, file);
}
/* fops installed by cgroup_pidlist_open() for the tasks/procs files */
static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};
/*
 * The following functions handle opens on a file that displays a pidlist
 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
 * in the cgroup.
 */

/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated; also takes a use_count on l */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		/* undo the pin taken by pidlist_array_load() */
		cgroup_release_pid_array(l);
		return retval;
	}
	/* stash the pidlist where the seq_file callbacks can find it */
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}
/* open handler for the "tasks" file: per-thread pids */
static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}
/* open handler for the "cgroup.procs" file: unique tgids */
static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}
/* read_u64 handler for "notify_on_release": 1 if the flag is set */
static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
					    struct cftype *cft)
{
	return notify_on_release(cgrp);
}
/* write_u64 handler for "notify_on_release": any non-zero value enables it */
static int cgroup_write_notify_on_release(struct cgroup *cgrp,
					  struct cftype *cft,
					  u64 val)
{
	/* writing the file re-arms release tracking from a clean slate */
	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	return 0;
}
/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
			remove);
	struct cgroup *cgrp = event->cgrp;

	remove_wait_queue(event->wqh, &event->wait);

	event->cft->unregister_event(cgrp, event->cft, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	/* drop the dentry ref taken in cgroup_write_event_control() */
	dput(cgrp->dentry);
}
/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
		int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
			struct cgroup_event, wait);
	struct cgroup *cgrp = event->cgrp;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&cgrp->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&cgrp->event_list_lock);
	}

	return 0;
}
/*
 * poll_table callback: remember the eventfd's waitqueue head and hook
 * our wait entry onto it so cgroup_event_wake() sees the POLLHUP.
 */
static void cgroup_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
			struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}
/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (eventfd ctx, file refs, the event itself) are
 * released on the 'fail' path.
 */
static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	struct cgroup_event *event = NULL;
	struct cgroup *cgrp_cfile;
	unsigned int efd, cfd;
	struct file *efile = NULL;
	struct file *cfile = NULL;
	char *endp;
	int ret;

	/* parse "<event_fd> <control_fd> ..." */
	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	event->cgrp = cgrp;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto fail;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto fail;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto fail;
	}

	/* the process need read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile), MAY_READ);
	if (ret < 0)
		goto fail;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto fail;
	}

	/*
	 * The file to be monitored must be in the same cgroup as
	 * cgroup.event_control is.
	 */
	cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
	if (cgrp_cfile != cgrp) {
		ret = -EINVAL;
		goto fail;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto fail;
	}

	ret = event->cft->register_event(cgrp, event->cft,
			event->eventfd, buffer);
	if (ret)
		goto fail;

	/* arm the POLLHUP notification via our poll_table callback */
	efile->f_op->poll(efile, &event->pt);

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take reference to cgroup
	 * directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

fail:
	/* release whatever subset of resources was acquired before the error */
	if (cfile)
		fput(cfile);

	if (event && event->eventfd && !IS_ERR(event->eventfd))
		eventfd_ctx_put(event->eventfd);

	if (!IS_ERR_OR_NULL(efile))
		fput(efile);

	kfree(event);

	return ret;
}
/* read_u64 handler for "cgroup.clone_children" */
static u64 cgroup_clone_children_read(struct cgroup *cgrp,
					    struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
}
/* write_u64 handler for "cgroup.clone_children": non-zero enables the flag */
static int cgroup_clone_children_write(struct cgroup *cgrp,
				     struct cftype *cft,
				     u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
	return 0;
}
/*
 * for the common functions, 'private' gives the type of file
 */
/* for hysterical raisins, we can't put this on the older files */
#define CGROUP_FILE_GENERIC_PREFIX "cgroup."

/* base control files created in every cgroup directory */
static struct cftype files[] = {
	{
		.name = "tasks",
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "procs",
		.open = cgroup_procs_open,
		.write_u64 = cgroup_procs_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_sane_behavior_show,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_release_agent_show,
		.write_string = cgroup_release_agent_write,
		.max_write_len = PATH_MAX,
	},
	{ }	/* terminate */
};
/**
 * cgroup_populate_dir - selectively creation of files in a directory
 * @cgrp: target cgroup
 * @base_files: true if the base files should be added
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * Returns 0 on success or a negative errno from adding the base files.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
			       unsigned long subsys_mask)
{
	int err;
	struct cgroup_subsys *ss;

	if (base_files) {
		err = cgroup_addrm_files(cgrp, NULL, files, true);
		if (err < 0)
			return err;
	}

	/* process cftsets of each subsystem */
	for_each_subsys(cgrp->root, ss) {
		struct cftype_set *set;
		if (!test_bit(ss->subsys_id, &subsys_mask))
			continue;

		/* NOTE(review): errors from per-subsys file creation are
		 * ignored here - TODO confirm this is intentional */
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, ss, set->cfts, true);
	}

	/* This cgroup is ready now */
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from RCU-read-side without locks.
		 */
		if (css->id)
			rcu_assign_pointer(css->id->css, css);
	}

	return 0;
}
/*
 * Deferred dput of the cgroup dentry held by a css; runs from a workqueue
 * because the final css_put() may happen in a context that can't dput().
 */
static void css_dput_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, dput_work);
	struct dentry *d = css->cgroup->dentry;
	struct super_block *sb = d->d_sb;

	/* pin the superblock so it can't go away under the dput() */
	atomic_inc(&sb->s_active);
	dput(d);
	deactivate_super(sb);
}
/*
 * Initialize a freshly allocated css and link it into @cgrp's subsys
 * array.  The refcount starts at 1 (the base ref put on destroy).
 */
static void init_cgroup_css(struct cgroup_subsys_state *css,
			       struct cgroup_subsys *ss,
			       struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	atomic_set(&css->refcnt, 1);
	css->flags = 0;
	css->id = NULL;
	/* the dummy top cgroup's csses are the hierarchy roots */
	if (cgrp == dummytop)
		css->flags |= CSS_ROOT;
	BUG_ON(cgrp->subsys[ss->subsys_id]);
	cgrp->subsys[ss->subsys_id] = css;

	/*
	 * css holds an extra ref to @cgrp->dentry which is put on the last
	 * css_put().  dput() requires process context, which css_put() may
	 * be called without.  @css->dput_work will be used to invoke
	 * dput() asynchronously from css_put().
	 */
	INIT_WORK(&css->dput_work, css_dput_fn);
}
/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	/* subsystems without an online callback trivially succeed */
	ret = ss->css_online ? ss->css_online(cgrp) : 0;
	if (!ret)
		cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE;
	return ret;
}
/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

	lockdep_assert_held(&cgroup_mutex);

	/* offlining twice would double-invoke ->css_offline() */
	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(cgrp);

	cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
}
/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held.
 * Returns 0 on success or a negative errno.
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			     umode_t mode)
{
	struct cgroup *cgrp;
	struct cgroup_name *name;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	name = cgroup_alloc_name(dentry);
	if (!name) {
		/* BUGFIX: err was previously left at 0 here, so a failed
		 * name allocation reported success to the caller while the
		 * cgroup had already been freed. */
		err = -ENOMEM;
		goto err_free_cgrp;
	}
	rcu_assign_pointer(cgrp->name, name);

	cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		/* BUGFIX: propagate the ida error instead of returning 0 */
		err = cgrp->id;
		goto err_free_name;
	}

	/*
	 * Only live parents can have children.  Note that the liveliness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_free_id;
	}

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	init_cgroup_housekeeping(cgrp);

	dentry->d_fsdata = cgrp;
	cgrp->dentry = dentry;

	cgrp->parent = parent;
	cgrp->root = parent->root;

	/* new children inherit these two behavioral flags */
	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* allocate and initialize a css for each bound subsystem */
	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css;

		css = ss->css_alloc(cgrp);
		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_free_all;
		}
		init_cgroup_css(css, ss, cgrp);
		if (ss->use_id) {
			err = alloc_css_id(ss, parent, cgrp);
			if (err)
				goto err_free_all;
		}
	}

	/*
	 * Create directory.  cgroup_create_file() returns with the new
	 * directory locked on success so that it can be populated without
	 * dropping cgroup_mutex.
	 */
	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
	if (err < 0)
		goto err_free_all;
	lockdep_assert_held(&dentry->d_inode->i_mutex);

	/* allocation complete, commit to creation */
	list_add_tail(&cgrp->allcg_node, &root->allcg_list);
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	root->number_of_cgroups++;

	/* each css holds a ref to the cgroup's dentry */
	for_each_subsys(root, ss)
		dget(dentry);

	/* hold a ref to the parent's dentry */
	dget(parent->dentry);

	/* creation succeeded, notify subsystems */
	for_each_subsys(root, ss) {
		err = online_css(ss, cgrp);
		if (err)
			goto err_destroy;

		if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
		    parent->parent) {
			pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
				   current->comm, current->pid, ss->name);
			if (!strcmp(ss->name, "memory"))
				pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
			ss->warned_broken_hierarchy = true;
		}
	}

	err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
	if (err)
		goto err_destroy;

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_free_all:
	for_each_subsys(root, ss) {
		if (cgrp->subsys[ss->subsys_id])
			ss->css_free(cgrp);
	}
	mutex_unlock(&cgroup_mutex);
	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);
err_free_id:
	ida_simple_remove(&root->cgroup_ida, cgrp->id);
err_free_name:
	kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
	kfree(cgrp);
	return err;

err_destroy:
	/* cgroup is committed; tear it down through the normal path */
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
}
/* VFS mkdir entry point: create a child cgroup under the parent directory */
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	/* the vfs holds inode->i_mutex already */
	return cgroup_create(dentry->d_parent->d_fsdata, dentry,
			     mode | S_IFDIR);
}
/*
 * Tear down @cgrp.  Caller holds cgroup_mutex and the cgroup directory's
 * i_mutex.  Fails with -EBUSY if the cgroup still has users or children.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct dentry *d = cgrp->dentry;
	struct cgroup *parent = cgrp->parent;
	struct cgroup_event *event, *tmp;
	struct cgroup_subsys *ss;

	lockdep_assert_held(&d->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
		return -EBUSY;

	/*
	 * Block new css_tryget() by deactivating refcnt and mark @cgrp
	 * removed.  This makes future css_tryget() and child creation
	 * attempts fail thus maintaining the removal conditions verified
	 * above.
	 */
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		WARN_ON(atomic_read(&css->refcnt) < 0);
		atomic_add(CSS_DEACT_BIAS, &css->refcnt);
	}
	set_bit(CGRP_REMOVED, &cgrp->flags);

	/* tell subsystems to initiate destruction */
	for_each_subsys(cgrp->root, ss)
		offline_css(ss, cgrp);

	/*
	 * Put all the base refs.  Each css holds an extra reference to the
	 * cgroup's dentry and cgroup removal proceeds regardless of css
	 * refs.  On the last put of each css, whenever that may be, the
	 * extra dentry ref is put so that dentry destruction happens only
	 * after all css's are released.
	 */
	for_each_subsys(cgrp->root, ss)
		css_put(cgrp->subsys[ss->subsys_id]);

	/* make sure the release agent won't see a half-gone cgroup */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);
	list_del_init(&cgrp->allcg_node);

	/* hold the dentry across removal so 'd' stays valid */
	dget(d);
	cgroup_d_remove_dir(d);
	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	return 0;
}
/* VFS rmdir entry point: destroy the cgroup under cgroup_mutex */
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_destroy_locked(dentry->d_fsdata);
	mutex_unlock(&cgroup_mutex);

	return ret;
}
/*
 * Set up @ss's list of cftype sets and seed it with the subsystem's
 * built-in base_cftypes, if any.
 */
static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
{
	INIT_LIST_HEAD(&ss->cftsets);

	/*
	 * base_cftset is embedded in subsys itself, no need to worry about
	 * deregistration.
	 */
	if (ss->base_cftypes) {
		ss->base_cftset.cfts = ss->base_cftypes;
		list_add_tail(&ss->base_cftset.node, &ss->cftsets);
	}
}
/*
 * Boot-time registration of a built-in subsystem: attach it to the dummy
 * root hierarchy and wire its top css into init_css_set.  Failures here
 * are fatal (BUG).
 */
static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	/* init base cftset */
	cgroup_init_cftsets(ss);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;
	css = ss->css_alloc(dummytop);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, dummytop);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(ss, dummytop));

	mutex_unlock(&cgroup_mutex);

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}
/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 *
 * Returns 0 on success or a negative errno.
 */
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	int i, ret;
	struct hlist_node *tmp;
	struct css_set *cg;
	unsigned long key;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->css_alloc == NULL || ss->css_free == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a sanity check */
		BUG_ON(subsys[ss->subsys_id] != ss);
		return 0;
	}

	/* init base cftset */
	cgroup_init_cftsets(ss);

	mutex_lock(&cgroup_mutex);
	subsys[ss->subsys_id] = ss;

	/*
	 * no ss->css_alloc seems to need anything important in the ss
	 * struct, so this can happen first (i.e. before the rootnode
	 * attachment).
	 */
	css = ss->css_alloc(dummytop);
	if (IS_ERR(css)) {
		/* failure case - need to deassign the subsys[] slot. */
		subsys[ss->subsys_id] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_cgroup_css(css, ss, dummytop);
	/* init_idr must be after init_cgroup_css because it sets css->id. */
	if (ss->use_id) {
		ret = cgroup_init_idr(ss, css);
		if (ret)
			goto err_unload;
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
		/* skip entries that we already rehashed */
		if (cg->subsys[ss->subsys_id])
			continue;
		/* remove existing entry */
		hash_del(&cg->hlist);
		/* set new value */
		cg->subsys[ss->subsys_id] = css;
		/* recompute hash and restore entry */
		key = css_set_hash(cg->subsys);
		hash_add(css_set_table, &cg->hlist, key);
	}
	write_unlock(&css_set_lock);

	ret = online_css(ss, dummytop);
	if (ret)
		goto err_unload;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;

err_unload:
	mutex_unlock(&cgroup_mutex);
	/* @ss can't be mounted here as try_module_get() would fail */
	cgroup_unload_subsys(ss);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);
/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cg_cgroup_link *link;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get in parse_cgroupfs_options should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &rootnode);

	mutex_lock(&cgroup_mutex);

	offline_css(ss, dummytop);

	if (ss->use_id)
		idr_destroy(&ss->idr);

	/* deassign the subsys_id */
	subsys[ss->subsys_id] = NULL;

	/* remove subsystem from rootnode's list of subsystems */
	list_del_init(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummytop. as
	 * in loading, we need to pay our respects to the hashtable gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;
		unsigned long key;

		hash_del(&cg->hlist);
		cg->subsys[ss->subsys_id] = NULL;
		key = css_set_hash(cg->subsys);
		hash_add(css_set_table, &cg->hlist, key);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove subsystem's css from the dummytop and free it - need to
	 * free before marking as null because ss->css_free needs the
	 * cgrp->subsys pointer to find their state. note that this also
	 * takes care of freeing the css_id.
	 */
	ss->css_free(dummytop);
	dummytop->subsys[ss->subsys_id] = NULL;

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	int i;

	/* set up the boot-time css_set that init_task starts in */
	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cg_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&rootnode);
	root_count = 1;
	init_task.cgroups = &init_css_set;

	/* link init_css_set to the dummy top cgroup */
	init_css_set_link.cg = &init_css_set;
	init_css_set_link.cgrp = dummytop;
	list_add(&init_css_set_link.cgrp_link_list,
		 &rootnode.top_cgroup.css_sets);
	list_add(&init_css_set_link.cg_link_list,
		 &init_css_set.cg_links);

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		/* at bootup time, we don't worry about modular subsystems */
		if (!ss || ss->module)
			continue;

		/* sanity-check each built-in subsystem's declaration */
		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->css_alloc);
		BUG_ON(!ss->css_free);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}
/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	int err;
	int i;
	unsigned long key;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		/* at bootup time, we don't worry about modular subsystems */
		if (!ss || ss->module)
			continue;
		/* Subsystems that asked for early init were already set up
		 * in cgroup_init_early(); do the rest now. */
		if (!ss->early_init)
			cgroup_init_subsys(ss);
		if (ss->use_id)
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
	}

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	/* The root hierarchy must always be able to get an id. */
	BUG_ON(!init_root_id(&rootnode));

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	/* On any failure, undo the bdi_init() done above. */
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}
/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	/* Emit one "<id>:<subsys list>[,name=<name>]:<path>" line per
	 * active hierarchy. */
	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		/* Modular subsystems may be absent (unloaded). */
		if (ss == NULL)
			continue;
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);
	}
	mutex_unlock(&cgroup_mutex);
	return 0;
}
/* open() handler for /proc/cgroups: single-record seq_file. */
static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

/* File operations for /proc/cgroups. */
static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * cgroup_fork - attach newly forked task to its parents cgroup.
 * @child: pointer to task_struct of forking parent process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct().  However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced cgroup group to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	/* task_lock() stabilizes current->cgroups while we copy it and
	 * take our own reference on the css_set. */
	task_lock(current);
	child->cgroups = current->cgroups;
	get_css_set(child->cgroups);
	task_unlock(current);
	/* The child is linked onto its css_set's task list later, in
	 * cgroup_post_fork(), once it is visible on the tasklist. */
	INIT_LIST_HEAD(&child->cg_list);
}
/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * call the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * cgroup_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	int i;

	/*
	 * use_task_css_set_links is set to 1 before we walk the tasklist
	 * under the tasklist_lock and we read it here after we added the child
	 * to the tasklist under the tasklist_lock as well. If the child wasn't
	 * yet in the tasklist when we walked through it from
	 * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
	 * should be visible now due to the paired locking and barriers implied
	 * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
	 * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
	 * lock on fork.
	 */
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		/* cgroup_enable_task_cg_lists() may already have linked us. */
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &child->cgroups->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, and the builtin section of the subsys
		 * array is immutable, so we don't need to lock the
		 * subsys array here. On the other hand, modular section
		 * of the array can be freed at module unload, so we
		 * can't touch that.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (ss->fork)
				ss->fork(child);
		}
	}
}
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callback: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting tasks cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.  The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or task is a failed
 *    fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct css_set *cg;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		/* Re-check under the lock: someone may have raced with us. */
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cg = tsk->cgroups;
	tsk->cgroups = &init_css_set;

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, see cgroup_post_fork() for details.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (ss->exit) {
				/* old_cgrp comes from the css_set we are
				 * leaving; cgrp is the (root) cgroup the
				 * task now belongs to. */
				struct cgroup *old_cgrp =
					rcu_dereference_raw(cg->subsys[i])->cgroup;
				struct cgroup *cgrp = task_cgroup(tsk, i);

				ss->exit(cgrp, old_cgrp, tsk);
			}
		}
	}
	task_unlock(tsk);

	/* Drop the reference taken at fork; may trigger release-agent work. */
	put_css_set_taskexit(cg);
}
/*
 * If @cgrp has become releasable (notify_on_release set, no tasks, no
 * children), queue it for the userspace release-agent notification.
 */
static void check_for_release(struct cgroup *cgrp)
{
	/* All of these checks rely on RCU to keep the cgroup
	 * structure alive */
	if (cgroup_is_releasable(cgrp) &&
	    !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removeable. If it's not
		 * already queued for a userspace notification, queue
		 * it now
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_removed(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		/* Schedule outside the spinlock. */
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}
/* Caller must verify that the css is not for root cgroup */
/*
 * Lock-free attempt to take a reference on @css.  Retries the cmpxchg
 * until it either succeeds (returns true) or observes a negative
 * refcount, meaning the css is being destroyed (returns false).
 */
bool __css_tryget(struct cgroup_subsys_state *css)
{
	while (true) {
		int t, v;

		v = css_refcnt(css);
		t = atomic_cmpxchg(&css->refcnt, v, v + 1);
		if (likely(t == v))
			return true;
		else if (t < 0)
			return false;
		/* Lost the race with another updater; spin and retry. */
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(__css_tryget);
/* Caller must verify that the css is not for root cgroup */
/*
 * Drop a reference on @css; when the unbiased count reaches zero,
 * schedule the deferred dput work to release the backing dentry.
 */
void __css_put(struct cgroup_subsys_state *css)
{
	int v;

	v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
	if (v == 0)
		schedule_work(&css->dput_work);
}
EXPORT_SYMBOL_GPL(__css_put);
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		/* Drop the spinlock: the allocations below may sleep. */
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
/*
 * Parse the "cgroup_disable=" boot parameter: a comma-separated list of
 * subsystem names to mark disabled before they are ever used.
 */
static int __init cgroup_disable(char *str)
{
	int i;
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		/* Skip empty entries from ",," or leading/trailing commas. */
		if (!*token)
			continue;
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			/*
			 * cgroup_disable, being at boot time, can't
			 * know about module subsystems, so we don't
			 * worry about them.
			 */
			if (!ss || ss->module)
				continue;

			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
/*
 * Functions for CSS ID.
 */
/*
 * To get ID other than 0, this should be called when !cgroup_is_removed().
 */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return correct value when someone has refcnt
	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
	 * it's unchanged until freed.
	 */
	cssid = rcu_dereference_check(css->id, css_refcnt(css));

	if (cssid)
		return cssid->id;
	/* 0 means "no id assigned". */
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);
/*
 * Return the hierarchy depth stored in @css's css_id, or 0 when no id
 * has been assigned.  Same access rules as css_id(): caller holds a
 * refcnt or rcu_read_lock().
 */
unsigned short css_depth(struct cgroup_subsys_state *css)
{
	struct css_id *id = rcu_dereference_check(css->id, css_refcnt(css));

	return id ? id->depth : 0;
}
EXPORT_SYMBOL_GPL(css_depth);
/**
 * css_is_ancestor - test "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, the caller must hold rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after test.
 * Assuming that the caller will do some action to the child if this
 * returns true, the caller must take "child"'s reference count.
 * If "child" is valid object and this returns true, "root" is valid, too.
 */
bool css_is_ancestor(struct cgroup_subsys_state *child,
		     const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;

	child_id = rcu_dereference(child->id);
	if (!child_id)
		return false;
	root_id = rcu_dereference(root->id);
	if (!root_id)
		return false;
	/* An ancestor is always at a strictly smaller or equal depth ... */
	if (child_id->depth < root_id->depth)
		return false;
	/* ... and appears at its own depth in the child's ancestor stack. */
	if (child_id->stack[root_id->depth] != root_id->id)
		return false;
	return true;
}
/*
 * Release @css's css_id: unhook the id<->css pointers, remove the id
 * from the subsystem's idr, and free the css_id after a grace period.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = css->id;
	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	/* Break both directions of the id<->css link before freeing. */
	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	/* RCU readers may still hold a pointer; defer the actual free. */
	kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (By cgroup_mutex() at create()).
 *
 * Allocates a css_id with room for @depth+1 ancestor ids in its stack
 * and assigns it a fresh idr id in [1, CSS_ID_MAX].
 */
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int ret, size;

	BUG_ON(!ss->use_id);

	/* Flexible stack[]: one slot per ancestor level plus ourselves. */
	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);

	idr_preload(GFP_KERNEL);
	spin_lock(&ss->id_lock);
	/* Don't use 0. allocates an ID of 1-65535 */
	ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
	spin_unlock(&ss->id_lock);
	idr_preload_end();

	/* Returns error when there are no free spaces for new ID.*/
	if (ret < 0)
		goto err_out;

	newid->id = ret;
	newid->depth = depth;
	return newid;
err_out:
	kfree(newid);
	return ERR_PTR(ret);
}
/*
 * Initialize css_id machinery for subsystem @ss and assign the root
 * css (@rootcss) its id at depth 0.  Returns 0 or a -errno.
 */
static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	/* The root is its own only ancestor. */
	newid->stack[0] = newid->id;
	newid->css = rootcss;
	rootcss->id = newid;
	return 0;
}
/*
 * Allocate a css_id for @child's css under subsystem @ss, one level
 * deeper than @parent, copying the parent's ancestor stack and
 * appending the new id.  Returns 0 or a -errno.
 */
static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
			struct cgroup *child)
{
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
	struct css_id *child_id, *parent_id;

	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
	parent_id = parent_css->id;
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);

	/* Inherit the parent's ancestor chain, then add ourselves. */
	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is available
	 * see cgroup_populate_dir()
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}
/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns pointer to cgroup_subsys_state if there is valid one with id.
 * NULL if not. Should be called under rcu_read_lock()
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	/* cssid->css may be NULL until the cgroup is fully populated. */
	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);
/*
 * get corresponding css from file open on cgroupfs directory
 *
 * Returns the css for subsystem @id on the cgroup whose directory @f is
 * open on, or an ERR_PTR: -EBADF if @f is not a cgroupfs directory,
 * -EINVAL for an out-of-range id, -ENOENT if no css is attached.
 */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
{
	struct cgroup *cgrp;
	struct inode *inode;
	struct cgroup_subsys_state *css;

	inode = file_inode(f);
	/* check in cgroup filesystem dir */
	if (inode->i_op != &cgroup_dir_inode_operations)
		return ERR_PTR(-EBADF);

	if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
		return ERR_PTR(-EINVAL);

	/* get cgroup */
	cgrp = __d_cgrp(f->f_dentry);
	css = cgrp->subsys[id];
	return css ? css : ERR_PTR(-ENOENT);
}
#ifdef CONFIG_CGROUP_DEBUG
/* Allocate a zeroed css for the debug subsystem. */
static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cont)
{
	struct cgroup_subsys_state *css;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	return css ? css : ERR_PTR(-ENOMEM);
}
/* Free the debug subsystem's css allocated in debug_css_alloc(). */
static void debug_css_free(struct cgroup *cont)
{
	kfree(cont->subsys[debug_subsys_id]);
}

/* "cgroup_refcount": raw reference count of the cgroup. */
static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
	return atomic_read(&cont->count);
}

/* "taskcount": number of tasks attached to the cgroup. */
static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	return cgroup_task_count(cont);
}

/* "current_css_set": address of the reading task's css_set (debug aid). */
static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}
static u64 current_css_set_refcount_read(struct cgroup *cont,
struct cftype *cft)
{
u64 count;
rcu_read_lock();
count = atomic_read(¤t->cgroups->refcount);
rcu_read_unlock();
return count;
}
/*
 * "current_css_set_cg_links": list each cgroup linked to the reading
 * task's css_set, as "Root <hierarchy id> group <dir name>" lines.
 */
static int current_css_set_cg_links_read(struct cgroup *cont,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cg_cgroup_link *link;
	struct css_set *cg;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cg = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		const char *name;

		/* A cgroup without a dentry (e.g. dummytop) has no name. */
		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}
/* Cap per-css_set task output so the seq_file stays bounded. */
#define MAX_TASKS_SHOWN_PER_CSS 25

/*
 * "cgroup_css_links": for each css_set linked to this cgroup, list up
 * to MAX_TASKS_SHOWN_PER_CSS member task pids.
 */
static int cgroup_css_links_read(struct cgroup *cont,
				 struct cftype *cft,
				 struct seq_file *seq)
{
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cg);
		list_for_each_entry(task, &cg->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}
/* "releasable": whether the CGRP_RELEASABLE flag is set on the cgroup. */
static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}
/* Control files exposed by the debug subsystem in each cgroup dir. */
static struct cftype debug_files[] =  {
	{
		.name = "cgroup_refcount",
		.read_u64 = cgroup_refcount_read,
	},
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
/* The "debug" cgroup subsystem: exposes internal state for debugging. */
struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.subsys_id = debug_subsys_id,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */
| gpl-2.0 |
rootfs/vzkernel | net/ipv4/netfilter/arptable_filter.c | 7 | 2287 | /*
* Filtering ARP tables module.
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
(1 << NF_ARP_FORWARD))
/* Template for the per-netns arptables "filter" table. */
static const struct xt_table packet_filter = {
	.name		= "filter",
	.valid_hooks	= FILTER_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_ARP,
	.priority	= NF_IP_PRI_FILTER,
};
/* The work comes in here from netfilter.c */
static unsigned int
arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
		     const struct net_device *in, const struct net_device *out,
		     int (*okfn)(struct sk_buff *))
{
	/* Either @in or @out is set depending on the hook direction. */
	const struct net *net = dev_net((in != NULL) ? in : out);

	return arpt_do_table(skb, ops->hooknum, in, out,
			     net->ipv4.arptable_filter);
}
/* Hook ops returned by xt_hook_link(); unhooked again on module exit. */
static struct nf_hook_ops *arpfilter_ops __read_mostly;
/* Per-netns init: register an initial empty "filter" table for @net. */
static int __net_init arptable_filter_net_init(struct net *net)
{
	struct arpt_replace *repl;

	repl = arpt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.arptable_filter =
		arpt_register_table(net, &packet_filter, repl);
	/* The registered table copies @repl; the template can go. */
	kfree(repl);
	return PTR_RET(net->ipv4.arptable_filter);
}
/* Per-netns teardown: unregister this namespace's filter table. */
static void __net_exit arptable_filter_net_exit(struct net *net)
{
	arpt_unregister_table(net->ipv4.arptable_filter);
}

/* Per-network-namespace setup/teardown callbacks. */
static struct pernet_operations arptable_filter_net_ops = {
	.init = arptable_filter_net_init,
	.exit = arptable_filter_net_exit,
};
/*
 * Module init: register the pernet table first, then attach the
 * netfilter hooks; undo the pernet registration if hooking fails.
 */
static int __init arptable_filter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arptable_filter_net_ops);
	if (ret < 0)
		return ret;

	arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
	if (IS_ERR(arpfilter_ops)) {
		ret = PTR_ERR(arpfilter_ops);
		goto cleanup_table;
	}
	return ret;

cleanup_table:
	unregister_pernet_subsys(&arptable_filter_net_ops);
	return ret;
}
/* Module exit: reverse of init — unhook, then unregister pernet ops. */
static void __exit arptable_filter_fini(void)
{
	xt_hook_unlink(&packet_filter, arpfilter_ops);
	unregister_pernet_subsys(&arptable_filter_net_ops);
}

module_init(arptable_filter_init);
module_exit(arptable_filter_fini);
| gpl-2.0 |
SReichelt/ardour | libs/evoral/src/midi_util.cpp | 7 | 1311 | /* This file is part of Evoral.
* Copyright (C) 2008 David Robillard <http://drobilla.net>
* Copyright (C) 2009 Paul Davis
*
* Evoral is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* Evoral is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "evoral/midi_util.h"
#include <cstdio>
namespace Evoral {
/*
 * Return the scientific-pitch name ("C4", "A#-1", ...) for MIDI note
 * @val, or "???" for values above 127.
 *
 * Fix: the scratch buffer was declared `static`, making the function
 * non-reentrant and thread-unsafe for no benefit — the result is
 * immediately copied into the returned std::string anyway.  The buffer
 * is now automatic, so concurrent calls cannot clobber each other.
 */
std::string
midi_note_name (uint8_t val)
{
	if (val > 127) {
		return "???";
	}

	static const char* notes[] = {
		"C",
		"C#",
		"D",
		"D#",
		"E",
		"F",
		"F#",
		"G",
		"G#",
		"A",
		"A#",
		"B"
	};

	/* MIDI note 0 is in octave -1 (in scientific pitch notation) */
	int octave = val / 12 - 1;

	/* Automatic buffer: thread-safe; copied into the returned string. */
	char buf[8];

	val = val % 12;
	snprintf (buf, sizeof (buf), "%s%d", notes[val], octave);
	return buf;
}
}
| gpl-2.0 |
heshamelmatary/rtems-gsoc2012 | testsuites/psxtests/psx05/task.c | 7 | 1308 | /* Task_1
*
* This routine serves as a test task. It verifies the basic task
* switching capabilities of the executive.
*
* Input parameters:
* argument - task argument
*
* Output parameters: NONE
*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "system.h"
#include <errno.h>
/*
 * Test task body: exercises pthread_mutex_trylock/lock against a mutex
 * (Mutex_id, declared in system.h) already held by the Init thread.
 * The printf/sleep sequence interleaves deliberately with Init; each
 * "switch to init" comment marks an expected context switch.
 */
void *Task_1(
  void *argument
)
{
  int status;

  /* Mutex is held by Init, so trylock must fail with EBUSY. */
  printf( "Task: pthread_mutex_trylock already locked\n" );
  status = pthread_mutex_trylock( &Mutex_id );
  if ( status != EBUSY )
    printf( "status = %d\n", status );
  rtems_test_assert( status == EBUSY );

  /* Blocking lock: we sleep here until Init releases the mutex. */
  printf( "Task: pthread_mutex_lock unavailable\n" );
  status = pthread_mutex_lock( &Mutex_id );
  if ( status )
    printf( "status = %d\n", status );
  rtems_test_assert( !status );

  /* switch to init */

  printf( "Task: mutex acquired\n" );
  printf( "Task: sleep for 2 seconds\n" );
  sleep( 2 );

  /* switch to init */

  printf( "Task: exit\n" );
  pthread_exit( NULL );

  /* switch to Init */

  return NULL; /* just so the compiler thinks we returned something */
}
| gpl-2.0 |
GiuseppeGorgoglione/mame | src/mame/drivers/lft.cpp | 7 | 3195 | // license:BSD-3-Clause
// copyright-holders:Robbbert
/***************************************************************************
2013-09-09 Skeleton of LFT computer system. A search on the net produces
no finds.
****************************************************************************/
#include "emu.h"
#include "cpu/i86/i186.h"
#include "machine/terminal.h"
/* Driver state for the LFT skeleton machine: an 80186 CPU plus a
 * generic terminal used as both keyboard and display. */
class lft_state : public driver_device
{
public:
	lft_state(const machine_config &mconfig, device_type type, const char *tag)
		: driver_device(mconfig, type, tag)
		, m_maincpu(*this, "maincpu")
		, m_terminal(*this, "terminal")
	{}

	DECLARE_WRITE8_MEMBER(kbd_put);      // terminal keyboard -> m_term_data
	DECLARE_WRITE16_MEMBER(term_w);      // CPU -> terminal output
	DECLARE_READ16_MEMBER(keyin_r);      // CPU reads buffered keystroke
	DECLARE_READ16_MEMBER(status_r);     // keyboard status poll

private:
	// One-byte keyboard buffer; 0 means "no key pending".
	UINT8 m_term_data;
	virtual void machine_reset() override;
	required_device<cpu_device> m_maincpu;
	required_device<generic_terminal_device> m_terminal;
};
/* Memory map: 384K RAM at the bottom, 16K boot ROM at the top of the
 * 1MB 80186 address space (covers the reset vector at 0xffff0). */
static ADDRESS_MAP_START(lft_mem, AS_PROGRAM, 16, lft_state)
	ADDRESS_MAP_UNMAP_HIGH
	AM_RANGE(0x00000, 0x5ffff) AM_RAM
	AM_RANGE(0xfc000, 0xfffff) AM_ROM AM_REGION("roms", 0)
ADDRESS_MAP_END
/* I/O map: two serial-like channels; only channel 1 is wired to the
 * terminal, channel 2 is stubbed out with NOP handlers. */
static ADDRESS_MAP_START(lft_io, AS_IO, 16, lft_state)
	ADDRESS_MAP_UNMAP_HIGH
	ADDRESS_MAP_GLOBAL_MASK(0xff)
	// screen 1
	AM_RANGE(0x00, 0x01) AM_READNOP
	AM_RANGE(0x04, 0x05) AM_READWRITE(keyin_r,term_w)
	// screen 2
	AM_RANGE(0x02, 0x03) AM_READNOP
	AM_RANGE(0x06, 0x07) AM_WRITENOP
ADDRESS_MAP_END
/* Input ports */
/* No dedicated inputs: all input comes through the generic terminal. */
static INPUT_PORTS_START( lft )
INPUT_PORTS_END
/* Read the buffered keystroke and clear the buffer (read-once). */
READ16_MEMBER( lft_state::keyin_r )
{
	UINT16 ret = m_term_data;
	m_term_data = 0;
	return ret;
}
/* Keyboard status: 5 when a key is pending, 4 otherwise.
 * NOTE(review): defined but not referenced by either address map —
 * presumably intended for a status port; confirm before removing. */
READ16_MEMBER( lft_state::status_r )
{
	return (m_term_data) ? 5 : 4;
}
/* Terminal keyboard callback: latch the keystroke for keyin_r(). */
WRITE8_MEMBER( lft_state::kbd_put )
{
	m_term_data = data;
}

/* CPU write to the terminal; masking to 7 bits fixes backspace. */
WRITE16_MEMBER( lft_state::term_w )
{
	m_terminal->write(space, 0, data & 0x7f); // fix backspace
}
/* Reset: discard any pending keystroke. */
void lft_state::machine_reset()
{
	m_term_data = 0;
}
/* Machine configuration: 80186 CPU (clock is a guess) plus a generic
 * terminal whose keyboard feeds kbd_put(). */
static MACHINE_CONFIG_START( lft, lft_state )
	/* basic machine hardware */
	MCFG_CPU_ADD("maincpu", I80186, 4000000) // no idea
	MCFG_CPU_PROGRAM_MAP(lft_mem)
	MCFG_CPU_IO_MAP(lft_io)

	/* video hardware */
	MCFG_DEVICE_ADD("terminal", GENERIC_TERMINAL, 0)
	MCFG_GENERIC_TERMINAL_KEYBOARD_CB(WRITE8(lft_state, kbd_put))
MACHINE_CONFIG_END
/* ROM definition */
/* Boot ROMs: interleaved even/odd byte pairs for the 16-bit bus. */
ROM_START( lft1230 )
	ROM_REGION(0x4000, "roms", 0)
	ROM_LOAD16_BYTE( "1230lf29", 0x0000, 0x2000, CRC(11c87367) SHA1(0879650aa98e19a4e6ca7b6ee7874f81c9c8ccfa) )
	ROM_LOAD16_BYTE( "1230lf42", 0x0001, 0x2000, CRC(ab82b620) SHA1(8c7d93950703f348e5ce0f9e376d157dd6098c6a) )
ROM_END

ROM_START( lft1510 )
	ROM_REGION(0x4000, "roms", 0)
	/* Only the upper half of each dump is used (IGNORE skips the rest). */
	ROM_LOAD16_BYTE( "1510lfev", 0x2000, 0x1000, CRC(47dbb290) SHA1(b557e9a54a30d9a16edfdef4a6b12a5393d30bf3) )
	ROM_IGNORE(0x1000)
	ROM_LOAD16_BYTE( "1510lfod", 0x2001, 0x1000, CRC(ba8c23fc) SHA1(d4b82f69fccd653b31e7bd05ee884b323ff0007b) )
	ROM_IGNORE(0x1000)
ROM_END

/* Driver */

/*    YEAR  NAME     PARENT   COMPAT  MACHINE  INPUT  CLASS          INIT  COMPANY  FULLNAME    FLAGS */
COMP( ????, lft1510, 0,       0,      lft,     lft,   driver_device, 0,    "LFT",   "LFT 1510", MACHINE_IS_SKELETON)
COMP( ????, lft1230, lft1510, 0,      lft,     lft,   driver_device, 0,    "LFT",   "LFT 1230", MACHINE_IS_SKELETON)
| gpl-2.0 |
erpragatisingh/androidTraining | Android_6_weekTraning/tess-two/jni/com_googlecode_leptonica_android/src/prog/bak/findpattern1.c | 7 | 4239 | /*====================================================================*
- Copyright (C) 2001 Leptonica. All rights reserved.
- This software is distributed in the hope that it will be
- useful, but with NO WARRANTY OF ANY KIND.
- No author or distributor accepts responsibility to anyone for the
- consequences of using this software, or for whether it serves any
- particular purpose or works at all, unless he or she says so in
- writing. Everyone is granted permission to copy, modify and
- redistribute this source code, for commercial or non-commercial
- purposes, with the following restrictions: (1) the origin of this
- source code must not be misrepresented; (2) modified versions must
- be plainly marked as such; and (3) this notice may not be removed
- or altered from any source or modified source distribution.
*====================================================================*/
/*
* findpattern1.c
*
* findpattern1 filein patternfile fileout
*
* This is setup with input parameters to generate a hit-miss
* Sel from the instance char.tif of a "c" bitmap, from
* the page image feyn.tif, scanned at 300 ppi:
*
* findpattern1 feyn.tif char.tif junkcharout
*
* It shows a number of different outputs, including a magnified
* image of the Sel superimposed on the "c" bitmap.
*/
#include <stdio.h>
#include <stdlib.h>
#include "allheaders.h"
/* for pixGenerateSelWithRuns() */
static const l_int32 NumHorLines = 11;
static const l_int32 NumVertLines = 8;
static const l_int32 MinRunlength = 1;
/* for pixDisplayHitMissSel() */
static const l_uint32 HitColor = 0xff880000;
static const l_uint32 MissColor = 0x00ff8800;
/*
 * Locate every instance of a template pattern in a page image using a
 * hit-miss transform, writing several visualizations of the result.
 *
 * Fix: the original declared main() with an implicit int return type,
 * which has been invalid since C99; the explicit "int" is added.  No
 * behavioral change.
 */
int main(int    argc,
         char **argv)
{
char        *filein, *fileout, *patternfile;
l_int32      w, h, i, n;
BOX         *box, *boxe;
BOXA        *boxa1, *boxa2;
PIX         *pixs, *pixp, *pixpe;
PIX         *pixd, *pixt1, *pixt2, *pixhmt;
SEL         *sel_2h, *sel;
static char  mainName[] = "findpattern1";

    if (argc != 4)
	exit(ERROR_INT(" Syntax:  findpattern1 filein patternfile fileout",
	     mainName, 1));

    filein = argv[1];
    patternfile = argv[2];
    fileout = argv[3];

    if ((pixs = pixRead(filein)) == NULL)
	exit(ERROR_INT("pixs not made", mainName, 1));
    if ((pixp = pixRead(patternfile)) == NULL)
	exit(ERROR_INT("pixp not made", mainName, 1));
    w = pixGetWidth(pixp);
    h = pixGetHeight(pixp);

	/* generate the hit-miss Sel with runs */
    sel = pixGenerateSelWithRuns(pixp, NumHorLines, NumVertLines, 0,
                                MinRunlength, 7, 7, 0, 0, &pixpe);

	/* display the Sel two ways */
    selWriteStream(stderr, sel);
    pixt1 = pixDisplayHitMissSel(pixpe, sel, 9, HitColor, MissColor);
    pixDisplay(pixt1, 200, 200);
    pixWrite("/tmp/junkpixt", pixt1, IFF_PNG);

	/* use the Sel to find all instances in the page */
    startTimer();
    pixhmt = pixHMT(NULL, pixs, sel);
    fprintf(stderr, "Time to find patterns = %7.3f\n", stopTimer());

	/* small erosion to remove noise; typically not necessary if
	 * there are enough elements in the Sel */
    sel_2h = selCreateBrick(1, 2, 0, 0, SEL_HIT);
    pixt2 = pixErode(NULL, pixhmt, sel_2h);

	/* display the result visually by placing the Sel at each
	 * location found */
    pixd = pixDilate(NULL, pixt2, sel);
    pixWrite(fileout, pixd, IFF_TIFF_G4);

	/* display outut with an outline around each located pattern */
    boxa1 = pixConnCompBB(pixt2, 8);
    n = boxaGetCount(boxa1);
    boxa2 = boxaCreate(n);
    for (i = 0; i < n; i++) {
        box = boxaGetBox(boxa1, i, L_COPY);
        /* Expand each detection box to cover the full template size. */
        boxe = boxCreate(box->x - w / 2, box->y - h / 2, w + 4, h + 4);
        boxaAddBox(boxa2, boxe, L_INSERT);
        pixRenderBox(pixs, boxe, 4, L_FLIP_PIXELS);
        boxDestroy(&box);
    }
    pixWrite("/tmp/junkoutline", pixs, IFF_TIFF_G4);
    boxaWriteStream(stderr, boxa2);

    /* Release all intermediate images and structures. */
    pixDestroy(&pixs);
    pixDestroy(&pixp);
    pixDestroy(&pixpe);
    pixDestroy(&pixt1);
    pixDestroy(&pixt2);
    pixDestroy(&pixhmt);
    pixDestroy(&pixd);
    selDestroy(&sel);
    selDestroy(&sel_2h);
    boxaDestroy(&boxa1);
    boxaDestroy(&boxa2);
    return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_hardkernel_odroidc1 | drivers/mtd/nand/nand_base.c | 7 | 101015 | /*
* drivers/mtd/nand.c
*
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
* 2002-2006 Thomas Gleixner (tglx@linutronix.de)
*
* Credits:
* David Woodhouse for adding multichip support
*
* Aleph One Ltd. and Toby Churchill Ltd. for supporting the
* rework for 2K page size chips
*
* TODO:
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
* if we have HW ECC support.
* BBT table is not serialized, has to be fixed
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#ifndef CONFIG_SYS_NAND_RESET_CNT
#define CONFIG_SYS_NAND_RESET_CNT 200000
#endif
/* Define default oob placement schemes for large and small page devices */
/* 8-byte OOB (small-page devices): 3 ECC bytes at offsets 0-2; free bytes
 * at 3-4 and 6-7 — offset 5 is skipped (presumably the bad block marker
 * position; verify against the chip datasheet). */
static struct nand_ecclayout nand_oob_8 = {
	.eccbytes = 3,
	.eccpos = {0, 1, 2},
	.oobfree = {
		{.offset = 3,
		 .length = 2},
		{.offset = 6,
		 .length = 2} }
};
/* 16-byte OOB (512B page): 6 ECC bytes in the first half, 8 free bytes
 * from offset 8 onward. */
static struct nand_ecclayout nand_oob_16 = {
	.eccbytes = 6,
	.eccpos = {0, 1, 2, 3, 6, 7},
	.oobfree = {
		{.offset = 8,
		 .length = 8} }
};
/* 64-byte OOB (2KiB page): 24 ECC bytes packed at the end (40-63); the
 * free region starts at offset 2, past the bad block marker bytes. */
static struct nand_ecclayout nand_oob_64 = {
	.eccbytes = 24,
	.eccpos = {
		   40, 41, 42, 43, 44, 45, 46, 47,
		   48, 49, 50, 51, 52, 53, 54, 55,
		   56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = {
		{.offset = 2,
		 .length = 38} }
};
/* 128-byte OOB (4KiB page): 48 ECC bytes packed at the end (80-127);
 * free region at 2-79, past the bad block marker bytes. */
static struct nand_ecclayout nand_oob_128 = {
	.eccbytes = 48,
	.eccpos = {
		   80, 81, 82, 83, 84, 85, 86, 87,
		   88, 89, 90, 91, 92, 93, 94, 95,
		   96, 97, 98, 99, 100, 101, 102, 103,
		   104, 105, 106, 107, 108, 109, 110, 111,
		   112, 113, 114, 115, 116, 117, 118, 119,
		   120, 121, 122, 123, 124, 125, 126, 127},
	.oobfree = {
		{.offset = 2,
		 .length = 78} }
};
int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
/*
* For devices which display every fart in the system on a separate LED. Is
* compiled away when LED support is disabled.
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
/*
 * Validate that an (offset, length) pair is erase-block aligned.
 * Returns 0 when both are aligned, -EINVAL otherwise (both checks are
 * always run so each misalignment gets its own debug message).
 */
static int check_offs_len(struct mtd_info *mtd,
					loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd->priv;
	int blockmask = (1 << chip->phys_erase_shift) - 1;
	int ret = 0;

	/* Start address must sit on an erase block boundary */
	if (ofs & blockmask) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must be a whole number of erase blocks */
	if (len & blockmask) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
*
* Release chip lock and wake up anyone waiting on the device.
*/
void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	/* De-assert chip enable before handing back the controller. */
	chip->select_chip(mtd, -1);
	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	/* Wake any waiter sleeping in nand_get_device(). */
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}
/**
* nand_read_byte - [DEFAULT] read one byte from the chip
* @mtd: MTD device structure
*
* Default read function for 8bit buswidth
*/
/*
 * nand_read_byte - [DEFAULT] read one byte from the chip (8-bit bus).
 */
static uint8_t nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;

	return readb(this->IO_ADDR_R);
}
/**
 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* @mtd: MTD device structure
*
* Default read function for 16bit buswidth with endianness conversion.
*
*/
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
}
/**
* nand_read_word - [DEFAULT] read one word from the chip
* @mtd: MTD device structure
*
* Default read function for 16bit buswidth without endianness conversion.
*/
/*
 * nand_read_word - [DEFAULT] read one raw 16-bit word from the chip;
 * no endianness conversion is applied.
 */
static u16 nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;

	return readw(this->IO_ADDR_R);
}
/**
* nand_select_chip - [DEFAULT] control CE line
* @mtd: MTD device structure
* @chipnr: chipnumber to select, -1 for deselect
*
* Default select function for 1 chip devices.
*/
/*
 * nand_select_chip - [DEFAULT] control the CE line for 1-chip devices.
 * @chipnr: 0 selects the (only) chip, -1 deselects; anything else is a
 * driver bug.
 */
static void nand_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd->priv;

	if (chipnr == -1)
		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
	else if (chipnr != 0)
		BUG();
}
/**
* nand_write_buf - [DEFAULT] write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* Default write function for 8bit buswidth.
*/
/*
 * nand_write_buf - [DEFAULT] write @len bytes from @buf through the
 * 8-bit data port.
 */
static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	const uint8_t *p = buf;
	const uint8_t *end = buf + len;

	while (p < end)
		writeb(*p++, chip->IO_ADDR_W);
}
/**
* nand_read_buf - [DEFAULT] read chip data into buffer
* @mtd: MTD device structure
* @buf: buffer to store date
* @len: number of bytes to read
*
* Default read function for 8bit buswidth.
*/
/*
 * nand_read_buf - [DEFAULT] read @len bytes from the 8-bit data port
 * into @buf.
 */
static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	uint8_t *p;

	for (p = buf; p < buf + len; p++)
		*p = readb(chip->IO_ADDR_R);
}
/**
* nand_write_buf16 - [DEFAULT] write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* Default write function for 16bit buswidth.
*/
/*
 * nand_write_buf16 - [DEFAULT] write buffer to a 16-bit bus chip.
 * Data moves in whole words; an odd trailing byte is dropped, exactly
 * as the byte count halving implies.
 */
static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	const u16 *words = (const u16 *) buf;
	int nwords = len >> 1;
	int i;

	for (i = 0; i < nwords; i++)
		writew(words[i], chip->IO_ADDR_W);
}
/**
* nand_read_buf16 - [DEFAULT] read chip data into buffer
* @mtd: MTD device structure
* @buf: buffer to store date
* @len: number of bytes to read
*
* Default read function for 16bit buswidth.
*/
/*
 * nand_read_buf16 - [DEFAULT] read chip data into buffer over a 16-bit
 * bus. Transfers whole words; an odd trailing byte is not read.
 */
static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	u16 *words = (u16 *) buf;
	int nwords = len >> 1;
	int i;

	for (i = 0; i < nwords; i++)
		words[i] = readw(chip->IO_ADDR_R);
}
/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @mtd: MTD device structure
* @ofs: offset from device start
* @getchip: 0, if the chip is already selected
*
* Check, if the block is bad.
*/
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	int page, chipnr, res = 0, i = 0;
	struct nand_chip *chip = mtd->priv;
	u16 bad;
	/* Some chips keep the marker in the last page of the block. */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	if (getchip) {
		chipnr = (int)(ofs >> chip->chip_shift);
		nand_get_device(mtd, FL_READING);
		/* Select the NAND device */
		chip->select_chip(mtd, chipnr);
	}
	/* Check up to two pages of the block, per NAND_BBT_SCAN2NDPAGE. */
	do {
		if (chip->options & NAND_BUSWIDTH_16) {
			/* 16-bit bus: read the word at an even column and
			 * pick the byte the marker lives in. */
			chip->cmdfunc(mtd, NAND_CMD_READOOB,
					chip->badblockpos & 0xFE, page);
			bad = cpu_to_le16(chip->read_word(mtd));
			if (chip->badblockpos & 0x1)
				bad >>= 8;
			else
				bad &= 0xFF;
		} else {
			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
					page);
			bad = chip->read_byte(mtd);
		}
		/* A marker byte that is not all-ones (or that has fewer set
		 * bits than badblockbits) flags the block bad. */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		ofs += mtd->writesize;
		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
		i++;
	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
	if (getchip) {
		chip->select_chip(mtd, -1);
		nand_release_device(mtd);
	}
	return res;
}
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad
* @mtd: MTD device structure
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
* specific driver. We try operations in the following order, according to our
* bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
* (1) erase the affected block, to allow OOB marker to be written cleanly
* (2) update in-memory BBT
* (3) write bad block marker to OOB area of affected block
* (4) update flash-based BBT
* Note that we retain the first error encountered in (3) or (4), finish the
* procedures, and dump the error in the end.
*/
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	uint8_t buf[2] = { 0, 0 };
	int block, res, ret = 0, i = 0;
	int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
	if (write_oob) {
		struct erase_info einfo;
		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.mtd = mtd;
		einfo.addr = ofs;
		einfo.len = 1 << chip->phys_erase_shift;
		/* Best-effort: the erase result is deliberately ignored. */
		nand_erase_nand(mtd, &einfo, 0);
	}
	/* Get block number */
	block = (int)(ofs >> chip->bbt_erase_shift);
	/* Mark block bad in memory-based BBT (2 bits per block). */
	if (chip->bbt)
		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
	/* Write bad block marker (zero bytes) to OOB */
	if (write_oob) {
		struct mtd_oob_ops ops;
		loff_t wr_ofs = ofs;
		nand_get_device(mtd, FL_WRITING);
		ops.datbuf = NULL;
		ops.oobbuf = buf;
		ops.ooboffs = chip->badblockpos;
		if (chip->options & NAND_BUSWIDTH_16) {
			/* 16-bit bus needs an even offset and a full word. */
			ops.ooboffs &= ~0x01;
			ops.len = ops.ooblen = 2;
		} else {
			ops.len = ops.ooblen = 1;
		}
		ops.mode = MTD_OPS_PLACE_OOB;
		/* Write to first/last page(s) if necessary */
		if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
			wr_ofs += mtd->erasesize - mtd->writesize;
		do {
			res = nand_do_write_oob(mtd, wr_ofs, &ops);
			/* Retain only the first error encountered. */
			if (!ret)
				ret = res;
			i++;
			wr_ofs += mtd->writesize;
		} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
		nand_release_device(mtd);
	}
	/* Update flash-based bad block table */
	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		res = nand_update_bbt(mtd, ofs);
		if (!ret)
			ret = res;
	}
	/* Only count the block once everything above succeeded. */
	if (!ret)
		mtd->ecc_stats.badblocks++;
	return ret;
}
/**
* nand_check_wp - [GENERIC] check if the chip is write protected
* @mtd: MTD device structure
*
* Check, if the device is write protected. The function expects, that the
* device is already selected.
*/
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
}
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
* @ofs: offset from device start
* @getchip: 0, if the chip is already selected
* @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
/*
 * nand_block_checkbad - [GENERIC] check if a block is marked bad,
 * preferring the in-memory bad block table and falling back to scanning
 * the chip's OOB marker when no BBT exists.
 */
static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
			       int allowbbt)
{
	struct nand_chip *chip = mtd->priv;

	/* Return info from the table when one has been built. */
	if (chip->bbt)
		return nand_isbad_bbt(mtd, ofs, allowbbt);

	return chip->block_bad(mtd, ofs, getchip);
}
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
* @mtd: MTD device structure
* @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
*/
/*
 * panic_nand_wait_ready - busy-poll the ready pin for up to @timeo
 * milliseconds. Safe in interrupt/panic context: no sleeping, just
 * mdelay() and watchdog touches.
 */
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
{
	struct nand_chip *chip = mtd->priv;
	unsigned long elapsed = 0;

	while (elapsed < timeo && !chip->dev_ready(mtd)) {
		touch_softlockup_watchdog();
		mdelay(1);
		elapsed++;
	}
}
/* Wait for the ready pin, after a command. The timeout is caught later. */
/* Wait for the ready pin, after a command. The timeout is caught later. */
void nand_wait_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	/* Normal path polls for at most 20ms. */
	unsigned long timeo = jiffies + msecs_to_jiffies(20);
	/* In panic/interrupt context busy-wait up to 400ms instead,
	 * since jiffies may not advance. */
	if (in_interrupt() || oops_in_progress)
		return panic_nand_wait_ready(mtd, 400);
	led_trigger_event(nand_led_trigger, LED_FULL);
	/* Wait until command is processed or timeout occurs */
	do {
		if (chip->dev_ready(mtd))
			break;
		touch_softlockup_watchdog();
	} while (time_before(jiffies, timeo));
	led_trigger_event(nand_led_trigger, LED_OFF);
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This function is used for small page devices
* (512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
			 int column, int page_addr)
{
	register struct nand_chip *chip = mtd->priv;
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
	uint32_t rst_sts_cnt = CONFIG_SYS_NAND_RESET_CNT;
	/*
	 * Write out the command to the device.
	 *
	 * SEQIN (program setup) needs a pointer command first so the column
	 * lands in the right region of the 512B page: READ0 for the first
	 * 256 bytes, READ1 for the second 256, READOOB for the spare area.
	 */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;
		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			/* Second 256 bytes --> READ1 */
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->cmd_ctrl(mtd, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	chip->cmd_ctrl(mtd, command, ctrl);
	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16)
			column >>= 1;
		chip->cmd_ctrl(mtd, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		chip->cmd_ctrl(mtd, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
		/* One more address cycle for devices > 32MiB */
		if (chip->chipsize > (32 << 20))
			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential-in need no delay.
	 */
	switch (command) {
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
		return;
	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd,
			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		/* Poll the status byte; rst_sts_cnt bounds the spin so a
		 * dead chip cannot hang us here forever. */
		while (!(chip->read_byte(mtd) & NAND_STATUS_READY) &&
			(rst_sts_cnt--));
		return;
		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);
	nand_wait_ready(mtd);
}
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
* @mtd: MTD device structure
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This is the version for the new large page
* devices. We don't have the separate regions as we have in the small page
* devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
*/
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
			    int column, int page_addr)
{
	register struct nand_chip *chip = mtd->priv;
	/* Emulate NAND_CMD_READOOB: large-page chips address the OOB as a
	 * column past the data area of a normal READ0. */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}
	/* Command latch cycle */
	chip->cmd_ctrl(mtd, command & 0xff,
		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16)
				column >>= 1;
			/* Large page chips take a two-byte column address. */
			chip->cmd_ctrl(mtd, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;
			chip->cmd_ctrl(mtd, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			chip->cmd_ctrl(mtd, page_addr, ctrl);
			chip->cmd_ctrl(mtd, page_addr >> 8,
				       NAND_NCE | NAND_ALE);
			/* One more address cycle for devices > 128MiB */
			if (chip->chipsize > (128 << 20))
				chip->cmd_ctrl(mtd, page_addr >> 16,
					       NAND_NCE | NAND_ALE);
		}
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	/*
	 * Program and erase have their own busy handlers status, sequential
	 * in, and deplete1 need no delay.
	 */
	switch (command) {
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_RNDIN:
	case NAND_CMD_STATUS:
		return;
	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* NOTE(review): unlike nand_command(), this status poll is
		 * unbounded — a dead chip would spin here forever. */
		while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
			;
		return;
	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		return;
	case NAND_CMD_READ0:
		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* fall through - read commands wait for ready below */
		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);
	nand_wait_ready(mtd);
}
/**
* panic_nand_get_device - [GENERIC] Get chip for selected access
* @chip: the nand chip descriptor
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Used when in panic, no locks are taken.
*/
static void panic_nand_get_device(struct nand_chip *chip,
		      struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	/* No locking here: in panic context we simply seize the
	 * controller unconditionally. */
	chip->controller->active = chip;
	chip->state = new_state;
}
/**
* nand_get_device - [GENERIC] Get chip for selected access
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd->priv;
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);
	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;
	/* We own the controller and the chip is idle: claim it. */
	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	if (new_state == FL_PM_SUSPENDED) {
		/* Suspend may proceed when the current owner is itself
		 * already suspended. */
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	/* Busy: sleep on the controller wait queue and retry once woken
	 * by nand_release_device(). */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}
/**
* panic_nand_wait - [GENERIC] wait until the command is done
* @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: timeout
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
* an oops through mtdoops.
*/
/*
 * panic_nand_wait - busy-wait for command completion in interrupt/panic
 * context, polling either the ready pin or the status byte once per
 * millisecond for up to @timeo milliseconds.
 */
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
			    unsigned long timeo)
{
	unsigned long elapsed;

	for (elapsed = 0; elapsed < timeo; elapsed++) {
		int ready;

		if (chip->dev_ready)
			ready = chip->dev_ready(mtd);
		else
			ready = chip->read_byte(mtd) & NAND_STATUS_READY;

		if (ready)
			break;
		mdelay(1);
	}
}
/**
* nand_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @chip: NAND chip structure
*
* Wait for command done. This applies to erase and program only. Erase can
* take up to 400ms and program up to 20ms according to general NAND and
* SmartMedia specs.
*/
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	int status, state = chip->state;
	/* Per general NAND specs: erase up to 400ms, program up to 20ms. */
	unsigned long timeo = (state == FL_ERASING ? 400 : 20);
	led_trigger_event(nand_led_trigger, LED_FULL);
	/*
	 * Apply this short delay always to ensure that we do wait tWB in any
	 * case on any machine.
	 */
	ndelay(100);
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (in_interrupt() || oops_in_progress)
		panic_nand_wait(mtd, chip, timeo);
	else {
		timeo = jiffies + msecs_to_jiffies(timeo);
		while (time_before(jiffies, timeo)) {
			/* Prefer the ready pin; fall back to polling the
			 * status byte when no pin is wired up. */
			if (chip->dev_ready) {
				if (chip->dev_ready(mtd))
					break;
			} else {
				if (chip->read_byte(mtd) & NAND_STATUS_READY)
					break;
			}
			cond_resched();
		}
	}
	led_trigger_event(nand_led_trigger, LED_OFF);
	/* Final status read; callers inspect NAND_STATUS_FAIL etc. */
	status = (int)chip->read_byte(mtd);
	/* This can happen if in case of timeout or buggy dev_ready */
	WARN_ON(!(status & NAND_STATUS_READY));
	return status;
}
/**
* __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
* @invert: when = 0, unlock the range of blocks within the lower and
* upper boundary address
* when = 1, unlock the range of blocks outside the boundaries
* of the lower and upper boundary address
*
 * Returns unlock status.
*/
/*
 * __nand_unlock - issue the UNLOCK1/UNLOCK2 command pair for a block
 * range. @invert selects whether the range inside (0) or outside (1)
 * the boundaries is unlocked. Returns 0 on success, -EIO on chip error.
 */
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
					uint64_t len, int invert)
{
	int status, page;
	struct nand_chip *chip = mtd->priv;

	/* First page of the range */
	page = ofs >> chip->page_shift;
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* Last page; the invert bit is folded into the page address */
	page = (ofs + len) >> chip->page_shift;
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
				(page | invert) & chip->pagemask);

	/* Wait for completion and check the chip's verdict */
	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL) {
		pr_debug("%s: error status = 0x%08x\n",
					__func__, status);
		return -EIO;
	}

	return 0;
}
/**
* nand_unlock - [REPLACEABLE] unlocks specified locked blocks
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
* Returns unlock status.
*/
/*
 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
 * @mtd: mtd info
 * @ofs: offset to start unlock from
 * @len: length to unlock
 *
 * Returns unlock status (0 on success, -EINVAL for an unaligned range,
 * -EIO on a write-protected or failing chip).
 */
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;
	int chipnr;
	struct nand_chip *chip = mtd->priv;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)ofs, len);

	/*
	 * Reject an unaligned range outright. Previously the -EINVAL was
	 * only stored in ret and then clobbered by the __nand_unlock()
	 * result, so the invalid request was silently carried out.
	 */
	if (check_offs_len(mtd, ofs, len))
		return -EINVAL;

	/* Align to last block address if size addresses end of the device */
	if (ofs + len == mtd->size)
		len -= mtd->erasesize;

	nand_get_device(mtd, FL_UNLOCKING);

	/* Shift to get chip number */
	chipnr = ofs >> chip->chip_shift;
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto out;
	}

	ret = __nand_unlock(mtd, ofs, len, 0);

out:
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	return ret;
}
EXPORT_SYMBOL(nand_unlock);
/**
* nand_lock - [REPLACEABLE] locks all blocks present in the device
* @mtd: mtd info
* @ofs: offset to start unlock from
* @len: length to unlock
*
* This feature is not supported in many NAND parts. 'Micron' NAND parts do
* have this feature, but it allows only to lock all blocks, not for specified
* range for block. Implementing 'lock' feature by making use of 'unlock', for
* now.
*
* Returns lock status.
*/
/*
 * nand_lock - [REPLACEABLE] locks all blocks present in the device
 * @mtd: mtd info
 * @ofs: offset to start unlock from
 * @len: length to unlock
 *
 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
 * have this feature, but it allows only to lock all blocks, not for specified
 * range for block. Implementing 'lock' feature by making use of 'unlock', for
 * now.
 *
 * Returns lock status (0 on success, -EINVAL for an unaligned range,
 * -EIO on a write-protected or failing chip).
 */
int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;
	int chipnr, status, page;
	struct nand_chip *chip = mtd->priv;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)ofs, len);

	/*
	 * Reject an unaligned range outright. Previously the -EINVAL was
	 * only stored in ret and then clobbered by the later lock/unlock
	 * result, so the invalid request was silently carried out.
	 */
	if (check_offs_len(mtd, ofs, len))
		return -EINVAL;

	nand_get_device(mtd, FL_LOCKING);

	/* Shift to get chip number */
	chipnr = ofs >> chip->chip_shift;
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected (the dead store of
	 * MTD_ERASE_FAILED into 'status' on this path was dropped). */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto out;
	}

	/* Submit address of first page to lock */
	page = ofs >> chip->page_shift;
	chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);

	/* Call wait ready function */
	status = chip->waitfunc(mtd, chip);

	/* See if device thinks it succeeded */
	if (status & NAND_STATUS_FAIL) {
		pr_debug("%s: error status = 0x%08x\n",
				__func__, status);
		ret = -EIO;
		goto out;
	}

	/* The chip can only lock everything at once; unlock-with-invert
	 * re-opens the blocks outside the requested range. */
	ret = __nand_unlock(mtd, ofs, len, 0x1);

out:
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	return ret;
}
EXPORT_SYMBOL(nand_lock);
/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
			      uint8_t *buf, int oob_required, int page)
{
	/* Pull the main data area straight into the caller's buffer. */
	chip->read_buf(mtd, buf, mtd->writesize);
	/* Optionally append the raw OOB bytes; no ECC is applied. */
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
/**
* nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
					struct nand_chip *chip, uint8_t *buf,
					int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size;
	/* Syndrome layout interleaves data and OOB: each ECC step is
	 * data, optional prepad, ECC bytes, optional postpad. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		chip->read_buf(mtd, buf, eccsize);
		buf += eccsize;
		if (chip->ecc.prepad) {
			chip->read_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}
		chip->read_buf(mtd, oob, eccbytes);
		oob += eccbytes;
		if (chip->ecc.postpad) {
			chip->read_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}
	/* Read whatever OOB bytes remain after the interleaved steps. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size)
		chip->read_buf(mtd, oob, size);
	return 0;
}
/**
* nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	unsigned int max_bitflips = 0;
	/* Raw-read data + OOB, then compute ECC in software per step. */
	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
	/* Gather the stored ECC bytes from their layout positions in OOB. */
	for (i = 0; i < chip->ecc.total; i++)
		ecc_code[i] = chip->oob_poi[eccpos[i]];
	eccsteps = chip->ecc.steps;
	p = buf;
	/* Correct each step; accumulate stats and the worst bitflip count. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;
		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
* nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @data_offs: offset of requested data within the page
* @readlen: data length
* @bufpoi: buffer to store read data
*/
static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
{
	int start_step, end_step, num_steps;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index = 0;
	unsigned int max_bitflips = 0;
	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;
	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	if (data_col_addr != 0)
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
	p = bufpoi + data_col_addr;
	chip->read_buf(mtd, p, datafrag_len);
	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	for (i = 0; i < eccfrag_len - 1; i++) {
		if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
			eccpos[i + start_step * chip->ecc.bytes + 1]) {
			gaps = 1;
			break;
		}
	}
	if (gaps) {
		/* Non-contiguous ECC positions: read the whole OOB area. */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		index = start_step * chip->ecc.bytes;
		/* Round the start position down (and pad the length) so the
		 * transfer stays bus-word aligned on 16-bit chips. */
		aligned_pos = eccpos[index] & ~(busw - 1);
		aligned_len = eccfrag_len;
		if (eccpos[index] & (busw - 1))
			aligned_len++;
		if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
			aligned_len++;
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
					mtd->writesize + aligned_pos, -1);
		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
	}
	/* Gather the stored ECC bytes for the fragment from OOB. */
	for (i = 0; i < eccfrag_len; i++)
		chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
	p = bufpoi + data_col_addr;
	/* Correct each step; accumulate stats and the worst bitflip count. */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;
		stat = chip->ecc.correct(mtd, p,
			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 * Returns the maximum number of bitflips seen in any single ECC step.
 */
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int nsteps = chip->ecc.steps;
	uint8_t *data = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	unsigned int max_bitflips = 0;
	int step, i;

	/* Read each data chunk while the controller computes its ECC. */
	for (step = 0; step < nsteps; step++) {
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, data + step * eccsize, eccsize);
		chip->ecc.calculate(mtd, data + step * eccsize,
				    &ecc_calc[step * eccbytes]);
	}

	/* Pull in the spare area and gather the stored ECC bytes from it. */
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	for (i = 0; i < chip->ecc.total; i++)
		ecc_code[i] = chip->oob_poi[eccpos[i]];

	/* Correct every step, tracking the worst per-step bitflip count. */
	for (step = 0; step < nsteps; step++) {
		int stat;

		stat = chip->ecc.correct(mtd, data + step * eccsize,
					 &ecc_code[step * eccbytes],
					 &ecc_calc[step * eccbytes]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, require OOB to be read first. For this
 * ECC mode, the write_page method is re-used from ECC_HW. These methods
 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
 * the data area, by overwriting the NAND manufacturer bad block markings.
 */
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int nsteps = chip->ecc.steps;
	uint8_t *data = buf;
	uint8_t *stored = chip->buffers->ecccode;
	uint8_t *calc = chip->buffers->ecccalc;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	unsigned int max_bitflips = 0;
	int step, i;

	/* Fetch the whole spare area before touching the data area. */
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* Gather the on-flash ECC bytes out of the layout. */
	for (i = 0; i < chip->ecc.total; i++)
		stored[i] = chip->oob_poi[eccpos[i]];

	/* Read, compute and correct one ECC step at a time. */
	for (step = 0; step < nsteps; step++) {
		int stat;

		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, data, eccsize);
		chip->ecc.calculate(mtd, data, &calc[step * eccbytes]);

		stat = chip->ecc.correct(mtd, data,
					 &stored[step * eccbytes], NULL);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
		data += eccsize;
	}
	return max_bitflips;
}
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling: data and its prepad/ECC/postpad are
 * interleaved on flash, so data and OOB are read step by step in one pass.
 * Returns the maximum number of bitflips seen in any single ECC step.
 */
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;
		/* Arm the HW engine, then read one data chunk */
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);
		/* Optional pad bytes stored before the ECC of this step */
		if (chip->ecc.prepad) {
			chip->read_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}
		/* Switch the engine to syndrome mode and read the ECC bytes */
		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
		chip->read_buf(mtd, oob, eccbytes);
		stat = chip->ecc.correct(mtd, p, oob, NULL);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
		oob += eccbytes;
		/* Optional pad bytes stored after the ECC of this step */
		if (chip->ecc.postpad) {
			chip->read_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}
	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i)
		chip->read_buf(mtd, oob, i);
	return max_bitflips;
}
/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: nand chip structure
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Copies @len OOB bytes out of chip->oob_poi according to @ops->mode and
 * returns the destination pointer advanced past the copied bytes.
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Flat copy starting at the caller-supplied OOB offset. */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB: {
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t skip = ops->ooboffs;

		/* Walk the free-byte regions, honouring the start offset. */
		while (free->length && len) {
			uint32_t src;
			size_t chunk;

			if (unlikely(skip)) {
				if (skip >= free->length) {
					/* Whole region lies before ooboffs */
					skip -= free->length;
					free++;
					continue;
				}
				src = free->offset + skip;
				chunk = min_t(size_t, len,
					      free->length - skip);
				skip = 0;
			} else {
				src = free->offset;
				chunk = min_t(size_t, len, free->length);
			}
			memcpy(oob, chip->oob_poi + src, chunk);
			oob += chunk;
			len -= chunk;
			free++;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure (datbuf/oobbuf, lengths and mode)
 *
 * Internal function. Called with chip held. Loops page by page, using the
 * one-page cache in chip->buffers->databuf for unaligned reads, and returns
 * a negative error, -EBADMSG on new uncorrectable ECC errors, or the
 * maximum bitflip count seen in any ECC step.
 */
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
		mtd->oobavail : mtd->oobsize;
	uint8_t *bufpoi, *oob, *buf;
	unsigned int max_bitflips = 0;
	/* Snapshot ECC stats so new failures during this call are visible */
	stats = mtd->ecc_stats;
	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;
	col = (int)(from & (mtd->writesize - 1));
	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;
	while (1) {
		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);
		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			/* Unaligned reads bounce through the page cache */
			bufpoi = aligned ? buf : chip->buffers->databuf;
			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(mtd, chip,
							col, bytes, bufpoi);
			else
				ret = chip->ecc.read_page(mtd, chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (!aligned)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
			/* Transfer not aligned data */
			if (!aligned) {
				/*
				 * Cache the page only when it was read whole
				 * and cleanly (no subpage read, no OOB, no
				 * new ECC failures, not a raw read).
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->buffers->databuf + col, bytes);
			}
			buf += bytes;
			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);
				if (toread) {
					oob = nand_transfer_oob(chip,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}
			if (chip->options & NAND_NEED_READRDY) {
				/* Apply delay or wait for ready/busy pin */
				if (!chip->dev_ready)
					udelay(chip->chip_delay);
				else
					nand_wait_ready(mtd);
			}
		} else {
			/* Cache hit: serve the read from the page buffer */
			memcpy(buf, chip->buffers->databuf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}
		readlen -= bytes;
		if (!readlen)
			break;
		/* For subsequent reads align to page boundary */
		col = 0;
		/*
		 * Increment page address. writesize >> page_shift appears to
		 * evaluate to 1 (pages are writesize-sized above), so this
		 * looks equivalent to realpage++; vendor change from the
		 * stock increment - TODO confirm intent.
		 */
		realpage += (mtd->writesize >> chip->page_shift);
		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);
	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;
	if (ret < 0)
		return ret;
	/* New uncorrectable errors during this call mean -EBADMSG */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;
	return max_bitflips;
}
/**
 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
 * @mtd: MTD device structure
 * @from: offset to read from
 * @len: number of bytes to read
 * @retlen: pointer to variable to store the number of read bytes
 * @buf: the databuffer to put data
 *
 * Get hold of the chip and call nand_do_read.
 */
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, uint8_t *buf)
{
	/*
	 * Zero-initialize the whole ops struct: nand_do_read_ops reads
	 * ops->ooblen unconditionally, so leaving it uninitialized (as the
	 * old code did) read indeterminate stack data.
	 */
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
		.oobbuf = NULL,
		.mode = MTD_OPS_PLACE_OOB,
	};
	int ret;

	nand_get_device(mtd, FL_READING);
	ret = nand_do_read_ops(mtd, from, &ops);
	*retlen = ops.retlen;
	nand_release_device(mtd);
	return ret;
}
/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Positions at the spare area of @page and reads it whole into
 * chip->oob_poi. Always returns 0.
 */
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	int len = mtd->oobsize;

	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, len);
	return 0;
}
/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * On syndrome layouts the per-step OOB chunks (prepad + ECC + postpad) are
 * interleaved with the data; gather them all into chip->oob_poi.
 */
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	uint8_t *buf = chip->oob_poi;
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = buf;
	int i, toread, sndrnd = 0, pos;
	/* Position at the OOB chunk of the first step (just past its data) */
	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
	for (i = 0; i < chip->ecc.steps; i++) {
		/* First pass is already positioned; reposition afterwards */
		if (sndrnd) {
			pos = eccsize + i * (eccsize + chunk);
			/* Small-page parts have no RNDOUT; re-issue READ0 */
			if (mtd->writesize > 512)
				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
			else
				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);
		chip->read_buf(mtd, bufpoi, toread);
		bufpoi += toread;
		length -= toread;
	}
	/* Trailing free OOB bytes, if the layout leaves any */
	if (length > 0)
		chip->read_buf(mtd, bufpoi, length);
	return 0;
}
/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Programs chip->oob_poi into the spare area of @page. Returns -EIO when
 * the chip reports a program failure, 0 otherwise.
 */
static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
			      int page)
{
	int status;

	/* Point the column address past the data area, stream out the OOB. */
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Send command to program the OOB data */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Writes chip->oob_poi into the interleaved per-step OOB chunks without
 * touching the data areas. Returns -EIO on program failure, 0 otherwise.
 */
static int nand_write_oob_syndrome(struct mtd_info *mtd,
				   struct nand_chip *chip, int page)
{
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;
	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No pads: all OOB sits contiguously at the end of the page */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
	for (i = 0; i < steps; i++) {
		/* First pass is already positioned; skip over data later */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips have no RNDIN: pad the
				 * data gap with 0xFF so it stays unprogrammed.
				 */
				uint32_t fill = 0xFFFFFFFF;
				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);
					chip->write_buf(mtd, (uint8_t *)&fill,
							num);
					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);
		chip->write_buf(mtd, bufpoi, len);
		bufpoi += len;
		length -= len;
	}
	/* Remaining free OOB bytes at the end of the page */
	if (length > 0)
		chip->write_buf(mtd, bufpoi, length);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);
	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area. Called with the chip
 * held. Returns a negative error, -EBADMSG on new uncorrectable ECC
 * errors, -EUCLEAN when bitflips were corrected, 0 otherwise.
 */
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int page, realpage, chipnr;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;
	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);
	/* Snapshot ECC stats so new errors during this call are visible */
	stats = mtd->ecc_stats;
	/* Per-page OOB budget depends on the transfer mode */
	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;
	if (unlikely(ops->ooboffs >= len)) {
		pr_debug("%s: attempt to start read outside oob\n",
				__func__);
		return -EINVAL;
	}
	/* Do not allow reads past end of device */
	if (unlikely(from >= mtd->size ||
		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
					(from >> chip->page_shift)) * len)) {
		pr_debug("%s: attempt to read beyond end of device\n",
				__func__);
		return -EINVAL;
	}
	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);
	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;
	while (1) {
		/* Pull one page worth of OOB into chip->oob_poi */
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(mtd, chip, page);
		else
			ret = chip->ecc.read_oob(mtd, chip, page);
		if (ret < 0)
			break;
		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);
		if (chip->options & NAND_NEED_READRDY) {
			/* Apply delay or wait for ready/busy pin */
			if (!chip->dev_ready)
				udelay(chip->chip_delay);
			else
				nand_wait_ready(mtd);
		}
		readlen -= len;
		if (!readlen)
			break;
		/* Increment page address */
		realpage++;
		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);
	ops->oobretlen = ops->ooblen - readlen;
	if (ret < 0)
		return ret;
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;
	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data. Rejects out-of-range requests
 * and unsupported modes, then dispatches to the OOB-only or data+OOB
 * internal reader while holding the device.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	int ret;

	ops->retlen = 0;

	/* Do not allow reads past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nand_get_device(mtd, FL_READING);

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		ret = -ENOTSUPP;
	else if (ops->datbuf)
		ret = nand_do_read_ops(mtd, from, ops);
	else
		ret = nand_do_read_oob(mtd, from, ops);

	nand_release_device(mtd);
	return ret;
}
/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
			       const uint8_t *buf, int oob_required)
{
	/* Stream the data area out verbatim ... */
	chip->write_buf(mtd, buf, mtd->writesize);

	/* ... and append the spare area only when the caller asked for it. */
	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * We need a special oob layout and handling even when ECC isn't checked:
 * each data chunk is followed by its prepad, ECC bytes and postpad taken
 * from chip->oob_poi, all streamed out in device order.
 */
static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
					struct nand_chip *chip,
					const uint8_t *buf, int oob_required)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk of this ECC step */
		chip->write_buf(mtd, buf, eccsize);
		buf += eccsize;

		if (chip->ecc.prepad) {
			chip->write_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		/*
		 * Stream out the caller-provided ECC bytes. The previous
		 * code called read_buf() here, which in a program sequence
		 * clobbered oob_poi and never wrote the ECC bytes at all -
		 * this is a write path, so write_buf() is required.
		 */
		chip->write_buf(mtd, oob, eccbytes);
		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->write_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}

	/* Flush whatever free OOB bytes remain after the last step */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size)
		chip->write_buf(mtd, oob, size);

	return 0;
}
/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Computes the software ECC for every step, places the bytes into the OOB
 * layout and delegates the actual transfer to the raw page writer.
 */
static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int nsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	const uint8_t *data = buf;
	int step, i;

	/* Software ECC calculation over each data chunk */
	for (step = 0; step < nsteps; step++, data += eccsize)
		chip->ecc.calculate(mtd, data, &ecc_calc[step * eccbytes]);

	/* Scatter the computed bytes into their OOB slots */
	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];

	/* ECC lives in the OOB, so the OOB must always be written */
	return chip->ecc.write_page_raw(mtd, chip, buf, 1);
}
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Writes each data chunk while the controller computes its ECC, then
 * scatters the ECC bytes into the OOB layout and writes the spare area.
 */
static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int nsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	const uint8_t *data = buf;
	int step, i;

	for (step = 0; step < nsteps; step++, data += eccsize) {
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
		chip->write_buf(mtd, data, eccsize);
		chip->ecc.calculate(mtd, data, &ecc_calc[step * eccbytes]);
	}

	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];

	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
/**
 * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @data_buf: data buffer (untouched subpages already masked with 0xFF)
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Writes the full page, but only computes real ECC for the subpages in
 * [@offset, @offset + @data_len); everything else is masked with 0xFF so
 * those cells stay unprogrammed.
 */
static int nand_write_subpage_hwecc(struct mtd_info *mtd,
				struct nand_chip *chip, uint32_t offset,
				uint32_t data_len, const uint8_t *data_buf,
				int oob_required)
{
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	uint32_t *eccpos  = chip->ecc.layout->eccpos;
	/* First and last ECC step actually covered by the caller's data */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, i;
	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
		/* write data (untouched subpages already masked by 0xFF) */
		chip->write_buf(mtd, data_buf, ecc_size);
		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(mtd, data_buf, ecc_calc);
		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);
		data_buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}
	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->buffers->ecccalc;
	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];
	/* write OOB buffer to NAND device */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling: data, prepad, ECC and postpad are
 * streamed out interleaved, one ECC step at a time.
 */
static int nand_write_page_syndrome(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const uint8_t *buf, int oob_required)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the HW engine, then stream one data chunk */
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
		chip->write_buf(mtd, p, eccsize);
		if (chip->ecc.prepad) {
			chip->write_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}
		/* Engine delivers this step's ECC bytes into the oob buffer */
		chip->ecc.calculate(mtd, p, oob);
		chip->write_buf(mtd, oob, eccbytes);
		oob += eccbytes;
		if (chip->ecc.postpad) {
			chip->write_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}
	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i)
		chip->write_buf(mtd, oob, i);
	return 0;
}
/**
 * nand_write_page - [REPLACEABLE] write one page
 * @mtd: MTD device structure
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @cached: cached programming
 * @raw: use _raw version of write_page
 *
 * Returns 0 on success, -EIO on a reported program failure, or a negative
 * error from the selected write method.
 */
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
		uint32_t offset, int data_len, const uint8_t *buf,
		int oob_required, int page, int cached, int raw)
{
	int status, subpage;
	/* Use subpage programming only when supported and actually partial */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
		chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
	if (unlikely(raw))
		status = chip->ecc.write_page_raw(mtd, chip, buf,
							oob_required);
	else if (subpage)
		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
							 buf, oob_required);
	else
		status = chip->ecc.write_page(mtd, chip, buf, oob_required);
	if (status < 0)
		return status;
	/*
	 * Cached progamming disabled for now. Not sure if it's worth the
	 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
	 */
	cached = 0;
	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
		/*
		 * See if operation failed and additional status checks are
		 * available.
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_WRITING, status,
					       page);
		if (status & NAND_STATUS_FAIL)
			return -EIO;
	} else {
		/* Dead branch while cached is forced to 0 above */
		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
	}
	return 0;
}
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @mtd: MTD device structure
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copies @len client OOB bytes into chip->oob_poi according to @ops->mode
 * and returns the source pointer advanced past the consumed bytes.
 */
static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd->priv;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Flat copy at the caller-supplied OOB offset. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB: {
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t skip = ops->ooboffs;

		/* Walk the free-byte regions, honouring the start offset. */
		while (free->length && len) {
			uint32_t dst;
			size_t chunk;

			if (unlikely(skip)) {
				if (skip >= free->length) {
					/* Whole region lies before ooboffs */
					skip -= free->length;
					free++;
					continue;
				}
				dst = free->offset + skip;
				chunk = min_t(size_t, len,
					      free->length - skip);
				skip = 0;
			} else {
				dst = free->offset;
				chunk = min_t(size_t, len, free->length);
			}
			memcpy(chip->oob_poi + dst, oob, chunk);
			oob += chunk;
			len -= chunk;
			free++;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Called with the chip held. Loops page by page,
 * bouncing partial pages through the chip data buffer padded with 0xFF.
 */
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, realpage, page, blockmask, column;
	struct nand_chip *chip = mtd->priv;
	uint32_t writelen = ops->len;
	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
		mtd->oobavail : mtd->oobsize;
	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);
	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;
	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;

	/*
	 * Invalidate the page cache, when we write to the cached page.
	 * chip->pagebuf is an int, so the shift must be widened to loff_t
	 * first: the old int shift overflowed for cached pages at device
	 * offsets of 2 GiB and above, leaving stale cached data.
	 */
	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
		chip->pagebuf = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		int cached = writelen > bytes && page != blockmask;
		uint8_t *wbuf = buf;

		/* Partial page write? */
		if (unlikely(column || writelen < (mtd->writesize - 1))) {
			cached = 0;
			bytes = min_t(int, bytes - column, (int) writelen);
			chip->pagebuf = -1;
			/* Pad the untouched part of the page with 0xFF */
			memset(chip->buffers->databuf, 0xff, mtd->writesize);
			memcpy(&chip->buffers->databuf[column], buf, bytes);
			wbuf = chip->buffers->databuf;
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(mtd, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
				       oob_required, page, cached,
				       (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		/* Advance one page (writesize >> page_shift evaluates to 1) */
		realpage += (mtd->writesize >> chip->page_shift);
		page = realpage & chip->pagemask;

		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	chip->select_chip(mtd, -1);
	return ret;
}
/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
 */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd->priv;
	/*
	 * Zero-initialize the whole ops struct: nand_do_write_ops reads
	 * ops->ooblen unconditionally, so leaving it uninitialized (as the
	 * old code did) read indeterminate stack data.
	 */
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (uint8_t *)buf,
		.oobbuf = NULL,
		.mode = MTD_OPS_PLACE_OOB,
	};
	int ret;

	/* Wait for the device to get ready */
	panic_nand_wait(mtd, chip, 400);

	/* Grab the device */
	panic_nand_get_device(chip, mtd, FL_WRITING);

	ret = nand_do_write_ops(mtd, to, &ops);

	*retlen = ops.retlen;
	return ret;
}
/**
 * nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC.
 */
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const uint8_t *buf)
{
	/*
	 * Zero-initialize the whole ops struct: nand_do_write_ops reads
	 * ops->ooblen unconditionally, so leaving it uninitialized (as the
	 * old code did) read indeterminate stack data.
	 */
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (uint8_t *)buf,
		.oobbuf = NULL,
		.mode = MTD_OPS_PLACE_OOB,
	};
	int ret;

	nand_get_device(mtd, FL_WRITING);
	ret = nand_do_write_ops(mtd, to, &ops);
	*retlen = ops.retlen;
	nand_release_device(mtd);
	return ret;
}
/**
 * nand_do_write_oob - [INTERN] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Validates the request against the per-page OOB
 * budget and device size, then programs a single page's spare area.
 */
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, page, status, len;
	struct nand_chip *chip = mtd->priv;
	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);
	/* Per-page OOB budget depends on the transfer mode */
	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;
	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}
	if (unlikely(ops->ooboffs >= len)) {
		pr_debug("%s: attempt to start write outside oob\n",
				__func__);
		return -EINVAL;
	}
	/* Do not allow write past end of device */
	if (unlikely(to >= mtd->size ||
		     ops->ooboffs + ops->ooblen >
			((mtd->size >> chip->page_shift) -
			 (to >> chip->page_shift)) * len)) {
		pr_debug("%s: attempt to write beyond end of device\n",
				__func__);
		return -EINVAL;
	}
	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);
	/* Shift to get page */
	page = (int)(to >> chip->page_shift);
	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		chip->select_chip(mtd, -1);
		return -EROFS;
	}
	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;
	/* Stage the client data into chip->oob_poi per the layout */
	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
	chip->select_chip(mtd, -1);
	if (status)
		return status;
	ops->oobretlen = ops->ooblen;
	return 0;
}
/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Rejects out-of-range requests and unsupported modes, then dispatches to
 * the OOB-only or data+OOB internal writer while holding the device.
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	int ret;

	ops->retlen = 0;

	/* Do not allow writes past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nand_get_device(mtd, FL_WRITING);

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		ret = -ENOTSUPP;
	else if (ops->datbuf)
		ret = nand_do_write_ops(mtd, to, ops);
	else
		ret = nand_do_write_oob(mtd, to, ops);

	nand_release_device(mtd);
	return ret;
}
/**
 * single_erase_cmd - [GENERIC] NAND standard block erase command function
 * @mtd: MTD device structure
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips: the two-cycle ERASE1/ERASE2
 * sequence latches the block address and then starts the erase.
 */
static void single_erase_cmd(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd->priv;
	/* Send commands to erase a block */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
}
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	/* allowbbt = 0: never erase the bad block table via this path */
	return nand_erase_nand(mtd, instr, 0);
}
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks. Core erase loop: skips bad blocks,
 * invalidates the page cache where needed, handles crossing chip
 * boundaries, and records the first failing block in @instr->fail_addr.
 * Returns 0 on success, -EINVAL on a bad offset/length, -EIO on a
 * failed erase.
 */
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
		    int allowbbt)
{
	int page, status, pages_per_block, ret, chipnr;
	struct nand_chip *chip = mtd->priv;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(mtd, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_ERASING);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		instr->state = MTD_ERASE_FAILED;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	instr->state = MTD_ERASING;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(mtd, ((loff_t) page) <<
					chip->page_shift, 0, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			instr->state = MTD_ERASE_FAILED;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		chip->erase_cmd(mtd, page & chip->pagemask);

		status = chip->waitfunc(mtd, chip);

		/*
		 * See if operation failed and additional status checks are
		 * available
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_ERASING,
					       status, page);

		/* See if block erase succeeded */
		if (status & NAND_STATUS_FAIL) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->state = MTD_ERASE_FAILED;
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1 << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	instr->state = MTD_ERASE_DONE;

erase_exit:

	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;

	/* Deselect and wake up anyone waiting on the device */
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	/* Do call back function */
	if (!ret)
		mtd_erase_callback(instr);

	/* Return more or less happy */
	return ret;
}
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: taking the device
 * lock blocks until any in-flight operation has completed.
 */
static void nand_sync(struct mtd_info *mtd)
{
	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_SYNCING);
	/* Release it and go back */
	nand_release_device(mtd);
}
/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 *
 * Delegates to nand_block_checkbad() with BBT-area access disallowed.
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	return nand_block_checkbad(mtd, offs, 1, 0);
}
/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 *
 * Returns 0 when the block is already bad or was marked successfully,
 * otherwise a negative error code.
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	int ret = nand_block_isbad(mtd, ofs);

	/* Already marked bad: nothing to do, report success. */
	if (ret > 0)
		return 0;
	/* The bad block check itself failed. */
	if (ret < 0)
		return ret;

	return chip->block_markbad(mtd, ofs);
}
/**
 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four bytes array.
 *
 * Returns 0 on success, -EINVAL on a non-ONFI chip, -EIO when the
 * chip reports failure.
 */
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
			int addr, uint8_t *subfeature_param)
{
	int status;

	/* SET FEATURES only exists on ONFI parts. */
	if (!chip->onfi_version)
		return -EINVAL;

	/* Issue the command, then clock out the four parameter bytes. */
	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
	chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);

	status = chip->waitfunc(mtd, chip);
	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}
/**
 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four bytes array.
 *
 * Returns 0 on success or -EINVAL when the chip is not ONFI compliant.
 */
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
			int addr, uint8_t *subfeature_param)
{
	if (!chip->onfi_version)
		return -EINVAL;

	/* clear the sub feature parameters */
	memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);

	/* Issue GET FEATURES, then read back the four parameter bytes. */
	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
	chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
	return 0;
}
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Takes the device lock in the FL_PM_SUSPENDED state; released again
 * by nand_resume().
 */
static int nand_suspend(struct mtd_info *mtd)
{
	return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 *
 * Releases the device lock taken by nand_suspend(); complains if the
 * chip was never suspended.
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	if (chip->state != FL_PM_SUSPENDED) {
		pr_err("%s called for a chip which is not in suspended state\n",
			__func__);
		return;
	}

	nand_release_device(mtd);
}
/* Populate any chip methods and fields the board driver left unset. */
static void nand_set_defaults(struct nand_chip *chip, int busw)
{
	/* Default to a 20us command delay when the driver did not set one. */
	if (!chip->chip_delay)
		chip->chip_delay = 20;

	/* Command, wait and chip-select hooks. */
	if (!chip->cmdfunc)
		chip->cmdfunc = nand_command;
	if (!chip->waitfunc)
		chip->waitfunc = nand_wait;
	if (!chip->select_chip)
		chip->select_chip = nand_select_chip;

	/*
	 * Bus-width dependent accessors. If called twice, pointers that
	 * depend on busw may need to be reset, hence the extra test
	 * against the 8-bit default in addition to the NULL check.
	 */
	if (!chip->read_byte || chip->read_byte == nand_read_byte)
		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
	if (!chip->read_word)
		chip->read_word = nand_read_word;
	if (!chip->block_bad)
		chip->block_bad = nand_block_bad;
	if (!chip->block_markbad)
		chip->block_markbad = nand_default_block_markbad;
	if (!chip->write_buf || chip->write_buf == nand_write_buf)
		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
	if (!chip->read_buf || chip->read_buf == nand_read_buf)
		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
	if (!chip->scan_bbt)
		chip->scan_bbt = nand_default_bbt;

	/* Fall back to the chip's built-in controller structure. */
	if (!chip->controller) {
		chip->controller = &chip->hwcontrol;
		spin_lock_init(&chip->controller->lock);
		init_waitqueue_head(&chip->controller->wq);
	}
}
/*
 * Sanitize ONFI strings so we can safely print them: NUL-terminate the
 * buffer, replace non-printable bytes with '?' and strip trailing
 * whitespace in place.
 */
static void sanitize_string(uint8_t *s, size_t len)
{
	size_t i;

	/* Guard the s[len - 1] access below against a zero-length buffer. */
	if (!len)
		return;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces; strim() operates on a char buffer. */
	strim((char *)s);
}
/* CRC-16 over @len bytes of @p, polynomial 0x8005, seeded with @crc. */
static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
	size_t byte;
	int bit;

	for (byte = 0; byte < len; byte++) {
		crc ^= (u16)(p[byte] << 8);
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x8000)
				crc = (crc << 1) ^ 0x8005;
			else
				crc <<= 1;
		}
	}

	return crc;
}
/*
 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
 *
 * Reads the "ONFI" signature and the parameter page (up to three
 * redundant copies, first one with a valid CRC wins), then fills in
 * mtd->writesize/oobsize/erasesize, chip->chipsize, chip->onfi_version
 * and the detected bus width (*busw).
 */
static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
					int *busw)
{
	struct nand_onfi_params *p = &chip->onfi_params;
	int i;
	int val;

	/*
	 * ONFI must be probed in 8-bit mode; 16-bit parts should be
	 * selected with NAND_BUSWIDTH_AUTO instead.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
		return 0;
	}

	/* Try ONFI for unknown chip or LP */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
		return 0;

	/* Up to three redundant parameter pages; CRC covers 254 bytes. */
	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
	for (i = 0; i < 3; i++) {
		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
				le16_to_cpu(p->crc)) {
			pr_info("ONFI param page %d valid\n", i);
			break;
		}
	}

	if (i == 3)
		return 0;

	/* Check version */
	val = le16_to_cpu(p->revision);
	if (val & (1 << 5))
		chip->onfi_version = 23;
	else if (val & (1 << 4))
		chip->onfi_version = 22;
	else if (val & (1 << 3))
		chip->onfi_version = 21;
	else if (val & (1 << 2))
		chip->onfi_version = 20;
	else if (val & (1 << 1))
		chip->onfi_version = 10;

	if (!chip->onfi_version) {
		pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
		return 0;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	if (!mtd->name)
		mtd->name = p->model;

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/*
	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
	 * (don't ask me who thought of this...). MTD assumes that these
	 * dimensions will be power-of-2, so just truncate the remaining area.
	 */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* See erasesize comment */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;

	/* Bit 0 of the features field advertises a 16-bit data bus. */
	*busw = 0;
	if (le16_to_cpu(p->features) & 1)
		*busw = NAND_BUSWIDTH_16;

	pr_info("ONFI flash detected\n");
	return 1;
}
/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Returns non-zero when every byte at index i matches the byte at
 * i + @period, i.e. the ID repeats with the given period (e.g.
 * {0x20,0x01,0x7F,0x20} has a period of 3); zero otherwise. Helper
 * for nand_id_len().
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int offset, pos;

	for (offset = 0; offset < period; offset++) {
		for (pos = offset + period; pos < arrlen; pos += period) {
			if (id_data[pos] != id_data[offset])
				return 0;
		}
	}

	return 1;
}
/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound
 * and trailing-zero patterns. If no pattern exists, returns the length
 * of the array.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int last_nonzero = arrlen - 1;
	int period;

	/* Locate the last non-zero byte. */
	while (last_nonzero >= 0 && !id_data[last_nonzero])
		last_nonzero--;

	/* Entirely zero: no ID data at all. */
	if (last_nonzero < 0)
		return 0;

	/* Find the shortest wraparound period, if any. */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))
			break;

	/* There's a repeated pattern */
	if (period < arrlen)
		return period;

	/* There are trailing zeros */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected */
	return arrlen;
}
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 *
 * Fills in mtd->writesize/oobsize/erasesize and the bus width (*busw)
 * from the raw 8-byte ID string.
 */
static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
				u8 id_data[8], int *busw)
{
	int extid, id_len;
	/* The 3rd id byte holds MLC / multichip data */
	chip->cellinfo = id_data[2];
	/* The 4th id byte is the important one */
	extid = id_data[3];

	id_len = nand_id_len(id_data, 8);

	/*
	 * Field definitions are in the following datasheets:
	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
	 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
	 *
	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
	 * ID to decide what to do.
	 */
	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
			id_data[5] != 0x00) {
		/* New-style 6-byte Samsung MLC decode */
		/* Calc pagesize */
		mtd->writesize = 2048 << (extid & 0x03);
		extid >>= 2;
		/* Calc oobsize */
		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
		case 1:
			mtd->oobsize = 128;
			break;
		case 2:
			mtd->oobsize = 218;
			break;
		case 3:
			mtd->oobsize = 400;
			break;
		case 4:
			mtd->oobsize = 436;
			break;
		case 5:
			mtd->oobsize = 512;
			break;
		case 6:
		default: /* Other cases are "reserved" (unknown) */
			mtd->oobsize = 640;
			break;
		}
		extid >>= 2;
		/* Calc blocksize */
		mtd->erasesize = (128 * 1024) <<
			(((extid >> 1) & 0x04) | (extid & 0x03));
		*busw = 0;
	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
			(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
		/* 6-byte Hynix MLC decode */
		unsigned int tmp;

		/* Calc pagesize */
		mtd->writesize = 2048 << (extid & 0x03);
		extid >>= 2;
		/* Calc oobsize */
		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
		case 0:
			mtd->oobsize = 128;
			break;
		case 1:
			mtd->oobsize = 224;
			break;
		case 2:
			mtd->oobsize = 448;
			break;
		case 3:
			mtd->oobsize = 64;
			break;
		case 4:
			mtd->oobsize = 32;
			break;
		case 5:
			mtd->oobsize = 16;
			break;
		default:
			mtd->oobsize = 640;
			break;
		}
		extid >>= 2;
		/* Calc blocksize */
		tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
		if (tmp < 0x03)
			mtd->erasesize = (128 * 1024) << tmp;
		else if (tmp == 0x03)
			mtd->erasesize = 768 * 1024;
		else
			mtd->erasesize = (64 * 1024) << tmp;
		*busw = 0;
	} else {
		/* Generic old-style (4,5 byte ID) decode */
		/* Calc pagesize */
		mtd->writesize = 1024 << (extid & 0x03);
		extid >>= 2;
		/* Calc oobsize */
		mtd->oobsize = (8 << (extid & 0x01)) *
			(mtd->writesize >> 9);
		extid >>= 2;
		/* Calc blocksize. Blocksize is multiples of 64KiB */
		mtd->erasesize = (64 * 1024) << (extid & 0x03);
		extid >>= 2;
		/* Get buswidth information */
		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
	}
}
/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
				struct nand_flash_dev *type, u8 id_data[8],
				int *busw)
{
	int maf_id = id_data[0];

	mtd->erasesize = type->erasesize;
	mtd->writesize = type->pagesize;
	/* OOB area on these legacy parts is 1/32 of the page size. */
	mtd->oobsize = mtd->writesize / 32;
	*busw = type->options & NAND_BUSWIDTH_16;

	/*
	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
	 * some Spansion chips have erasesize that conflicts with size
	 * listed in nand_ids table.
	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
	 */
	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
			&& id_data[6] == 0x00 && id_data[7] == 0x00
			&& mtd->writesize == 512) {
		mtd->erasesize = 128 * 1024;
		/* Block size field lives in bits 0-1 of the 4th ID byte. */
		mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
	}
}
/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct mtd_info *mtd,
				    struct nand_chip *chip, u8 id_data[8])
{
	int maf_id = id_data[0];

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
	else
		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;

	/*
	 * Bad block marker is stored in the last page of each block on Samsung
	 * and Hynix MLC devices; stored in first two pages of each block on
	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
	 * AMD/Spansion, and Macronix. All others scan only the first page.
	 */
	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
			(maf_id == NAND_MFR_SAMSUNG ||
			 maf_id == NAND_MFR_HYNIX))
		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
				(maf_id == NAND_MFR_SAMSUNG ||
				 maf_id == NAND_MFR_HYNIX ||
				 maf_id == NAND_MFR_TOSHIBA ||
				 maf_id == NAND_MFR_AMD ||
				 maf_id == NAND_MFR_MACRONIX)) ||
			(mtd->writesize == 2048 &&
			 maf_id == NAND_MFR_MICRON))
		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
}
/* A table entry with a non-zero id_len carries a full multi-byte ID. */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}
/*
 * Compare the probed ID string against a full-ID table entry and, on a
 * match, take the chip geometry straight from the table.
 *
 * Returns true when the entry matched and mtd/chip were filled in.
 */
static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
		   struct nand_flash_dev *type, u8 *id_data, int *busw)
{
	/*
	 * Use memcmp(): the ID is raw binary data and may legitimately
	 * contain 0x00 bytes, which would terminate a strncmp()
	 * comparison early and let a partial prefix pass as a match.
	 */
	if (!memcmp(type->id, id_data, type->id_len)) {
		mtd->writesize = type->pagesize;
		mtd->erasesize = type->erasesize;
		mtd->oobsize = type->oobsize;

		chip->cellinfo = id_data[2];
		chip->chipsize = (uint64_t)type->chipsize << 20;
		chip->options |= type->options;

		*busw = type->options & NAND_BUSWIDTH_16;

		return true;
	}
	return false;
}
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 *
 * Resets and probes the chip, validates the ID by reading it twice,
 * matches it against @type (or the built-in nand_flash_ids table, with
 * an ONFI probe as fallback), decodes the geometry and fills in the
 * derived shift/mask fields. Returns the matched table entry or an
 * ERR_PTR() code (-ENODEV, -EINVAL).
 */
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
						  struct nand_chip *chip,
						  int busw,
						  int *maf_id, int *dev_id,
						  struct nand_flash_dev *type)
{
	int i, maf_idx;
	u8 id_data[8];

	/* Select the device */
	chip->select_chip(mtd, 0);

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	/* Send the command for reading device ID */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	/* Read manufacturer and device IDs */
	*maf_id = chip->read_byte(mtd);
	*dev_id = chip->read_byte(mtd);

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	/* Read entire ID string */
	for (i = 0; i < 8; i++)
		id_data[i] = chip->read_byte(mtd);

	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
		pr_info("%s: second ID read did not match "
			"%02x,%02x against %02x,%02x\n", __func__,
			*maf_id, *dev_id, id_data[0], id_data[1]);
		return ERR_PTR(-ENODEV);
	}

	if (!type)
		type = nand_flash_ids;

	/* Full-ID entries match the whole ID string; legacy ones the dev_id */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
				goto ident_done;
		} else if (*dev_id == type->dev_id) {
				break;
		}
	}

	chip->onfi_version = 0;
	if (!type->name || !type->pagesize) {
		/* Check is chip is ONFI compliant */
		if (nand_flash_detect_onfi(mtd, chip, &busw))
			goto ident_done;
	}

	if (!type->name)
		return ERR_PTR(-ENODEV);

	if (!mtd->name)
		mtd->name = type->name;

	chip->chipsize = (uint64_t)type->chipsize << 20;

	if (!type->pagesize && chip->init_size) {
		/* Set the pagesize, oobsize, erasesize by the driver */
		busw = chip->init_size(mtd, chip, id_data);
	} else if (!type->pagesize) {
		/* Decode parameters from extended ID */
		nand_decode_ext_id(mtd, chip, id_data, &busw);
	} else {
		nand_decode_id(mtd, chip, type, id_data, &busw);
	}
	/* Get chip options */
	chip->options |= type->options;

	/*
	 * Check if chip is not a Samsung device. Do not clear the
	 * options for chips which do not have an extended id.
	 */
	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:

	/* Try to identify manufacturer */
	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
		if (nand_manuf_ids[maf_idx].id == *maf_id)
			break;
	}

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(chip->options & NAND_BUSWIDTH_16);
		chip->options |= busw;
		nand_set_defaults(chip, busw);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("NAND device: Manufacturer ID:"
			" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
			*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
		pr_warn("NAND bus width %d instead %d bit\n",
			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
			   busw ? 16 : 8);
		return ERR_PTR(-EINVAL);
	}

	nand_decode_bbm_options(mtd, chip, id_data);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		/* Chip size is a multiple of 4GiB: shift in two steps */
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	chip->badblockbits = 8;
	chip->erase_cmd = single_erase_cmd;

	/* Do not replace user supplied command function! */
	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
		chip->cmdfunc = nand_command_lp;

	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
		" %dMiB, page size: %d, OOB size: %d\n",
		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
		chip->onfi_version ? chip->onfi_params.model : type->name,
		(int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);

	return type;
}
/**
 * nand_scan_ident - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * The mtd->owner field must be set to the module of the caller.
 *
 * Returns 0 on success or a negative error code when no device is found.
 */
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
		    struct nand_flash_dev *table)
{
	int i, busw, nand_maf_id, nand_dev_id;
	struct nand_chip *chip = mtd->priv;
	struct nand_flash_dev *type;

	/* Get buswidth to select the correct functions */
	busw = chip->options & NAND_BUSWIDTH_16;
	/* Set the default functions */
	nand_set_defaults(chip, busw);

	/* Read the flash type */
	type = nand_get_flash_type(mtd, chip, busw,
				&nand_maf_id, &nand_dev_id, table);

	if (IS_ERR(type)) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(mtd, -1);
		return PTR_ERR(type);
	}

	chip->select_chip(mtd, -1);

	/* Check for a chip array: further chips must echo the same ID */
	for (i = 1; i < maxchips; i++) {
		chip->select_chip(mtd, i);
		/* See comment in nand_get_flash_type for reset */
		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		/* Send the command for reading device ID */
		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
		/* Read manufacturer and device IDs */
		if (nand_maf_id != chip->read_byte(mtd) ||
		    nand_dev_id != chip->read_byte(mtd)) {
			chip->select_chip(mtd, -1);
			break;
		}
		chip->select_chip(mtd, -1);
	}
	if (i > 1)
		pr_info("%d NAND chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;

	return 0;
}
EXPORT_SYMBOL(nand_scan_ident);
/**
 * nand_scan_tail - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 *
 * Returns 0 on success, -ENOMEM if the data buffers cannot be allocated,
 * or the result of the bad block table scan.
 */
int nand_scan_tail(struct mtd_info *mtd)
{
	int i;
	struct nand_chip *chip = mtd->priv;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
			!(chip->bbt_options & NAND_BBT_USE_FLASH));

	if (!(chip->options & NAND_OWN_BUFFERS))
		chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
	if (!chip->buffers)
		return -ENOMEM;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->buffers->databuf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * (SOFT_BCH builds its own layout in nand_bch_init() below.)
	 */
	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
		switch (mtd->oobsize) {
		case 8:
			chip->ecc.layout = &nand_oob_8;
			break;
		case 16:
			chip->ecc.layout = &nand_oob_16;
			break;
		case 64:
			chip->ecc.layout = &nand_oob_64;
			break;
		case 128:
			chip->ecc.layout = &nand_oob_128;
			break;
		default:
			pr_warn("No oob scheme defined for oobsize %d\n",
				   mtd->oobsize);
			BUG();
		}
	}

	if (!chip->write_page)
		chip->write_page = nand_write_page;

	/* set for ONFI nand */
	if (!chip->onfi_set_features)
		chip->onfi_set_features = nand_onfi_set_features;
	if (!chip->onfi_get_features)
		chip->onfi_get_features = nand_onfi_get_features;

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (chip->ecc.mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!chip->ecc.calculate || !chip->ecc.correct ||
			 !chip->ecc.hwctl) {
			pr_warn("No ECC functions supplied; "
				   "hardware ECC not possible\n");
			BUG();
		}
		if (!chip->ecc.read_page)
			chip->ecc.read_page = nand_read_page_hwecc_oob_first;
		/* fall through - shares the remaining defaults with HW ECC */

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!chip->ecc.read_page)
			chip->ecc.read_page = nand_read_page_hwecc;
		if (!chip->ecc.write_page)
			chip->ecc.write_page = nand_write_page_hwecc;
		if (!chip->ecc.read_page_raw)
			chip->ecc.read_page_raw = nand_read_page_raw;
		if (!chip->ecc.write_page_raw)
			chip->ecc.write_page_raw = nand_write_page_raw;
		if (!chip->ecc.read_oob)
			chip->ecc.read_oob = nand_read_oob_std;
		if (!chip->ecc.write_oob)
			chip->ecc.write_oob = nand_write_oob_std;
		if (!chip->ecc.read_subpage)
			chip->ecc.read_subpage = nand_read_subpage;
		if (!chip->ecc.write_subpage)
			chip->ecc.write_subpage = nand_write_subpage_hwecc;
		/* fall through - HW mode shares the sanity check below */

	case NAND_ECC_HW_SYNDROME:
		if ((!chip->ecc.calculate || !chip->ecc.correct ||
		     !chip->ecc.hwctl) &&
		    (!chip->ecc.read_page ||
		     chip->ecc.read_page == nand_read_page_hwecc ||
		     !chip->ecc.write_page ||
		     chip->ecc.write_page == nand_write_page_hwecc)) {
			pr_warn("No ECC functions supplied; "
				   "hardware ECC not possible\n");
			BUG();
		}
		/* Use standard syndrome read/write page function? */
		if (!chip->ecc.read_page)
			chip->ecc.read_page = nand_read_page_syndrome;
		if (!chip->ecc.write_page)
			chip->ecc.write_page = nand_write_page_syndrome;
		if (!chip->ecc.read_page_raw)
			chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
		if (!chip->ecc.write_page_raw)
			chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
		if (!chip->ecc.read_oob)
			chip->ecc.read_oob = nand_read_oob_syndrome;
		if (!chip->ecc.write_oob)
			chip->ecc.write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= chip->ecc.size) {
			if (!chip->ecc.strength) {
				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
				BUG();
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on "
			   "%d byte page size, fallback to SW ECC\n",
			   chip->ecc.size, mtd->writesize);
		chip->ecc.mode = NAND_ECC_SOFT;
		/* fall through - use the software ECC defaults */

	case NAND_ECC_SOFT:
		chip->ecc.calculate = nand_calculate_ecc;
		chip->ecc.correct = nand_correct_data;
		chip->ecc.read_page = nand_read_page_swecc;
		chip->ecc.read_subpage = nand_read_subpage;
		chip->ecc.write_page = nand_write_page_swecc;
		chip->ecc.read_page_raw = nand_read_page_raw;
		chip->ecc.write_page_raw = nand_write_page_raw;
		chip->ecc.read_oob = nand_read_oob_std;
		chip->ecc.write_oob = nand_write_oob_std;
		if (!chip->ecc.size)
			chip->ecc.size = 256;
		chip->ecc.bytes = 3;
		chip->ecc.strength = 1;
		break;

	case NAND_ECC_SOFT_BCH:
		if (!mtd_nand_has_bch()) {
			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
			BUG();
		}
		chip->ecc.calculate = nand_bch_calculate_ecc;
		chip->ecc.correct = nand_bch_correct_data;
		chip->ecc.read_page = nand_read_page_swecc;
		chip->ecc.read_subpage = nand_read_subpage;
		chip->ecc.write_page = nand_write_page_swecc;
		chip->ecc.read_page_raw = nand_read_page_raw;
		chip->ecc.write_page_raw = nand_write_page_raw;
		chip->ecc.read_oob = nand_read_oob_std;
		chip->ecc.write_oob = nand_write_oob_std;
		/*
		 * Board driver should supply ecc.size and ecc.bytes values to
		 * select how many bits are correctable; see nand_bch_init()
		 * for details. Otherwise, default to 4 bits for large page
		 * devices.
		 */
		if (!chip->ecc.size && (mtd->oobsize >= 64)) {
			chip->ecc.size = 512;
			chip->ecc.bytes = 7;
		}
		chip->ecc.priv = nand_bch_init(mtd,
					       chip->ecc.size,
					       chip->ecc.bytes,
					       &chip->ecc.layout);
		if (!chip->ecc.priv) {
			pr_warn("BCH ECC initialization failed!\n");
			BUG();
		}
		chip->ecc.strength =
			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. "
			   "This is not recommended!\n");
		chip->ecc.read_page = nand_read_page_raw;
		chip->ecc.write_page = nand_write_page_raw;
		chip->ecc.read_oob = nand_read_oob_std;
		chip->ecc.read_page_raw = nand_read_page_raw;
		chip->ecc.write_page_raw = nand_write_page_raw;
		chip->ecc.write_oob = nand_write_oob_std;
		chip->ecc.size = mtd->writesize;
		chip->ecc.bytes = 0;
		chip->ecc.strength = 0;
		break;

	default:
		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
		BUG();
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!chip->ecc.read_oob_raw)
		chip->ecc.read_oob_raw = chip->ecc.read_oob;
	if (!chip->ecc.write_oob_raw)
		chip->ecc.write_oob_raw = chip->ecc.write_oob;

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 *
	 * Check the index bound BEFORE dereferencing oobfree[i]: with a
	 * fully populated oobfree[] array, testing the length first read
	 * one entry past the end of the array.
	 */
	chip->ecc.layout->oobavail = 0;
	for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree)
			&& chip->ecc.layout->oobfree[i].length; i++)
		chip->ecc.layout->oobavail +=
			chip->ecc.layout->oobfree[i].length;
	mtd->oobavail = chip->ecc.layout->oobavail;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
		pr_warn("Invalid ECC parameters\n");
		BUG();
	}
	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
		switch (chip->ecc.steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
		chip->options |= NAND_SUBPAGE_READ;

	/* Fill in remaining MTD driver data */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_read = nand_read;
	mtd->_write = nand_write;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->writebufsize = mtd->writesize;

	/* propagate ecc info to mtd_info */
	mtd->ecclayout = chip->ecc.layout;
	mtd->ecc_strength = chip->ecc.strength;
	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	return chip->scan_bbt(mtd);
}
EXPORT_SYMBOL(nand_scan_tail);
/*
* is_module_text_address() isn't exported, and it's mostly a pointless
* test if this is a module _anyway_ -- they'd have to try _really_ hard
* to call us from in-kernel code if the core NAND support is modular.
*/
#ifdef MODULE
#define caller_is_module() (1)
#else
#define caller_is_module() \
is_module_text_address((unsigned long)__builtin_return_address(0))
#endif
/**
 * nand_scan - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values. The mtd->owner field must be set to the module of the
 * caller.
 */
int nand_scan(struct mtd_info *mtd, int maxchips)
{
	int ret;

	/* Many callers got this wrong, so check for it for a while... */
	if (!mtd->owner && caller_is_module()) {
		pr_crit("%s called with NULL mtd->owner!\n", __func__);
		BUG();
	}

	/* Phase 1: identify the chip(s); phase 2: fill in the defaults. */
	ret = nand_scan_ident(mtd, maxchips, NULL);
	if (ret)
		return ret;

	return nand_scan_tail(mtd);
}
EXPORT_SYMBOL(nand_scan);
/**
 * nand_release - [NAND Interface] Free resources held by the NAND device
 * @mtd: MTD device structure
 *
 * Unregisters the MTD device and frees the bad block table, the data
 * buffers (unless the board driver owns them) and any dynamically
 * allocated bad block descriptor.
 */
void nand_release(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	/* Release the BCH control state set up by nand_scan_tail() */
	if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	mtd_device_unregister(mtd);

	/* Free bad block table memory */
	kfree(chip->bbt);
	if (!(chip->options & NAND_OWN_BUFFERS))
		kfree(chip->buffers);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);
}
EXPORT_SYMBOL_GPL(nand_release);
/* Module init: register the "nand-disk" LED trigger used during NAND I/O */
static int __init nand_base_init(void)
{
	led_trigger_register_simple("nand-disk", &nand_led_trigger);
	return 0;
}
/* Module exit: drop the "nand-disk" LED trigger registered at init */
static void __exit nand_base_exit(void)
{
	led_trigger_unregister_simple(nand_led_trigger);
}
module_init(nand_base_init);
module_exit(nand_base_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");
| gpl-2.0 |
kamma/ace_kernel | net/core/dev_mcast.c | 775 | 5650 | /*
* Linux NET3: Multicast List maintenance.
*
* Authors:
* Tim Kordas <tjk@nostromo.eeap.cwru.edu>
* Richard Underwood <richard@wuzz.demon.co.uk>
*
* Stir fried together from the IP multicast and CAP patches above
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Fixes:
* Alan Cox : Update the device on a real delete
* rather than any time but...
* Alan Cox : IFF_ALLMULTI support.
* Alan Cox : New format set_multicast_list() calls.
* Gleb Natapov : Remove dev_mc_lock.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
/*
* Device multicast list maintenance.
*
* This is used both by IP and by the user level maintenance functions.
* Unlike BSD we maintain a usage count on a given multicast address so
* that a casual user application can add/delete multicasts used by
* protocols without doing damage to the protocols when it deletes the
* entries. It also helps IP as it tracks overlapping maps.
*
* Device mc lists are changed by bh at least if IPv6 is enabled,
* so that it must be bh protected.
*
* We block accesses to device mc filters with netif_tx_lock.
*/
/*
* Delete a device level multicast
*/
/*
 * Drop one reference to a device-level multicast address; on the last
 * reference the entry is removed and the hardware filter is reloaded.
 */
int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
{
	int ret;

	netif_addr_lock_bh(dev);
	ret = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
				addr, alen, glbl);
	/*
	 * On success the list changed, so the filter loaded into the
	 * card is now stale and must be reprogrammed.
	 */
	if (!ret)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);

	return ret;
}
/*
* Add a device level multicast
*/
/*
 * Add (or take another reference on) a device-level multicast address
 * and, when the list actually changed, reload the hardware filter.
 */
int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
{
	int ret;

	netif_addr_lock_bh(dev);
	ret = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
	if (!ret)
		__dev_set_rx_mode(dev);	/* list changed: reprogram the card */
	netif_addr_unlock_bh(dev);

	return ret;
}
/**
* dev_mc_sync - Synchronize device's multicast list to another device
* @to: destination device
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
*
* This function is intended to be called from the dev->set_multicast_list
* or dev->set_rx_mode function of layered software devices.
*/
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int ret;

	netif_addr_lock_bh(to);
	ret = __dev_addr_sync(&to->mc_list, &to->mc_count,
			      &from->mc_list, &from->mc_count);
	if (!ret)
		__dev_set_rx_mode(to);	/* push the updated list to the hw */
	netif_addr_unlock_bh(to);

	return ret;
}
EXPORT_SYMBOL(dev_mc_sync);
/**
* dev_mc_unsync - Remove synchronized addresses from the destination
* device
* @to: destination device
* @from: source device
*
* Remove all addresses that were added to the destination device by
* dev_mc_sync(). This function is intended to be called from the
* dev->stop function of layered software devices.
*/
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	/*
	 * Lock 'from' first (BH-safe), then nest the 'to' lock inside it,
	 * so both lists are stable while they are walked together.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__dev_addr_unsync(&to->mc_list, &to->mc_count,
			  &from->mc_list, &from->mc_count);
	/* The destination list changed: reload its hardware filter */
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
#ifdef CONFIG_PROC_FS
/*
 * Emit one /proc/net/dev_mcast line per multicast entry of the device:
 * ifindex, name, refcount, global refcount, then the address in hex.
 */
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = v;
	struct dev_addr_list *entry;
	int i;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	for (entry = dev->mc_list; entry; entry = entry->next) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
			   dev->name, entry->dmi_users, entry->dmi_gusers);

		for (i = 0; i < entry->dmi_addrlen; i++)
			seq_printf(seq, "%02x", entry->dmi_addr[i]);

		seq_putc(seq, '\n');
	}
	netif_addr_unlock_bh(dev);
	return 0;
}
/* seq_file iterator for /proc/net/dev_mcast; start/next/stop walk devices */
static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};
/* Open handler: bind the seq iterator to the opener's network namespace */
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_mc_seq_ops,
			    sizeof(struct seq_net_private));
}
/* File operations backing /proc/net/dev_mcast */
static const struct file_operations dev_mc_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_mc_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
/* Per-namespace init: expose /proc/net/dev_mcast, -ENOMEM on failure */
static int __net_init dev_mc_net_init(struct net *net)
{
	return proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops) ?
		0 : -ENOMEM;
}
/* Per-namespace teardown: remove /proc/net/dev_mcast */
static void __net_exit dev_mc_net_exit(struct net *net)
{
	proc_net_remove(net, "dev_mcast");
}
/* Per-network-namespace hooks creating/removing the proc entry */
static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};
/* Called once at boot: register the per-namespace proc hooks */
void __init dev_mcast_init(void)
{
	register_pernet_subsys(&dev_mc_net_ops);
}
EXPORT_SYMBOL(dev_mc_add);
EXPORT_SYMBOL(dev_mc_delete);
| gpl-2.0 |
brunotl/kernel-kobo-mx50-ntx | drivers/net/tokenring/proteon.c | 1543 | 9534 | /*
* proteon.c: A network driver for Proteon ISA token ring cards.
*
* Based on tmspci written 1999 by Adam Fritzler
*
* Written 2003 by Jochen Friedrich
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* This driver module supports the following cards:
* - Proteon 1392, 1392+
*
* Maintainer(s):
* AF Adam Fritzler
* JF Jochen Friedrich jochen@scram.de
*
* Modification History:
* 02-Jan-03 JF Created
*
*/
static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/platform_device.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pci.h>
#include <asm/dma.h>
#include "tms380tr.h"
#define PROTEON_IO_EXTENT 32
/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int portlist[] __initdata = {
0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot.
0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot.
0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot.
0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot.
0
};
/* A zero-terminated list of IRQs to be probed. */
static unsigned short irqlist[] = {
7, 6, 5, 4, 3, 12, 11, 10, 9,
0
};
/* A zero-terminated list of DMAs to be probed. */
static int dmalist[] __initdata = {
5, 6, 7,
0
};
static char cardname[] = "Proteon 1392\0";
static u64 dma_mask = ISA_MAX_ADDRESS;
static int proteon_open(struct net_device *dev);
static void proteon_read_eeprom(struct net_device *dev);
static unsigned short proteon_setnselout_pins(struct net_device *dev);
/* Read one byte from a TMS380 SIF register at base_addr + reg */
static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg)
{
	return inb(dev->base_addr + reg);
}
/* Read one 16-bit word from a TMS380 SIF register at base_addr + reg */
static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg)
{
	return inw(dev->base_addr + reg);
}
/* Write one byte to a TMS380 SIF register at base_addr + reg */
static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outb(val, dev->base_addr + reg);
}
/* Write one 16-bit word to a TMS380 SIF register at base_addr + reg */
static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outw(val, dev->base_addr + reg);
}
/*
 * Probe a single I/O location for a Proteon adapter. Claims the region,
 * validates the two ID registers, and on success records the base
 * address in the net_device. Returns 0 on success, -ENODEV otherwise.
 */
static int __init proteon_probe1(struct net_device *dev, int ioaddr)
{
	unsigned char prev, cur;
	int i;

	if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname))
		return -ENODEV;

	/* ID register 1 must read back as 0x1f */
	cur = inb(ioaddr + 0x1f);
	if (cur != 0x1f)
		goto nodev;

	/* ID register 0 is a 3-bit counter that must advance on each read */
	prev = inb(ioaddr + 0x1e) & 0x07;
	for (i = 0; i < 16; i++) {
		cur = inb(ioaddr + 0x1e) & 0x07;
		if (((prev + 1) & 0x07) != cur)
			goto nodev;
		prev = cur;
	}

	dev->base_addr = ioaddr;
	return 0;
nodev:
	release_region(ioaddr, PROTEON_IO_EXTENT);
	return -ENODEV;
}
static struct net_device_ops proteon_netdev_ops __read_mostly;
/*
 * Probe for the adapter, initialize the TMS380 core, resolve IRQ and DMA
 * (auto-select from the candidate lists when 0, validate otherwise) and
 * register the net device. Returns 0 on success or a negative errno;
 * on failure all acquired resources are unwound via the goto chain.
 */
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
	struct net_local *tp;
	static int versionprinted;	/* print the version banner only once */
	const unsigned *port;
	int j,err = 0;

	if (!dev)
		return -ENOMEM;

	if (dev->base_addr)	/* probe specific location */
		err = proteon_probe1(dev, dev->base_addr);
	else {
		for (port = portlist; *port; port++) {
			err = proteon_probe1(dev, *port);
			if (!err)
				break;
		}
	}
	if (err)
		goto out5;

	/* At this point we have found a valid card. */
	if (versionprinted++ == 0)
		printk(KERN_DEBUG "%s", version);

	err = -EIO;
	pdev->dma_mask = &dma_mask;
	if (tmsdev_init(dev, pdev))
		goto out4;

	dev->base_addr &= ~3;

	proteon_read_eeprom(dev);

	printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n",
	       dev->dev_addr);

	/* Hook up the board-specific SIF accessors for the TMS380 core */
	tp = netdev_priv(dev);
	tp->setnselout = proteon_setnselout_pins;

	tp->sifreadb = proteon_sifreadb;
	tp->sifreadw = proteon_sifreadw;
	tp->sifwriteb = proteon_sifwriteb;
	tp->sifwritew = proteon_sifwritew;

	memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1);

	tp->tmspriv = NULL;

	dev->netdev_ops = &proteon_netdev_ops;

	/* IRQ: auto-select from irqlist when 0, otherwise validate & claim */
	if (dev->irq == 0)
	{
		for(j = 0; irqlist[j] != 0; j++)
		{
			dev->irq = irqlist[j];
			if (!request_irq(dev->irq, tms380tr_interrupt, 0,
				cardname, dev))
				break;
		}

		if(irqlist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
			goto out3;
		}
	}
	else
	{
		for(j = 0; irqlist[j] != 0; j++)
			if (irqlist[j] == dev->irq)
				break;

		if (irqlist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
				dev->irq);
			goto out3;
		}
		if (request_irq(dev->irq, tms380tr_interrupt, 0,
			cardname, dev))
		{
			printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
				dev->irq);
			goto out3;
		}
	}

	/* DMA: same auto-select/validate scheme as the IRQ above */
	if (dev->dma == 0)
	{
		for(j = 0; dmalist[j] != 0; j++)
		{
			dev->dma = dmalist[j];
			if (!request_dma(dev->dma, cardname))
				break;
		}

		if(dmalist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
			goto out2;
		}
	}
	else
	{
		for(j = 0; dmalist[j] != 0; j++)
			if (dmalist[j] == dev->dma)
				break;

		if (dmalist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
				dev->dma);
			goto out2;
		}
		if (request_dma(dev->dma, cardname))
		{
			printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
				dev->dma);
			goto out2;
		}
	}

	err = register_netdev(dev);
	if (err)
		goto out;

	printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
		dev->name, dev->base_addr, dev->irq, dev->dma);

	return 0;
	/* Unwind in reverse order of acquisition */
out:
	free_dma(dev->dma);
out2:
	free_irq(dev->irq, dev);
out3:
	tmsdev_term(dev);
out4:
	release_region(dev->base_addr, PROTEON_IO_EXTENT);
out5:
	return err;
}
/*
* Reads MAC address from adapter RAM, which should've read it from
* the onboard ROM.
*
* Calling this on a board that does not support it can be a very
* dangerous thing. The Madge board, for instance, will lock your
 * machine hard when this is called. Luckily, it's supported in a
* separate driver. --ASF
*/
static void proteon_read_eeprom(struct net_device *dev)
{
	int i;

	/* Address: 0000:0000 */
	proteon_sifwritew(dev, 0, SIFADX);
	proteon_sifwritew(dev, 0, SIFADR);

	/* Read six byte MAC address data */
	dev->addr_len = 6;
	/* Each auto-incrementing 16-bit read carries one byte in its high half */
	for(i = 0; i < 6; i++)
		dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
}
/* NSELOUT pin selection callback for the TMS380 core; this board uses 0 */
static unsigned short proteon_setnselout_pins(struct net_device *dev)
{
	return 0;
}
/*
 * Bring the adapter up: pulse the board reset, program speed and
 * IRQ/DMA routing into the board control registers, then hand off to
 * the generic TMS380 open path.
 */
static int proteon_open(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short val = 0;
	int i;

	/* Proteon reset sequence */
	outb(0, dev->base_addr + 0x11);
	mdelay(20);
	outb(0x04, dev->base_addr + 0x11);
	mdelay(20);
	outb(0, dev->base_addr + 0x11);
	mdelay(100);

	/* set control/status reg */
	val = inb(dev->base_addr + 0x11);
	val |= 0x78;
	val &= 0xf9;
	/* bit 0x20 selects the 4 Mbit ring speed; clear it for 16 Mbit */
	if(tp->DataRate == SPEED_4)
		val |= 0x20;
	else
		val &= ~0x20;
	outb(val, dev->base_addr + 0x11);

	outb(0xff, dev->base_addr + 0x12);
	/*
	 * Register 0x13 encodes the index of the IRQ within irqlist[] in
	 * the low nibble and (7 - dma_channel) in the high nibble.
	 */
	for(i = 0; irqlist[i] != 0; i++)
	{
		if(irqlist[i] == dev->irq)
			break;
	}
	val = i;
	i = (7 - dev->dma) << 4;
	val |= i;
	outb(val, dev->base_addr + 0x13);

	return tms380tr_open(dev);
}
#define ISATR_MAX_ADAPTERS 3
static int io[ISATR_MAX_ADAPTERS];
static int irq[ISATR_MAX_ADAPTERS];
static int dma[ISATR_MAX_ADAPTERS];
MODULE_LICENSE("GPL");
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
static struct platform_driver proteon_driver = {
.driver = {
.name = "proteon",
},
};
/*
 * Module init: build the netdev ops from the generic TMS380 ops, then
 * probe up to ISATR_MAX_ADAPTERS cards using the module parameters
 * io[]/irq[]/dma[]. Returns 0 if at least one card was set up,
 * -ENODEV (after unregistering the driver) when none were found.
 */
static int __init proteon_init(void)
{
	struct net_device *dev;
	struct platform_device *pdev;
	int i, num = 0, err = 0;

	proteon_netdev_ops = tms380tr_netdev_ops;
	proteon_netdev_ops.ndo_open = proteon_open;
	proteon_netdev_ops.ndo_stop = tms380tr_close;

	err = platform_driver_register(&proteon_driver);
	if (err)
		return err;

	/* Probe for cards. */
	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
		dev = alloc_trdev(sizeof(struct net_local));
		if (!dev)
			continue;

		dev->base_addr = io[i];
		dev->irq = irq[i];
		dev->dma = dma[i];
		pdev = platform_device_register_simple("proteon",
			i, NULL, 0);
		if (IS_ERR(pdev)) {
			free_netdev(dev);
			continue;
		}
		err = setup_card(dev, &pdev->dev);
		if (!err) {
			proteon_dev[i] = pdev;
			platform_set_drvdata(pdev, dev);
			++num;
		} else {
			/* setup failed: drop both the pdev and the netdev */
			platform_device_unregister(pdev);
			free_netdev(dev);
		}
	}

	printk(KERN_NOTICE "proteon.c: %d cards found.\n", num);
	if (num == 0) {
		printk(KERN_NOTICE "proteon.c: No cards found.\n");
		platform_driver_unregister(&proteon_driver);
		return -ENODEV;
	}
	return 0;
}
/*
 * Module exit: tear down every card registered in proteon_dev[],
 * releasing resources in the reverse order of setup_card().
 */
static void __exit proteon_cleanup(void)
{
	struct net_device *dev;
	int i;

	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
		struct platform_device *pdev = proteon_dev[i];

		if (!pdev)
			continue;
		dev = platform_get_drvdata(pdev);
		unregister_netdev(dev);
		release_region(dev->base_addr, PROTEON_IO_EXTENT);
		free_irq(dev->irq, dev);
		free_dma(dev->dma);
		tmsdev_term(dev);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
		platform_device_unregister(pdev);
	}
	platform_driver_unregister(&proteon_driver);
}
module_init(proteon_init);
module_exit(proteon_cleanup);
| gpl-2.0 |
HoSStiA/linux-4.1-imx-var | drivers/gpu/drm/exynos/exynos_dp_reg.c | 1543 | 32506 | /*
* Samsung DP (Display port) register interface driver.
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
#define COMMON_INT_MASK_1 0
#define COMMON_INT_MASK_2 0
#define COMMON_INT_MASK_3 0
#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
#define INT_STA_MASK INT_HPD
/* Set or clear the HDCP video-mute bit in VIDEO_CTL_1 */
void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
{
	u32 reg;

	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
	if (enable)
		reg |= HDCP_VIDEO_MUTE;
	else
		reg &= ~HDCP_VIDEO_MUTE;
	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
}
/* Clear VIDEO_EN in VIDEO_CTL_1 so the controller stops the video stream */
void exynos_dp_stop_video(struct exynos_dp_device *dp)
{
	u32 reg;

	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
	reg &= ~VIDEO_EN;
	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
}
/*
 * Program the logical-to-physical lane map: with 'enable' the four
 * lanes are reversed (0<->3, 1<->2), otherwise the identity map is used.
 */
void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
{
	u32 reg;

	if (enable)
		reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
			LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
	else
		reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
			LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;

	writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
}
/*
 * Program the analog PHY parameters: line/AUX termination, supply
 * voltages, drive currents and per-channel amplitude.
 */
void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
{
	u32 reg;

	reg = TX_TERMINAL_CTRL_50_OHM;
	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);

	reg = SEL_24M | TX_DVDD_BIT_1_0625V;
	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);

	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);

	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
		TX_CUR1_2X | TX_CUR_16_MA;
	writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);

	reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
		CH1_AMP_400_MV | CH0_AMP_400_MV;
	writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
}
/*
 * Put the interrupt logic into a known state: set pin polarity,
 * acknowledge anything pending, and mask every source.
 */
void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
{
	/* Set interrupt pin assertion polarity as high */
	writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);

	/* Clear pending registers (write-1-to-clear) */
	writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
	writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2);
	writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3);
	writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
	writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA);

	/* 0:mask,1: unmask */
	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
	writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
}
/*
 * Bring the controller into a known state: stop video, clear the mute,
 * disable all function blocks (the *_FUNC_EN_N bits are active-low
 * enables, so setting them disables the block), then program default
 * values into the control registers.
 */
void exynos_dp_reset(struct exynos_dp_device *dp)
{
	u32 reg;

	exynos_dp_stop_video(dp);
	exynos_dp_enable_video_mute(dp, 0);

	reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
		AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
		HDCP_FUNC_EN_N | SW_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);

	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
		SERDES_FIFO_FUNC_EN_N |
		LS_CLK_DOMAIN_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);

	usleep_range(20, 30);

	exynos_dp_lane_swap(dp, 0);

	/* Default register values, incl. HPD deglitch and FIFO thresholds */
	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
	writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4);

	writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL);
	writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL);

	writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L);
	writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H);

	writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL);

	writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST);

	writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD);
	writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN);

	writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH);
	writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH);

	writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
}
/* Trigger a software reset of the DP transmitter core */
void exynos_dp_swreset(struct exynos_dp_device *dp)
{
	writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
}
/* Apply the compile-time interrupt mask configuration (0: mask, 1: unmask) */
void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
{
	writel(COMMON_INT_MASK_1, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
	writel(COMMON_INT_MASK_2, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
	writel(COMMON_INT_MASK_3, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
	writel(COMMON_INT_MASK_4, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
	writel(INT_STA_MASK, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
}
/* Sample the PLL_LOCK bit of the debug control register */
enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp)
{
	u32 debug_ctl = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);

	return (debug_ctl & PLL_LOCK) ? PLL_LOCKED : PLL_UNLOCKED;
}
/* Power the link PLL down (enable=true) or back up via DP_PLL_PD */
void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable)
{
	u32 reg;

	reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
	if (enable)
		reg |= DP_PLL_PD;
	else
		reg &= ~DP_PLL_PD;
	writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
}
/*
 * Power a PHY analog block down (enable=true) or up (enable=false).
 *
 * Every per-block case is the same read-modify-write of EXYNOS_DP_PHY_PD
 * with a different bit, so the switch only selects the mask; the single
 * RMW below applies it. POWER_ALL keeps its original behavior of writing
 * the register outright (all PD bits set, or 0) rather than RMW.
 * Unknown block values are ignored, as before.
 */
void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
				enum analog_power_block block,
				bool enable)
{
	u32 reg;
	u32 mask;

	switch (block) {
	case AUX_BLOCK:
		mask = AUX_PD;
		break;
	case CH0_BLOCK:
		mask = CH0_PD;
		break;
	case CH1_BLOCK:
		mask = CH1_PD;
		break;
	case CH2_BLOCK:
		mask = CH2_PD;
		break;
	case CH3_BLOCK:
		mask = CH3_PD;
		break;
	case ANALOG_TOTAL:
		mask = DP_PHY_PD;
		break;
	case POWER_ALL:
		/* Whole-PHY power state is written directly, not RMW */
		if (enable) {
			reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
				CH1_PD | CH0_PD;
			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
		} else {
			writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD);
		}
		return;
	default:
		return;
	}

	reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
}
/*
 * Power up the whole analog PHY, wait for the PLL to lock (bounded by
 * DP_TIMEOUT_LOOP_COUNT), then enable the serdes FIFO, link-symbol
 * clock domain and AUX blocks.
 */
void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
{
	u32 reg;
	int timeout_loop = 0;

	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);

	/* Acknowledge any stale PLL-lock-change interrupt */
	reg = PLL_LOCK_CHG;
	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);

	reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
	reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
	writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);

	/* Power up PLL */
	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
		exynos_dp_set_pll_power_down(dp, 0);

		while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
			timeout_loop++;
			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
				dev_err(dp->dev, "failed to get pll lock status\n");
				return;
			}
			usleep_range(10, 20);
		}
	}

	/* Enable Serdes FIFO function and Link symbol clock domain module */
	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
		| AUX_FUNC_EN_N);
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
}
/*
 * Acknowledge all hotplug-related interrupt sources. Skipped entirely
 * when hotplug is sensed via an external GPIO instead of the controller.
 */
void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
{
	u32 reg;

	if (gpio_is_valid(dp->hpd_gpio))
		return;

	reg = HOTPLUG_CHG | HPD_LOST | PLUG;
	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);

	reg = INT_HPD;
	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
}
/*
 * Initialize hot-plug detection: clear pending HPD interrupts and drop
 * the force/override bits (F_HPD, HPD_CTRL) in SYS_CTL_3. No-op when an
 * external HPD GPIO is used.
 */
void exynos_dp_init_hpd(struct exynos_dp_device *dp)
{
	u32 reg;

	if (gpio_is_valid(dp->hpd_gpio))
		return;

	exynos_dp_clear_hotplug_interrupts(dp);

	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
	reg &= ~(F_HPD | HPD_CTRL);
	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
}
/*
 * Classify the current hotplug event. With an HPD GPIO the level itself
 * decides cable in/out; otherwise the controller's hotplug interrupt
 * status register is decoded (PLUG wins over HPD_LOST over HOTPLUG_CHG).
 */
enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
{
	u32 reg;

	if (gpio_is_valid(dp->hpd_gpio)) {
		reg = gpio_get_value(dp->hpd_gpio);
		if (reg)
			return DP_IRQ_TYPE_HP_CABLE_IN;
		else
			return DP_IRQ_TYPE_HP_CABLE_OUT;
	} else {
		/* Parse hotplug interrupt status register */
		reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);

		if (reg & PLUG)
			return DP_IRQ_TYPE_HP_CABLE_IN;

		if (reg & HPD_LOST)
			return DP_IRQ_TYPE_HP_CABLE_OUT;

		if (reg & HOTPLUG_CHG)
			return DP_IRQ_TYPE_HP_CHANGE;

		return DP_IRQ_TYPE_UNKNOWN;
	}
}
/* Disable the AUX channel block (AUX_FUNC_EN_N is an active-low enable) */
void exynos_dp_reset_aux(struct exynos_dp_device *dp)
{
	u32 reg;

	/* Disable AUX channel module */
	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
	reg |= AUX_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
}
/*
 * Initialize the AUX channel: acknowledge stale AUX interrupts, reset
 * the block, configure retry/defer behavior, then re-enable it.
 */
void exynos_dp_init_aux(struct exynos_dp_device *dp)
{
	u32 reg;

	/* Clear interrupts related to AUX channel */
	reg = RPLY_RECEIV | AUX_ERR;
	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);

	exynos_dp_reset_aux(dp);

	/* Disable AUX transaction H/W retry */
	reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
		AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
	writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);

	/* Receive AUX Channel DEFER commands equal to DEFER_COUNT*64 */
	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL);

	/* Enable AUX channel module */
	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
	reg &= ~AUX_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
}
/*
 * Returns 0 when a sink is plugged in (HPD GPIO high, or HPD_STATUS set
 * in SYS_CTL_3 when no GPIO is used), -EINVAL otherwise.
 */
int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
{
	u32 reg;

	if (gpio_is_valid(dp->hpd_gpio)) {
		if (gpio_get_value(dp->hpd_gpio))
			return 0;
	} else {
		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
		if (reg & HPD_STATUS)
			return 0;
	}

	return -EINVAL;
}
/* Enable the software function block (SW_FUNC_EN_N is active-low) */
void exynos_dp_enable_sw_function(struct exynos_dp_device *dp)
{
	u32 reg;

	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
	reg &= ~SW_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
}
/*
 * Kick off the AUX transaction programmed into the AUX CH registers and
 * wait (bounded by DP_TIMEOUT_LOOP_COUNT) for the reply. Returns 0 on
 * success, -ETIMEDOUT when no reply arrives, -EREMOTEIO on an AUX error.
 */
int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
{
	int reg;
	int retval = 0;
	int timeout_loop = 0;

	/* Enable AUX CH operation */
	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
	reg |= AUX_EN;
	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);

	/* Is AUX CH command reply received? */
	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
	while (!(reg & RPLY_RECEIV)) {
		timeout_loop++;
		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
			dev_err(dp->dev, "AUX CH command reply failed!\n");
			return -ETIMEDOUT;
		}
		reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
		usleep_range(10, 11);
	}

	/* Clear interrupt source for AUX CH command reply */
	writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA);

	/* Clear interrupt source for AUX CH access error */
	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
	if (reg & AUX_ERR) {
		writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA);
		return -EREMOTEIO;
	}

	/* Check AUX CH error access status */
	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA);
	if ((reg & AUX_STATUS_MASK) != 0) {
		dev_err(dp->dev, "AUX CH error happens: %d\n\n",
			reg & AUX_STATUS_MASK);
		return -EREMOTEIO;
	}

	return retval;
}
/*
 * Write a single byte to the sink's DPCD at reg_addr over the AUX
 * channel, retrying the transaction up to 3 times. Returns 0 on
 * success or the last transaction error.
 */
int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
				unsigned int reg_addr,
				unsigned char data)
{
	u32 reg;
	int i;
	int retval;

	for (i = 0; i < 3; i++) {
		/* Clear AUX CH data buffer */
		reg = BUF_CLR;
		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

		/* Select DPCD device address (20-bit, split over 3 regs) */
		reg = AUX_ADDR_7_0(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
		reg = AUX_ADDR_15_8(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
		reg = AUX_ADDR_19_16(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);

		/* Write data buffer */
		reg = (unsigned int)data;
		writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0);

		/*
		 * Set DisplayPort transaction and write 1 byte
		 * If bit 3 is 1, DisplayPort transaction.
		 * If Bit 3 is 0, I2C transaction.
		 */
		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

		/* Start AUX transaction */
		retval = exynos_dp_start_aux_transaction(dp);
		if (retval == 0)
			break;
		else
			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
				__func__);
	}

	return retval;
}
/*
 * Read a single byte from the sink's DPCD at reg_addr over the AUX
 * channel, retrying the transaction up to 3 times. Returns 0 on
 * success or the last transaction error.
 */
int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
				unsigned int reg_addr,
				unsigned char *data)
{
	u32 reg;
	int i;
	int retval;

	for (i = 0; i < 3; i++) {
		/* Clear AUX CH data buffer */
		reg = BUF_CLR;
		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

		/* Select DPCD device address (20-bit, split over 3 regs) */
		reg = AUX_ADDR_7_0(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
		reg = AUX_ADDR_15_8(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
		reg = AUX_ADDR_19_16(reg_addr);
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);

		/*
		 * Set DisplayPort transaction and read 1 byte
		 * If bit 3 is 1, DisplayPort transaction.
		 * If Bit 3 is 0, I2C transaction.
		 */
		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

		/* Start AUX transaction */
		retval = exynos_dp_start_aux_transaction(dp);
		if (retval == 0)
			break;
		else
			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
				__func__);
	}

	/*
	 * NOTE(review): the buffer is read back even when all three
	 * attempts failed, so *data is stale in that case — callers must
	 * check the return value before using it.
	 */
	/* Read data buffer */
	reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
	*data = (unsigned char)(reg & 0xff);

	return retval;
}
/*
 * Write 'count' bytes to the sink's DPCD starting at reg_addr, in
 * chunks of at most 16 bytes (the AUX CH buffer size), retrying each
 * chunk up to 3 times. Returns 0 on success or the last chunk's error.
 */
int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
				unsigned int reg_addr,
				unsigned int count,
				unsigned char data[])
{
	u32 reg;
	unsigned int start_offset;
	unsigned int cur_data_count;
	unsigned int cur_data_idx;
	int i;
	int retval = 0;

	/* Clear AUX CH data buffer */
	reg = BUF_CLR;
	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

	start_offset = 0;
	while (start_offset < count) {
		/* Buffer size of AUX CH is 16 * 4bytes */
		if ((count - start_offset) > 16)
			cur_data_count = 16;
		else
			cur_data_count = count - start_offset;

		for (i = 0; i < 3; i++) {
			/* Select DPCD device address */
			reg = AUX_ADDR_7_0(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
			reg = AUX_ADDR_15_8(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
			reg = AUX_ADDR_19_16(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);

			/* One 32-bit BUF_DATA register per payload byte */
			for (cur_data_idx = 0; cur_data_idx < cur_data_count;
			     cur_data_idx++) {
				reg = data[start_offset + cur_data_idx];
				writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0
							+ 4 * cur_data_idx);
			}

			/*
			 * Set DisplayPort transaction and write
			 * If bit 3 is 1, DisplayPort transaction.
			 * If Bit 3 is 0, I2C transaction.
			 */
			reg = AUX_LENGTH(cur_data_count) |
				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

			/* Start AUX transaction */
			retval = exynos_dp_start_aux_transaction(dp);
			if (retval == 0)
				break;
			else
				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
					__func__);
		}

		start_offset += cur_data_count;
	}

	return retval;
}
/*
 * Read 'count' bytes from the sink's DPCD starting at reg_addr, in
 * chunks of at most 16 bytes (the AUX CH buffer size), retrying each
 * chunk up to 3 times. Returns 0 on success or the last chunk's error.
 */
int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
				unsigned int reg_addr,
				unsigned int count,
				unsigned char data[])
{
	u32 reg;
	unsigned int start_offset;
	unsigned int cur_data_count;
	unsigned int cur_data_idx;
	int i;
	int retval = 0;

	/* Clear AUX CH data buffer */
	reg = BUF_CLR;
	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

	start_offset = 0;
	while (start_offset < count) {
		/* Buffer size of AUX CH is 16 * 4bytes */
		if ((count - start_offset) > 16)
			cur_data_count = 16;
		else
			cur_data_count = count - start_offset;

		/* AUX CH Request Transaction process */
		for (i = 0; i < 3; i++) {
			/* Select DPCD device address */
			reg = AUX_ADDR_7_0(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
			reg = AUX_ADDR_15_8(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
			reg = AUX_ADDR_19_16(reg_addr + start_offset);
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);

			/*
			 * Set DisplayPort transaction and read
			 * If bit 3 is 1, DisplayPort transaction.
			 * If Bit 3 is 0, I2C transaction.
			 */
			reg = AUX_LENGTH(cur_data_count) |
				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

			/* Start AUX transaction */
			retval = exynos_dp_start_aux_transaction(dp);
			if (retval == 0)
				break;
			else
				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
					__func__);
		}

		/* One payload byte per 32-bit BUF_DATA register */
		for (cur_data_idx = 0; cur_data_idx < cur_data_count;
		     cur_data_idx++) {
			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
						+ 4 * cur_data_idx);
			data[start_offset + cur_data_idx] =
				(unsigned char)reg;
		}

		start_offset += cur_data_count;
	}

	return retval;
}
/*
 * Address an I2C device (typically the EDID EEPROM) behind the AUX
 * channel: program the device address, then send the register offset
 * as an I2C write with MOT set so the following read continues there.
 *
 * Returns 0 on success, non-zero if the AUX transaction failed.
 */
int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
				unsigned int device_addr,
				unsigned int reg_addr)
{
	int ret;

	/* Set EDID device address (low byte only, upper bits zero) */
	writel(device_addr, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);

	/* Set offset from base address of EDID device */
	writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0);

	/*
	 * I2C write (bit 3 clear selects I2C rather than DisplayPort)
	 * with Middle-Of-Transaction set.
	 */
	writel(AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
	       AUX_TX_COMM_WRITE, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

	/* Start AUX transaction */
	ret = exynos_dp_start_aux_transaction(dp);
	if (ret != 0)
		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);

	return ret;
}
/*
 * Read a single byte from register @reg_addr of I2C device
 * @device_addr via the AUX channel.  The whole select+read sequence is
 * retried up to three times; on success the byte (as a raw register
 * word) is stored in *@data.
 *
 * Returns 0 on success, non-zero on AUX failure.
 */
int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
				unsigned int device_addr,
				unsigned int reg_addr,
				unsigned int *data)
{
	u32 reg;
	int i;
	int retval;

	for (i = 0; i < 3; i++) {
		/* Clear AUX CH data buffer */
		reg = BUF_CLR;
		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

		/* Select EDID device */
		retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
		if (retval != 0)
			continue;

		/*
		 * Set I2C transaction and read data
		 * If bit 3 is 1, DisplayPort transaction.
		 * If Bit 3 is 0, I2C transaction.
		 */
		reg = AUX_TX_COMM_I2C_TRANSACTION |
			AUX_TX_COMM_READ;
		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);

		/* Start AUX transaction */
		retval = exynos_dp_start_aux_transaction(dp);
		if (retval == 0)
			break;
		else
			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
				__func__);
	}

	/* Read data only if one of the attempts succeeded */
	if (retval == 0)
		*data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);

	return retval;
}
/*
 * Read @count bytes of EDID from I2C device @device_addr starting at
 * @reg_addr, 16 bytes per AUX transaction, with up to three retries
 * per chunk.  If the sink replies with an AUX/I2C DEFER, the next
 * attempt skips re-sending the device address and only re-issues the
 * read request.
 *
 * NOTE(review): each chunk always copies a full 16 bytes into edid[],
 * so @count is effectively assumed to be a multiple of 16 and edid[]
 * sized accordingly — confirm against callers.
 *
 * Returns 0 on success, otherwise the last AUX error.
 */
int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
				unsigned int device_addr,
				unsigned int reg_addr,
				unsigned int count,
				unsigned char edid[])
{
	u32 reg;
	unsigned int i, j;
	unsigned int cur_data_idx;
	unsigned int defer = 0;
	int retval = 0;

	for (i = 0; i < count; i += 16) {
		for (j = 0; j < 3; j++) {
			/* Clear AUX CH data buffer */
			reg = BUF_CLR;
			writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);

			/* Set normal AUX CH command */
			reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
			reg &= ~ADDR_ONLY;
			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);

			/*
			 * If Rx sends defer, Tx sends only reads
			 * request without sending address
			 */
			if (!defer)
				retval = exynos_dp_select_i2c_device(dp,
						device_addr, reg_addr + i);
			else
				defer = 0;

			if (retval == 0) {
				/*
				 * Set I2C transaction and write data
				 * If bit 3 is 1, DisplayPort transaction.
				 * If Bit 3 is 0, I2C transaction.
				 */
				reg = AUX_LENGTH(16) |
					AUX_TX_COMM_I2C_TRANSACTION |
					AUX_TX_COMM_READ;
				writel(reg, dp->reg_base +
					EXYNOS_DP_AUX_CH_CTL_1);

				/* Start AUX transaction */
				retval = exynos_dp_start_aux_transaction(dp);
				if (retval == 0)
					break;
				else
					dev_dbg(dp->dev,
						"%s: Aux Transaction fail!\n",
						__func__);
			}
			/* Check if Rx sends defer */
			reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM);
			if (reg == AUX_RX_COMM_AUX_DEFER ||
				reg == AUX_RX_COMM_I2C_DEFER) {
				dev_err(dp->dev, "Defer: %d\n\n", reg);
				defer = 1;
			}
		}

		/* Copy the 16-byte chunk out of the data buffer */
		for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
						 + 4 * cur_data_idx);
			edid[i + cur_data_idx] = (unsigned char)reg;
		}
	}

	return retval;
}
/*
 * Program the main link bandwidth.  Only the two rates the IP
 * supports (2.70 and 1.62 Gbps) are accepted; anything else is
 * silently ignored.
 */
void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype)
{
	if (bwtype == LINK_RATE_2_70GBPS || bwtype == LINK_RATE_1_62GBPS)
		writel(bwtype, dp->reg_base + EXYNOS_DP_LINK_BW_SET);
}

/* Read back the currently programmed link bandwidth into *@bwtype. */
void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype)
{
	*bwtype = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET);
}

/* Program the number of main link lanes to use. */
void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count)
{
	writel(count, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
}

/* Read back the currently programmed lane count into *@count. */
void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count)
{
	*count = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
}
/*
 * Switch enhanced framing mode on or off via the ENHANCED bit of
 * SYS_CTL_4 (single read-modify-write).
 */
void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
	if (enable)
		val |= ENHANCED;
	else
		val &= ~ENHANCED;
	writel(val, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
}
/*
 * Select the pattern sent on the main link.  Training patterns 1/2 run
 * with scrambling disabled; the link-quality patterns (PRBS7, D10.2)
 * and normal operation (DP_NONE) run with scrambling enabled.
 * Unknown values leave the register untouched.
 */
void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
				enum pattern_set pattern)
{
	u32 val;

	switch (pattern) {
	case PRBS7:
		val = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
		break;
	case D10_2:
		val = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
		break;
	case TRAINING_PTN1:
		val = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
		break;
	case TRAINING_PTN2:
		val = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
		break;
	case DP_NONE:
		val = SCRAMBLING_ENABLE |
			LINK_QUAL_PATTERN_SET_DISABLE |
			SW_TRAINING_PATTERN_SET_NORMAL;
		break;
	default:
		return;
	}

	writel(val, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
}
/*
 * Update only the pre-emphasis field of one lane's link training
 * control register, leaving the other bits intact.
 */
static void exynos_dp_write_pre_emphasis(struct exynos_dp_device *dp,
					 u32 reg_offset, u32 level)
{
	u32 val;

	val = readl(dp->reg_base + reg_offset);
	val &= ~PRE_EMPHASIS_SET_MASK;
	val |= level << PRE_EMPHASIS_SET_SHIFT;
	writel(val, dp->reg_base + reg_offset);
}

/* Set the pre-emphasis level for lane 0. */
void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level)
{
	exynos_dp_write_pre_emphasis(dp, EXYNOS_DP_LN0_LINK_TRAINING_CTL,
				     level);
}

/* Set the pre-emphasis level for lane 1. */
void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level)
{
	exynos_dp_write_pre_emphasis(dp, EXYNOS_DP_LN1_LINK_TRAINING_CTL,
				     level);
}

/* Set the pre-emphasis level for lane 2. */
void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level)
{
	exynos_dp_write_pre_emphasis(dp, EXYNOS_DP_LN2_LINK_TRAINING_CTL,
				     level);
}

/* Set the pre-emphasis level for lane 3. */
void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level)
{
	exynos_dp_write_pre_emphasis(dp, EXYNOS_DP_LN3_LINK_TRAINING_CTL,
				     level);
}
/* Write the raw link training control value for lane 0. */
void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
					u32 training_lane)
{
	writel(training_lane, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
}

/* Write the raw link training control value for lane 1. */
void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
					u32 training_lane)
{
	writel(training_lane, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
}

/* Write the raw link training control value for lane 2. */
void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
					u32 training_lane)
{
	writel(training_lane, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
}

/* Write the raw link training control value for lane 3. */
void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
					u32 training_lane)
{
	writel(training_lane, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
}
/* Read back the raw link training control value for lane 0. */
u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp)
{
	return readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
}

/* Read back the raw link training control value for lane 1. */
u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp)
{
	return readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
}

/* Read back the raw link training control value for lane 2. */
u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp)
{
	return readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
}

/* Read back the raw link training control value for lane 3. */
u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp)
{
	return readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
}
/*
 * Pulse the PHY macro reset bit: assert MACRO_RST, hold it for at
 * least the minimum reset time, then deassert it.
 */
void exynos_dp_reset_macro(struct exynos_dp_device *dp)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_PHY_TEST);

	writel(val | MACRO_RST, dp->reg_base + EXYNOS_DP_PHY_TEST);

	/* 10 us is the minimum reset time. */
	usleep_range(10, 20);

	writel(val & ~MACRO_RST, dp->reg_base + EXYNOS_DP_PHY_TEST);
}
/*
 * Put the video path into a known state: clear the latched video
 * interrupt status bits, reset SYS_CTL_1..3 and set the resolution
 * change detection thresholds.
 */
void exynos_dp_init_video(struct exynos_dp_device *dp)
{
	/* Acknowledge any pending vsync/format/clock-change events */
	writel(VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG,
	       dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);

	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1);

	writel(CHA_CRI(4) | CHA_CTRL, dp->reg_base + EXYNOS_DP_SYS_CTL_2);

	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3);

	writel(VID_HRES_TH(2) | VID_VRES_TH(0),
	       dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
}
/*
 * Program the input video format from dp->video_info: dynamic range,
 * color depth and color space, plus the YCbCr coefficient standard.
 */
void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
{
	u32 val;

	/* Configure the input color depth, color space, dynamic range */
	val = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
	      (dp->video_info->color_depth << IN_BPC_SHIFT) |
	      (dp->video_info->color_space << IN_COLOR_F_SHIFT);
	writel(val, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);

	/* YCbCr coefficients: ITU709 when ycbcr_coeff is set, else ITU601 */
	val = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
	val &= ~IN_YC_COEFFI_MASK;
	if (dp->video_info->ycbcr_coeff)
		val |= IN_YC_COEFFI_ITU709;
	else
		val |= IN_YC_COEFFI_ITU601;
	writel(val, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
}
/*
 * Check that the input (slave-mode) video stream clock is detected and
 * stable.  Each status register is read, written back (presumably to
 * clear the latched status — confirm against the datasheet), then
 * re-sampled.
 *
 * Returns 0 when the clock is detected and not changing, -EINVAL
 * otherwise.
 */
int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp)
{
	u32 reg;

	/* Sample, write back, and re-read the detection status */
	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);

	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);

	if (!(reg & DET_STA)) {
		dev_dbg(dp->dev, "Input stream clock not detected.\n");
		return -EINVAL;
	}

	/* Same sequence for the clock-change status */
	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);

	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
	dev_dbg(dp->dev, "wait SYS_CTL_2.\n");

	if (reg & CHA_STA) {
		dev_dbg(dp->dev, "Input stream clk is changing\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Configure the video clock-recovery M/N values.  With REGISTER_M the
 * caller-supplied 24-bit M and N are latched (FIX_M_VID set); otherwise
 * the hardware calculates M itself and N is fixed at 0x8000.
 */
void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
			enum clock_recovery_m_value_type type,
			u32 m_value,
			u32 n_value)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);

	if (type == REGISTER_M) {
		writel(val | FIX_M_VID, dp->reg_base + EXYNOS_DP_SYS_CTL_4);

		/* 24-bit M value, one byte per register */
		writel(m_value & 0xff, dp->reg_base + EXYNOS_DP_M_VID_0);
		writel((m_value >> 8) & 0xff,
		       dp->reg_base + EXYNOS_DP_M_VID_1);
		writel((m_value >> 16) & 0xff,
		       dp->reg_base + EXYNOS_DP_M_VID_2);

		/* 24-bit N value, one byte per register */
		writel(n_value & 0xff, dp->reg_base + EXYNOS_DP_N_VID_0);
		writel((n_value >> 8) & 0xff,
		       dp->reg_base + EXYNOS_DP_N_VID_1);
		writel((n_value >> 16) & 0xff,
		       dp->reg_base + EXYNOS_DP_N_VID_2);
	} else {
		writel(val & ~FIX_M_VID, dp->reg_base + EXYNOS_DP_SYS_CTL_4);

		/* Auto-calculated M, N fixed at 0x008000 */
		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0);
		writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1);
		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2);
	}
}
/*
 * Choose where video timing comes from: captured from the input stream
 * (FORMAT_SEL clear) or taken from the registers (FORMAT_SEL set).
 */
void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
	if (type == VIDEO_TIMING_FROM_CAPTURE)
		val &= ~FORMAT_SEL;
	else
		val |= FORMAT_SEL;
	writel(val, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
}
/*
 * Select master or slave video mode in SOC_GENERAL_CTL.  The mode
 * field is cleared first, then either the master enable+mode bits or
 * the slave mode bit is set.
 */
void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
	val &= ~VIDEO_MODE_MASK;
	if (enable)
		val |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
	else
		val |= VIDEO_MODE_SLAVE_MODE;
	writel(val, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
}
/* Enable the video output by setting VIDEO_EN in VIDEO_CTL_1. */
void exynos_dp_start_video(struct exynos_dp_device *dp)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
	writel(val | VIDEO_EN, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
}

/*
 * Check whether a valid input video stream is present.  The latched
 * status is read, written back, then re-sampled before testing
 * STRM_VALID.  Returns 0 if the stream is valid, -EINVAL otherwise.
 */
int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
	writel(val, dp->reg_base + EXYNOS_DP_SYS_CTL_3);

	val = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
	if (!(val & STRM_VALID)) {
		dev_dbg(dp->dev, "Input video stream is not detected.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Configure the controller for slave video mode: disable the master
 * video function (keeping the slave one enabled), program interlace
 * and sync polarities from dp->video_info, and select SPDIF audio plus
 * slave video mode in SOC_GENERAL_CTL.
 */
void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
{
	u32 reg;

	/* Clear both function-disable bits, then re-disable master video */
	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
	reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N);
	reg |= MASTER_VID_FUNC_EN_N;
	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);

	/* Interlaced vs progressive scan (bit 2) */
	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
	reg &= ~INTERACE_SCAN_CFG;
	reg |= (dp->video_info->interlaced << 2);
	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);

	/* Vertical sync polarity (bit 1) */
	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
	reg &= ~VSYNC_POLARITY_CFG;
	reg |= (dp->video_info->v_sync_polarity << 1);
	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);

	/* Horizontal sync polarity (bit 0) */
	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
	reg &= ~HSYNC_POLARITY_CFG;
	reg |= (dp->video_info->h_sync_polarity << 0);
	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);

	reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
	writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
}
/* Enable main-link scrambling by clearing SCRAMBLING_DISABLE. */
void exynos_dp_enable_scrambling(struct exynos_dp_device *dp)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
	writel(val & ~SCRAMBLING_DISABLE,
	       dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
}

/* Disable main-link scrambling by setting SCRAMBLING_DISABLE. */
void exynos_dp_disable_scrambling(struct exynos_dp_device *dp)
{
	u32 val;

	val = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
	writel(val | SCRAMBLING_DISABLE,
	       dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
}
| gpl-2.0 |
liusen09003110-163-com/linux | drivers/net/wireless/rt2x00/rt2x00pci.c | 1799 | 5124 | /*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
Module: rt2x00pci
Abstract: rt2x00 generic pci device routines.
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00pci.h"
/*
* PCI driver handlers.
*/
/*
 * Release everything rt2x00pci_alloc_reg() set up.  Safe to call with
 * any subset allocated: kfree(NULL) is a no-op and the CSR mapping is
 * only unmapped when present.  Pointers are cleared so a second call
 * is harmless.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}

	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;
}

/*
 * Map BAR 0 (CSR registers) and allocate the EEPROM and RF register
 * caches.  On any failure everything already acquired is released and
 * -ENOMEM is returned.
 */
static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

	rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
	if (!rt2x00dev->csr.base)
		goto err;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto err;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto err;

	return 0;

err:
	rt2x00_probe_err("Failed to allocate registers\n");
	rt2x00pci_free_reg(rt2x00dev);
	return -ENOMEM;
}
/*
 * Generic PCI probe for rt2x00 devices: enables the PCI device, claims
 * its regions, sets up DMA, allocates the mac80211 hardware and the
 * rt2x00 device state, then hands off to rt2x00lib_probe_dev().
 * Resources are released in reverse order on any failure.
 *
 * Returns 0 on success or a negative errno.
 */
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
{
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;
	u16 chip;

	retval = pci_enable_device(pci_dev);
	if (retval) {
		rt2x00_probe_err("Enable device failed\n");
		return retval;
	}

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		rt2x00_probe_err("PCI request regions failed\n");
		goto exit_disable_device;
	}

	pci_set_master(pci_dev);

	/* MWI is an optimization only; failure is not fatal */
	if (pci_set_mwi(pci_dev))
		rt2x00_probe_err("MWI not available\n");

	/* The hardware requires 32-bit DMA addressing */
	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		rt2x00_probe_err("PCI DMA not supported\n");
		retval = -EIO;
		goto exit_release_regions;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_release_regions;
	}

	pci_set_drvdata(pci_dev, hw);

	/* Wire up the rt2x00 device state embedded in the hw priv area */
	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = ops->name;

	if (pci_is_pcie(pci_dev))
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
	else
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	/*
	 * Because rt3290 chip use different efuse offset to read efuse data.
	 * So before read efuse it need to indicate it is the
	 * rt3290 or not.
	 */
	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
	rt2x00dev->chip.rt = chip;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_release_regions:
	pci_release_regions(pci_dev);

exit_disable_device:
	pci_disable_device(pci_dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);
/*
 * PCI removal: tear down the rt2x00 library state, release the CSR
 * mapping and register caches, free the mac80211 hardware, and hand
 * the PCI device back (reverse order of rt2x00pci_probe()).
 */
void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);
#ifdef CONFIG_PM
/*
 * Suspend: quiesce the rt2x00 stack first, then save PCI config space,
 * disable the device and enter the power state chosen for @state.
 */
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

/*
 * Resume: return the device to D0 and re-enable it before restoring
 * config space and restarting the rt2x00 stack.
 */
int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev)) {
		rt2x00_err(rt2x00dev, "Failed to resume device\n");
		return -EIO;
	}

	pci_restore_state(pci_dev);
	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */
/*
* rt2x00pci module information.
*/
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");
| gpl-2.0 |
robacklin/ts4700 | arch/mips/math-emu/dp_logb.c | 1799 | 1446 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754dp.h"
/*
 * ieee754dp_logb(x): return the unbiased binary exponent of x as a
 * double.  Special cases:
 *   sNaN     -> invalid-operation exception via ieee754dp_nanxcpt()
 *   qNaN     -> returned unchanged
 *   +/- inf  -> infinity (sign argument 0)
 *   +/- 0    -> infinity with sign argument 1 (presumably -inf; confirm
 *               against the ieee754dp_inf() helper)
 *   denormal -> normalized first so xe holds the true exponent
 */
ieee754dp ieee754dp_logb(ieee754dp x)
{
	COMPXDP;	/* declares the xm/xe/xs/xc working fields for x */

	CLEARCX;	/* clear the sticky exception flags */

	EXPLODEXDP;	/* split x into class (xc), sign, exponent, mantissa */

	switch (xc) {
	case IEEE754_CLASS_SNAN:
		return ieee754dp_nanxcpt(x, "logb", x);
	case IEEE754_CLASS_QNAN:
		return x;
	case IEEE754_CLASS_INF:
		return ieee754dp_inf(0);
	case IEEE754_CLASS_ZERO:
		return ieee754dp_inf(1);
	case IEEE754_CLASS_DNORM:
		DPDNORMX;	/* normalize so xe becomes the true exponent */
		break;
	case IEEE754_CLASS_NORM:
		break;
	}
	/* Convert the (now true) exponent to a double result */
	return ieee754dp_fint(xe);
}
| gpl-2.0 |
leadpoizon/android_kernel_moto_shamu | drivers/usb/serial/f81232.c | 2055 | 9240 | /*
* Fintek F81232 USB to serial adaptor driver
*
* Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org)
* Copyright (C) 2012 Linux Foundation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* Devices handled by this driver (Fintek F81232, vendor 0x1934) */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x1934, 0x0706) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

/* Bits of the cached line_control value (DTR/RTS outputs) */
#define CONTROL_DTR			0x01
#define CONTROL_RTS			0x02

#define UART_STATE			0x08
#define UART_STATE_TRANSIENT_MASK	0x74

/* Bits of the cached line_status value (modem/line status inputs) */
#define UART_DCD			0x01
#define UART_DSR			0x02
#define UART_BREAK_ERROR		0x04
#define UART_RING			0x08
#define UART_FRAME_ERROR		0x10
#define UART_PARITY_ERROR		0x20
#define UART_OVERRUN_ERROR		0x40
#define UART_CTS			0x80

/* Per-port driver state, allocated in f81232_port_probe() */
struct f81232_private {
	spinlock_t lock;	/* protects the two fields below */
	u8 line_control;	/* last DTR/RTS state we requested */
	u8 line_status;		/* last modem/line status from the device */
};
/*
 * Parse a modem/line status report from the interrupt endpoint and
 * update priv->line_status.  Currently an empty stub: interrupt data
 * is received (see f81232_read_int_callback) but not yet decoded.
 */
static void f81232_update_line_status(struct usb_serial_port *port,
				      unsigned char *data,
				      unsigned int actual_length)
{
}

/*
 * Interrupt-in URB completion handler.  Feeds the received data to
 * f81232_update_line_status() and resubmits the URB, except when the
 * URB was deliberately torn down (unlink/disconnect).
 */
static void f81232_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port =	urb->context;
	unsigned char *data = urb->transfer_buffer;
	unsigned int actual_length = urb->actual_length;
	int status = urb->status;
	int retval;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		/* transient error: log it and resubmit anyway */
		dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	usb_serial_debug_data(&port->dev, __func__,
			      urb->actual_length, urb->transfer_buffer);

	f81232_update_line_status(port, data, actual_length);

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}
/*
 * Bulk-in completion: push received characters to the tty layer,
 * tagging them with any error condition latched in priv->line_status.
 * Transient status bits are consumed here, and any task sleeping in
 * TIOCMIWAIT is woken since the modem status may have changed.
 */
static void f81232_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct f81232_private *priv = usb_get_serial_port_data(port);
	unsigned char *data = urb->transfer_buffer;
	char tty_flag = TTY_NORMAL;
	unsigned long flags;
	u8 line_status;
	int i;

	/* update line status */
	spin_lock_irqsave(&priv->lock, flags);
	line_status = priv->line_status;
	priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
	spin_unlock_irqrestore(&priv->lock, flags);
	wake_up_interruptible(&port->port.delta_msr_wait);

	if (!urb->actual_length)
		return;

	/* break takes precedence over parity, */
	/* which takes precedence over framing errors */
	if (line_status & UART_BREAK_ERROR)
		tty_flag = TTY_BREAK;
	else if (line_status & UART_PARITY_ERROR)
		tty_flag = TTY_PARITY;
	else if (line_status & UART_FRAME_ERROR)
		tty_flag = TTY_FRAME;
	dev_dbg(&port->dev, "%s - tty_flag = %d\n", __func__, tty_flag);

	/* overrun is special, not associated with a char */
	if (line_status & UART_OVERRUN_ERROR)
		tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);

	/* On a console with sysrq, characters must be filtered one by one */
	if (port->port.console && port->sysrq) {
		for (i = 0; i < urb->actual_length; ++i)
			if (!usb_serial_handle_sysrq_char(port, data[i]))
				tty_insert_flip_char(&port->port, data[i],
						tty_flag);
	} else {
		tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
							urb->actual_length);
	}

	tty_flip_buffer_push(&port->port);
}
/*
 * Push the DTR/RTS state in @value to the device.
 * Currently a stub that reports success without touching the hardware.
 */
static int set_control_lines(struct usb_device *dev, u8 value)
{
	/* FIXME - Stubbed out for now */
	return 0;
}

/*
 * Assert or deassert a break condition on the line.
 * Currently a stub; no hardware request is sent.
 */
static void f81232_break_ctl(struct tty_struct *tty, int break_state)
{
	/* FIXME - Stubbed out for now */

	/*
	 * break_state = -1 to turn on break, and 0 to turn off break
	 * see drivers/char/tty_io.c to see it used.
	 * last_set_data_urb_value NEVER has the break bit set in it.
	 */
}
/*
 * Apply new termios settings to the hardware.  Currently a stub: no
 * request is sent to the device, and the old hardware settings are
 * copied back into the tty so userspace sees what is really in effect.
 */
static void f81232_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios)
{
	/* FIXME - Stubbed out for now */

	/* Don't change anything if nothing has changed */
	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
		return;

	/* Do the real work here... */
	if (old_termios)
		tty_termios_copy_hw(&tty->termios, old_termios);
}
/*
 * TIOCMGET: report modem control line state.
 * Currently a stub that always reports all lines clear.
 */
static int f81232_tiocmget(struct tty_struct *tty)
{
	/* FIXME - Stubbed out for now */
	return 0;
}

/*
 * TIOCMSET: change modem control lines.
 * Currently a stub that claims success without doing anything.
 */
static int f81232_tiocmset(struct tty_struct *tty,
			unsigned int set, unsigned int clear)
{
	/* FIXME - Stubbed out for now */
	return 0;
}
/*
 * Open callback: apply the current termios, start the interrupt-in
 * URB for status reports, then start generic bulk I/O.  The interrupt
 * URB is killed again if the generic open fails.
 */
static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int ret;

	/* Setup termios */
	if (tty)
		f81232_set_termios(tty, port, NULL);

	ret = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (ret) {
		dev_err(&port->dev, "%s - failed submitting interrupt urb,"
			" error %d\n", __func__, ret);
		return ret;
	}

	ret = usb_serial_generic_open(tty, port);
	if (ret) {
		usb_kill_urb(port->interrupt_in_urb);
		return ret;
	}

	port->port.drain_delay = 256;
	return 0;
}
/* Close callback: stop bulk I/O, then the interrupt status URB. */
static void f81232_close(struct usb_serial_port *port)
{
	usb_serial_generic_close(port);
	usb_kill_urb(port->interrupt_in_urb);
}
/*
 * Raise or drop DTR and RTS together.  The cached line_control value
 * is updated under the lock and the snapshot is pushed to the device
 * outside of it.
 */
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
{
	struct f81232_private *priv = usb_get_serial_port_data(port);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&priv->lock, flags);
	if (on)
		priv->line_control |= CONTROL_DTR | CONTROL_RTS;
	else
		priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
	ctrl = priv->line_control;
	spin_unlock_irqrestore(&priv->lock, flags);

	set_control_lines(port->serial->dev, ctrl);
}
/* Report carrier (DCD) from the cached line status: 1 if raised. */
static int f81232_carrier_raised(struct usb_serial_port *port)
{
	struct f81232_private *priv = usb_get_serial_port_data(port);

	return !!(priv->line_status & UART_DCD);
}
/*
 * TIOCMIWAIT: block until one of the modem status lines selected in
 * @arg (RNG/DSR/CD/CTS) changes, a signal arrives (-ERESTARTSYS) or
 * the device is disconnected (-EIO).
 *
 * NOTE(review): interruptible_sleep_on() is the deprecated racy sleep
 * primitive (a wakeup between the status snapshot and the sleep is
 * lost); newer kernels use wait_event_interruptible() here — consider
 * backporting that fix.
 */
static int f81232_tiocmiwait(struct tty_struct *tty, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;
	struct f81232_private *priv = usb_get_serial_port_data(port);
	unsigned long flags;
	unsigned int prevstatus;
	unsigned int status;
	unsigned int changed;

	/* Take a snapshot of the current status to diff against */
	spin_lock_irqsave(&priv->lock, flags);
	prevstatus = priv->line_status;
	spin_unlock_irqrestore(&priv->lock, flags);

	while (1) {
		interruptible_sleep_on(&port->port.delta_msr_wait);
		/* see if a signal did it */
		if (signal_pending(current))
			return -ERESTARTSYS;

		if (port->serial->disconnected)
			return -EIO;

		spin_lock_irqsave(&priv->lock, flags);
		status = priv->line_status;
		spin_unlock_irqrestore(&priv->lock, flags);

		changed = prevstatus ^ status;

		/* Return as soon as any requested line has toggled */
		if (((arg & TIOCM_RNG) && (changed & UART_RING)) ||
		    ((arg & TIOCM_DSR) && (changed & UART_DSR)) ||
		    ((arg & TIOCM_CD) && (changed & UART_DCD)) ||
		    ((arg & TIOCM_CTS) && (changed & UART_CTS))) {
			return 0;
		}
		prevstatus = status;
	}
	/* NOTREACHED */
	return 0;
}
/*
 * Port-specific ioctl handler.  Only TIOCGSERIAL is implemented: it
 * fills a struct serial_struct with fixed identification data for the
 * port.  Everything else falls through to -ENOIOCTLCMD so the core
 * can handle it.
 */
static int f81232_ioctl(struct tty_struct *tty,
			unsigned int cmd, unsigned long arg)
{
	struct serial_struct ser;
	struct usb_serial_port *port = tty->driver_data;

	dev_dbg(&port->dev, "%s (%d) cmd = 0x%04x\n", __func__,
		port->number, cmd);

	switch (cmd) {
	case TIOCGSERIAL:
		/* Report static port identification to userspace */
		memset(&ser, 0, sizeof ser);
		ser.type = PORT_16654;
		ser.line = port->serial->minor;
		ser.port = port->number;
		ser.baud_base = 460800;

		if (copy_to_user((void __user *)arg, &ser, sizeof ser))
			return -EFAULT;

		return 0;
	default:
		dev_dbg(&port->dev, "%s not supported = 0x%04x\n",
			__func__, cmd);
		break;
	}
	return -ENOIOCTLCMD;
}
/*
 * Per-port setup: allocate and initialize the private state and
 * attach it to the port.
 */
static int f81232_port_probe(struct usb_serial_port *port)
{
	struct f81232_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	usb_set_serial_port_data(port, priv);
	return 0;
}

/* Per-port teardown: release the private state. */
static int f81232_port_remove(struct usb_serial_port *port)
{
	kfree(usb_get_serial_port_data(port));

	return 0;
}
/* Driver description and callback table for the single F81232 port */
static struct usb_serial_driver f81232_device = {
	.driver = {
		.owner =		THIS_MODULE,
		.name =			"f81232",
	},
	.id_table =		id_table,
	.num_ports =		1,
	.bulk_in_size =		256,
	.bulk_out_size =	256,
	.open =			f81232_open,
	.close =		f81232_close,
	.dtr_rts = 		f81232_dtr_rts,
	.carrier_raised =	f81232_carrier_raised,
	.ioctl =		f81232_ioctl,
	.break_ctl =		f81232_break_ctl,
	.set_termios =		f81232_set_termios,
	.tiocmget =		f81232_tiocmget,
	.tiocmset =		f81232_tiocmset,
	.tiocmiwait =		f81232_tiocmiwait,
	.process_read_urb =	f81232_process_read_urb,
	.read_int_callback =	f81232_read_int_callback,
	.port_probe =		f81232_port_probe,
	.port_remove =		f81232_port_remove,
};

/* NULL-terminated list handed to module_usb_serial_driver() */
static struct usb_serial_driver * const serial_drivers[] = {
	&f81232_device,
	NULL,
};
module_usb_serial_driver(serial_drivers, id_table);

MODULE_DESCRIPTION("Fintek F81232 USB to serial adaptor driver");
/* Fixed: the author e-mail address was missing its closing '>' */
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
kingklick/kk-note2-kernel | fs/xfs/xfs_dir2_block.c | 2823 | 36168 | /*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
* Local function prototypes.
*/
static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first,
int last);
static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp);
static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp,
int *entno);
static int xfs_dir2_block_sort(const void *a, const void *b);
static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
/*
 * One-time startup routine called from xfs_init().
 *
 * Precompute the directory name hashes for "." and ".." so lookups
 * can compare against cached values instead of rehashing each time.
 */
void
xfs_dir_startup(void)
{
	xfs_dir_hash_dot = xfs_da_hashname((unsigned char *)".", 1);
	xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}
/*
 * Add an entry to a block directory.
 *
 * A block-format directory keeps data entries at the front of its single
 * block and a hash-sorted leaf-entry array plus a tail (count/stale) at
 * the back.  Adding an entry needs room for both the data entry and a
 * new leaf slot; stale leaf slots can be reused or compacted away.
 *
 * Returns 0 on success, ENOSPC if there is no room and no space
 * reservation was made (otherwise the block is converted to leaf format
 * and the add retried there), or another errno-style error.
 */
int						/* error */
xfs_dir2_block_addname(
	xfs_da_args_t		*args)		/* directory op arguments */
{
	xfs_dir2_data_free_t	*bf;		/* bestfree table in block */
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	int			compact;	/* need to compact leaf ents */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* directory inode */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	int			error;		/* error return value */
	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
	xfs_dahash_t		hash;		/* hash value of found entry */
	int			high;		/* high index for binary srch */
	int			highstale;	/* high stale index */
	int			lfloghigh=0;	/* last final leaf to log */
	int			lfloglow=0;	/* first final leaf to log */
	int			len;		/* length of the new entry */
	int			low;		/* low index for binary srch */
	int			lowstale;	/* low stale index */
	int			mid=0;		/* midpoint for binary srch */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log header */
	int			needscan;	/* need to rescan freespace */
	__be16			*tagp;		/* pointer to tag value */
	xfs_trans_t		*tp;		/* transaction structure */
	trace_xfs_dir2_block_addname(args);
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the (one and only) directory block into dabuf bp.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	/*
	 * Check the magic number, corrupted if wrong.
	 */
	if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
		XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
				     XFS_ERRLEVEL_LOW, mp, block);
		xfs_da_brelse(tp, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/* Space needed by the new data entry (header + name, rounded). */
	len = xfs_dir2_data_entsize(args->namelen);
	/*
	 * Set up pointers to parts of the block.
	 */
	bf = block->hdr.bestfree;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * No stale entries?  Need space for entry and new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then can't do this add without cleaning up:
		 * the space before the first leaf entry needs to be free so it
		 * can be expanded to hold the pointer to the new entry.
		 */
		if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			dup = enddup = NULL;
		/*
		 * Check out the biggest freespace and see if it's the same one.
		 */
		else {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + be16_to_cpu(bf[0].offset));
			if (dup == enddup) {
				/*
				 * It is the biggest freespace, is it too small
				 * to hold the new leaf too?
				 */
				if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) {
					/*
					 * Yes, we use the second-largest
					 * entry instead if it works.
					 */
					if (be16_to_cpu(bf[1].length) >= len)
						dup = (xfs_dir2_data_unused_t *)
						      ((char *)block +
						       be16_to_cpu(bf[1].offset));
					else
						dup = NULL;
				}
			} else {
				/*
				 * Not the same free entry,
				 * just check its length.
				 */
				if (be16_to_cpu(dup->length) < len) {
					dup = NULL;
				}
			}
		}
		compact = 0;
	}
	/*
	 * If there are stale entries we'll use one for the leaf.
	 * Is the biggest entry enough to avoid compaction?
	 */
	else if (be16_to_cpu(bf[0].length) >= len) {
		dup = (xfs_dir2_data_unused_t *)
		      ((char *)block + be16_to_cpu(bf[0].offset));
		compact = 0;
	}
	/*
	 * Will need to compact to make this work.
	 */
	else {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then the data will go where the
		 * leaf data starts now, if it works at all.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
			    (uint)sizeof(*blp) < len)
				dup = NULL;
		} else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
			dup = NULL;
		else
			dup = (xfs_dir2_data_unused_t *)blp;
		compact = 1;
	}
	/*
	 * If this isn't a real add, we're done with the buffer.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		xfs_da_brelse(tp, bp);
	/*
	 * If we don't have space for the new entry & leaf ...
	 */
	if (!dup) {
		/*
		 * Not trying to actually do anything, or don't have
		 * a space reservation: return no-space.
		 */
		if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
			return XFS_ERROR(ENOSPC);
		/*
		 * Convert to the next larger format.
		 * Then add the new entry in that format.
		 */
		error = xfs_dir2_block_to_leaf(args, bp);
		xfs_da_buf_done(bp);
		if (error)
			return error;
		return xfs_dir2_leaf_addname(args);
	}
	/*
	 * Just checking, and it would work, so say so.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		return 0;
	needlog = needscan = 0;
	/*
	 * If need to compact the leaf entries, do it now.
	 * Leave the highest-numbered stale entry stale.
	 * XXX should be the one closest to mid but mid is not yet computed.
	 */
	if (compact) {
		int	fromidx;		/* source leaf index */
		int	toidx;			/* target leaf index */
		/* Walk backwards, sliding live entries over stale slots. */
		for (fromidx = toidx = be32_to_cpu(btp->count) - 1,
			highstale = lfloghigh = -1;
		     fromidx >= 0;
		     fromidx--) {
			if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
				if (highstale == -1)
					highstale = toidx;
				else {
					if (lfloghigh == -1)
						lfloghigh = toidx;
					continue;
				}
			}
			if (fromidx < toidx)
				blp[toidx] = blp[fromidx];
			toidx--;
		}
		lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
		lfloghigh -= be32_to_cpu(btp->stale) - 1;
		be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
		/* The space vacated by the removed leaf slots becomes free. */
		xfs_dir2_data_make_free(tp, bp,
			(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
			(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
			&needlog, &needscan);
		blp += be32_to_cpu(btp->stale) - 1;
		btp->stale = cpu_to_be32(1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
			needscan = 0;
		}
	}
	/*
	 * Set leaf logging boundaries to impossible state.
	 * For the no-stale case they're set explicitly.
	 */
	else if (btp->stale) {
		lfloglow = be32_to_cpu(btp->count);
		lfloghigh = -1;
	}
	/*
	 * Find the slot that's first lower than our hash value, -1 if none.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
		mid--;
	}
	/*
	 * No stale entries, will use enddup space to hold new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Mark the space needed for the new leaf entry, now in use.
		 */
		xfs_dir2_data_use_free(tp, bp, enddup,
			(xfs_dir2_data_aoff_t)
			((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
			 sizeof(*blp)),
			(xfs_dir2_data_aoff_t)sizeof(*blp),
			&needlog, &needscan);
		/*
		 * Update the tail (entry count).
		 */
		be32_add_cpu(&btp->count, 1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
				&needlog);
			needscan = 0;
		}
		/*
		 * Adjust pointer to the first leaf entry, we're about to move
		 * the table up one to open up space for the new leaf entry.
		 * Then adjust our index to match.
		 */
		blp--;
		mid++;
		if (mid)
			memmove(blp, &blp[1], mid * sizeof(*blp));
		lfloglow = 0;
		lfloghigh = mid;
	}
	/*
	 * Use a stale leaf for our new entry.
	 */
	else {
		/* Find the nearest stale slot below and above mid. */
		for (lowstale = mid;
		     lowstale >= 0 &&
			be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
		     lowstale--)
			continue;
		for (highstale = mid + 1;
		     highstale < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
			(lowstale < 0 || mid - lowstale > highstale - mid);
		     highstale++)
			continue;
		/*
		 * Move entries toward the low-numbered stale entry.
		 */
		if (lowstale >= 0 &&
		    (highstale == be32_to_cpu(btp->count) ||
		     mid - lowstale <= highstale - mid)) {
			if (mid - lowstale)
				memmove(&blp[lowstale], &blp[lowstale + 1],
					(mid - lowstale) * sizeof(*blp));
			lfloglow = MIN(lowstale, lfloglow);
			lfloghigh = MAX(mid, lfloghigh);
		}
		/*
		 * Move entries toward the high-numbered stale entry.
		 */
		else {
			ASSERT(highstale < be32_to_cpu(btp->count));
			mid++;
			if (highstale - mid)
				memmove(&blp[mid + 1], &blp[mid],
					(highstale - mid) * sizeof(*blp));
			lfloglow = MIN(mid, lfloglow);
			lfloghigh = MAX(highstale, lfloghigh);
		}
		be32_add_cpu(&btp->stale, -1);
	}
	/*
	 * Point to the new data entry.
	 */
	dep = (xfs_dir2_data_entry_t *)dup;
	/*
	 * Fill in the leaf entry.
	 */
	blp[mid].hashval = cpu_to_be32(args->hashval);
	blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
	/*
	 * Mark space for the data entry used.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
	/*
	 * Create the new data entry.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	dep->namelen = args->namelen;
	memcpy(dep->name, args->name, args->namelen);
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	/*
	 * Clean up the bestfree array and log the header, tail, and entry.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_log_entry(tp, bp, dep);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
/*
 * Readdir for block directories.
 *
 * Walks the data area of the single directory block, skipping unused
 * (free) regions and entries before the offset encoded in *offset, and
 * feeds each live entry to @filldir.  On return *offset is either the
 * cookie of the entry that did not fit, or a cookie in non-existent
 * block 1 meaning "all done".  Returns 0 or an errno-style error.
 */
int						/* error */
xfs_dir2_block_getdents(
	xfs_inode_t		*dp,		/* incore inode */
	void			*dirent,
	xfs_off_t		*offset,
	filldir_t		filldir)
{
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	char			*endptr;	/* end of the data entries */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	char			*ptr;		/* current data entry */
	int			wantoff;	/* starting block offset */
	xfs_off_t		cook;		/* directory cookie for entry */
	mp = dp->i_mount;
	/*
	 * If the block number in the offset is out of range, we're done.
	 */
	if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) {
		return 0;
	}
	/*
	 * Can't read the block, give up, else get dabuf in bp.
	 */
	error = xfs_da_read_buf(NULL, dp, mp->m_dirdatablk, -1,
				&bp, XFS_DATA_FORK);
	if (error)
		return error;
	ASSERT(bp != NULL);
	/*
	 * Extract the byte offset we start at from the seek pointer.
	 * We'll skip entries before this.
	 */
	wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
	block = bp->data;
	xfs_dir2_data_check(dp, bp);
	/*
	 * Set up values for the loop.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	ptr = (char *)block->u;
	endptr = (char *)xfs_dir2_block_leaf_p(btp);
	/*
	 * Loop over the data portion of the block.
	 * Each object is a real entry (dep) or an unused one (dup).
	 */
	while (ptr < endptr) {
		dup = (xfs_dir2_data_unused_t *)ptr;
		/*
		 * Unused, skip it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			ptr += be16_to_cpu(dup->length);
			continue;
		}
		dep = (xfs_dir2_data_entry_t *)ptr;
		/*
		 * Bump pointer for the next iteration.
		 */
		ptr += xfs_dir2_data_entsize(dep->namelen);
		/*
		 * The entry is before the desired starting point, skip it.
		 */
		if ((char *)dep - (char *)block < wantoff)
			continue;
		cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
					    (char *)dep - (char *)block);
		/*
		 * If it didn't fit, set the final offset to here & return.
		 */
		if (filldir(dirent, (char *)dep->name, dep->namelen,
			    cook & 0x7fffffff, be64_to_cpu(dep->inumber),
			    DT_UNKNOWN)) {
			*offset = cook & 0x7fffffff;
			xfs_da_brelse(NULL, bp);
			return 0;
		}
	}
	/*
	 * Reached the end of the block.
	 * Set the offset to a non-existent block 1 and return.
	 */
	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
			0x7fffffff;
	xfs_da_brelse(NULL, bp);
	return 0;
}
/*
 * Log (mark dirty in the transaction) a contiguous run of leaf entries
 * in the directory block, from index first through index last inclusive.
 */
static void
xfs_dir2_block_log_leaf(
	xfs_trans_t		*tp,		/* transaction structure */
	xfs_dabuf_t		*bp,		/* block buffer */
	int			first,		/* index of first logged leaf */
	int			last)		/* index of last logged leaf */
{
	xfs_dir2_block_t	*hdr = bp->data;
	xfs_dir2_block_tail_t	*tail;
	xfs_dir2_leaf_entry_t	*lep;
	uint			lo;
	uint			hi;

	tail = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
	lep = xfs_dir2_block_leaf_p(tail);
	/* Byte range covering entries [first, last] within the block. */
	lo = (uint)((char *)&lep[first] - (char *)hdr);
	hi = (uint)((char *)&lep[last + 1] - (char *)hdr - 1);
	xfs_da_log_buf(tp, bp, lo, hi);
}
/*
 * Log (mark dirty in the transaction) the tail structure of the
 * directory block -- the count/stale record at the very end.
 */
static void
xfs_dir2_block_log_tail(
	xfs_trans_t		*tp,		/* transaction structure */
	xfs_dabuf_t		*bp)		/* block buffer */
{
	xfs_dir2_block_t	*hdr = bp->data;
	xfs_dir2_block_tail_t	*tail;
	uint			lo;
	uint			hi;

	tail = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
	/* Byte range of the tail structure within the block. */
	lo = (uint)((char *)tail - (char *)hdr);
	hi = (uint)((char *)(tail + 1) - (char *)hdr - 1);
	xfs_da_log_buf(tp, bp, lo, hi);
}
/*
 * Look up an entry in the block.  This is the external routine,
 * xfs_dir2_block_lookup_int does the real work.
 *
 * On success fills in args->inumber (and CI name data via
 * xfs_dir_cilookup_result) and releases the block buffer.
 * Returns an errno-style error, e.g. ENOENT if not found.
 */
int						/* error */
xfs_dir2_block_lookup(
	xfs_da_args_t		*args)		/* dir lookup arguments */
{
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			ent;		/* entry index */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	trace_xfs_dir2_block_lookup(args);
	/*
	 * Get the buffer, look up the entry.
	 * If not found (ENOENT) then return, have no buffer.
	 */
	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent)))
		return error;
	dp = args->dp;
	mp = dp->i_mount;
	block = bp->data;
	xfs_dir2_data_check(dp, bp);
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Get the offset from the leaf entry, to point to the data.
	 */
	dep = (xfs_dir2_data_entry_t *)((char *)block +
		xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
	/*
	 * Fill in inode number, CI name if appropriate, release the block.
	 */
	args->inumber = be64_to_cpu(dep->inumber);
	error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
	xfs_da_brelse(args->trans, bp);
	return XFS_ERROR(error);
}
/*
 * Internal block lookup routine.
 *
 * Binary-searches the hash-sorted leaf array for args->hashval, then
 * scans forward through all entries sharing that hash comparing names.
 * On an exact (or first case-insensitive) match, returns the held
 * buffer in *bpp and the leaf index in *entno; the caller releases the
 * buffer.  Returns ENOENT (buffer released) when no match is found.
 */
static int					/* error */
xfs_dir2_block_lookup_int(
	xfs_da_args_t		*args,		/* dir lookup arguments */
	xfs_dabuf_t		**bpp,		/* returned block buffer */
	int			*entno)		/* returned entry number */
{
	xfs_dir2_dataptr_t	addr;		/* data entry address */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			error;		/* error return value */
	xfs_dahash_t		hash;		/* found hash value */
	int			high;		/* binary search high index */
	int			low;		/* binary search low index */
	int			mid;		/* binary search current idx */
	xfs_mount_t		*mp;		/* filesystem mount point */
	xfs_trans_t		*tp;		/* transaction pointer */
	enum xfs_dacmp		cmp;		/* comparison result */
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the buffer, return error if we can't get it.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	xfs_dir2_data_check(dp, bp);
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Loop doing a binary search for our hash value.
	 * Find our entry, ENOENT if it's not there.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) {
		ASSERT(low <= high);
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
		if (low > high) {
			/* Hash not present at all: only OK for lookups. */
			ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
			xfs_da_brelse(tp, bp);
			return XFS_ERROR(ENOENT);
		}
	}
	/*
	 * Back up to the first one with the right hash value.
	 */
	while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) {
		mid--;
	}
	/*
	 * Now loop forward through all the entries with the
	 * right hash value looking for our name.
	 */
	do {
		/* Stale (removed) leaf slots are skipped. */
		if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR)
			continue;
		/*
		 * Get pointer to the entry from the leaf.
		 */
		dep = (xfs_dir2_data_entry_t *)
			((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
		/*
		 * Compare name and if it's an exact match, return the index
		 * and buffer. If it's the first case-insensitive match, store
		 * the index and buffer and continue looking for an exact match.
		 */
		cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
		if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
			args->cmpresult = cmp;
			*bpp = bp;
			*entno = mid;
			if (cmp == XFS_CMP_EXACT)
				return 0;
		}
	} while (++mid < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[mid].hashval) == hash);
	ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
	/*
	 * Here, we can only be doing a lookup (not a rename or replace).
	 * If a case-insensitive match was found earlier, return success.
	 */
	if (args->cmpresult == XFS_CMP_CASE)
		return 0;
	/*
	 * No match, release the buffer and return ENOENT.
	 */
	xfs_da_brelse(tp, bp);
	return XFS_ERROR(ENOENT);
}
/*
 * Remove an entry from a block format directory.
 * If that makes the block small enough to fit in shortform, transform it.
 *
 * The data entry's space is returned to the freespace map and its leaf
 * slot is marked stale (address = null) rather than physically removed.
 * Returns 0 or an errno-style error.
 */
int						/* error */
xfs_dir2_block_removename(
	xfs_da_args_t		*args)		/* directory operation args */
{
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf pointer */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			ent;		/* block leaf entry index */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to fixup bestfree */
	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
	int			size;		/* shortform size */
	xfs_trans_t		*tp;		/* transaction pointer */
	trace_xfs_dir2_block_removename(args);
	/*
	 * Look up the entry in the block.  Gets the buffer and entry index.
	 * It will always be there, the vnodeops level does a lookup first.
	 */
	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) {
		return error;
	}
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	block = bp->data;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Point to the data entry using the leaf entry.
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
	/*
	 * Mark the data entry's space free.
	 */
	needlog = needscan = 0;
	xfs_dir2_data_make_free(tp, bp,
		(xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
		xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
	/*
	 * Fix up the block tail.
	 */
	be32_add_cpu(&btp->stale, 1);
	xfs_dir2_block_log_tail(tp, bp);
	/*
	 * Remove the leaf entry by marking it stale.
	 */
	blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
	xfs_dir2_block_log_leaf(tp, bp, ent, ent);
	/*
	 * Fix up bestfree, log the header if necessary.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_data_check(dp, bp);
	/*
	 * See if the size as a shortform is good enough.
	 */
	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
	    XFS_IFORK_DSIZE(dp)) {
		xfs_da_buf_done(bp);
		return 0;
	}
	/*
	 * If it works, do the conversion.
	 */
	return xfs_dir2_block_to_sf(args, bp, size, &sfh);
}
/*
 * Replace an entry in a V2 block directory.
 * Change the inode number to the new value.
 *
 * Only the inode number field of the existing data entry is rewritten;
 * the name, leaf array, and freespace map are untouched.
 * Returns 0 or an errno-style error from the lookup.
 */
int						/* error */
xfs_dir2_block_replace(
	xfs_da_args_t		*args)		/* directory operation args */
{
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			ent;		/* leaf entry index */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	trace_xfs_dir2_block_replace(args);
	/*
	 * Lookup the entry in the directory.  Get buffer and entry index.
	 * This will always succeed since the caller has already done a lookup.
	 */
	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) {
		return error;
	}
	dp = args->dp;
	mp = dp->i_mount;
	block = bp->data;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Point to the data entry we need to change.
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
	/* Replacing with the same inumber would be a caller bug. */
	ASSERT(be64_to_cpu(dep->inumber) != args->inumber);
	/*
	 * Change the inode number to the new value.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	xfs_dir2_data_log_entry(args->trans, bp, dep);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
/*
* Qsort comparison routine for the block leaf entries.
*/
static int /* sort order */
xfs_dir2_block_sort(
const void *a, /* first leaf entry */
const void *b) /* second leaf entry */
{
const xfs_dir2_leaf_entry_t *la; /* first leaf entry */
const xfs_dir2_leaf_entry_t *lb; /* second leaf entry */
la = a;
lb = b;
return be32_to_cpu(la->hashval) < be32_to_cpu(lb->hashval) ? -1 :
(be32_to_cpu(la->hashval) > be32_to_cpu(lb->hashval) ? 1 : 0);
}
/*
 * Convert a V2 leaf directory to a V2 block directory if possible.
 *
 * First trims trailing empty data blocks, then checks that the free
 * space at the end of the remaining data block can hold the leaf entry
 * array plus tail.  If so, rewrites the data block in block format,
 * frees the old leaf block, and finally tries a further conversion to
 * shortform.  Returns 0 (including when conversion is simply not
 * possible) or an errno-style error.  Buffers lbp/dbp are consumed.
 */
int						/* error */
xfs_dir2_leaf_to_block(
	xfs_da_args_t		*args,		/* operation arguments */
	xfs_dabuf_t		*lbp,		/* leaf buffer */
	xfs_dabuf_t		*dbp)		/* data buffer */
{
	__be16			*bestsp;	/* leaf bests table */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
	int			error;		/* error return value */
	int			from;		/* leaf from index */
	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
	xfs_mount_t		*mp;		/* file system mount point */
	int			needlog;	/* need to log data header */
	int			needscan;	/* need to scan for bestfree */
	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
	int			size;		/* bytes used */
	__be16			*tagp;		/* end of entry (tag) */
	int			to;		/* block/leaf to index */
	xfs_trans_t		*tp;		/* transaction pointer */
	trace_xfs_dir2_leaf_to_block(args);
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	leaf = lbp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
	/*
	 * If there are data blocks other than the first one, take this
	 * opportunity to remove trailing empty data blocks that may have
	 * been left behind during no-space-reservation operations.
	 * These will show up in the leaf bests table.
	 */
	while (dp->i_d.di_size > mp->m_dirblksize) {
		bestsp = xfs_dir2_leaf_bests_p(ltp);
		/* A best equal to (blocksize - header) means a fully empty block. */
		if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
		    mp->m_dirblksize - (uint)sizeof(block->hdr)) {
			if ((error =
			    xfs_dir2_leaf_trim_data(args, lbp,
				    (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
				goto out;
		} else {
			error = 0;
			goto out;
		}
	}
	/*
	 * Read the data block if we don't already have it, give up if it fails.
	 */
	if (dbp == NULL &&
	    (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
		    XFS_DATA_FORK))) {
		goto out;
	}
	block = dbp->data;
	ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
	/*
	 * Size of the "leaf" area in the block.
	 */
	size = (uint)sizeof(block->tail) +
	       (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	/*
	 * Look at the last data entry.
	 */
	tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
	dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
	/*
	 * If it's not free or is too short we can't do it.
	 */
	if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
	    be16_to_cpu(dup->length) < size) {
		error = 0;
		goto out;
	}
	/*
	 * Start converting it to block form.
	 */
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	needlog = 1;
	needscan = 0;
	/*
	 * Use up the space at the end of the block (blp/btp).
	 */
	xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
		&needlog, &needscan);
	/*
	 * Initialize the block tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	btp->stale = 0;
	xfs_dir2_block_log_tail(tp, dbp);
	/*
	 * Initialize the block leaf area.  We compact out stale entries.
	 */
	lep = xfs_dir2_block_leaf_p(btp);
	for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
		if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
			continue;
		lep[to++] = leaf->ents[from];
	}
	ASSERT(to == be32_to_cpu(btp->count));
	xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
	/*
	 * Scan the bestfree if we need it and log the data block header.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, dbp);
	/*
	 * Pitch the old leaf block.
	 */
	error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
	lbp = NULL;
	if (error) {
		goto out;
	}
	/*
	 * Now see if the resulting block can be shrunken to shortform.
	 */
	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
	    XFS_IFORK_DSIZE(dp)) {
		error = 0;
		goto out;
	}
	return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
out:
	if (lbp)
		xfs_da_buf_done(lbp);
	if (dbp)
		xfs_da_buf_done(dbp);
	return error;
}
/*
 * Convert the shortform directory to block form.
 *
 * Copies the inline (shortform) directory into a temporary buffer,
 * releases the inline fork, allocates and initializes directory block 0,
 * then lays out ".", "..", and each shortform entry at its preserved
 * offset (inserting unused-space records for the gaps).  Finally the
 * leaf array is sorted by hash and logged.  Returns 0 or an errno-style
 * error.
 */
int						/* error */
xfs_dir2_sf_to_block(
	xfs_da_args_t		*args)		/* operation arguments */
{
	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	char			*buf;		/* sf buffer */
	int			buf_len;	/* length of sf copy */
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	int			dummy;		/* trash */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	int			endoffset;	/* end of data objects */
	int			error;		/* error return value */
	int			i;		/* index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to scan block freespc */
	int			newoffset;	/* offset from current entry */
	int			offset;		/* target block offset */
	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
	xfs_dir2_sf_t		*sfp;		/* shortform structure */
	__be16			*tagp;		/* end of data entry */
	xfs_trans_t		*tp;		/* transaction pointer */
	struct xfs_name		name;		/* name for hash computation */
	trace_xfs_dir2_sf_to_block(args);
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
	/*
	 * Bomb out if the shortform directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return XFS_ERROR(EIO);
	}
	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
	ASSERT(dp->i_df.if_u1.if_data != NULL);
	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
	/*
	 * Copy the directory into the stack buffer.
	 * Then pitch the incore inode data so we can make extents.
	 */
	buf_len = dp->i_df.if_bytes;
	buf = kmem_alloc(buf_len, KM_SLEEP);
	memcpy(buf, sfp, buf_len);
	xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
	dp->i_d.di_size = 0;
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	/*
	 * Reset pointer - old sfp is gone.
	 */
	sfp = (xfs_dir2_sf_t *)buf;
	/*
	 * Add block 0 to the inode.
	 */
	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
	if (error) {
		kmem_free(buf);
		return error;
	}
	/*
	 * Initialize the data block.
	 */
	error = xfs_dir2_data_init(args, blkno, &bp);
	if (error) {
		kmem_free(buf);
		return error;
	}
	block = bp->data;
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	/*
	 * Compute size of block "tail" area.
	 */
	i = (uint)sizeof(*btp) +
	    (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
	/*
	 * The whole thing is initialized to free by the init routine.
	 * Say we're using the leaf and tail area.
	 */
	dup = (xfs_dir2_data_unused_t *)block->u;
	needlog = needscan = 0;
	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
		&needscan);
	ASSERT(needscan == 0);
	/*
	 * Fill in the tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(sfp->hdr.count + 2);	/* ., .. */
	btp->stale = 0;
	blp = xfs_dir2_block_leaf_p(btp);
	endoffset = (uint)((char *)blp - (char *)block);
	/*
	 * Remove the freespace, we'll manage it.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		be16_to_cpu(dup->length), &needlog, &needscan);
	/*
	 * Create entry for .
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
	dep->inumber = cpu_to_be64(dp->i_ino);
	dep->namelen = 1;
	dep->name[0] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	/*
	 * Create entry for ..
	 */
	dep = (xfs_dir2_data_entry_t *)
		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
	dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
	dep->namelen = 2;
	dep->name[0] = dep->name[1] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	offset = XFS_DIR2_DATA_FIRST_OFFSET;
	/*
	 * Loop over existing entries, stuff them in.
	 */
	if ((i = 0) == sfp->hdr.count)
		sfep = NULL;
	else
		sfep = xfs_dir2_sf_firstentry(sfp);
	/*
	 * Need to preserve the existing offset values in the sf directory.
	 * Insert holes (unused entries) where necessary.
	 */
	while (offset < endoffset) {
		/*
		 * sfep is null when we reach the end of the list.
		 */
		if (sfep == NULL)
			newoffset = endoffset;
		else
			newoffset = xfs_dir2_sf_get_offset(sfep);
		/*
		 * There should be a hole here, make one.
		 */
		if (offset < newoffset) {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + offset);
			dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			dup->length = cpu_to_be16(newoffset - offset);
			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
				((char *)dup - (char *)block));
			xfs_dir2_data_log_unused(tp, bp, dup);
			(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
				dup, &dummy);
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/*
		 * Copy a real entry.
		 */
		dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
		dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
				xfs_dir2_sf_inumberp(sfep)));
		dep->namelen = sfep->namelen;
		memcpy(dep->name, sfep->name, dep->namelen);
		tagp = xfs_dir2_data_entry_tag_p(dep);
		*tagp = cpu_to_be16((char *)dep - (char *)block);
		xfs_dir2_data_log_entry(tp, bp, dep);
		name.name = sfep->name;
		name.len = sfep->namelen;
		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
							hashname(&name));
		blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
						 (char *)dep - (char *)block));
		offset = (int)((char *)(tagp + 1) - (char *)block);
		if (++i == sfp->hdr.count)
			sfep = NULL;
		else
			sfep = xfs_dir2_sf_nextentry(sfp, sfep);
	}
	/* Done with the temporary buffer */
	kmem_free(buf);
	/*
	 * Sort the leaf entries by hash value.
	 */
	xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
	/*
	 * Log the leaf entry area and tail.
	 * Already logged the header in data_init, ignore needlog.
	 */
	ASSERT(needscan == 0);
	xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
| gpl-2.0 |
kasperhettinga/p4wifi_stock | security/apparmor/audit.c | 3335 | 4642 | /*
* AppArmor security module
*
* This file contains AppArmor auditing functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/audit.h>
#include <linux/socket.h>
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/policy.h"
/*
 * Human-readable operation names, indexed by sa->aad.op in audit_pre().
 * NOTE(review): order must stay in sync with the operation enum in
 * include/audit.h -- confirm when adding entries.
 */
const char *op_table[] = {
    "null",
    "sysctl",
    "capable",
    "unlink",
    "mkdir",
    "rmdir",
    "mknod",
    "truncate",
    "link",
    "symlink",
    "rename_src",
    "rename_dest",
    "chmod",
    "chown",
    "getattr",
    "open",
    "file_perm",
    "file_lock",
    "file_mmap",
    "file_mprotect",
    "create",
    "post_create",
    "bind",
    "connect",
    "listen",
    "accept",
    "sendmsg",
    "recvmsg",
    "getsockname",
    "getpeername",
    "getsockopt",
    "setsockopt",
    "socket_shutdown",
    "ptrace",
    "exec",
    "change_hat",
    "change_profile",
    "change_onexec",
    "setprocattr",
    "setrlimit",
    "profile_replace",
    "profile_load",
    "profile_remove"
};
/*
 * Names of the profile audit modes.
 * NOTE(review): order must match the audit mode enum (AUDIT_NORMAL,
 * AUDIT_QUIET_DENIED, AUDIT_QUIET, ...) -- confirm against include/audit.h.
 */
const char *audit_mode_names[] = {
    "normal",
    "quiet_denied",
    "quiet",
    "noquiet",
    "all"
};
/* Message class names, indexed by sa->aad.type in audit_pre(). */
static char *aa_audit_type[] = {
    "AUDIT",
    "ALLOWED",
    "DENIED",
    "HINT",
    "STATUS",
    "ERROR",
    "KILLED"
};
/*
* Currently AppArmor auditing is fed straight into the audit framework.
*
* TODO:
* netlink interface for complain mode
* user auditing, - send user auditing to netlink interface
* system control of whether user audit messages go to system log
*/
/**
 * audit_pre - record common AppArmor audit data
 * @ab: audit buffer to fill (NOT NULL)
 * @ca: audit structure containing data to audit (NOT NULL)
 *
 * Record common AppArmor audit data from @ca.  Installed as the
 * lsm_pre_audit hook by aa_audit_msg(), so it runs before any
 * type-specific callback.
 */
static void audit_pre(struct audit_buffer *ab, void *ca)
{
    struct common_audit_data *sa = ca;
    /* events without an explicit task are attributed to current */
    struct task_struct *tsk = sa->tsk ? sa->tsk : current;
    if (aa_g_audit_header) {
        audit_log_format(ab, "apparmor=");
        audit_log_string(ab, aa_audit_type[sa->aad.type]);
    }
    if (sa->aad.op) {
        audit_log_format(ab, " operation=");
        audit_log_string(ab, op_table[sa->aad.op]);
    }
    if (sa->aad.info) {
        audit_log_format(ab, " info=");
        audit_log_string(ab, sa->aad.info);
        if (sa->aad.error)
            audit_log_format(ab, " error=%d", sa->aad.error);
    }
    if (sa->aad.profile) {
        struct aa_profile *profile = sa->aad.profile;
        pid_t pid;
        /* real_parent is RCU protected */
        rcu_read_lock();
        pid = tsk->real_parent->pid;
        rcu_read_unlock();
        audit_log_format(ab, " parent=%d", pid);
        /* only log the namespace when the profile is not in the root ns */
        if (profile->ns != root_ns) {
            audit_log_format(ab, " namespace=");
            audit_log_untrustedstring(ab, profile->ns->base.hname);
        }
        audit_log_format(ab, " profile=");
        audit_log_untrustedstring(ab, profile->base.hname);
    }
    if (sa->aad.name) {
        audit_log_format(ab, " name=");
        audit_log_untrustedstring(ab, sa->aad.name);
    }
}
/**
 * aa_audit_msg - Log a message to the audit subsystem
 * @type: audit type for the message (AUDIT_APPARMOR_* value)
 * @sa: audit event structure (NOT NULL)
 * @cb: optional callback fn for type specific fields (MAYBE NULL)
 */
void aa_audit_msg(int type, struct common_audit_data *sa,
                  void (*cb) (struct audit_buffer *, void *))
{
    sa->aad.type = type;
    /* hook AppArmor's formatting into the common LSM audit path */
    sa->lsm_pre_audit = audit_pre;
    sa->lsm_post_audit = cb;
    common_lsm_audit(sa);
}
/**
 * aa_audit - Log a profile based audit event to the audit subsystem
 * @type: audit type for the message
 * @profile: profile to check against (NOT NULL)
 * @gfp: allocation flags to use
 * @sa: audit event (NOT NULL)
 * @cb: optional callback fn for type specific fields (MAYBE NULL)
 *
 * Handle default message switching based off of audit mode flags
 *
 * Returns: error on failure
 */
int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
             struct common_audit_data *sa,
             void (*cb) (struct audit_buffer *, void *))
{
    BUG_ON(!profile);

    if (type == AUDIT_APPARMOR_AUTO) {
        if (likely(!sa->aad.error)) {
            /* successful ops are only logged in "all" audit mode */
            if (AUDIT_MODE(profile) != AUDIT_ALL)
                return 0;
            type = AUDIT_APPARMOR_AUDIT;
        } else if (COMPLAIN_MODE(profile))
            type = AUDIT_APPARMOR_ALLOWED;
        else
            type = AUDIT_APPARMOR_DENIED;
    }
    /*
     * AUDIT_QUIET suppresses all messages; AUDIT_QUIET_DENIED suppresses
     * only denials.  Fix: the original tested AUDIT_QUIET in both clauses,
     * leaving the quiet_denied mode (see audit_mode_names[]) dead.
     */
    if (AUDIT_MODE(profile) == AUDIT_QUIET ||
        (type == AUDIT_APPARMOR_DENIED &&
         AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
        return sa->aad.error;

    if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
        type = AUDIT_APPARMOR_KILL;

    /* the unconfined profile is never named in the record */
    if (!unconfined(profile))
        sa->aad.profile = profile;

    aa_audit_msg(type, sa, cb);

    if (sa->aad.type == AUDIT_APPARMOR_KILL)
        (void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current);

    if (sa->aad.type == AUDIT_APPARMOR_ALLOWED)
        return complain_error(sa->aad.error);

    return sa->aad.error;
}
| gpl-2.0 |
tusharjain95/Babblefire | arch/arm/kernel/stacktrace.c | 4103 | 3346 | #include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
* Unwind the current stack frame and store the new register values in the
* structure passed as argument. Unwinding is equivalent to a function return,
* hence the new PC value rather than LR should be used for backtrace.
*
* With framepointer enabled, a simple function prologue looks like this:
* mov ip, sp
* stmdb sp!, {fp, ip, lr, pc}
* sub fp, ip, #4
*
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
/*
 * Pop one stack frame: reload the fp/sp/pc values that the APCS frame
 * prologue saved at fp-12, fp-8 and fp-4 (see the layout comment above).
 *
 * Returns 0 on success, -EINVAL when the frame pointer does not lie
 * within the current thread's stack.
 */
int notrace unwind_frame(struct stackframe *frame)
{
    unsigned long high, low;
    unsigned long fp = frame->fp;
    /* only go to a higher address on the stack */
    low = frame->sp;
    high = ALIGN(low, THREAD_SIZE);
    /* check current frame pointer is within bounds:
     * the three saved words live at fp-12 .. fp-4 */
    if (fp < (low + 12) || fp + 4 >= high)
        return -EINVAL;
    /* restore the registers from the stack frame */
    frame->fp = *(unsigned long *)(fp - 12);
    frame->sp = *(unsigned long *)(fp - 8);
    frame->pc = *(unsigned long *)(fp - 4);
    return 0;
}
#endif
/*
 * Walk the stack, handing each frame to @fn.  The walk stops when the
 * callback returns non-zero or when no further frame can be unwound.
 */
void notrace walk_stackframe(struct stackframe *frame,
        int (*fn)(struct stackframe *, void *), void *data)
{
    for (;;) {
        if (fn(frame, data) != 0)
            return;
        if (unwind_frame(frame) < 0)
            return;
    }
}
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
unsigned int no_sched_functions;
unsigned int skip;
};
/*
 * walk_stackframe() callback: append one PC to the trace buffer,
 * honouring the skip count and the optional scheduler filter.
 */
static int save_trace(struct stackframe *frame, void *d)
{
    struct stack_trace_data *data = d;
    struct stack_trace *trace = data->trace;
    unsigned long pc = frame->pc;

    /* optionally suppress scheduler internals */
    if (data->no_sched_functions && in_sched_functions(pc))
        return 0;

    /* swallow the caller-requested number of leading entries */
    if (data->skip > 0) {
        data->skip--;
        return 0;
    }

    trace->entries[trace->nr_entries++] = pc;

    /* non-zero return terminates the walk once the buffer is full */
    return trace->nr_entries >= trace->max_entries;
}
/*
 * Capture a stack trace for @tsk into @trace.
 *
 * For the current task the walk starts from this function's own frame;
 * for another task it starts from the register context saved at that
 * task's last context switch.  An ULONG_MAX terminator is appended when
 * there is room.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
    struct stack_trace_data data;
    struct stackframe frame;
    data.trace = trace;
    data.skip = trace->skip;
    if (tsk != current) {
#ifdef CONFIG_SMP
        /*
         * What guarantees do we have here that 'tsk' is not
         * running on another CPU? For now, ignore it as we
         * can't guarantee we won't explode.
         */
        if (trace->nr_entries < trace->max_entries)
            trace->entries[trace->nr_entries++] = ULONG_MAX;
        return;
#else
        /* resume unwinding from the task's switched-out context */
        data.no_sched_functions = 1;
        frame.fp = thread_saved_fp(tsk);
        frame.sp = thread_saved_sp(tsk);
        frame.lr = 0; /* recovered from the stack */
        frame.pc = thread_saved_pc(tsk);
#endif
    } else {
        /* unwind from our own frame; pc points at this function */
        register unsigned long current_sp asm ("sp");
        data.no_sched_functions = 0;
        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = current_sp;
        frame.lr = (unsigned long)__builtin_return_address(0);
        frame.pc = (unsigned long)save_stack_trace_tsk;
    }
    walk_stackframe(&frame, save_trace, &data);
    if (trace->nr_entries < trace->max_entries)
        trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/* Convenience wrapper: trace the currently running task. */
void save_stack_trace(struct stack_trace *trace)
{
    save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
| gpl-2.0 |
randomblame/a500_2.6 | drivers/ide/macide.c | 4615 | 3022 | /*
* Macintosh IDE Driver
*
* Copyright (C) 1998 by Michael Schmitz
*
* This driver was written based on information obtained from the MacOS IDE
* driver binary by Mikael Forselius
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_baboon.h>
#define IDE_BASE 0x50F1A000 /* Base address of IDE controller */
/*
* Generic IDE registers as offsets from the base
* These match MkLinux so they should be correct.
*/
#define IDE_CONTROL 0x38 /* control/altstatus */
/*
* Mac-specific registers
*/
/*
* this register is odd; it doesn't seem to do much and it's
* not word-aligned like virtually every other hardware register
* on the Mac...
*/
#define IDE_IFR 0x101 /* (0x101) IDE interrupt flags on Quadra:
*
* Bit 0+1: some interrupt flags
* Bit 2+3: some interrupt enable
* Bit 4: ??
* Bit 5: IDE interrupt flag (any hwif)
* Bit 6: maybe IDE interrupt enable (any hwif) ??
* Bit 7: Any interrupt condition
*/
volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
/* Bit 5 of the interrupt flag register: IDE interrupt pending. */
int macide_test_irq(ide_hwif_t *hwif)
{
    return (*ide_ifr & 0x20) ? 1 : 0;
}
/* Acknowledge the IDE interrupt by clearing bit 5 of the IFR. */
static void macide_clear_irq(ide_drive_t *drive)
{
    *ide_ifr &= ~0x20;
}
/*
 * Fill in an ide_hw description: eight task-file registers spaced
 * 4 bytes apart from @base, plus the control register and IRQ line.
 */
static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
                                      int irq)
{
    unsigned long reg = base;
    int i;

    memset(hw, 0, sizeof(*hw));

    for (i = 0; i < 8; i++, reg += 4)
        hw->io_ports_array[i] = reg;

    hw->io_ports.ctl_addr = base + IDE_CONTROL;
    hw->irq = irq;
}
/* IRQ test/ack via the Mac-specific interrupt flag register. */
static const struct ide_port_ops macide_port_ops = {
    .clear_irq = macide_clear_irq,
    .test_irq = macide_test_irq,
};
/* Memory-mapped, no DMA; the interrupt line may be shared. */
static const struct ide_port_info macide_port_info = {
    .port_ops = &macide_port_ops,
    .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
    .irq_flags = IRQF_SHARED,
    .chipset = ide_generic,
};
/* Indexed by macintosh_config->ide_type - 1 (see macide_init). */
static const char *mac_ide_name[] =
    { "Quadra", "Powerbook", "Powerbook Baboon" };
/*
 * Probe for a Macintosh IDE interface
 *
 * Selects the register base and interrupt line from the detected
 * machine type, then registers one IDE host.
 */
static int __init macide_init(void)
{
    unsigned long base;
    int irq;
    struct ide_hw hw, *hws[] = { &hw };
    struct ide_port_info d = macide_port_info;
    if (!MACH_IS_MAC)
        return -ENODEV;
    switch (macintosh_config->ide_type) {
    case MAC_IDE_QUADRA:
        base = IDE_BASE;
        irq = IRQ_NUBUS_F;
        break;
    case MAC_IDE_PB:
        base = IDE_BASE;
        irq = IRQ_NUBUS_C;
        break;
    case MAC_IDE_BABOON:
        base = BABOON_BASE;
        /* NOTE(review): Baboon drops the clear_irq/test_irq ops --
         * presumably it lacks the IFR register; confirm. */
        d.port_ops = NULL;
        irq = IRQ_BABOON_1;
        break;
    default:
        return -ENODEV;
    }
    /* ide_type is 1-based; mac_ide_name[0] is "Quadra" */
    printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
           mac_ide_name[macintosh_config->ide_type - 1]);
    macide_setup_ports(&hw, base, irq);
    return ide_host_add(&d, hws, 1, NULL);
}
module_init(macide_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ShadowElite22/Xperia-Z2-Z3 | arch/arm/mach-imx/clock-imx21.c | 5383 | 32334 | /*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
* Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <asm/div64.h>
#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
/* Register offsets */
#define CCM_CSCR IO_ADDR_CCM(0x0)
#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
#define CCM_PCDR0 IO_ADDR_CCM(0x18)
#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
#define CCM_PCCR0 IO_ADDR_CCM(0x20)
#define CCM_PCCR1 IO_ADDR_CCM(0x24)
#define CCM_CCSR IO_ADDR_CCM(0x28)
#define CCM_PMCTL IO_ADDR_CCM(0x2c)
#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
#define CCM_CSCR_PRESC_OFFSET 29
#define CCM_CSCR_PRESC_MASK (0x7 << CCM_CSCR_PRESC_OFFSET)
#define CCM_CSCR_USB_OFFSET 26
#define CCM_CSCR_USB_MASK (0x7 << CCM_CSCR_USB_OFFSET)
#define CCM_CSCR_SD_OFFSET 24
#define CCM_CSCR_SD_MASK (0x3 << CCM_CSCR_SD_OFFSET)
#define CCM_CSCR_SPLLRES (1 << 22)
#define CCM_CSCR_MPLLRES (1 << 21)
#define CCM_CSCR_SSI2_OFFSET 20
#define CCM_CSCR_SSI2 (1 << CCM_CSCR_SSI2_OFFSET)
#define CCM_CSCR_SSI1_OFFSET 19
#define CCM_CSCR_SSI1 (1 << CCM_CSCR_SSI1_OFFSET)
#define CCM_CSCR_FIR_OFFSET 18
#define CCM_CSCR_FIR (1 << CCM_CSCR_FIR_OFFSET)
#define CCM_CSCR_SP (1 << 17)
#define CCM_CSCR_MCU (1 << 16)
#define CCM_CSCR_BCLK_OFFSET 10
#define CCM_CSCR_BCLK_MASK (0xf << CCM_CSCR_BCLK_OFFSET)
#define CCM_CSCR_IPDIV_OFFSET 9
#define CCM_CSCR_IPDIV (1 << CCM_CSCR_IPDIV_OFFSET)
#define CCM_CSCR_OSC26MDIV (1 << 4)
#define CCM_CSCR_OSC26M (1 << 3)
#define CCM_CSCR_FPM (1 << 2)
#define CCM_CSCR_SPEN (1 << 1)
#define CCM_CSCR_MPEN 1
#define CCM_MPCTL0_CPLM (1 << 31)
#define CCM_MPCTL0_PD_OFFSET 26
#define CCM_MPCTL0_PD_MASK (0xf << 26)
#define CCM_MPCTL0_MFD_OFFSET 16
#define CCM_MPCTL0_MFD_MASK (0x3ff << 16)
#define CCM_MPCTL0_MFI_OFFSET 10
#define CCM_MPCTL0_MFI_MASK (0xf << 10)
#define CCM_MPCTL0_MFN_OFFSET 0
#define CCM_MPCTL0_MFN_MASK 0x3ff
#define CCM_MPCTL1_LF (1 << 15)
#define CCM_MPCTL1_BRMO (1 << 6)
#define CCM_SPCTL0_CPLM (1 << 31)
#define CCM_SPCTL0_PD_OFFSET 26
#define CCM_SPCTL0_PD_MASK (0xf << 26)
#define CCM_SPCTL0_MFD_OFFSET 16
#define CCM_SPCTL0_MFD_MASK (0x3ff << 16)
#define CCM_SPCTL0_MFI_OFFSET 10
#define CCM_SPCTL0_MFI_MASK (0xf << 10)
#define CCM_SPCTL0_MFN_OFFSET 0
#define CCM_SPCTL0_MFN_MASK 0x3ff
#define CCM_SPCTL1_LF (1 << 15)
#define CCM_SPCTL1_BRMO (1 << 6)
#define CCM_OSC26MCTL_PEAK_OFFSET 16
#define CCM_OSC26MCTL_PEAK_MASK (0x3 << 16)
#define CCM_OSC26MCTL_AGC_OFFSET 8
#define CCM_OSC26MCTL_AGC_MASK (0x3f << 8)
#define CCM_OSC26MCTL_ANATEST_OFFSET 0
#define CCM_OSC26MCTL_ANATEST_MASK 0x3f
#define CCM_PCDR0_SSI2BAUDDIV_OFFSET 26
#define CCM_PCDR0_SSI2BAUDDIV_MASK (0x3f << 26)
#define CCM_PCDR0_SSI1BAUDDIV_OFFSET 16
#define CCM_PCDR0_SSI1BAUDDIV_MASK (0x3f << 16)
#define CCM_PCDR0_NFCDIV_OFFSET 12
#define CCM_PCDR0_NFCDIV_MASK (0xf << 12)
#define CCM_PCDR0_48MDIV_OFFSET 5
#define CCM_PCDR0_48MDIV_MASK (0x7 << CCM_PCDR0_48MDIV_OFFSET)
#define CCM_PCDR0_FIRIDIV_OFFSET 0
#define CCM_PCDR0_FIRIDIV_MASK 0x1f
#define CCM_PCDR1_PERDIV4_OFFSET 24
#define CCM_PCDR1_PERDIV4_MASK (0x3f << 24)
#define CCM_PCDR1_PERDIV3_OFFSET 16
#define CCM_PCDR1_PERDIV3_MASK (0x3f << 16)
#define CCM_PCDR1_PERDIV2_OFFSET 8
#define CCM_PCDR1_PERDIV2_MASK (0x3f << 8)
#define CCM_PCDR1_PERDIV1_OFFSET 0
#define CCM_PCDR1_PERDIV1_MASK 0x3f
#define CCM_PCCR_HCLK_CSI_OFFSET 31
#define CCM_PCCR_HCLK_CSI_REG CCM_PCCR0
#define CCM_PCCR_HCLK_DMA_OFFSET 30
#define CCM_PCCR_HCLK_DMA_REG CCM_PCCR0
#define CCM_PCCR_HCLK_BROM_OFFSET 28
#define CCM_PCCR_HCLK_BROM_REG CCM_PCCR0
#define CCM_PCCR_HCLK_EMMA_OFFSET 27
#define CCM_PCCR_HCLK_EMMA_REG CCM_PCCR0
#define CCM_PCCR_HCLK_LCDC_OFFSET 26
#define CCM_PCCR_HCLK_LCDC_REG CCM_PCCR0
#define CCM_PCCR_HCLK_SLCDC_OFFSET 25
#define CCM_PCCR_HCLK_SLCDC_REG CCM_PCCR0
#define CCM_PCCR_HCLK_USBOTG_OFFSET 24
#define CCM_PCCR_HCLK_USBOTG_REG CCM_PCCR0
#define CCM_PCCR_HCLK_BMI_OFFSET 23
/* Fix: was "(1 << CCM_PCCR_BMI_MASK)" -- self-referential and unusable. */
#define CCM_PCCR_BMI_MASK (1 << CCM_PCCR_HCLK_BMI_OFFSET)
#define CCM_PCCR_HCLK_BMI_REG CCM_PCCR0
#define CCM_PCCR_PERCLK4_OFFSET 22
#define CCM_PCCR_PERCLK4_REG CCM_PCCR0
#define CCM_PCCR_SLCDC_OFFSET 21
#define CCM_PCCR_SLCDC_REG CCM_PCCR0
#define CCM_PCCR_FIRI_BAUD_OFFSET 20
/* Fix: was "(1 << CCM_PCCR_FIRI_BAUD_MASK)" -- self-referential and unusable. */
#define CCM_PCCR_FIRI_BAUD_MASK (1 << CCM_PCCR_FIRI_BAUD_OFFSET)
#define CCM_PCCR_FIRI_BAUD_REG CCM_PCCR0
#define CCM_PCCR_NFC_OFFSET 19
#define CCM_PCCR_NFC_REG CCM_PCCR0
#define CCM_PCCR_LCDC_OFFSET 18
#define CCM_PCCR_LCDC_REG CCM_PCCR0
#define CCM_PCCR_SSI1_BAUD_OFFSET 17
#define CCM_PCCR_SSI1_BAUD_REG CCM_PCCR0
#define CCM_PCCR_SSI2_BAUD_OFFSET 16
#define CCM_PCCR_SSI2_BAUD_REG CCM_PCCR0
#define CCM_PCCR_EMMA_OFFSET 15
#define CCM_PCCR_EMMA_REG CCM_PCCR0
#define CCM_PCCR_USBOTG_OFFSET 14
#define CCM_PCCR_USBOTG_REG CCM_PCCR0
#define CCM_PCCR_DMA_OFFSET 13
#define CCM_PCCR_DMA_REG CCM_PCCR0
#define CCM_PCCR_I2C1_OFFSET 12
#define CCM_PCCR_I2C1_REG CCM_PCCR0
#define CCM_PCCR_GPIO_OFFSET 11
#define CCM_PCCR_GPIO_REG CCM_PCCR0
#define CCM_PCCR_SDHC2_OFFSET 10
#define CCM_PCCR_SDHC2_REG CCM_PCCR0
#define CCM_PCCR_SDHC1_OFFSET 9
#define CCM_PCCR_SDHC1_REG CCM_PCCR0
#define CCM_PCCR_FIRI_OFFSET 8
/* Fix: was "(1 << CCM_PCCR_BAUD_MASK)" -- CCM_PCCR_BAUD_MASK is never defined. */
#define CCM_PCCR_FIRI_MASK (1 << CCM_PCCR_FIRI_OFFSET)
#define CCM_PCCR_FIRI_REG CCM_PCCR0
#define CCM_PCCR_SSI2_IPG_OFFSET 7
#define CCM_PCCR_SSI2_REG CCM_PCCR0
#define CCM_PCCR_SSI1_IPG_OFFSET 6
#define CCM_PCCR_SSI1_REG CCM_PCCR0
#define CCM_PCCR_CSPI2_OFFSET 5
#define CCM_PCCR_CSPI2_REG CCM_PCCR0
#define CCM_PCCR_CSPI1_OFFSET 4
#define CCM_PCCR_CSPI1_REG CCM_PCCR0
#define CCM_PCCR_UART4_OFFSET 3
#define CCM_PCCR_UART4_REG CCM_PCCR0
#define CCM_PCCR_UART3_OFFSET 2
#define CCM_PCCR_UART3_REG CCM_PCCR0
#define CCM_PCCR_UART2_OFFSET 1
#define CCM_PCCR_UART2_REG CCM_PCCR0
#define CCM_PCCR_UART1_OFFSET 0
#define CCM_PCCR_UART1_REG CCM_PCCR0
#define CCM_PCCR_OWIRE_OFFSET 31
#define CCM_PCCR_OWIRE_REG CCM_PCCR1
#define CCM_PCCR_KPP_OFFSET 30
#define CCM_PCCR_KPP_REG CCM_PCCR1
#define CCM_PCCR_RTC_OFFSET 29
#define CCM_PCCR_RTC_REG CCM_PCCR1
#define CCM_PCCR_PWM_OFFSET 28
#define CCM_PCCR_PWM_REG CCM_PCCR1
#define CCM_PCCR_GPT3_OFFSET 27
#define CCM_PCCR_GPT3_REG CCM_PCCR1
#define CCM_PCCR_GPT2_OFFSET 26
#define CCM_PCCR_GPT2_REG CCM_PCCR1
#define CCM_PCCR_GPT1_OFFSET 25
#define CCM_PCCR_GPT1_REG CCM_PCCR1
#define CCM_PCCR_WDT_OFFSET 24
#define CCM_PCCR_WDT_REG CCM_PCCR1
#define CCM_PCCR_CSPI3_OFFSET 23
#define CCM_PCCR_CSPI3_REG CCM_PCCR1
#define CCM_PCCR_CSPI1_MASK (1 << CCM_PCCR_CSPI1_OFFSET)
#define CCM_PCCR_CSPI2_MASK (1 << CCM_PCCR_CSPI2_OFFSET)
#define CCM_PCCR_CSPI3_MASK (1 << CCM_PCCR_CSPI3_OFFSET)
#define CCM_PCCR_DMA_MASK (1 << CCM_PCCR_DMA_OFFSET)
#define CCM_PCCR_EMMA_MASK (1 << CCM_PCCR_EMMA_OFFSET)
#define CCM_PCCR_GPIO_MASK (1 << CCM_PCCR_GPIO_OFFSET)
#define CCM_PCCR_GPT1_MASK (1 << CCM_PCCR_GPT1_OFFSET)
#define CCM_PCCR_GPT2_MASK (1 << CCM_PCCR_GPT2_OFFSET)
#define CCM_PCCR_GPT3_MASK (1 << CCM_PCCR_GPT3_OFFSET)
#define CCM_PCCR_HCLK_BROM_MASK (1 << CCM_PCCR_HCLK_BROM_OFFSET)
#define CCM_PCCR_HCLK_CSI_MASK (1 << CCM_PCCR_HCLK_CSI_OFFSET)
#define CCM_PCCR_HCLK_DMA_MASK (1 << CCM_PCCR_HCLK_DMA_OFFSET)
#define CCM_PCCR_HCLK_EMMA_MASK (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
#define CCM_PCCR_HCLK_LCDC_MASK (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
#define CCM_PCCR_HCLK_SLCDC_MASK (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
#define CCM_PCCR_HCLK_USBOTG_MASK (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
#define CCM_PCCR_I2C1_MASK (1 << CCM_PCCR_I2C1_OFFSET)
#define CCM_PCCR_KPP_MASK (1 << CCM_PCCR_KPP_OFFSET)
#define CCM_PCCR_LCDC_MASK (1 << CCM_PCCR_LCDC_OFFSET)
#define CCM_PCCR_NFC_MASK (1 << CCM_PCCR_NFC_OFFSET)
#define CCM_PCCR_OWIRE_MASK (1 << CCM_PCCR_OWIRE_OFFSET)
#define CCM_PCCR_PERCLK4_MASK (1 << CCM_PCCR_PERCLK4_OFFSET)
#define CCM_PCCR_PWM_MASK (1 << CCM_PCCR_PWM_OFFSET)
#define CCM_PCCR_RTC_MASK (1 << CCM_PCCR_RTC_OFFSET)
#define CCM_PCCR_SDHC1_MASK (1 << CCM_PCCR_SDHC1_OFFSET)
#define CCM_PCCR_SDHC2_MASK (1 << CCM_PCCR_SDHC2_OFFSET)
#define CCM_PCCR_SLCDC_MASK (1 << CCM_PCCR_SLCDC_OFFSET)
#define CCM_PCCR_SSI1_BAUD_MASK (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
#define CCM_PCCR_SSI1_IPG_MASK (1 << CCM_PCCR_SSI1_IPG_OFFSET)
#define CCM_PCCR_SSI2_BAUD_MASK (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
#define CCM_PCCR_SSI2_IPG_MASK (1 << CCM_PCCR_SSI2_IPG_OFFSET)
#define CCM_PCCR_UART1_MASK (1 << CCM_PCCR_UART1_OFFSET)
#define CCM_PCCR_UART2_MASK (1 << CCM_PCCR_UART2_OFFSET)
#define CCM_PCCR_UART3_MASK (1 << CCM_PCCR_UART3_OFFSET)
#define CCM_PCCR_UART4_MASK (1 << CCM_PCCR_UART4_OFFSET)
#define CCM_PCCR_USBOTG_MASK (1 << CCM_PCCR_USBOTG_OFFSET)
#define CCM_PCCR_WDT_MASK (1 << CCM_PCCR_WDT_OFFSET)
#define CCM_CCSR_32KSR (1 << 15)
#define CCM_CCSR_CLKMODE1 (1 << 9)
#define CCM_CCSR_CLKMODE0 (1 << 8)
#define CCM_CCSR_CLKOSEL_OFFSET 0
#define CCM_CCSR_CLKOSEL_MASK 0x1f
#define SYS_FMCR 0x14 /* Functional Muxing Control Reg */
#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
/* Set the gate bit for this clock in its CCM enable register. */
static int _clk_enable(struct clk *clk)
{
    u32 val = __raw_readl(clk->enable_reg);

    val |= 1 << clk->enable_shift;
    __raw_writel(val, clk->enable_reg);
    return 0;
}
/* Clear the gate bit for this clock in its CCM enable register. */
static void _clk_disable(struct clk *clk)
{
    u32 val = __raw_readl(clk->enable_reg);

    val &= ~(1 << clk->enable_shift);
    __raw_writel(val, clk->enable_reg);
}
/*
 * Round @rate down to the nearest frequency reachable from the parent
 * with an integer divider in [1, @max_divisor].
 *
 * Fix: the original divided by @rate and by @div without checking for
 * zero -- rate == 0 (or parent_rate == 0 with rate > parent_rate's
 * remainder path) caused a division by zero.
 */
static unsigned long _clk_generic_round_rate(struct clk *clk,
            unsigned long rate,
            u32 max_divisor)
{
    u32 div;
    unsigned long parent_rate;

    parent_rate = clk_get_rate(clk->parent);

    /* a zero request gets the slowest reachable rate */
    if (rate == 0)
        return parent_rate / max_divisor;

    /* round the divider up so the result never exceeds @rate */
    div = parent_rate / rate;
    if (parent_rate % rate)
        div++;
    if (div > max_divisor)
        div = max_divisor;
    if (div == 0)
        div = 1;

    return parent_rate / div;
}
/*
 * Power up the system PLL and busy-wait until its lock flag is set.
 * NOTE(review): the lock poll has no timeout -- a PLL that never locks
 * hangs the CPU here.
 */
static int _clk_spll_enable(struct clk *clk)
{
    u32 reg;
    reg = __raw_readl(CCM_CSCR);
    reg |= CCM_CSCR_SPEN;
    __raw_writel(reg, CCM_CSCR);
    /* spin until the SPLL reports lock */
    while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
        ;
    return 0;
}
/* Power down the system PLL by clearing its enable bit in CSCR. */
static void _clk_spll_disable(struct clk *clk)
{
    u32 cscr = __raw_readl(CCM_CSCR);

    cscr &= ~CCM_CSCR_SPEN;
    __raw_writel(cscr, CCM_CSCR);
}
#define CSCR() (__raw_readl(CCM_CSCR))
#define PCDR0() (__raw_readl(CCM_PCDR0))
#define PCDR1() (__raw_readl(CCM_PCDR1))
/* Peripheral clock dividers are 6 bits wide: divisors 1..64. */
static unsigned long _clk_perclkx_round_rate(struct clk *clk,
                                             unsigned long rate)
{
    return _clk_generic_round_rate(clk, rate, 64);
}
/*
 * Program the divider for peripheral clock clk->id (0..3) in PCDR1.
 * Each divider occupies an 8-bit slot (id << 3) of which 6 bits are
 * used (CCM_PCDR1_PERDIV1_MASK).  Only exact divisions are accepted.
 */
static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
{
    u32 reg;
    u32 div;
    unsigned long parent_rate;
    parent_rate = clk_get_rate(clk->parent);
    if (clk->id < 0 || clk->id > 3)
        return -EINVAL;
    /* hardware divides by (field + 1); reachable range is 1..64 */
    div = parent_rate / rate;
    if (div > 64 || div < 1 || ((parent_rate / div) != rate))
        return -EINVAL;
    div--;
    reg =
        __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
                                   (clk->id << 3));
    reg |= div << (clk->id << 3);
    __raw_writel(reg, CCM_PCDR1);
    return 0;
}
/* USB clock = parent rate / (USB predivider field in CSCR + 1). */
static unsigned long _clk_usb_recalc(struct clk *clk)
{
    unsigned long pdf =
        (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;

    return clk_get_rate(clk->parent) / (pdf + 1U);
}
/* The USB predivider field supports divisors 1..8. */
static unsigned long _clk_usb_round_rate(struct clk *clk,
                                         unsigned long rate)
{
    return _clk_generic_round_rate(clk, rate, 8);
}
/*
 * Program the USB predivider in CSCR.  Only exact divisions of the
 * parent rate by 1..8 are accepted.
 */
static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
{
    u32 reg;
    u32 div;
    unsigned long parent_rate;
    parent_rate = clk_get_rate(clk->parent);
    /* hardware divides by (field + 1) */
    div = parent_rate / rate;
    if (div > 8 || div < 1 || ((parent_rate / div) != rate))
        return -EINVAL;
    div--;
    reg = CSCR() & ~CCM_CSCR_USB_MASK;
    reg |= div << CCM_CSCR_USB_OFFSET;
    __raw_writel(reg, CCM_CSCR);
    return 0;
}
/*
 * SSI bit clock: 2 * parent / pdf.  Divider values below 2 are
 * remapped to 124, per the MX21 / MX27 TO1 note below --
 * NOTE(review): presumably a silicon quirk; confirm against the
 * reference manual.
 */
static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
{
    unsigned long parent_rate;
    parent_rate = clk_get_rate(clk->parent);
    pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
    return 2UL * parent_rate / pdf;
}
/* SSI1 rate from its baud divider field in PCDR0. */
static unsigned long _clk_ssi1_recalc(struct clk *clk)
{
    return _clk_ssix_recalc(clk,
                            (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
                            >> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
}
/* SSI2 rate from its baud divider field in PCDR0. */
static unsigned long _clk_ssi2_recalc(struct clk *clk)
{
    return _clk_ssix_recalc(clk,
                            (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
                            CCM_PCDR0_SSI2BAUDDIV_OFFSET);
}
/* NAND flash controller clock = parent rate / (NFC divider field + 1). */
static unsigned long _clk_nfc_recalc(struct clk *clk)
{
    unsigned long div = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
        >> CCM_PCDR0_NFCDIV_OFFSET;

    return clk_get_rate(clk->parent) / (div + 1);
}
/* Forward round_rate to the parent clock.
 * NOTE(review): assumes the parent implements round_rate -- NULL deref
 * otherwise; all current users parent a per_clk entry that does. */
static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
{
    return clk->parent->round_rate(clk->parent, rate);
}
/* Forward set_rate to the parent clock (same caveat as round_rate). */
static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
{
    return clk->parent->set_rate(clk->parent, rate);
}
static unsigned long external_high_reference; /* in Hz */
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
return external_high_reference;
}
/*
* the high frequency external clock reference
* Default case is 26MHz.
*/
static struct clk ckih_clk = {
.get_rate = get_high_reference_clock_rate,
};
static unsigned long external_low_reference; /* in Hz */
static unsigned long get_low_reference_clock_rate(struct clk *clk)
{
return external_low_reference;
}
/*
* the low frequency external clock reference
* Default case is 32.768kHz.
*/
static struct clk ckil_clk = {
.get_rate = get_low_reference_clock_rate,
};
/* Frequency premultiplier: 512 x the low-frequency (ckil) reference. */
static unsigned long _clk_fpm_recalc(struct clk *clk)
{
    return clk_get_rate(clk->parent) * 512;
}
/* Output of frequency pre multiplier */
static struct clk fpm_clk = {
.parent = &ckil_clk,
.get_rate = _clk_fpm_recalc,
};
/*
 * Compute the MPLL output from the MPCTL0 fields using the fractional
 * PLL formula:
 *
 *   f = 2 * ref * (MFI + MFN / (MFD + 1)) / (PD + 1)
 *
 * MFI values below 5 are treated as 5; 64-bit intermediates avoid
 * overflow of ref * MFN.
 */
static unsigned long get_mpll_clk(struct clk *clk)
{
    uint32_t reg;
    unsigned long ref_clk;
    unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
    unsigned long long temp;
    ref_clk = clk_get_rate(clk->parent);
    reg = __raw_readl(CCM_MPCTL0);
    pdf = (reg & CCM_MPCTL0_PD_MASK) >> CCM_MPCTL0_PD_OFFSET;
    mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
    mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
    mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
    mfi = (mfi <= 5) ? 5 : mfi;
    temp = 2LL * ref_clk * mfn;
    do_div(temp, mfd + 1);
    temp = 2LL * ref_clk * mfi + temp;
    do_div(temp, pdf + 1);
    return (unsigned long)temp;
}
static struct clk mpll_clk = {
.parent = &ckih_clk,
.get_rate = get_mpll_clk,
};
/* FCLK (CPU clock) = MPLL output / (PRESC field in CSCR + 1). */
static unsigned long _clk_fclk_get_rate(struct clk *clk)
{
    u32 presc = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;

    return clk_get_rate(clk->parent) / (presc + 1);
}
static struct clk fclk_clk = {
.parent = &mpll_clk,
.get_rate = _clk_fclk_get_rate
};
/*
 * Compute the SPLL output from the SPCTL0 fields -- same fractional
 * PLL formula as get_mpll_clk():
 *
 *   f = 2 * ref * (MFI + MFN / (MFD + 1)) / (PD + 1)
 */
static unsigned long get_spll_clk(struct clk *clk)
{
    uint32_t reg;
    unsigned long ref_clk;
    unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
    unsigned long long temp;
    ref_clk = clk_get_rate(clk->parent);
    reg = __raw_readl(CCM_SPCTL0);
    pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
    mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
    mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
    mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
    mfi = (mfi <= 5) ? 5 : mfi;
    temp = 2LL * ref_clk * mfn;
    do_div(temp, mfd + 1);
    temp = 2LL * ref_clk * mfi + temp;
    do_div(temp, pdf + 1);
    return (unsigned long)temp;
}
static struct clk spll_clk = {
.parent = &ckih_clk,
.get_rate = get_spll_clk,
.enable = _clk_spll_enable,
.disable = _clk_spll_disable,
};
/* HCLK (system bus) = FCLK / (BCLK divider field in CSCR + 1). */
static unsigned long get_hclk_clk(struct clk *clk)
{
    unsigned long bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
        >> CCM_CSCR_BCLK_OFFSET;

    return clk_get_rate(clk->parent) / (bclk_pdf + 1);
}
static struct clk hclk_clk = {
.parent = &fclk_clk,
.get_rate = get_hclk_clk,
};
/*
 * IPG clock: HCLK optionally halved.  CCM_CSCR_IPDIV is a single bit,
 * so ipg_pdf is 0 or 1 and the effective divider is 1 or 2.
 */
static unsigned long get_ipg_clk(struct clk *clk)
{
    unsigned long rate;
    unsigned long ipg_pdf;
    ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
    rate = clk_get_rate(clk->parent);
    return rate / (ipg_pdf + 1);
}
static struct clk ipg_clk = {
.parent = &hclk_clk,
.get_rate = get_ipg_clk,
};
/*
 * Read back the rate of peripheral clock clk->id (0..3) from its
 * 8-bit slot in PCDR1 (6 divider bits used); returns 0 for an
 * out-of-range id.
 */
static unsigned long _clk_perclkx_recalc(struct clk *clk)
{
    unsigned long perclk_pdf;
    unsigned long parent_rate;
    parent_rate = clk_get_rate(clk->parent);
    if (clk->id < 0 || clk->id > 3)
        return 0;
    perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
    return parent_rate / (perclk_pdf + 1);
}
static struct clk per_clk[] = {
{
.id = 0,
.parent = &mpll_clk,
.get_rate = _clk_perclkx_recalc,
}, {
.id = 1,
.parent = &mpll_clk,
.get_rate = _clk_perclkx_recalc,
}, {
.id = 2,
.parent = &mpll_clk,
.round_rate = _clk_perclkx_round_rate,
.set_rate = _clk_perclkx_set_rate,
.get_rate = _clk_perclkx_recalc,
/* Enable/Disable done via lcd_clkc[1] */
}, {
.id = 3,
.parent = &mpll_clk,
.round_rate = _clk_perclkx_round_rate,
.set_rate = _clk_perclkx_set_rate,
.get_rate = _clk_perclkx_recalc,
/* Enable/Disable done via csi_clk[1] */
},
};
static struct clk uart_ipg_clk[];
static struct clk uart_clk[] = {
{
.id = 0,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[2],
}, {
.id = 3,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[3],
},
};
static struct clk uart_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART1_REG,
.enable_shift = CCM_PCCR_UART1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART2_REG,
.enable_shift = CCM_PCCR_UART2_OFFSET,
.disable = _clk_disable,
}, {
.id = 2,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART3_REG,
.enable_shift = CCM_PCCR_UART3_OFFSET,
.disable = _clk_disable,
}, {
.id = 3,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART4_REG,
.enable_shift = CCM_PCCR_UART4_OFFSET,
.disable = _clk_disable,
},
};
static struct clk gpt_ipg_clk[];
static struct clk gpt_clk[] = {
{
.id = 0,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[2],
},
};
static struct clk gpt_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT1_REG,
.enable_shift = CCM_PCCR_GPT1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT2_REG,
.enable_shift = CCM_PCCR_GPT2_OFFSET,
.disable = _clk_disable,
}, {
.id = 2,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT3_REG,
.enable_shift = CCM_PCCR_GPT3_OFFSET,
.disable = _clk_disable,
},
};
static struct clk pwm_clk[] = {
{
.parent = &per_clk[0],
.secondary = &pwm_clk[1],
}, {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_PWM_REG,
.enable_shift = CCM_PCCR_PWM_OFFSET,
.disable = _clk_disable,
},
};
static struct clk sdhc_ipg_clk[];
static struct clk sdhc_clk[] = {
{
.id = 0,
.parent = &per_clk[1],
.secondary = &sdhc_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[1],
.secondary = &sdhc_ipg_clk[1],
},
};
static struct clk sdhc_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SDHC1_REG,
.enable_shift = CCM_PCCR_SDHC1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SDHC2_REG,
.enable_shift = CCM_PCCR_SDHC2_OFFSET,
.disable = _clk_disable,
},
};
static struct clk cspi_ipg_clk[];
static struct clk cspi_clk[] = {
{
.id = 0,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[2],
},
};
/* IPG-side gates for the three CSPI controllers (PCCR0 bits 4, 5, 23). */
static struct clk cspi_ipg_clk[] = {
    {
        .id = 0,
        .parent = &ipg_clk,
        .enable = _clk_enable,
        .enable_reg = CCM_PCCR_CSPI1_REG,
        .enable_shift = CCM_PCCR_CSPI1_OFFSET,
        .disable = _clk_disable,
    }, {
        .id = 1,
        .parent = &ipg_clk,
        .enable = _clk_enable,
        .enable_reg = CCM_PCCR_CSPI2_REG,
        .enable_shift = CCM_PCCR_CSPI2_OFFSET,
        .disable = _clk_disable,
    }, {
        /* NOTE(review): .id = 3 while the matching cspi_clk[2] entry
         * uses .id = 2 -- looks like a typo; confirm before relying
         * on the id. */
        .id = 3,
        .parent = &ipg_clk,
        .enable = _clk_enable,
        .enable_reg = CCM_PCCR_CSPI3_REG,
        .enable_shift = CCM_PCCR_CSPI3_OFFSET,
        .disable = _clk_disable,
    },
};
/* LCD controller: pixel clock [0] -> IPG gate [1] -> AHB gate [2],
 * chained via .secondary so one clk_enable() ungates all three. */
static struct clk lcdc_clk[] = {
	{
		.parent = &per_clk[2],
		.secondary = &lcdc_clk[1],
		.round_rate = _clk_parent_round_rate,
		.set_rate = _clk_parent_set_rate,
	}, {
		.parent = &ipg_clk,
		.secondary = &lcdc_clk[2],
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_LCDC_REG,
		.enable_shift = CCM_PCCR_LCDC_OFFSET,
		.disable = _clk_disable,
	}, {
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_LCDC_REG,
		.enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
		.disable = _clk_disable,
	},
};
/* CMOS sensor interface: per-clock [0] plus its AHB gate [1]. */
static struct clk csi_clk[] = {
	{
		.parent = &per_clk[3],
		.secondary = &csi_clk[1],
		.round_rate = _clk_parent_round_rate,
		.set_rate = _clk_parent_set_rate,
	}, {
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_CSI_REG,
		.enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
		.disable = _clk_disable,
	},
};
/* USB OTG: 48 MHz clock derived from the serial PLL [0] plus AHB gate [1]. */
static struct clk usb_clk[] = {
	{
		.parent = &spll_clk,
		.secondary = &usb_clk[1],
		.get_rate = _clk_usb_recalc,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_USBOTG_REG,
		.enable_shift = CCM_PCCR_USBOTG_OFFSET,
		.disable = _clk_disable,
		.round_rate = _clk_usb_round_rate,
		.set_rate = _clk_usb_set_rate,
	}, {
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
		.enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
		.disable = _clk_disable,
	}
};
/* Forward declaration so ssi_clk[] can reference its IPG gates below. */
static struct clk ssi_ipg_clk[];
/* SSI1/2 baud clocks off the MCU PLL; each chains its IPG gate. */
static struct clk ssi_clk[] = {
	{
		.id = 0,
		.parent = &mpll_clk,
		.secondary = &ssi_ipg_clk[0],
		.get_rate = _clk_ssi1_recalc,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SSI1_BAUD_REG,
		.enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
		.disable = _clk_disable,
	}, {
		.id = 1,
		.parent = &mpll_clk,
		.secondary = &ssi_ipg_clk[1],
		.get_rate = _clk_ssi2_recalc,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SSI2_BAUD_REG,
		.enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
		.disable = _clk_disable,
	},
};
/* IPG bus gates for the SSI controllers. */
static struct clk ssi_ipg_clk[] = {
	{
		.id = 0,
		.parent = &ipg_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SSI1_REG,
		.enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
		.disable = _clk_disable,
	}, {
		.id = 1,
		.parent = &ipg_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SSI2_REG,
		.enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
		.disable = _clk_disable,
	},
};
/* NAND flash controller clock, divided down from the CPU clock. */
static struct clk nfc_clk = {
	.parent = &fclk_clk,
	.get_rate = _clk_nfc_recalc,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_NFC_REG,
	.enable_shift = CCM_PCCR_NFC_OFFSET,
	.disable = _clk_disable,
};
/* DMA engine: module gate [0] plus its AHB gate [1]. */
static struct clk dma_clk[] = {
	{
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_DMA_REG,
		.enable_shift = CCM_PCCR_DMA_OFFSET,
		.disable = _clk_disable,
		.secondary = &dma_clk[1],
	}, {
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_DMA_REG,
		.enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
		.disable = _clk_disable,
	},
};
/* Boot ROM AHB clock. */
static struct clk brom_clk = {
	.parent = &hclk_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_HCLK_BROM_REG,
	.enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
	.disable = _clk_disable,
};
/* eMMA multimedia accelerator: module gate [0] plus AHB gate [1]. */
static struct clk emma_clk[] = {
	{
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_EMMA_REG,
		.enable_shift = CCM_PCCR_EMMA_OFFSET,
		.disable = _clk_disable,
		.secondary = &emma_clk[1],
	}, {
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_EMMA_REG,
		.enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
		.disable = _clk_disable,
	}
};
/* Smart LCD controller: module gate [0] plus AHB gate [1]. */
static struct clk slcdc_clk[] = {
	{
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SLCDC_REG,
		.enable_shift = CCM_PCCR_SLCDC_OFFSET,
		.disable = _clk_disable,
		.secondary = &slcdc_clk[1],
	}, {
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
		.enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
		.disable = _clk_disable,
	}
};
/* Simple IPG-gated peripheral clocks below. */
static struct clk wdog_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_WDT_REG,
	.enable_shift = CCM_PCCR_WDT_OFFSET,
	.disable = _clk_disable,
};
static struct clk gpio_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_GPIO_REG,
	.enable_shift = CCM_PCCR_GPIO_OFFSET,
	.disable = _clk_disable,
};
static struct clk i2c_clk = {
	.id = 0,
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_I2C1_REG,
	.enable_shift = CCM_PCCR_I2C1_OFFSET,
	.disable = _clk_disable,
};
static struct clk kpp_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_KPP_REG,
	.enable_shift = CCM_PCCR_KPP_OFFSET,
	.disable = _clk_disable,
};
static struct clk owire_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_OWIRE_REG,
	.enable_shift = CCM_PCCR_OWIRE_OFFSET,
	.disable = _clk_disable,
};
static struct clk rtc_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_RTC_REG,
	.enable_shift = CCM_PCCR_RTC_OFFSET,
	.disable = _clk_disable,
};
/* Round @rate to the nearest value reachable from the parent via the
 * CLKO divider (max divide-by-8). */
static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_generic_round_rate(clk, rate, 8);
}
/*
 * Program the CLKO output divider so the output runs at exactly @rate.
 * Only the 48M (USB) parent has a divider field in PCDR0; for any other
 * parent the register is rewritten unchanged.  Returns -EINVAL when
 * @rate is not an exact integer division (1..8) of the parent rate.
 */
static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	/* A zero rate is never reachable and would divide by zero below. */
	if (rate == 0)
		return -EINVAL;

	div = parent_rate / rate;
	/* Only exact divisions by 1..8 are representable in the 3-bit field. */
	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;			/* hardware encodes divide-by-N as N-1 */

	reg = __raw_readl(CCM_PCDR0);

	if (clk->parent == &usb_clk[0]) {
		reg &= ~CCM_PCDR0_48MDIV_MASK;
		reg |= div << CCM_PCDR0_48MDIV_OFFSET;
	}

	__raw_writel(reg, CCM_PCDR0);

	return 0;
}
/*
 * Recalculate the CLKO output rate from the parent rate and the divider
 * currently programmed in PCDR0 (only present for the 48M/USB parent).
 */
static unsigned long _clk_clko_recalc(struct clk *clk)
{
	u32 div = 0;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (clk->parent == &usb_clk[0]) /* 48M */
		/*
		 * Precedence fix: '>>' binds tighter than '&', so the
		 * original expression computed
		 * readl & (MASK >> OFFSET) and returned the wrong divider.
		 * Mask first, then shift the field down.
		 */
		div = (__raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK)
			>> CCM_PCDR0_48MDIV_OFFSET;
	div++;			/* field stores divide-by-N as N-1 */

	return parent_rate / div;
}
static struct clk clko_clk;
/*
 * Route one of the internal clocks to the CLKO output pin by writing the
 * CLKOSEL mux field in CCSR.  The select values (0x0..0x15) follow the
 * i.MX21 CCSR register layout; returns -EINVAL for any clock that has no
 * mux input.
 */
static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;
	reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
	if (parent == &ckil_clk)
		reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &fpm_clk)
		reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ckih_clk)
		reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
	/* values 3/4 select the reference feeding each PLL, not the PLL */
	else if (parent == mpll_clk.parent)
		reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == spll_clk.parent)
		reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &mpll_clk)
		reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &spll_clk)
		reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &fclk_clk)
		reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &hclk_clk)
		reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ipg_clk)
		reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[0])
		reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[1])
		reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[2])
		reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[3])
		reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ssi_clk[0])
		reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ssi_clk[1])
		reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &nfc_clk)
		reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &usb_clk[0])
		reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
	/* NOTE(review): selecting clko_clk as its own parent (0x15) looks
	 * odd — presumably 0x15 is a distinct mux input; confirm against
	 * the i.MX21 reference manual. */
	else if (parent == &clko_clk)
		reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
	else
		return -EINVAL;
	__raw_writel(reg, CCM_CCSR);
	return 0;
}
/* CLKO clock-out pin: parent selectable at runtime, rate via PCDR0 divider. */
static struct clk clko_clk = {
	.get_rate = _clk_clko_recalc,
	.set_rate = _clk_clko_set_rate,
	.round_rate = _clk_clko_round_rate,
	.set_parent = _clk_clko_set_parent,
};
/* Helper to build one clkdev lookup table entry. */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},
/*
 * clkdev lookup table mapping device names / connection ids to the clocks
 * defined above.  Fix: the three GPT entries all registered con_id "gpt1",
 * which made gpt_clk[1] and gpt_clk[2] unreachable through clk_get();
 * they are now "gpt2" and "gpt3", matching the perclkN/sdhcN/ssiN naming
 * pattern used throughout this table.
 */
static struct clk_lookup lookups[] = {
/* It's unlikely that any driver wants one of them directly:
	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
	_REGISTER_CLOCK(NULL, "ckil", ckil_clk)
	_REGISTER_CLOCK(NULL, "fpm", fpm_clk)
	_REGISTER_CLOCK(NULL, "mpll", mpll_clk)
	_REGISTER_CLOCK(NULL, "spll", spll_clk)
	_REGISTER_CLOCK(NULL, "fclk", fclk_clk)
	_REGISTER_CLOCK(NULL, "hclk", hclk_clk)
	_REGISTER_CLOCK(NULL, "ipg", ipg_clk)
*/
	_REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
	_REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
	_REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
	_REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
	_REGISTER_CLOCK(NULL, "clko", clko_clk)
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
	_REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
	_REGISTER_CLOCK(NULL, "gpt2", gpt_clk[1])
	_REGISTER_CLOCK(NULL, "gpt3", gpt_clk[2])
	_REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
	_REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
	_REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
	_REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
	_REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
	_REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
	_REGISTER_CLOCK(NULL, "csi", csi_clk[0])
	_REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
	_REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
	_REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
	_REGISTER_CLOCK(NULL, "dma", dma_clk[0])
	_REGISTER_CLOCK(NULL, "brom", brom_clk)
	_REGISTER_CLOCK(NULL, "emma", emma_clk[0])
	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
	_REGISTER_CLOCK(NULL, "gpio", gpio_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
	_REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
	_REGISTER_CLOCK(NULL, "owire", owire_clk)
	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
};
/*
 * must be called very early to get information about the
 * available clock rate when the timer framework starts
 */
int __init mx21_clocks_init(unsigned long lref, unsigned long href)
{
	u32 cscr;
	/* External 32 kHz and high-frequency reference rates from the board. */
	external_low_reference = lref;
	external_high_reference = href;
	/* detect clock reference for both system PLLs */
	cscr = CSCR();
	if (cscr & CCM_CSCR_MCU)
		mpll_clk.parent = &ckih_clk;
	else
		mpll_clk.parent = &fpm_clk;
	if (cscr & CCM_CSCR_SP)
		spll_clk.parent = &ckih_clk;
	else
		spll_clk.parent = &fpm_clk;
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
	/* Turn off all clock gates (except GPT1, needed for the timer) */
	__raw_writel(0, CCM_PCCR0);
	__raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
	/* This turns off the serial PLL as well */
	spll_clk.disable(&spll_clk);
	/* This will propagate to all children and init all the clock rates. */
	clk_enable(&per_clk[0]);
	clk_enable(&gpio_clk);
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	/* Keep the low-level debug UART clocked when early printk is in use. */
	clk_enable(&uart_clk[0]);
#endif
	mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
			MX21_INT_GPT1);
	return 0;
}
| gpl-2.0 |
anwarMov/android_kernel_asus_a400cg | arch/ia64/sn/kernel/io_init.c | 6663 | 10655 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/module.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
/*
* The code in this file will only be executed when running with
* a PROM that does _not_ have base ACPI IO support.
* (i.e., SN_ACPI_BASE_SUPPORT() == 0)
*/
static int max_segment_number;		 /* Default highest segment number */
static int max_pcibus_number = 255;	 /* Default highest pci bus number */
/*
 * Retrieve the hub device info structure for the given nasid.
 * The PROM fills the buffer at physical @address; v0 is the SAL status.
 */
static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
			(u64) handle, (u64) address, 0, 0, 0, 0, 0);
	return ret_stuff.v0;
}
/*
 * Retrieve the pci bus information given the bus number.
 * Non-zero v0 means the (segment, busnum) pair does not exist.
 */
static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
			(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
	return ret_stuff.v0;
}
/*
 * Retrieve the pci device information given the bus and device|function number.
 * @pci_dev and @sn_irq_info are physical addresses of buffers the PROM fills.
 */
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
		    u64 sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
			(u64) segment, (u64) bus_number, (u64) devfn,
			(u64) pci_dev,
			sn_irq_info, 0, 0);
	return ret_stuff.v0;
}
/*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
 *			each node in the system. This function is only
 *			executed when running with a non-ACPI capable PROM.
 */
static void __init sn_fixup_ionodes(void)
{
	struct hubdev_info *hubdev;
	u64 status;
	u64 nasid;
	int i;
	extern void sn_common_hubdev_init(struct hubdev_info *);
	/*
	 * Get SGI Specific HUB chipset information.
	 * Inform Prom that this kernel can support domain bus numbering.
	 */
	for (i = 0; i < num_cnodes; i++) {
		hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
		nasid = cnodeid_to_nasid(i);
		/* All-ones values tell the PROM we support segments/buses. */
		hubdev->max_segment_number = 0xffffffff;
		hubdev->max_pcibus_number = 0xff;
		status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
		if (status)
			continue;	/* node without an I/O hub */
		/* Save the largest Domain and pcibus numbers found. */
		if (hubdev->max_segment_number) {
			/*
			 * Dealing with a Prom that supports segments.
			 */
			max_segment_number = hubdev->max_segment_number;
			max_pcibus_number = hubdev->max_pcibus_number;
		}
		sn_common_hubdev_init(hubdev);
	}
}
/*
 * sn_pci_legacy_window_fixup - Create PCI controller windows for
 *				legacy IO and MEM space. This needs to
 *				be done here, as the PROM does not have
 *				ACPI support defining the root buses
 *				and their resources (_CRS),
 */
static void
sn_legacy_pci_window_fixup(struct pci_controller *controller,
			u64 legacy_io, u64 legacy_mem)
{
		/* Window 0: 64 KiB of legacy port I/O; window 1: 1 MiB of
		 * legacy memory space.  Allocation failure is fatal here. */
		controller->window = kcalloc(2, sizeof(struct pci_window),
					     GFP_KERNEL);
		BUG_ON(controller->window == NULL);
		controller->window[0].offset = legacy_io;
		controller->window[0].resource.name = "legacy_io";
		controller->window[0].resource.flags = IORESOURCE_IO;
		controller->window[0].resource.start = legacy_io;
		controller->window[0].resource.end =
			controller->window[0].resource.start + 0xffff;
		controller->window[0].resource.parent = &ioport_resource;
		controller->window[1].offset = legacy_mem;
		controller->window[1].resource.name = "legacy_mem";
		controller->window[1].resource.flags = IORESOURCE_MEM;
		controller->window[1].resource.start = legacy_mem;
		controller->window[1].resource.end =
			controller->window[1].resource.start + (1024 * 1024) - 1;
		controller->window[1].resource.parent = &iomem_resource;
		controller->windows = 2;
}
/*
 * sn_pci_window_fixup() - Create a pci_window for each device resource.
 *			   It will setup pci_windows for use by
 *			   pcibios_bus_to_resource(), pcibios_resource_to_bus(),
 *			   etc.
 */
static void
sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
		    s64 * pci_addrs)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
	unsigned int i;
	unsigned int idx;
	unsigned int new_count;
	struct pci_window *new_window;
	if (count == 0)
		return;
	/* Grow the controller's window array by @count entries,
	 * copying over whatever windows already exist. */
	idx = controller->windows;
	new_count = controller->windows + count;
	new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
	BUG_ON(new_window == NULL);
	if (controller->window) {
		memcpy(new_window, controller->window,
		       sizeof(struct pci_window) * controller->windows);
		kfree(controller->window);
	}
	/* Setup a pci_window for each device resource.
	 * pci_addrs[i] == -1 marks resources that were skipped upstream. */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		if (pci_addrs[i] == -1)
			continue;
		new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
		new_window[idx].resource = dev->resource[i];
		idx++;
	}
	controller->windows = new_count;
	controller->window = new_window;
}
/*
 * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
 *		      and need to convert the pci_dev->resource
 *		      'start' and 'end' addresses to mapped addresses,
 *		      and setup the pci_controller->window array entries.
 */
void
sn_io_slot_fixup(struct pci_dev *dev)
{
	unsigned int count = 0;
	int idx;
	s64 pci_addrs[PCI_ROM_RESOURCE + 1];
	unsigned long addr, end, size, start;
	struct pcidev_info *pcidev_info;
	struct sn_irq_info *sn_irq_info;
	int status;

	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
	if (!pcidev_info)
		panic("%s: Unable to alloc memory for pcidev_info", __func__);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (!sn_irq_info)
		panic("%s: Unable to alloc memory for sn_irq_info", __func__);

	/* Call to retrieve pci device information needed by kernel. */
	status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
		(u64) dev->bus->number,
		dev->devfn,
		(u64) __pa(pcidev_info),
		(u64) __pa(sn_irq_info));

	BUG_ON(status); /* Cannot get platform pci device information */

	/* Copy over PIO Mapped Addresses */
	for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
		if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
			pci_addrs[idx] = -1;
			continue;
		}

		start = dev->resource[idx].start;
		end = dev->resource[idx].end;
		size = end - start;
		if (size == 0) {
			pci_addrs[idx] = -1;
			continue;
		}
		pci_addrs[idx] = start;
		count++;
		/* Rebase the resource onto the PROM's PIO mapping in
		 * uncached space. */
		addr = pcidev_info->pdi_pio_mapped_addr[idx];
		addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
		dev->resource[idx].start = addr;
		dev->resource[idx].end = addr + size;

		/*
		 * if it's already in the device structure, remove it before
		 * inserting
		 */
		if (dev->resource[idx].parent && dev->resource[idx].parent->child)
			release_resource(&dev->resource[idx]);

		if (dev->resource[idx].flags & IORESOURCE_IO)
			insert_resource(&ioport_resource, &dev->resource[idx]);
		else
			insert_resource(&iomem_resource, &dev->resource[idx]);
		/*
		 * If ROM, set the actual ROM image size, and mark as
		 * shadowed in PROM.
		 */
		if (idx == PCI_ROM_RESOURCE) {
			size_t image_size;
			void __iomem *rom;

			rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
				      size + 1);
			/*
			 * Fix: the original dereferenced the mapping without
			 * checking for ioremap() failure and never called
			 * iounmap(), leaking one mapping per ROM resource.
			 */
			if (rom) {
				image_size = pci_get_rom_size(dev, rom,
							      size + 1);
				dev->resource[PCI_ROM_RESOURCE].end =
					dev->resource[PCI_ROM_RESOURCE].start +
					image_size - 1;
				iounmap(rom);
			}
			dev->resource[PCI_ROM_RESOURCE].flags |=
				IORESOURCE_ROM_BIOS_COPY;
		}
	}
	/* Create a pci_window in the pci_controller struct for
	 * each device resource.
	 */
	if (count > 0)
		sn_pci_window_fixup(dev, count, pci_addrs);

	sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_io_slot_fixup);
/*
 * sn_pci_controller_fixup() - This routine sets up a bus's resources
 *			       consistent with the Linux PCI abstraction layer.
 */
static void __init
sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
	s64 status = 0;
	struct pci_controller *controller;
	struct pcibus_bussoft *prom_bussoft_ptr;
	LIST_HEAD(resources);
	int i;
	/* Ask the PROM for this bus; it writes a physical pointer into
	 * prom_bussoft_ptr. */
	status = sal_get_pcibus_info((u64) segment, (u64) busnum,
				     (u64) ia64_tpa(&prom_bussoft_ptr));
	if (status > 0)
		return;		/*bus # does not exist */
	prom_bussoft_ptr = __va(prom_bussoft_ptr);
	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	BUG_ON(!controller);
	controller->segment = segment;
	/*
	 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
	 * (platform_data will be overwritten later in sn_common_bus_fixup())
	 */
	controller->platform_data = prom_bussoft_ptr;
	sn_legacy_pci_window_fixup(controller,
				prom_bussoft_ptr->bs_legacy_io,
				prom_bussoft_ptr->bs_legacy_mem);
	for (i = 0; i < controller->windows; i++)
		pci_add_resource_offset(&resources,
					&controller->window[i].resource,
					controller->window[i].offset);
	bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
				&resources);
 	if (bus == NULL)
 		goto error_return; /* error, or bus already scanned */
	bus->sysdata = controller;
	return;
error_return:
	/* Free the controller; its windows are intentionally kept simple. */
	kfree(controller);
	return;
}
/*
 * sn_bus_fixup - per-bus fixup: initialize the root-bus bridge data saved
 * by sn_pci_controller_fixup(), then fix up every device on the bus.
 */
void
sn_bus_fixup(struct pci_bus *bus)
{
	struct pci_dev *pci_dev = NULL;
	struct pcibus_bussoft *prom_bussoft_ptr;
	if (!bus->parent) {  /* If root bus */
		prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
		if (prom_bussoft_ptr == NULL) {
			printk(KERN_ERR
			       "sn_bus_fixup: 0x%04x:0x%02x Unable to "
			       "obtain prom_bussoft_ptr\n",
			       pci_domain_nr(bus), bus->number);
			return;
		}
		sn_common_bus_fixup(bus, prom_bussoft_ptr);
	}
	list_for_each_entry(pci_dev, &bus->devices, bus_list) {
		sn_io_slot_fixup(pci_dev);
	}
}
/*
 * sn_io_init - PROM does not have ACPI support to define nodes or root buses,
 *	      so we need to do things the hard way, including initiating the
 *	      bus scanning ourselves.
 */
void __init sn_io_init(void)
{
	int i, j;
	sn_fixup_ionodes();
	/* busses are not known yet ... probe every possible
	 * (segment, bus) pair up to the limits the PROM reported. */
	for (i = 0; i <= max_segment_number; i++)
		for (j = 0; j <= max_pcibus_number; j++)
			sn_pci_controller_fixup(i, j, NULL);
}
| gpl-2.0 |
IllusionRom-deprecated/android_kernel_samsung_aries | drivers/pci/pcie/aer/aerdrv_acpi.c | 8199 | 2939 | /*
* Access ACPI _OSC method
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/delay.h>
#include <acpi/apei.h>
#include "aerdrv.h"
#ifdef CONFIG_ACPI_APEI
/*
 * Return non-zero when the HEST AER entry @p refers to PCI device @pci.
 * Only devices in PCI domain 0 can ever match.
 */
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
				 struct pci_dev *pci)
{
	if (pci_domain_nr(pci->bus) != 0)
		return 0;
	if (p->bus != pci->bus->number)
		return 0;
	if (p->device != PCI_SLOT(pci->devfn))
		return 0;
	return p->function == PCI_FUNC(pci->devfn);
}
/* Parse context: the device being queried and the result flag. */
struct aer_hest_parse_info {
	struct pci_dev *pci_dev;
	int firmware_first;
};
/*
 * HEST table walker callback: decide whether @info->pci_dev is handled
 * "firmware first" by APEI according to this HEST entry.
 */
static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
	struct aer_hest_parse_info *info = data;
	struct acpi_hest_aer_common *p;
	u8 pcie_type = 0;
	u8 bridge = 0;
	int ff = 0;
	switch (hest_hdr->type) {
	case ACPI_HEST_TYPE_AER_ROOT_PORT:
		pcie_type = PCI_EXP_TYPE_ROOT_PORT;
		break;
	case ACPI_HEST_TYPE_AER_ENDPOINT:
		pcie_type = PCI_EXP_TYPE_ENDPOINT;
		break;
	case ACPI_HEST_TYPE_AER_BRIDGE:
		if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
			bridge = 1;
		break;
	default:
		/* not an AER entry; keep walking */
		return 0;
	}
	/* The AER payload immediately follows the HEST header. */
	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
	if (p->flags & ACPI_HEST_GLOBAL) {
		/* Global entries match by PCIe device type (or bridge class). */
		if ((info->pci_dev->is_pcie &&
		     info->pci_dev->pcie_type == pcie_type) || bridge)
			ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
	} else
		/* Non-global entries match one specific bus/dev/fn. */
		if (hest_match_pci(p, info->pci_dev))
			ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
	info->firmware_first = ff;
	return 0;
}
/* Walk the HEST once for @pci_dev and cache the firmware-first answer
 * in the pci_dev itself. */
static void aer_set_firmware_first(struct pci_dev *pci_dev)
{
	int rc;
	struct aer_hest_parse_info info = {
		.pci_dev	= pci_dev,
		.firmware_first	= 0,
	};
	rc = apei_hest_parse(aer_hest_parse, &info);
	if (rc)
		/* HEST unavailable or walk failed: assume OS-controlled. */
		pci_dev->__aer_firmware_first = 0;
	else
		pci_dev->__aer_firmware_first = info.firmware_first;
	pci_dev->__aer_firmware_first_valid = 1;
}
/* Lazily computed, cached per-device firmware-first flag. */
int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
	if (!dev->__aer_firmware_first_valid)
		aer_set_firmware_first(dev);
	return dev->__aer_firmware_first;
}
/* True when ANY HEST AER entry requests firmware-first handling. */
static bool aer_firmware_first;
/* HEST walker: latch aer_firmware_first if any AER entry sets the flag. */
static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
{
	struct acpi_hest_aer_common *p;
	if (aer_firmware_first)
		return 0;	/* already decided; skip further parsing */
	switch (hest_hdr->type) {
	case ACPI_HEST_TYPE_AER_ROOT_PORT:
	case ACPI_HEST_TYPE_AER_ENDPOINT:
	case ACPI_HEST_TYPE_AER_BRIDGE:
		p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
		aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
		/* fall through */
	default:
		return 0;
	}
}
/**
 * aer_acpi_firmware_first - Check if APEI should control AER.
 *
 * Walks the HEST at most once; the result is cached in aer_firmware_first.
 */
bool aer_acpi_firmware_first(void)
{
	static bool parsed = false;
	if (!parsed) {
		apei_hest_parse(aer_hest_parse_aff, NULL);
		parsed = true;
	}
	return aer_firmware_first;
}
#endif
| gpl-2.0 |
jsr-d10/android_kernel_jsr_msm8226 | net/ceph/ceph_strings.c | 8455 | 2557 | /*
* Ceph string constants
*/
#include <linux/module.h>
#include <linux/ceph/types.h>
/* Map a CEPH_ENTITY_TYPE_* constant to its short human-readable name. */
const char *ceph_entity_type_name(int type)
{
	if (type == CEPH_ENTITY_TYPE_MDS)
		return "mds";
	if (type == CEPH_ENTITY_TYPE_OSD)
		return "osd";
	if (type == CEPH_ENTITY_TYPE_MON)
		return "mon";
	if (type == CEPH_ENTITY_TYPE_CLIENT)
		return "client";
	if (type == CEPH_ENTITY_TYPE_AUTH)
		return "auth";
	return "unknown";
}
/* Map a CEPH_OSD_OP_* opcode to its wire-protocol name; "???" if unknown. */
const char *ceph_osd_op_name(int op)
{
	switch (op) {
	case CEPH_OSD_OP_READ: return "read";
	case CEPH_OSD_OP_STAT: return "stat";
	case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
	case CEPH_OSD_OP_WRITE: return "write";
	case CEPH_OSD_OP_DELETE: return "delete";
	case CEPH_OSD_OP_TRUNCATE: return "truncate";
	case CEPH_OSD_OP_ZERO: return "zero";
	case CEPH_OSD_OP_WRITEFULL: return "writefull";
	case CEPH_OSD_OP_ROLLBACK: return "rollback";
	case CEPH_OSD_OP_APPEND: return "append";
	case CEPH_OSD_OP_STARTSYNC: return "startsync";
	case CEPH_OSD_OP_SETTRUNC: return "settrunc";
	case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
	case CEPH_OSD_OP_TMAPUP: return "tmapup";
	case CEPH_OSD_OP_TMAPGET: return "tmapget";
	case CEPH_OSD_OP_TMAPPUT: return "tmapput";
	case CEPH_OSD_OP_GETXATTR: return "getxattr";
	case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
	case CEPH_OSD_OP_SETXATTR: return "setxattr";
	case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
	case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
	case CEPH_OSD_OP_RMXATTR: return "rmxattr";
	case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
	case CEPH_OSD_OP_PULL: return "pull";
	case CEPH_OSD_OP_PUSH: return "push";
	case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
	case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
	case CEPH_OSD_OP_SCRUB: return "scrub";
	case CEPH_OSD_OP_WRLOCK: return "wrlock";
	case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
	case CEPH_OSD_OP_RDLOCK: return "rdlock";
	case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
	case CEPH_OSD_OP_UPLOCK: return "uplock";
	case CEPH_OSD_OP_DNLOCK: return "dnlock";
	case CEPH_OSD_OP_CALL: return "call";
	case CEPH_OSD_OP_PGLS: return "pgls";
	}
	return "???";
}
/* Map a POOL_OP_* opcode to its human-readable name; "???" if unknown. */
const char *ceph_pool_op_name(int op)
{
	if (op == POOL_OP_CREATE)
		return "create";
	if (op == POOL_OP_DELETE)
		return "delete";
	if (op == POOL_OP_AUID_CHANGE)
		return "auid change";
	if (op == POOL_OP_CREATE_SNAP)
		return "create snap";
	if (op == POOL_OP_DELETE_SNAP)
		return "delete snap";
	if (op == POOL_OP_CREATE_UNMANAGED_SNAP)
		return "create unmanaged snap";
	if (op == POOL_OP_DELETE_UNMANAGED_SNAP)
		return "delete unmanaged snap";
	return "???";
}
| gpl-2.0 |
wwbhl/android_kernel_samsung_piranha | drivers/char/tpm/tpm_bios.c | 9223 | 11767 | /*
* Copyright (C) 2005 IBM Corporation
*
* Authors:
* Seiji Munetoh <munetoh@jp.ibm.com>
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Access to the eventlog extended by the TCG BIOS of PC platform
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <acpi/acpi.h>
#include "tpm.h"
#define TCG_EVENT_NAME_LEN_MAX	255
#define MAX_TEXT_EVENT		1000	/* Max event string length */
#define ACPI_TCPA_SIG		"TCPA"	/* 0x41504354 /'TCPA' */
/* TCPA spec distinguishes client and server platforms; the event-log
 * header in the ACPI table differs between the two (see acpi_tcpa). */
enum bios_platform_class {
	BIOS_CLIENT = 0x00,
	BIOS_SERVER = 0x01,
};
/* In-kernel copy of the BIOS event log: [bios_event_log, bios_event_log_end). */
struct tpm_bios_log {
	void *bios_event_log;
	void *bios_event_log_end;
};
/* On-disk layout of the ACPI TCPA table: standard header plus a
 * platform-class-specific descriptor of the firmware event log. */
struct acpi_tcpa {
	struct acpi_table_header hdr;
	u16 platform_class;
	union {
		struct client_hdr {
			u32 log_max_len __attribute__ ((packed));
			u64 log_start_addr __attribute__ ((packed));
		} client;
		struct server_hdr {
			u16 reserved;
			u64 log_max_len __attribute__ ((packed));
			u64 log_start_addr __attribute__ ((packed));
		} server;
	};
};
/* One measurement record in the TCG BIOS event log (variable length:
 * event_size bytes of payload follow the fixed header). */
struct tcpa_event {
	u32 pcr_index;
	u32 event_type;
	u8 pcr_value[20];	/* SHA1 */
	u32 event_size;
	u8 event_data[0];
};
/* TCG event types; values index tcpa_event_type_strings[] below, so the
 * two lists must stay in the same order. */
enum tcpa_event_types {
	PREBOOT = 0,
	POST_CODE,
	UNUSED,
	NO_ACTION,
	SEPARATOR,
	ACTION,
	EVENT_TAG,
	SCRTM_CONTENTS,
	SCRTM_VERSION,
	CPU_MICROCODE,
	PLATFORM_CONFIG_FLAGS,
	TABLE_OF_DEVICES,
	COMPACT_HASH,
	IPL,
	IPL_PARTITION_DATA,
	NONHOST_CODE,
	NONHOST_CONFIG,
	NONHOST_INFO,
};
static const char* tcpa_event_type_strings[] = {
	"PREBOOT",
	"POST CODE",
	"",
	"NO ACTION",
	"SEPARATOR",
	"ACTION",
	"EVENT TAG",
	"S-CRTM Contents",
	"S-CRTM Version",
	"CPU Microcode",
	"Platform Config Flags",
	"Table of Devices",
	"Compact Hash",
	"IPL",
	"IPL Partition Data",
	"Non-Host Code",
	"Non-Host Config",
	"Non-Host Info"
};
/* Payload of an EVENT_TAG record: a PC-specific sub-event. */
struct tcpa_pc_event {
	u32 event_id;
	u32 event_size;
	u8 event_data[0];
};
/* PC-specific sub-event ids; values index tcpa_pc_event_id_strings[],
 * so the two lists must stay in the same order (note the gap at 9). */
enum tcpa_pc_event_ids {
	SMBIOS = 1,
	BIS_CERT,
	POST_BIOS_ROM,
	ESCD,
	CMOS,
	NVRAM,
	OPTION_ROM_EXEC,
	OPTION_ROM_CONFIG,
	OPTION_ROM_MICROCODE = 10,
	S_CRTM_VERSION,
	S_CRTM_CONTENTS,
	POST_CONTENTS,
	HOST_TABLE_OF_DEVICES,
};
static const char* tcpa_pc_event_id_strings[] = {
	"",
	"SMBIOS",
	"BIS Certificate",
	"POST BIOS ",
	"ESCD ",
	"CMOS",
	"NVRAM",
	"Option ROM",
	"Option ROM config",
	"",
	"Option ROM microcode ",
	"S-CRTM Version",
	"S-CRTM Contents ",
	"POST Contents ",
	"Table of Devices",
};
/* returns pointer to start of pos. entry of tcg log */
static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
	loff_t i;
	struct tpm_bios_log *log = m->private;
	void *addr = log->bios_event_log;
	void *limit = log->bios_event_log_end;
	struct tcpa_event *event;
	/* read over *pos measurements */
	for (i = 0; i < *pos; i++) {
		event = addr;
		if ((addr + sizeof(struct tcpa_event)) < limit) {
			/* an all-zero header terminates the log early */
			if (event->event_type == 0 && event->event_size == 0)
				return NULL;
			addr += sizeof(struct tcpa_event) + event->event_size;
		}
	}
	/* now check if current entry is valid */
	if ((addr + sizeof(struct tcpa_event)) >= limit)
		return NULL;
	event = addr;
	/* reject the terminator and entries that overrun the buffer */
	if ((event->event_type == 0 && event->event_size == 0) ||
	    ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
		return NULL;
	return addr;
}
/*
 * seq_file ->next: advance past the current event record and validate the
 * next one.  Returns NULL at the log terminator (all-zero header) or when
 * the next record would overrun the log buffer.
 */
static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
					loff_t *pos)
{
	struct tcpa_event *event = v;
	struct tpm_bios_log *log = m->private;
	void *limit = log->bios_event_log_end;

	v += sizeof(struct tcpa_event) + event->event_size;

	/* now check if current entry is valid */
	if ((v + sizeof(struct tcpa_event)) >= limit)
		return NULL;

	event = v;

	/*
	 * Reject the terminator entry and entries whose declared size runs
	 * past the end of the buffer.  (The original code tested the
	 * terminator condition twice in a row; the duplicate has been
	 * folded into this single check.)
	 */
	if ((event->event_type == 0 && event->event_size == 0) ||
	    ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
		return NULL;

	(*pos)++;
	return v;
}
/* seq_file ->stop: nothing to release; iteration state lives in *pos. */
static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
{
}
/*
 * Format a human-readable "[name data]" string for @event into @dest
 * (at most MAX_TEXT_EVENT bytes).  Returns the snprintf() result.
 */
static int get_event_name(char *dest, struct tcpa_event *event,
			unsigned char * event_entry)
{
	const char *name = "";
	/* 41 so there is room for 40 data and 1 nul */
	char data[41] = "";
	int i, n_len = 0, d_len = 0;
	struct tcpa_pc_event *pc_event;
	switch(event->event_type) {
	case PREBOOT:
	case POST_CODE:
	case UNUSED:
	case NO_ACTION:
	case SCRTM_CONTENTS:
	case SCRTM_VERSION:
	case CPU_MICROCODE:
	case PLATFORM_CONFIG_FLAGS:
	case TABLE_OF_DEVICES:
	case COMPACT_HASH:
	case IPL:
	case IPL_PARTITION_DATA:
	case NONHOST_CODE:
	case NONHOST_CONFIG:
	case NONHOST_INFO:
		/* simple events: name is fixed by the event type */
		name = tcpa_event_type_strings[event->event_type];
		n_len = strlen(name);
		break;
	case SEPARATOR:
	case ACTION:
		/* payload is the name itself (not NUL-terminated) */
		if (MAX_TEXT_EVENT > event->event_size) {
			name = event_entry;
			n_len = event->event_size;
		}
		break;
	case EVENT_TAG:
		pc_event = (struct tcpa_pc_event *)event_entry;
		/* ToDo Row data -> Base64 */
		switch (pc_event->event_id) {
		case SMBIOS:
		case BIS_CERT:
		case CMOS:
		case NVRAM:
		case OPTION_ROM_EXEC:
		case OPTION_ROM_CONFIG:
		case S_CRTM_VERSION:
			name = tcpa_pc_event_id_strings[pc_event->event_id];
			n_len = strlen(name);
			break;
		/* hash data */
		case POST_BIOS_ROM:
		case ESCD:
		case OPTION_ROM_MICROCODE:
		case S_CRTM_CONTENTS:
		case POST_CONTENTS:
			name = tcpa_pc_event_id_strings[pc_event->event_id];
			n_len = strlen(name);
			/* append the 20-byte digest as 40 hex chars */
			for (i = 0; i < 20; i++)
				d_len += sprintf(&data[2*i], "%02x",
					pc_event->event_data[i]);
			break;
		default:
			break;
		}
		/* fall through */
	default:
		break;
	}
	return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
			n_len, name, d_len, data);
}
/* seq_file ->show (binary variant): emit the raw event record —
 * fixed header plus event_size payload bytes — unmodified. */
static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
	struct tcpa_event *event = v;
	char *data = v;
	int total = sizeof(struct tcpa_event) + event->event_size;
	int i;

	for (i = 0; i < total; i++)
		seq_putc(m, data[i]);

	return 0;
}
/* ->release: free the private log copy allocated in open(), then let
 * seq_release() tear down the seq_file itself. */
static int tpm_bios_measurements_release(struct inode *inode,
					 struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct tpm_bios_log *log = seq->private;
	if (log) {
		kfree(log->bios_event_log);
		kfree(log);
	}
	return seq_release(inode, file);
}
static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
{
int len = 0;
int i;
char *eventname;
struct tcpa_event *event = v;
unsigned char *event_entry =
(unsigned char *) (v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
if (!eventname) {
printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
__func__);
return -EFAULT;
}
seq_printf(m, "%2d ", event->pcr_index);
/* 2nd: SHA1 */
for (i = 0; i < 20; i++)
seq_printf(m, "%02x", event->pcr_value[i]);
/* 3rd: event type identifier */
seq_printf(m, " %02x", event->event_type);
len += get_event_name(eventname, event, event_entry);
/* 4th: eventname <= max + \'0' delimiter */
seq_printf(m, " %s\n", eventname);
kfree(eventname);
return 0;
}
static const struct seq_operations tpm_ascii_b_measurments_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
/* Iterator ops for the raw (binary) measurement log; shares the same
 * start/next/stop walkers as the ascii variant, only ->show differs. */
static const struct seq_operations tpm_binary_b_measurments_seqops = {
	.start = tpm_bios_measurements_start,
	.next = tpm_bios_measurements_next,
	.stop = tpm_bios_measurements_stop,
	.show = tpm_binary_bios_measurements_show,
};
/* read binary bios log */
static int read_log(struct tpm_bios_log *log)
{
struct acpi_tcpa *buff;
acpi_status status;
struct acpi_table_header *virt;
u64 len, start;
if (log->bios_event_log != NULL) {
printk(KERN_ERR
"%s: ERROR - Eventlog already initialized\n",
__func__);
return -EFAULT;
}
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
status = acpi_get_table(ACPI_SIG_TCPA, 1,
(struct acpi_table_header **)&buff);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
__func__);
return -EIO;
}
switch(buff->platform_class) {
case BIOS_SERVER:
len = buff->server.log_max_len;
start = buff->server.log_start_addr;
break;
case BIOS_CLIENT:
default:
len = buff->client.log_max_len;
start = buff->client.log_start_addr;
break;
}
if (!len) {
printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
return -EIO;
}
/* malloc EventLog space */
log->bios_event_log = kmalloc(len, GFP_KERNEL);
if (!log->bios_event_log) {
printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
__func__);
return -ENOMEM;
}
log->bios_event_log_end = log->bios_event_log + len;
virt = acpi_os_map_memory(start, len);
memcpy(log->bios_event_log, virt, len);
acpi_os_unmap_memory(virt, len);
return 0;
}
static int tpm_ascii_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
struct tpm_bios_log *log;
struct seq_file *seq;
log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
if (!log)
return -ENOMEM;
if ((err = read_log(log)))
goto out_free;
/* now register seq file */
err = seq_open(file, &tpm_ascii_b_measurments_seqops);
if (!err) {
seq = file->private_data;
seq->private = log;
} else {
goto out_free;
}
out:
return err;
out_free:
kfree(log->bios_event_log);
kfree(log);
goto out;
}
/* securityfs file ops for ascii_bios_measurements; seq_file does the
 * read/seek heavy lifting, release frees the per-open log. */
static const struct file_operations tpm_ascii_bios_measurements_ops = {
	.open = tpm_ascii_bios_measurements_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tpm_bios_measurements_release,
};
static int tpm_binary_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
struct tpm_bios_log *log;
struct seq_file *seq;
log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
if (!log)
return -ENOMEM;
if ((err = read_log(log)))
goto out_free;
/* now register seq file */
err = seq_open(file, &tpm_binary_b_measurments_seqops);
if (!err) {
seq = file->private_data;
seq->private = log;
} else {
goto out_free;
}
out:
return err;
out_free:
kfree(log->bios_event_log);
kfree(log);
goto out;
}
/* securityfs file ops for binary_bios_measurements. */
static const struct file_operations tpm_binary_bios_measurements_ops = {
	.open = tpm_binary_bios_measurements_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tpm_bios_measurements_release,
};
/* Returns nonzero when a securityfs_create_* result is unusable: NULL,
 * or an error pointer other than -ENODEV (-ENODEV means securityfs is
 * not compiled in and is tolerated by the callers). */
static int is_bad(void *p)
{
	return !p || (IS_ERR(p) && PTR_ERR(p) != -ENODEV);
}
/* Create the securityfs directory <name>/ with the binary and ascii
 * measurement files in it.
 *
 * Returns a kmalloc'ed 3-element dentry array {ascii, binary, dir} for
 * later use by tpm_bios_log_teardown(), or NULL on any failure (all
 * partially-created entries are removed on the unwind path).
 * NOTE(review): ownership of the returned array passes to the caller;
 * presumably the caller kfree()s it after teardown — confirm. */
struct dentry **tpm_bios_log_setup(char *name)
{
	struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;

	tpm_dir = securityfs_create_dir(name, NULL);
	if (is_bad(tpm_dir))
		goto out;

	bin_file =
	    securityfs_create_file("binary_bios_measurements",
				   S_IRUSR | S_IRGRP, tpm_dir, NULL,
				   &tpm_binary_bios_measurements_ops);
	if (is_bad(bin_file))
		goto out_tpm;

	ascii_file =
	    securityfs_create_file("ascii_bios_measurements",
				   S_IRUSR | S_IRGRP, tpm_dir, NULL,
				   &tpm_ascii_bios_measurements_ops);
	if (is_bad(ascii_file))
		goto out_bin;

	ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
	if (!ret)
		goto out_ascii;

	ret[0] = ascii_file;
	ret[1] = bin_file;
	ret[2] = tpm_dir;
	return ret;

	/* unwind ladder: remove whatever was created, newest first */
out_ascii:
	securityfs_remove(ascii_file);
out_bin:
	securityfs_remove(bin_file);
out_tpm:
	securityfs_remove(tpm_dir);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
/* Remove the securityfs entries created by tpm_bios_log_setup().
 *
 * Fix vs. original: tpm_bios_log_setup() can return NULL, so guard
 * against a NULL list instead of dereferencing it. */
void tpm_bios_log_teardown(struct dentry **lst)
{
	int i;

	if (!lst)
		return;
	for (i = 0; i < 3; i++)
		securityfs_remove(lst[i]);
}
EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
MODULE_LICENSE("GPL");
| gpl-2.0 |
linino/linux | arch/m68k/atari/stdma.c | 10503 | 5111 | /*
* linux/arch/m68k/atari/stmda.c
*
* Copyright (C) 1994 Roman Hodek
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/* This file contains some function for controlling the access to the */
/* ST-DMA chip that may be shared between devices. Currently we have: */
/* TT: Floppy and ACSI bus */
/* Falcon: Floppy and SCSI */
/* */
/* The controlling functions set up a wait queue for access to the */
/* ST-DMA chip. Callers to stdma_lock() that cannot granted access are */
/* put onto a queue and waked up later if the owner calls */
/* stdma_release(). Additionally, the caller gives his interrupt */
/* service routine to stdma_lock(). */
/* */
/* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */
/* not the ST-DMA chip itself. So falhd.c needs not to lock the */
/* chip. The interrupt is routed to falhd.c if IDE is configured, the */
/* model is a Falcon and the interrupt was caused by the HD controller */
/* (can be determined by looking at its status register). */
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/genhd.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/atari_stdma.h>
#include <asm/atariints.h>
#include <asm/atarihw.h>
#include <asm/io.h>
#include <asm/irq.h>
static int stdma_locked; /* the semaphore */
/* int func to be called */
static irq_handler_t stdma_isr;
static void *stdma_isr_data; /* data passed to isr */
static DECLARE_WAIT_QUEUE_HEAD(stdma_wait); /* wait queue for ST-DMA */
/***************************** Prototypes *****************************/
static irqreturn_t stdma_int (int irq, void *dummy);
/************************* End of Prototypes **************************/
/*
* Function: void stdma_lock( isrfunc isr, void *data )
*
* Purpose: Tries to get a lock on the ST-DMA chip that is used by more
* then one device driver. Waits on stdma_wait until lock is free.
* stdma_lock() may not be called from an interrupt! You have to
* get the lock in your main routine and release it when your
* request is finished.
*
* Inputs: A interrupt function that is called until the lock is
* released.
*
* Returns: nothing
*
*/
void stdma_lock(irq_handler_t handler, void *data)
{
	unsigned long flags;

	local_irq_save(flags);		/* protect lock */

	/* Since the DMA is used for file system purposes, we
	 have to sleep uninterruptible (there may be locked
	 buffers) */
	/* NOTE(review): wait_event() can schedule with IRQs disabled here;
	 * the context switch re-enables them for the next task — confirm
	 * this is intentional on m68k. */
	wait_event(stdma_wait, !stdma_locked);

	/* Lock is ours: record the ISR that stdma_int() should dispatch to
	 * until stdma_release() is called. */
	stdma_locked   = 1;
	stdma_isr      = handler;
	stdma_isr_data = data;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(stdma_lock);
/*
* Function: void stdma_release( void )
*
* Purpose: Releases the lock on the ST-DMA chip.
*
* Inputs: none
*
* Returns: nothing
*
*/
void stdma_release(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Drop ownership and detach the client ISR before waking waiters,
	 * all under IRQ-off so stdma_int() never sees a half-cleared state. */
	stdma_locked   = 0;
	stdma_isr      = NULL;
	stdma_isr_data = NULL;
	wake_up(&stdma_wait);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(stdma_release);
/*
* Function: int stdma_others_waiting( void )
*
* Purpose: Check if someone waits for the ST-DMA lock.
*
* Inputs: none
*
* Returns: 0 if no one is waiting, != 0 otherwise
*
*/
int stdma_others_waiting(void)
{
	/* Nonzero iff at least one task is queued on the lock's wait queue. */
	return waitqueue_active(&stdma_wait);
}
EXPORT_SYMBOL(stdma_others_waiting);
/*
* Function: int stdma_islocked( void )
*
* Purpose: Check if the ST-DMA is currently locked.
* Note: Returned status is only valid if ints are disabled while calling and
* as long as they remain disabled.
* If called with ints enabled, status can change only from locked to
* unlocked, because ints may not lock the ST-DMA.
*
* Inputs: none
*
* Returns: != 0 if locked, 0 otherwise
*
*/
int stdma_islocked(void)
{
	/* Snapshot only; see the header comment about IRQ-disabled callers. */
	return stdma_locked;
}
EXPORT_SYMBOL(stdma_islocked);
/*
* Function: void stdma_init( void )
*
* Purpose: Initialize the ST-DMA chip access controlling.
* It sets up the interrupt and its service routine. The int is registered
* as slow int, client devices have to live with that (no problem
* currently).
*
* Inputs: none
*
* Return: nothing
*
*/
void __init stdma_init(void)
{
	stdma_isr = NULL;
	/* stdma_int is also passed as the dev_id cookie: IRQF_SHARED
	 * requires a non-NULL dev_id, and the handler ignores it anyway. */
	if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
			"ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
		pr_err("Couldn't register ST-DMA interrupt\n");
}
/*
* Function: void stdma_int()
*
* Purpose: The interrupt routine for the ST-DMA. It calls the isr
* registered by stdma_lock().
*
*/
static irqreturn_t stdma_int(int irq, void *dummy)
{
	/* Forward to the current lock holder's ISR, if any; stdma_isr is
	 * only changed with IRQs disabled (see stdma_lock/stdma_release). */
	if (stdma_isr)
		(*stdma_isr)(irq, stdma_isr_data);
	return IRQ_HANDLED;
}
| gpl-2.0 |
SlimRoms/kernel_motorola_msm8226 | arch/m68k/atari/stdma.c | 10503 | 5111 | /*
* linux/arch/m68k/atari/stmda.c
*
* Copyright (C) 1994 Roman Hodek
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/* This file contains some function for controlling the access to the */
/* ST-DMA chip that may be shared between devices. Currently we have: */
/* TT: Floppy and ACSI bus */
/* Falcon: Floppy and SCSI */
/* */
/* The controlling functions set up a wait queue for access to the */
/* ST-DMA chip. Callers to stdma_lock() that cannot granted access are */
/* put onto a queue and waked up later if the owner calls */
/* stdma_release(). Additionally, the caller gives his interrupt */
/* service routine to stdma_lock(). */
/* */
/* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */
/* not the ST-DMA chip itself. So falhd.c needs not to lock the */
/* chip. The interrupt is routed to falhd.c if IDE is configured, the */
/* model is a Falcon and the interrupt was caused by the HD controller */
/* (can be determined by looking at its status register). */
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/genhd.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/atari_stdma.h>
#include <asm/atariints.h>
#include <asm/atarihw.h>
#include <asm/io.h>
#include <asm/irq.h>
static int stdma_locked; /* the semaphore */
/* int func to be called */
static irq_handler_t stdma_isr;
static void *stdma_isr_data; /* data passed to isr */
static DECLARE_WAIT_QUEUE_HEAD(stdma_wait); /* wait queue for ST-DMA */
/***************************** Prototypes *****************************/
static irqreturn_t stdma_int (int irq, void *dummy);
/************************* End of Prototypes **************************/
/*
* Function: void stdma_lock( isrfunc isr, void *data )
*
* Purpose: Tries to get a lock on the ST-DMA chip that is used by more
* then one device driver. Waits on stdma_wait until lock is free.
* stdma_lock() may not be called from an interrupt! You have to
* get the lock in your main routine and release it when your
* request is finished.
*
* Inputs: A interrupt function that is called until the lock is
* released.
*
* Returns: nothing
*
*/
void stdma_lock(irq_handler_t handler, void *data)
{
	unsigned long flags;

	local_irq_save(flags);		/* protect lock */

	/* Since the DMA is used for file system purposes, we
	 have to sleep uninterruptible (there may be locked
	 buffers) */
	/* NOTE(review): wait_event() can schedule with IRQs disabled here;
	 * the context switch re-enables them for the next task — confirm
	 * this is intentional on m68k. */
	wait_event(stdma_wait, !stdma_locked);

	/* Lock is ours: record the ISR that stdma_int() should dispatch to
	 * until stdma_release() is called. */
	stdma_locked   = 1;
	stdma_isr      = handler;
	stdma_isr_data = data;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(stdma_lock);
/*
* Function: void stdma_release( void )
*
* Purpose: Releases the lock on the ST-DMA chip.
*
* Inputs: none
*
* Returns: nothing
*
*/
void stdma_release(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Drop ownership and detach the client ISR before waking waiters,
	 * all under IRQ-off so stdma_int() never sees a half-cleared state. */
	stdma_locked   = 0;
	stdma_isr      = NULL;
	stdma_isr_data = NULL;
	wake_up(&stdma_wait);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(stdma_release);
/*
* Function: int stdma_others_waiting( void )
*
* Purpose: Check if someone waits for the ST-DMA lock.
*
* Inputs: none
*
* Returns: 0 if no one is waiting, != 0 otherwise
*
*/
int stdma_others_waiting(void)
{
	/* Nonzero iff at least one task is queued on the lock's wait queue. */
	return waitqueue_active(&stdma_wait);
}
EXPORT_SYMBOL(stdma_others_waiting);
/*
* Function: int stdma_islocked( void )
*
* Purpose: Check if the ST-DMA is currently locked.
* Note: Returned status is only valid if ints are disabled while calling and
* as long as they remain disabled.
* If called with ints enabled, status can change only from locked to
* unlocked, because ints may not lock the ST-DMA.
*
* Inputs: none
*
* Returns: != 0 if locked, 0 otherwise
*
*/
int stdma_islocked(void)
{
	/* Snapshot only; see the header comment about IRQ-disabled callers. */
	return stdma_locked;
}
EXPORT_SYMBOL(stdma_islocked);
/*
* Function: void stdma_init( void )
*
* Purpose: Initialize the ST-DMA chip access controlling.
* It sets up the interrupt and its service routine. The int is registered
* as slow int, client devices have to live with that (no problem
* currently).
*
* Inputs: none
*
* Return: nothing
*
*/
void __init stdma_init(void)
{
	stdma_isr = NULL;
	/* stdma_int is also passed as the dev_id cookie: IRQF_SHARED
	 * requires a non-NULL dev_id, and the handler ignores it anyway. */
	if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
			"ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
		pr_err("Couldn't register ST-DMA interrupt\n");
}
/*
* Function: void stdma_int()
*
* Purpose: The interrupt routine for the ST-DMA. It calls the isr
* registered by stdma_lock().
*
*/
static irqreturn_t stdma_int(int irq, void *dummy)
{
	/* Forward to the current lock holder's ISR, if any; stdma_isr is
	 * only changed with IRQs disabled (see stdma_lock/stdma_release). */
	if (stdma_isr)
		(*stdma_isr)(irq, stdma_isr_data);
	return IRQ_HANDLED;
}
| gpl-2.0 |
ptmr3/Skyrocket_JB_Kernel | arch/frv/kernel/futex.c | 12039 | 6709 | /* futex.c: futex operations
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/futex.h>
#include <asm/errno.h>
/*
* the various futex operations; MMU fault checking is ignored under no-MMU
* conditions
*/
/* Atomically store oparg to the user word *uaddr, returning the previous
 * value through *_oldval. Returns 0 on success, -EFAULT on a userspace
 * fault (the __ex_table entries redirect a faulting LD.P/CST.P to label 4).
 * The LD.P + ORCR + conditional CST.P sequence is FRV's ll/sc-style
 * primitive: if the store does not land, ICC3.Z stays set and we retry
 * from label 0. */
static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
{
	int oldval, ret;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
	    "2: cst.p %3,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    " setlos 0,%2 \n"
	    "3: \n"
	    ".subsection 2 \n"
	    "4: setlos %5,%2 \n"
	    " bra 3b \n"
	    ".previous \n"
	    ".section __ex_table,\"a\" \n"
	    " .balign 8 \n"
	    " .long 1b,4b \n"
	    " .long 2b,4b \n"
	    ".previous"
	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
	    : "3"(oparg), "i"(-EFAULT)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	*_oldval = oldval;
	return ret;
}
/* Atomically *uaddr += oparg; old value via *_oldval. Same retry and
 * fault-fixup structure as atomic_futex_op_xchg_set(), with an ADD
 * inserted between the load and the conditional store. */
static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
{
	int oldval, ret;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
	    " add %1,%3,%3 \n"
	    "2: cst.p %3,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    " setlos 0,%2 \n"
	    "3: \n"
	    ".subsection 2 \n"
	    "4: setlos %5,%2 \n"
	    " bra 3b \n"
	    ".previous \n"
	    ".section __ex_table,\"a\" \n"
	    " .balign 8 \n"
	    " .long 1b,4b \n"
	    " .long 2b,4b \n"
	    ".previous"
	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
	    : "3"(oparg), "i"(-EFAULT)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	*_oldval = oldval;
	return ret;
}
/* Atomically *uaddr |= oparg; old value via *_oldval. Same structure as
 * atomic_futex_op_xchg_set() with an OR between load and store. */
static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
{
	int oldval, ret;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
	    " or %1,%3,%3 \n"
	    "2: cst.p %3,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    " setlos 0,%2 \n"
	    "3: \n"
	    ".subsection 2 \n"
	    "4: setlos %5,%2 \n"
	    " bra 3b \n"
	    ".previous \n"
	    ".section __ex_table,\"a\" \n"
	    " .balign 8 \n"
	    " .long 1b,4b \n"
	    " .long 2b,4b \n"
	    ".previous"
	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
	    : "3"(oparg), "i"(-EFAULT)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	*_oldval = oldval;
	return ret;
}
/* Atomically *uaddr &= oparg; old value via *_oldval. Same structure as
 * atomic_futex_op_xchg_set() with an AND between load and store.
 * (The FUTEX_OP_ANDN caller passes ~oparg to get and-not semantics.) */
static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
{
	int oldval, ret;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
	    " and %1,%3,%3 \n"
	    "2: cst.p %3,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    " setlos 0,%2 \n"
	    "3: \n"
	    ".subsection 2 \n"
	    "4: setlos %5,%2 \n"
	    " bra 3b \n"
	    ".previous \n"
	    ".section __ex_table,\"a\" \n"
	    " .balign 8 \n"
	    " .long 1b,4b \n"
	    " .long 2b,4b \n"
	    ".previous"
	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
	    : "3"(oparg), "i"(-EFAULT)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	*_oldval = oldval;
	return ret;
}
/* Atomically *uaddr ^= oparg; old value via *_oldval. Same structure as
 * atomic_futex_op_xchg_set() with an XOR between load and store. */
static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
{
	int oldval, ret;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
	    " xor %1,%3,%3 \n"
	    "2: cst.p %3,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    " setlos 0,%2 \n"
	    "3: \n"
	    ".subsection 2 \n"
	    "4: setlos %5,%2 \n"
	    " bra 3b \n"
	    ".previous \n"
	    ".section __ex_table,\"a\" \n"
	    " .balign 8 \n"
	    " .long 1b,4b \n"
	    " .long 2b,4b \n"
	    ".previous"
	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
	    : "3"(oparg), "i"(-EFAULT)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	*_oldval = oldval;
	return ret;
}
/*****************************************************************************/
/*
* do the futex operations
*/
/* Decode a FUTEX_WAKE_OP encoded operation and apply it to the user word,
 * then evaluate the comparison part against the word's old value.
 * Returns the (0/1) comparison result on success or a negative errno.
 * encoded_op layout: [31:28] op, [27:24] cmp, [23:12] oparg, [11:0] cmparg
 * (the shift-left/shift-right pairs sign-extend the 12-bit fields). */
int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	/* OPARG_SHIFT means "use 1 << oparg" instead of oparg itself */
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* Faults inside the atomic helpers must be reported, not handled,
	 * so page faults are disabled around the access. */
	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		/* and-not is implemented as AND with the complement */
		ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	pagefault_enable();

	/* Only evaluate the comparison if the atomic op itself succeeded */
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS; break;
		}
	}

	return ret;
} /* end futex_atomic_op_inuser() */
| gpl-2.0 |
blade-vec-4g/android_kernel_zte_msm8226 | net/netfilter/xt_realm.c | 14087 | 1451 | /* IP tables module for matching the routing realm
*
* (C) 2003 by Sampsa Ranta <sampsa@netsonic.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/route.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/xt_realm.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Routing realm match");
MODULE_ALIAS("ipt_realm");
/* Match callback: compare the packet's routing realm (dst->tclassid),
 * masked by info->mask, against info->id; info->invert flips the result. */
static bool
realm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_realm_info *info = par->matchinfo;
	const struct dst_entry *dst = skb_dst(skb);

	/* NOTE(review): dst is dereferenced unconditionally — assumes every
	 * packet reaching the registered hooks has a dst entry; confirm. */
	return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
}
/* Registration record: family-independent "realm" match, restricted to
 * hooks where routing has already attached a dst entry to the skb. */
static struct xt_match realm_mt_reg __read_mostly = {
	.name = "realm",
	.match = realm_mt,
	.matchsize = sizeof(struct xt_realm_info),
	.hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_FORWARD) |
		 (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN),
	.family = NFPROTO_UNSPEC,
	.me = THIS_MODULE
};
/* Module init: register the match with x_tables. */
static int __init realm_mt_init(void)
{
	return xt_register_match(&realm_mt_reg);
}
/* Module exit: unregister the match. */
static void __exit realm_mt_exit(void)
{
	xt_unregister_match(&realm_mt_reg);
}
module_init(realm_mt_init);
module_exit(realm_mt_exit);
| gpl-2.0 |
GolovanovSrg/au-linux-kernel-spring-2016 | linux/net/netfilter/xt_realm.c | 14087 | 1451 | /* IP tables module for matching the routing realm
*
* (C) 2003 by Sampsa Ranta <sampsa@netsonic.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/route.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/xt_realm.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Routing realm match");
MODULE_ALIAS("ipt_realm");
/* Match callback: compare the packet's routing realm (dst->tclassid),
 * masked by info->mask, against info->id; info->invert flips the result. */
static bool
realm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_realm_info *info = par->matchinfo;
	const struct dst_entry *dst = skb_dst(skb);

	/* NOTE(review): dst is dereferenced unconditionally — assumes every
	 * packet reaching the registered hooks has a dst entry; confirm. */
	return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
}
/* Registration record: family-independent "realm" match, restricted to
 * hooks where routing has already attached a dst entry to the skb. */
static struct xt_match realm_mt_reg __read_mostly = {
	.name = "realm",
	.match = realm_mt,
	.matchsize = sizeof(struct xt_realm_info),
	.hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_FORWARD) |
		 (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN),
	.family = NFPROTO_UNSPEC,
	.me = THIS_MODULE
};
/* Module init: register the match with x_tables. */
static int __init realm_mt_init(void)
{
	return xt_register_match(&realm_mt_reg);
}
/* Module exit: unregister the match. */
static void __exit realm_mt_exit(void)
{
	xt_unregister_match(&realm_mt_reg);
}
module_init(realm_mt_init);
module_exit(realm_mt_exit);
| gpl-2.0 |
pinskia/glibc-ilp32 | iconvdata/cp1252.c | 8 | 1077 | /* Conversion from and to CP1252.
Copyright (C) 1998 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <stdint.h>
/* Get the conversion table. */
#define TABLES <cp1252.h>
#define CHARSET_NAME "CP1252//"
#define HAS_HOLES 1 /* Not all 256 character are defined. */
#include <8bit-gap.c>
| gpl-2.0 |
robthepyro/SolarForce1 | CAN Embedded/RFRx_Board/lufa-LUFA-140928/Projects/LEDNotifier/LEDNotifier.c | 8 | 5116 | /*
LUFA Library
Copyright (C) Dean Camera, 2014.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2014 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
/** \file
*
* Main source file for the LEDNotfier project. This file contains the main tasks of
* the project and is responsible for the initial application hardware configuration.
*/
#include "LEDNotifier.h"
/** LUFA CDC Class driver interface configuration and state information. This structure is
* passed to all CDC Class driver functions, so that multiple instances of the same class
* within a device can be differentiated from one another.
*/
USB_ClassInfo_CDC_Device_t VirtualSerial_CDC_Interface =
{
.Config =
{
.ControlInterfaceNumber = INTERFACE_ID_CDC_CCI,
.DataINEndpoint =
{
.Address = CDC_TX_EPADDR,
.Size = CDC_TXRX_EPSIZE,
.Banks = 1,
},
.DataOUTEndpoint =
{
.Address = CDC_RX_EPADDR,
.Size = CDC_TXRX_EPSIZE,
.Banks = 1,
},
.NotificationEndpoint =
{
.Address = CDC_NOTIFICATION_EPADDR,
.Size = CDC_NOTIFICATION_EPSIZE,
.Banks = 1,
},
},
};
/** Counter for the software PWM. */
static volatile uint8_t SoftPWM_Count;
/** Duty cycle for the first software PWM channel. */
static volatile uint8_t SoftPWM_Channel1_Duty;
/** Duty cycle for the second software PWM channel. */
static volatile uint8_t SoftPWM_Channel2_Duty;
/** Duty cycle for the third software PWM channel. */
static volatile uint8_t SoftPWM_Channel3_Duty;
/** Standard file stream for the CDC interface when set up, so that the virtual CDC COM port can be
* used like any regular character stream in the C APIs.
*/
static FILE USBSerialStream;
/** Interrupt handler for managing the software PWM channels for the LEDs */
/** Interrupt handler for managing the software PWM channels for the LEDs.
 * 5-bit software PWM: the counter wraps at 0b00011111 (31 steps); each LED
 * stays on while the counter is below its channel's duty value. */
ISR(TIMER0_COMPA_vect, ISR_BLOCK)
{
	uint8_t LEDMask = LEDS_ALL_LEDS;

	if (++SoftPWM_Count == 0b00011111)
	  SoftPWM_Count = 0;

	/* Turn off each LED whose duty window has elapsed this period */
	if (SoftPWM_Count >= SoftPWM_Channel1_Duty)
	  LEDMask &= ~LEDS_LED1;

	if (SoftPWM_Count >= SoftPWM_Channel2_Duty)
	  LEDMask &= ~LEDS_LED2;

	if (SoftPWM_Count >= SoftPWM_Channel3_Duty)
	  LEDMask &= ~LEDS_LED3;

	LEDs_SetAllLEDs(LEDMask);
}
/** Main program entry point. This routine contains the overall program flow, including initial
* setup of all components and the main program loop.
*/
/** Main program entry point: configure hardware, then loop forever reading
 * one-byte colour commands from the virtual serial port and updating the
 * software-PWM duty cycles. */
int main(void)
{
	SetupHardware();

	/* Create a regular blocking character stream for the interface so that it can be used with the stdio.h functions */
	CDC_Device_CreateBlockingStream(&VirtualSerial_CDC_Interface, &USBSerialStream);

	GlobalInterruptEnable();

	for (;;)
	{
		/* Read in next LED colour command from the host
		 * (blocking stream — waits until a byte arrives) */
		uint8_t ColourUpdate = fgetc(&USBSerialStream);

		/* Top 3 bits select the LED, bottom 5 control the brightness */
		uint8_t Channel = (ColourUpdate & 0b11100000);
		uint8_t Duty    = (ColourUpdate & 0b00011111);

		/* Several channel bits may be set at once to update LEDs together */
		if (Channel & (1 << 5))
		  SoftPWM_Channel1_Duty = Duty;

		if (Channel & (1 << 6))
		  SoftPWM_Channel2_Duty = Duty;

		if (Channel & (1 << 7))
		  SoftPWM_Channel3_Duty = Duty;

		CDC_Device_USBTask(&VirtualSerial_CDC_Interface);
		USB_USBTask();
	}
}
/** Configures the board hardware and chip peripherals for the demo's functionality. */
void SetupHardware(void)
{
#if (ARCH == ARCH_AVR8)
/* Disable watchdog if enabled by bootloader/fuses */
MCUSR &= ~(1 << WDRF);
wdt_disable();
/* Disable clock division */
clock_prescale_set(clock_div_1);
#endif
/* Hardware Initialization */
LEDs_Init();
USB_Init();
/* Timer Initialization */
OCR0A = 100;
TCCR0A = (1 << WGM01);
TCCR0B = (1 << CS00);
TIMSK0 = (1 << OCIE0A);
}
/** Event handler for the library USB Configuration Changed event. */
/** Event handler for the library USB Configuration Changed event:
 * (re)configure the CDC endpoints for the new configuration. */
void EVENT_USB_Device_ConfigurationChanged(void)
{
	CDC_Device_ConfigureEndpoints(&VirtualSerial_CDC_Interface);
}
/** Event handler for the library USB Control Request reception event. */
/** Event handler for the library USB Control Request reception event:
 * delegate class-specific control requests to the CDC driver. */
void EVENT_USB_Device_ControlRequest(void)
{
	CDC_Device_ProcessControlRequest(&VirtualSerial_CDC_Interface);
}
| gpl-2.0 |
Megatron007/megabyte_bullhead | arch/arm/kernel/smp_twd.c | 8 | 8951 | /*
* linux/arch/arm/kernel/smp_twd.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
/* set up by the platform code */
static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
static int twd_ppi;
/* clockevents ->set_mode: program the per-CPU TWD control register for the
 * requested mode; UNUSED/SHUTDOWN fully disable the timer. */
static void twd_set_mode(enum clock_event_mode mode,
	struct clock_event_device *clk)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
			| TWD_TIMER_CONTROL_PERIODIC;
		/* reload value chosen so the timer fires HZ times per second */
		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
			twd_base + TWD_TIMER_LOAD);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* period set, and timer enabled in 'next_event' hook */
		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		ctrl = 0;
	}

	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
}
/* clockevents ->set_next_event: load the countdown value and enable the
 * timer (oneshot mode left the enable bit clear in twd_set_mode). */
static int twd_set_next_event(unsigned long evt,
			struct clock_event_device *unused)
{
	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);

	ctrl |= TWD_TIMER_CONTROL_ENABLE;

	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);

	return 0;
}
/*
* local_timer_ack: checks for a local timer interrupt.
*
* If a local timer interrupt has occurred, acknowledge and return 1.
* Otherwise, return 0.
*/
static int twd_timer_ack(void)
{
	/* Writing 1 to INTSTAT clears the pending event flag */
	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
		return 1;
	}

	return 0;
}
/* Shut down this CPU's local timer and mask its PPI (CPU hotplug path). */
static void twd_timer_stop(struct clock_event_device *clk)
{
	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
	disable_percpu_irq(clk->irq);
}
#ifdef CONFIG_COMMON_CLK
/*
* Updates clockevent frequency when the cpu frequency changes.
* Called on the cpu that is changing frequency with interrupts disabled.
*/
/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 * (common-clk variant: the new rate is passed in by the notifier)
 */
static void twd_update_frequency(void *new_rate)
{
	twd_timer_rate = *((unsigned long *) new_rate);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
}
/* clk notifier callback: after a rate change, push the new rate to every
 * CPU's local timer (each TWD is per-CPU, hence on_each_cpu). */
static int twd_rate_change(struct notifier_block *nb,
	unsigned long flags, void *data)
{
	struct clk_notifier_data *cnd = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (flags == POST_RATE_CHANGE)
		on_each_cpu(twd_update_frequency,
			  (void *)&cnd->new_rate, 1);

	return NOTIFY_OK;
}
static struct notifier_block twd_clk_nb = {
.notifier_call = twd_rate_change,
};
/* Register the clk-rate notifier once the boot CPU's evt is set up. */
static int twd_clk_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return clk_notifier_register(twd_clk, &twd_clk_nb);

	return 0;
}
core_initcall(twd_clk_init);
#elif defined (CONFIG_CPU_FREQ)
#include <linux/cpufreq.h>
/*
* Updates clockevent frequency when the cpu frequency changes.
* Called on the cpu that is changing frequency with interrupts disabled.
*/
/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 * (cpufreq variant: re-read the rate from the clk framework)
 */
static void twd_update_frequency(void *data)
{
	twd_timer_rate = clk_get_rate(twd_clk);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
}
/* cpufreq notifier: after a frequency transition, update the affected
 * CPU's local timer on that CPU (the TWD is per-CPU). */
static int twd_cpufreq_transition(struct notifier_block *nb,
	unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (state == CPUFREQ_POSTCHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
			NULL, 1);

	return NOTIFY_OK;
}
/* Notifier block hooked into cpufreq (see twd_cpufreq_init). */
static struct notifier_block twd_cpufreq_nb = {
	.notifier_call = twd_cpufreq_transition,
};
/*
 * Register for cpufreq transition notifications, but only if the timer was
 * actually set up on this CPU and a valid TWD clock exists.
 */
static int twd_cpufreq_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return cpufreq_register_notifier(&twd_cpufreq_nb,
			CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}
core_initcall(twd_cpufreq_init);
#endif
/*
 * Measure the TWD tick rate against the jiffies clock.  Only runs once:
 * if twd_timer_rate was already set (e.g. from clk_get_rate() in
 * twd_get_clock()) this is a no-op.
 */
static void twd_calibrate_rate(void)
{
	unsigned long count;
	u64 waitjiffies;

	/*
	 * If this is the first time round, we need to work out how fast
	 * the timer ticks
	 */
	if (twd_timer_rate == 0) {
		printk(KERN_INFO "Calibrating local timer... ");

		/* Wait for a tick to start */
		waitjiffies = get_jiffies_64() + 1;
		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		/* OK, now the tick has started, let's get the timer going */
		waitjiffies += 5;

		/* enable, no interrupt or reload */
		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);

		/* maximum value */
		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);

		/* Let the counter run for 5 jiffies, then compute ticks/sec. */
		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);

		/* Counter counts down from 0xFFFFFFFF; delta over 1/(HZ/5) s. */
		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);

		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
			(twd_timer_rate / 10000) % 100);
	}
}
/*
 * Per-cpu timer interrupt handler.  dev_id is the percpu pointer slot
 * holding this CPU's clock_event_device.
 */
static irqreturn_t twd_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	/* Not ours unless the TWD actually raised (and we ack'd) the event. */
	if (!twd_timer_ack())
		return IRQ_NONE;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}
/*
 * Look up, prepare and enable the TWD input clock, then record its rate.
 * On any failure twd_timer_rate is left untouched (0), which makes
 * twd_calibrate_rate() measure the rate empirically instead.
 */
static void twd_get_clock(struct device_node *np)
{
	int err;

	/* Prefer a DT-provided clock; fall back to the "smp_twd" sys clock. */
	if (np)
		twd_clk = of_clk_get(np, 0);
	else
		twd_clk = clk_get_sys("smp_twd", NULL);

	if (IS_ERR(twd_clk)) {
		pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
		return;
	}

	err = clk_prepare_enable(twd_clk);
	if (err) {
		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
		clk_put(twd_clk);
		return;
	}

	twd_timer_rate = clk_get_rate(twd_clk);
}
/*
 * Setup the local clock events for a CPU.
 */
static int twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;
	int cpu = smp_processor_id();

	/*
	 * If the basic setup for this CPU has been done before don't
	 * bother with the below.
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		/* Just quiesce HW, re-register the saved clockevent, unmask. */
		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(*__this_cpu_ptr(twd_evt));
		enable_percpu_irq(clk->irq, 0);
		return 0;
	}
	per_cpu(percpu_setup_called, cpu) = true;

	/* No-op if the rate is already known (see twd_get_clock()). */
	twd_calibrate_rate();

	/*
	 * The following is done once per CPU the first time .setup() is
	 * called.
	 */
	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
		CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	/* Remember this CPU's device so twd_handler() can find it. */
	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}
/* Hooks handed to the generic local-timer layer. */
static struct local_timer_ops twd_lt_ops = {
	.setup = twd_timer_setup,
	.stop = twd_timer_stop,
};
/*
 * Common registration path shared by the resource-based and DT-based
 * entry points.  Allocates the percpu clockevent slot, requests the
 * percpu IRQ, registers the local-timer ops and acquires the clock.
 *
 * On failure, unwinds everything acquired so far (goto-cleanup), including
 * the twd_base mapping set up by the caller, and returns a negative errno.
 */
static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device *);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = local_timer_register(&twd_lt_ops);
	if (err)
		goto out_irq;

	/* Best-effort; failure falls back to runtime calibration. */
	twd_get_clock(np);

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
/*
 * Register the TWD local timer from a static resource description:
 * res[0] is the register window, res[1] the percpu interrupt.
 *
 * Returns 0 on success, -EBUSY if already registered, -ENOMEM if the
 * register window cannot be mapped, or the error from the common path.
 */
int __init twd_local_timer_register(struct twd_local_timer *tlt)
{
	struct resource *mem = &tlt->res[0];
	struct resource *irq = &tlt->res[1];

	/* Refuse double registration. */
	if (twd_base || twd_evt)
		return -EBUSY;

	twd_ppi = irq->start;

	twd_base = ioremap(mem->start, resource_size(mem));
	if (!twd_base)
		return -ENOMEM;

	return twd_local_timer_common_register(NULL);
}
#ifdef CONFIG_OF
/*
 * Device-tree entry point (see the CLOCKSOURCE_OF_DECLARE lines below).
 * Maps the TWD registers and IRQ from the DT node and registers the timer.
 * Failures are reported with a WARN rather than propagated.
 */
static void __init twd_local_timer_of_register(struct device_node *np)
{
	int err;

	/* The TWD only makes sense on SMP with more than one CPU allowed. */
	if (!is_smp() || !setup_max_cpus)
		return;

	twd_ppi = irq_of_parse_and_map(np, 0);
	if (!twd_ppi) {
		err = -EINVAL;
		goto out;
	}

	twd_base = of_iomap(np, 0);
	if (!twd_base) {
		err = -ENOMEM;
		goto out;
	}

	err = twd_local_timer_common_register(np);

out:
	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
#endif
| gpl-2.0 |
nkremerh/cctools | batch_job/src/mesos_task.c | 8 | 1138 | #include "mesos_task.h"
/*
 * Allocate and populate a mesos_task for one batch job.
 *
 * task_id            - numeric job identifier stored verbatim.
 * cmd                - command line; duplicated, caller keeps ownership of
 *                      the original.
 * extra_input_files  - optional comma/space separated list string, parsed
 *                      with text_list_load_str(); relative paths are
 *                      rewritten as absolute using the current directory.
 * extra_output_files - optional list string, parsed but not rewritten.
 *
 * Returns a heap-allocated task (caller frees via mesos_task_delete), or
 * NULL if the initial allocation fails.
 */
struct mesos_task *mesos_task_create(int task_id, const char *cmd,
	const char *extra_input_files, const char *extra_output_files)
{
	struct mesos_task *mt = malloc(sizeof(*mt));
	if (!mt) {
		/* CERT MEM32-C: don't dereference a failed allocation. */
		return NULL;
	}
	mt->task_id = task_id;
	mt->task_cmd = xxstrdup(cmd);
	if (extra_input_files != NULL) {
		mt->task_input_files = text_list_load_str(extra_input_files);
		int i = 0;
		int num_input_files = text_list_size(mt->task_input_files);
		for(i = 0; i < num_input_files; i++) {
			/* Absolutize relative input paths against the cwd. */
			if (text_list_get(mt->task_input_files, i)[0] != '/') {
				char *path_buf = path_getcwd();
				/* NOTE(review): string_combine's return value is
				 * ignored here; confirm it appends in place rather
				 * than reallocating, or the result may be stale. */
				string_combine(path_buf, "/");
				string_combine(path_buf, text_list_get(mt->task_input_files, i));
				text_list_set(mt->task_input_files, path_buf, i);
			}
		}
	} else {
		mt->task_input_files = NULL;
	}
	if (extra_output_files != NULL) {
		mt->task_output_files = text_list_load_str(extra_output_files);
	} else {
		mt->task_output_files = NULL;
	}
	return mt;
}
/*
 * Release a task created by mesos_task_create().
 * NOTE(review): task_input_files/task_output_files are text_list objects;
 * confirm plain free() releases their contained entries — a dedicated
 * text_list delete routine may be required to avoid leaking the strings.
 */
void mesos_task_delete(struct mesos_task *mt)
{
	free(mt->task_cmd);
	free(mt->task_input_files);
	free(mt->task_output_files);
	free(mt);
}
/* vim: set noexpandtab tabstop=4: */
| gpl-2.0 |
toastcfh/android_kernel_lge_d851 | drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8 | 22946 | /*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
enum {
	MAX_INLINE = 104, /* hard cap applied to inline_thold below */
	MAX_BF = 256,	  /* largest descriptor pushed via the BlueFlame register */
};

/* Packets up to this many bytes are copied inline into the Tx descriptor
 * (read-only module parameter; clamped to MAX_INLINE at ring creation). */
static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
/*
 * Allocate all resources for one Tx ring: per-descriptor bookkeeping
 * (tx_info), a bounce buffer for descriptors that wrap the ring end,
 * the HW queue memory, its mapping, the QP, and (if available) a
 * BlueFlame register.  On failure, everything allocated so far is
 * released via the goto-cleanup chain and a negative errno is returned.
 *
 * qpn    - QP number to use for this ring.
 * size   - number of descriptors (assumed power of two; size_mask below).
 * stride - bytes per descriptor slot.
 */
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring, int qpn, u32 size,
	u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	/* Clamp the module parameter to its documented maximum. */
	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	/* One mlx4_en_tx_info entry per descriptor slot. */
	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info)
		return -ENOMEM;

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		ring->tx_info, tmp);

	/* Staging area for descriptors that wrap past the ring end. */
	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

	/* BlueFlame is optional: fall back to the regular doorbell UAR. */
	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}
/* Release everything acquired by mlx4_en_create_tx_ring(), in reverse order. */
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}
/*
 * Reset ring state, bind it to completion queue 'cq', and move the QP to
 * the ready state.  Returns the result of the QP state transition.
 */
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring,
	int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	/* cons starts at -1 so prod - cons counts correctly from empty. */
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	/* Doorbell value: QP number in the upper bits (see mlx4_en_xmit). */
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}
/* Move the ring's QP back to reset; counterpart of mlx4_en_activate_tx_ring. */
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
/*
 * Free one completed Tx descriptor: unmap its DMA buffers (unless the data
 * was inlined), overwrite the descriptor with ownership "stamp" words, and
 * free the skb.  Returns the number of TXBB units the descriptor occupied.
 *
 * 'owner' selects the stamp's ownership bit so the slot reads as
 * software-owned on the next ring pass.
 */
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring,
	int index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Fast path: the descriptor does not wrap past the end of the ring. */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor, one word every STAMP_STRIDE bytes. */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		/* Wrapped descriptor: data segments may continue at ring start. */
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Wrap the segment pointer back to ring start. */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor; flip the ownership bit when the
		 * stamp pointer wraps (the new lap has opposite ownership). */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}
/*
 * Drain all outstanding Tx descriptors without waiting for completions
 * (used on ring teardown).  Returns the number of descriptors freed.
 */
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Advance past the last descriptor already accounted for. */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	/* Sanity check: consumer must never overtake the producer. */
	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}
/*
 * Reap Tx completions from 'cq', free the corresponding descriptors, and
 * wake the netdev queue if it had been stopped for lack of ring space.
 * Caller must hold ring->comp_lock.
 */
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process CQEs while their ownership bit matches our lap parity. */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * Ensure the CQE contents are read only after the ownership
		 * bit has been observed.
		 */
		rmb();

		/* Free all descriptors up to the one this CQE reports. */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* Free the descriptor at ring_index. */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
					ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}

	/*
	 * Publish the new consumer index to the CQ before updating the ring.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Reopen the queue if it was stopped and enough space freed up. */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}
/* Tx completion interrupt: process the CQ and re-arm the poll timer. */
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	/* If another context is already reaping this ring, let it finish. */
	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}
/* Timer callback: periodically reap Tx completions without interrupts. */
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	/* Ring busy elsewhere: just retry on the next timer tick. */
	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/*
	 * Keep the poll timer running only while work remains outstanding.
	 */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}
/*
 * Copy a descriptor that was built in the bounce buffer into its real
 * (wrapping) position in the ring.  The tail portion (beyond the ring end)
 * is written first, then the head, with a write barrier at each TXBB
 * boundary; the very first word (index*TXBB_SIZE + 0, holding ownership)
 * is deliberately skipped by the "i >= 4" loop bound so the caller sets
 * ownership last.
 */
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
	struct mlx4_en_tx_ring *ring,
	u32 index,
	unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	/* Part that wraps: goes to the start of the ring buffer. */
	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	/* Part that fits before the ring end (ownership word excluded). */
	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return the real descriptor location. */
	return ring->buf + index * TXBB_SIZE;
}
/*
 * Opportunistic completion reaping from the transmit path: make sure the
 * poll timer is armed, and every MLX4_EN_TX_POLL_MODER transmits try to
 * process the CQ directly (skipping if the lock is contended).
 */
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/*
	 * Ensure completions will eventually be reaped even without IRQs.
	 */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets. */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}
/*
 * Decide whether this skb qualifies for inline placement in the Tx
 * descriptor: inlining enabled, not GSO, total length within threshold,
 * and at most one page fragment (whose mapped address must be valid).
 * On success with exactly one fragment, *pfrag (if non-NULL) receives the
 * fragment's address.  Returns 1 to inline, 0 otherwise.
 */
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *addr;

	if (!inline_thold || skb_is_gso(skb) || skb->len > inline_thold)
		return 0;

	/* Purely linear packet: always inlinable at this point. */
	if (skb_shinfo(skb)->nr_frags == 0)
		return 1;

	/* More than one fragment cannot be inlined. */
	if (skb_shinfo(skb)->nr_frags != 1)
		return 0;

	addr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
	if (unlikely(!addr))
		return 0;

	if (pfrag)
		*pfrag = addr;

	return 1;
}
/*
 * Descriptor size (16-byte aligned) needed to carry this skb inline:
 * control segment + one inline segment header + payload, plus a second
 * inline segment header when the data spills past MLX4_INLINE_ALIGN.
 */
static int inline_size(struct sk_buff *skb)
{
	int one_seg = skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg);

	if (one_seg <= MLX4_INLINE_ALIGN)
		return ALIGN(one_seg, 16);

	/* Spans two inline segments: account for the extra header. */
	return ALIGN(one_seg + sizeof(struct mlx4_wqe_inline_seg), 16);
}
/*
 * Compute the descriptor size in bytes for 'skb'.  For GSO packets the
 * LSO header length is returned through *lso_header_size (0 otherwise).
 * Returns 0 on unsupported layouts (non-linear headers).
 */
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
	int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* ctrl + one data seg per frag + LSO header rounded to DS_SIZE
		 * (the +4 covers the LSO prefix word). */
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/*
			 * Linear data beyond the headers needs its own segment;
			 * headers that are themselves non-linear are rejected.
			 */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}
/*
 * Copy the whole packet inline into the descriptor.  'fragptr' is the
 * address of the single page fragment, if any (see is_inline()).  Bit 31
 * of byte_count marks a segment as inline.  When the packet exceeds one
 * inline segment ('spc' bytes), a second segment is filled and published
 * only after a write barrier.
 */
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
	int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		/* Everything fits in the first inline segment. */
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
				skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			/* Linear data fits in seg 1; frag may straddle both. */
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			/* Linear data itself straddles both segments. */
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		/* Publish seg 2's byte_count only after its payload landed. */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
/*
 * Pick a Tx queue for 'skb'.  With per-priority pause (rx_ppp) enabled,
 * VLAN-tagged traffic is steered to a dedicated ring per VLAN priority
 * (the top 3 bits of the tag); everything else uses the stack's tx hash.
 */
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		u16 tag = vlan_tx_tag_get(skb);

		return MLX4_EN_NUM_TX_RINGS + (tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}
/* Copy 'bytecnt' bytes into the BlueFlame register as 64-bit words. */
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
{
	unsigned qwords = bytecnt / 8;

	__iowrite64_copy(dst, src, qwords);
}
/*
 * Main transmit entry point: build a Tx descriptor for 'skb' (LSO,
 * inline, or scatter/gather), ring the doorbell (BlueFlame when
 * possible), and opportunistically reap completions.
 */
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Round the descriptor up to whole TXBB units. */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check for available space; stop the queue if the ring is full. */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* Stop this queue and mark the ring blocked. */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Arm the CQ so we learn when descriptors free up. */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track the average number of in-flight descriptors. */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Producer slot for this packet. */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/*
	 * Build the descriptor in place if it fits before the ring end,
	 * otherwise stage it in the bounce buffer and copy it in later.
	 */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Record per-descriptor bookkeeping. */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/*
	 * Fill the control segment: VLAN, fence size, checksum flags.
	 */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tx_tag_present(skb);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		ring->tx_csum++;
	}

	/* Copy the destination MAC address into the WQE. */
	ethh = (struct ethhdr *)skb->data;
	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));

	/* LSO (TSO) path. */
	if (lso_header_size) {
		/* LSO opcode; ownership bit chosen by the producer's lap. */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* MSS and header size for the LSO prefix. */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/*
		 * Headers are copied into the descriptor itself; data
		 * segments start after the (DS_SIZE-aligned) header area.
		 */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		/* Count resulting wire packets (headers replicated per seg). */
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (non-LSO) packet. */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* data_offset is meaningful only for non-inline descriptors. */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* DMA-map page fragments, last to first. */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = skb_frag_dma_map(priv->ddev, frag,
					       0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			/* byte_count set last, after addr/lkey are visible. */
			wmb();
			data->byte_count = cpu_to_be32(skb_frag_size(frag));
			--data;
		}

		/* Map the linear part (beyond any LSO headers). */
		if (tx_info->linear) {
			dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we staged in the bounce buffer, copy the descriptor in place. */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run the skb destructor before handing the packet to HW. */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		/* BlueFlame: write QPN into the ctrl word and push the
		 * whole descriptor through the BF register. */
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/*
		 * Ensure the descriptor is fully written before handing
		 * ownership to HW, and before the BF copy.
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
			     desc_size);

		wmb();

		/* Alternate between the two BF buffer halves. */
		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/*
		 * Regular doorbell: publish ownership, then ring the bell.
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	/* Opportunistically reap completions. */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
| gpl-2.0 |
visi0nary/mediatek | mt6732/kernel/mediatek/platform/mt6752/kernel/drivers/ldvt/vdec/verify/vdec_verify_vdecode.c | 8 | 219766 | #include <linux/string.h>
#include <linux/interrupt.h>
#include "../include/drv_config.h"
#include "vdec_verify_vdecode.h"
#include "../hal/vdec_hal_if_common.h"
#include "../hal/vdec_hal_if_wmv.h"
#include "../hal/vdec_hal_if_mpeg.h"
#include "../hal/vdec_hal_if_h264.h"
#include "../hal/vdec_hal_if_h265.h"
#include "../include/vdec_info_common.h"
//#include "vdec_verify_dvdec.h"
//#include "vdec_info_dv.h"
#include "../vdec.h"
//#include "vdec_hw_dvdec.h"
#include "../hal/vdec_hw_common.h"
#include "../hal/vdec_hw_h264.h"
#include "../hal/vdec_hw_h265.h"
#include "../include/drv_common.h"
//#include "x_bim.h"
//#include "x_os.h"
//#include "x_assert.h"
//#include "x_debug.h"
#ifdef VERIFICATION_FGT
#include "vdec_verify_fgt.h"
#endif
#include "vdec_verify_file_common.h"
#include "vdec_verify_filesetting.h"
#include "vdec_verify_irq_fiq_proc.h"
#include "vdec_verify_common.h"
#include "vdec_verify_vparser_mpeg.h"
#include "vdec_verify_vparser_mpeg4.h"
#include "vdec_verify_vparser_wmv.h"
#include "vdec_verify_vparser_h264.h"
#include "vdec_verify_vparser_h265.h"
#include "vdec_verify_vparser_rm.h"
#include "vdec_verify_vparser_vp6.h"
#include "vdec_verify_vparser_avs.h"
#include "vdec_verify_vparser_vp8.h"
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/delay.h>
extern char gpfH264LogFileBuffer[4096];
extern int gfpH264log;
extern unsigned int gH264logbufferOffset;
int vdecwriteFile(int fp,char *buf,int writelen);
//#define DBG_H264_PRINTF(format,...) do { if(-1 != gfpH264log){ { gH264logbufferOffset += sprintf((char *)(gpfH264LogFileBuffer+gH264logbufferOffset),format, ##__VA_ARGS__);} if(gH264logbufferOffset >= 3840 ){ vdecwriteFile(gfpH264log, gpfH264LogFileBuffer, gH264logbufferOffset); gH264logbufferOffset = 0; } } } while (0)
#define DBG_H264_PRINTF
/*
#define DBG_H264_PRINTF(format,...) \
do { \
if (-1 != gfpH264log) {\
{ gH264logbufferOffset += sprintf((char *)(gpfH264LogFileBuffer+gH264logbufferOffset),format, ##__VA_ARGS__);} \
if (gH264logbufferOffset >= 3840 ) { \
vdecwriteFile(gfpH264log, gpfH264LogFileBuffer, gH264logbufferOffset); \
gH264logbufferOffset = 0; \
} \
} \
} while (0)
*/
extern int rand(void);
extern void reset_pic_hdr_bits(UINT32 u4InstID);
extern UINT32 pic_hdr_bitcount(UINT32 u4InstID) ;
void vNormDecProc(UINT32 u4InstID);
void vVerifyWMVInitProc(UINT32 u4InstID);
void vInitVParserMPEG(UINT32 u4InstID);
void ComputeDQuantDecParam(UINT32 u4InstID);
void vVerTestMTCMOS(UINT32 u4InstID);
void vVerTestDCM(UINT32 u4InstID);
void vVerInitVDec(UINT32 u4InstID);
void vVDecProc(UINT32 u4InstID);
void vSetDownScaleParam(UINT32 u4InstID, BOOL fgEnable, VDEC_INFO_VDSCL_PRM_T *prDownScalerPrm);
void vCodecVersion(UINT32 u4InstID, UINT32 u4CodecFOURCC);
void vChkVDec(UINT32 u4InstID);
void vH264VDecEnd(UINT32 u4InstID);
void vH265VDecEnd(UINT32 u4InstID);
void vVerifyFlushBufInfo(UINT32 u4InstID);
BOOL fgIsH264VDecComplete(UINT32 u4InstID);
void vVerifyAdapRefPicmarkingProce(UINT32 u4InstID);
void vVerifySetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct, UCHAR ucRefType);
UCHAR bGetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct);
void vChkOutputFBuf(UINT32 u4InstID);
void vAdd2RefPicList(UINT32 u4InstID);
void vVerifyClrPicRefInfo(UINT32 u4InstID, UCHAR ucPicType, UCHAR ucFBufIdx);
void vVerifyFlushAllSetData(UINT32 u4InstID);
void vH264DecEndProc(UINT32 u4InstID);
void vWMVDecEndProc(UINT32 u4InstID);
void vMPEGDecEndProc(UINT32 u4InstID);
void vVerifyDx3SufxChk(UINT32 u4InstID);
void vSetDx3SliceBoundary(UINT32 u4InstID, VDEC_INFO_MPEG_DEC_PRM_T *prVDecMPEGDecPrm);
void vMp4FixBCode(UINT32 u4InstID);
void PostAdjustReconRange(UINT32 u4InstID);
void vWMVVDecEnd(UINT32 u4InstID);
BOOL fgIsWMVVDecComplete(UINT32 u4InstID);
void vVerifySetVSyncPrmBufPtr(UINT32 u4InstID, UINT32 u4BufIdx);
void vReadWMVChkSumGolden(UINT32 u4InstID);
void vReadH264ChkSumGolden(UINT32 u4InstID);
void vReadMPEGChkSumGolden(UINT32 u4InstID);
void vVerifySetUpParm(UINT32 u4InstID, UINT32 dwPicW, UINT32 dwPicH, UINT32 dwFrmRatCod, BOOL fgDivXM4v, BOOL fgDx4M4v);
void vDvCompare(UINT32 u4InstID);
void vReadDvChkSumGolden(UINT32 u4InstID);
void vH264ChkSumDump(UINT32 u4InstID);
void vVParserProc(UINT32 u4InstID);
void vVerifyInitVParserWMV(UINT32 u4InstID);
void vVPrsMPEGIPProc(UINT32 u4InstID);
void vVPrsMPEGBProc(UINT32 u4InstID);
#ifdef VPMODE
INT32 i4VPModeDecStart(UINT32 u4VDecID,VDEC_INFO_DEC_PRM_T *prDecPrm);
#endif
void vAVCDumpChkSum(void);
void vPrintDumpReg(UINT32 u4InstID,UINT32 fgTAB);
#ifdef MPEG4_CRC_CMP
extern void vMPEG4CrcCmp(UINT32 u4InstID,UCHAR *ptAddr,UINT32 u4Size);
#endif
#ifdef VDEC_SRAM
void vDumpSram(UINT32 u4InstID);
void vWriteSram(UINT32 u4InstID,UINT32 u4SramAddr,UINT32 u4SramValue);
UINT32 u4ReadSram(UINT32 u4InstID,UINT32 u4SramAddr);
#endif
extern void reset_dec_counter(UINT32 u4InstID);
extern void Dump_Dram0x49B_0x4FB();
extern UINT32 u4FilePicCont_noVOP;
// *********************************************************************
// Function : void vMpvPlay(UINT32 u4InstID)
// Description : top-level decode loop - runs vNormDecProc() until the
//               input bitstream is exhausted (WMV gets a one-time init
//               via vVerifyWMVInitProc() first)
// Parameter : u4InstID - decoder instance index
// Return : None
// *********************************************************************
void vMpvPlay(UINT32 u4InstID)
{
	int test_counter = 0;

	if(_u4CodecVer[u4InstID] == VDEC_WMV)
	{
		vVerifyWMVInitProc(u4InstID);
	}

	/* NOTE(review): test_counter is never incremented (the ++ below is
	 * commented out), so "test_counter<=2" is always true and the loop
	 * terminates only when the bit count reaches the file length. */
	while(_u4VerBitCount[u4InstID] < (_tInFileInfo[u4InstID].u4FileLength << 3) && test_counter<=2)
	{
		vNormDecProc(u4InstID);
		//test_counter++; //for debug
		/*
		if ( _u4PicCnt[u4InstID] == 400 ){ //for debug
		break;
		}
		*/
	}
}
// *********************************************************************
// Function : void vVerifyWMVInitProc(UINT32 u4InstID)
// Description : WMV initialize process
// Parameter : None
// Return : None
// *********************************************************************
// *********************************************************************
// Function    : void vVerifyWMVInitProc(UINT32 u4InstID)
// Description : WMV initialize process: derives the codec FOURCC from
//               the input file extension (.v9e / .vc1 / .rcv header),
//               then programs the video FIFO and barrel shifter.
// Parameter   : u4InstID - decoder instance ID
// Return      : None
// *********************************************************************
void vVerifyWMVInitProc(UINT32 u4InstID)
{
    VDEC_INFO_WMV_VFIFO_PRM_T rWmvVFifoInitPrm;
    VDEC_INFO_WMV_BS_INIT_PRM_T rWmvBSInitPrm;
    char fiInName[256];
    INT32 iLen, i4RCVNumFrames, i4CodecVersion;
    /* BUGFIX: the original left u4CodecFOURCC uninitialized when no
     * extension matched (or for the unsupported RCV codec version 7),
     * then read it — undefined behavior.  0 is not a valid FOURCC, so
     * vCodecVersion() now reports "WMV Codec Error" instead. */
    INT32 u4CodecFOURCC = 0;

    strcpy(fiInName, _bFileStr1[u4InstID][1]);
    iLen = strlen(fiInName);
    if (iLen >= 4)  /* guard fiInName[iLen-4] indexing for short names */
    {
        if ((fiInName[iLen-4] == '.') && (fiInName[iLen-3] == 'v') &&
            (fiInName[iLen-2] == '9') && (fiInName[iLen-1] == 'e'))
        {
            /* .v9e: WMV Advanced Profile elementary stream */
            u4CodecFOURCC = FOURCC_WMVA_WMV;
        }
        else if ((fiInName[iLen-4] == '.') && (fiInName[iLen-3] == 'v') &&
                 (fiInName[iLen-2] == 'c') && (fiInName[iLen-1] == '1'))
        {
            /* .vc1: VC-1 elementary stream */
            u4CodecFOURCC = FOURCC_WVC1_WMV;
        }
        else if ((fiInName[iLen-4] == '.') && (fiInName[iLen-3] == 'r') &&
                 (fiInName[iLen-2] == 'c') && (fiInName[iLen-1] == 'v'))
        {
            /* .rcv container: codec version is packed into the first
             * little-endian DWORD already loaded into the video FIFO. */
            i4RCVNumFrames = (*_pucVFifo[u4InstID]) +
                             ((*(_pucVFifo[u4InstID] + 1)) << 8) +
                             ((*(_pucVFifo[u4InstID] + 2)) << 16) +
                             ((*(_pucVFifo[u4InstID] + 3)) << 24);
            _i4RcvVersion[u4InstID] = (i4RCVNumFrames >> 30) & 0x1;
            i4CodecVersion = i4RCVNumFrames >> 24;
            if (_i4RcvVersion[u4InstID] == 0)
            {
                i4CodecVersion &= 0x7f;
            }
            else
            {
                i4CodecVersion &= 0x3f;
            }
            if (i4CodecVersion == 0)        /* WMV7 */
                u4CodecFOURCC = FOURCC_WMV1_WMV;
            else if (i4CodecVersion == 1)   /* MP43, not supported */
                u4CodecFOURCC = FOURCC_MP43_WMV;
            else if (i4CodecVersion == 2)   /* WMV8 */
                u4CodecFOURCC = FOURCC_WMV2_WMV;
            else if (i4CodecVersion == 3)   /* MP42, not supported */
                u4CodecFOURCC = FOURCC_MP42_WMV;
            else if (i4CodecVersion == 4)   /* MP4S, not supported */
                u4CodecFOURCC = FOURCC_MP4S_WMV;
            else if (i4CodecVersion == 5)   /* Simple & Main Profile */
                u4CodecFOURCC = FOURCC_WMV3_WMV;
            else if (i4CodecVersion == 6)   /* Advanced Profile */
                u4CodecFOURCC = FOURCC_WMVA_WMV;
            else if (i4CodecVersion == 8)   /* Advanced Profile */
                u4CodecFOURCC = FOURCC_WVC1_WMV;
            /* other versions (e.g. 7): u4CodecFOURCC stays 0 -> error */
        }
    }

    vCodecVersion(u4InstID, u4CodecFOURCC);
    vSetVerFRefBuf(u4InstID, 0);
    vSetVerBRefBuf(u4InstID, 1);

    /* Program the video FIFO window for the whole preloaded bitstream. */
    rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
    rWmvVFifoInitPrm.u4VFifoSa  = (UINT32)_pucVFifo[u4InstID];
    rWmvVFifoInitPrm.u4VFifoEa  = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
    i4VDEC_HAL_WMV_InitVDecHW(u4InstID, &rWmvVFifoInitPrm);

    /* Barrel shifter: read from the start, write pointer at the end
     * (entire file already resident in the FIFO). */
    rWmvBSInitPrm.u4VFifoSa     = (UINT32)_pucVFifo[u4InstID];
    rWmvBSInitPrm.u4VFifoEa     = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
    rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID];
    rWmvBSInitPrm.u4WritePointer= (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
    i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm,
                                     (_i4CodecVersion[u4InstID] == VDEC_VC1) ? TRUE : FALSE);
}
// *********************************************************************
// Function : void vNormDecProc(UINT32 u4InstID)
// Description : normal decode procedure
// Parameter : None
// Return : None
// *********************************************************************
#if (WMV_8320_SUPPORT)
#define WMV_8320_TEST_BARREL_SHIFT (0)
#if WMV_8320_TEST_BARREL_SHIFT
BOOL _fgTestBarrelShift = TRUE;
#endif
#endif
// *********************************************************************
// Function    : void vNormDecProc(UINT32 u4InstID)
// Description : normal decode procedure - executes one step of the
//               per-instance decode state machine:
//               INIT_PRM -> VPARSER -> WAIT_TO_DEC -> WAIT_DECODE
// Parameter   : u4InstID - decoder instance ID
// Return      : None
// *********************************************************************
void vNormDecProc(UINT32 u4InstID)
{
#if (WMV_8320_TEST_BARREL_SHIFT)
    VDEC_INFO_WMV_VFIFO_PRM_T rWmvVFifoInitPrm;
    VDEC_INFO_WMV_BS_INIT_PRM_T rWmvBSInitPrm;
#endif
#ifdef VDEC_SIM_DUMP
    /* BUGFIX: the original passed the whole _tVerDec[] struct as a %d
     * argument (undefined behavior); print the state member instead. */
    printk("<vdec> _tVerDec[%d].ucState=%d\n", u4InstID, _tVerDec[u4InstID].ucState);
#endif
    switch (_tVerDec[u4InstID].ucState)
    {
    case DEC_NORM_INIT_PRM:
        //printk("vNormDecProc, DEC_NORM_INIT_PRM\n");
#if (WMV_8320_TEST_BARREL_SHIFT)
        /* Debug-only path: re-programs the WMV FIFO / barrel shifter
         * each time.  NOTE(review): this variant never advances ucState,
         * so the state machine loops in INIT_PRM — debug hook only. */
        if (TRUE == _fgTestBarrelShift)
        {
            if (_u4CodecVer[u4InstID] == VDEC_WMV)
            {
                rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
                rWmvVFifoInitPrm.u4VFifoSa  = (UINT32)_pucVFifo[u4InstID];
                rWmvVFifoInitPrm.u4VFifoEa  = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
                i4VDEC_HAL_WMV_InitVDecHW(u4InstID, &rWmvVFifoInitPrm);
                rWmvBSInitPrm.u4VFifoSa     = (UINT32)_pucVFifo[u4InstID];
                rWmvBSInitPrm.u4VFifoEa     = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
                rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID];
                rWmvBSInitPrm.u4WritePointer= (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
                if (_i4CodecVersion[u4InstID] == VDEC_VC1)
                {
                    i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, TRUE);
                }
                else
                {
                    i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, FALSE);
                }
            }
            vVerInitVDec(u4InstID);
            // delay
            //x_thread_delay(2);
            printk("vNormDecProc, 72:0x%x\n", u4VDecReadVLD(u4InstID, 4*72));
        }
#else
        vVerInitVDec(u4InstID);
        _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;
#endif
        break;
    case DEC_NORM_VPARSER:
        printk("vNormDecProc, DEC_NORM_VPARSER\n");
        vVParserProc(u4InstID);
        break;
    case DEC_NORM_WAIT_TO_DEC:
        printk("vNormDecProc, DEC_NORM_WAIT_TO_DEC\n");
        vVDecProc(u4InstID);
        break;
    case DEC_NORM_WAIT_DECODE:
        printk("vNormDecProc, DEC_NORM_WAIT_DECODE\n");
        vChkVDec(u4InstID);
#if (POWER_TEST_CASE == POWER_TEST_MTCMOS)
        vVerTestMTCMOS(u4InstID);
#elif (POWER_TEST_CASE == POWER_TEST_DCM)
        vVerTestDCM(u4InstID);
#endif
        break;
    default:
        /* unknown state: ignore (was silently skipped before too) */
        break;
    }
}
#if (POWER_TEST_CASE == POWER_TEST_MTCMOS)
// *********************************************************************
// Function : void vVerTestMTCMOS(UINT32 u4InstID)
// Description : Test MTCMOS on/off (power to VDEC or not)
// Parameter : None
// Return : None
// *********************************************************************
// *********************************************************************
// Function    : void vVerTestMTCMOS(UINT32 u4InstID)
// Description : Test MTCMOS on/off (power to VDEC or not) by writing
//               "disable 8" / "enable 8" to /proc/clkmgr/subsys_test
//               (on MT6589, subsystem 8 is VDEC). Runs once per session.
// Parameter   : u4InstID - decoder instance ID
// Return      : None
// *********************************************************************
void vVerTestMTCMOS(UINT32 u4InstID)
{
    struct file *fp;
    char buf[512];
    char buf2[2048];
    int writelen = 10;
    int readlen = 1024;
    UINT32 retval = 0;
    mm_segment_t vdecoldfs;

    /* Only test once for 1st bitstream 1st pic */
    if (_u4PowerTestInit[u4InstID] == 1)
    {
        return;
    }
    _u4PowerTestInit[u4InstID] = 1;

    vdecoldfs = get_fs();
    set_fs(KERNEL_DS);
    fp = filp_open("/proc/clkmgr/subsys_test", O_RDWR, 0);
    /* BUGFIX: filp_open() returns an ERR_PTR on failure, never NULL, so
     * the original "if (fp)" check could not catch errors and fp was
     * dereferenced unconditionally afterwards.  Bail out cleanly. */
    if (IS_ERR_OR_NULL(fp))
    {
        printk("/proc/clkmgr/subsys_test open failed\n");
        set_fs(vdecoldfs);
        return;
    }
    printk("/proc/clkmgr/subsys_test open successful\n");

    if (fp->f_op && fp->f_op->write)
    {
        sprintf(buf, "disable 8\0"); // for 6589, 8 is VDEC
        writelen = 10;
        retval = fp->f_op->write(fp, buf, writelen, &fp->f_pos);
        printk("[PWR_TEST] MTCMOS write off for 10 sec! retval = %d\n", retval);
#if POWER_TEST_MANUAL_CHECK
        msleep(100);
        retval = fp->f_op->read(fp, buf2, readlen, &fp->f_pos);
        buf2[512] = '\0';
        printk("[PWR_TEST] Check MTCMOS\n");
        printk("%s \n retval = %d\n", buf2, retval);
#endif
        sprintf(buf, "enable 8\0");
        writelen = 9;
        retval = fp->f_op->write(fp, buf, writelen, &fp->f_pos);
        printk("[PWR_TEST] MTCMOS write on, wait for 10 sec! retval = %d\n", retval);
#if POWER_TEST_MANUAL_CHECK
        msleep(100);
        fp->f_op->read(fp, buf2, readlen, &fp->f_pos);
        buf2[512] = '\0';
        printk("[PWR_TEST] Check MTCMOS setback\n");
        printk("%s\n retval = %d\n", buf2, retval);
#endif
        reset_dec_counter(u4InstID);
        /* MTCMOS off cut VDEC power; turn the VDEC clock back on */
        vWriteGconReg(0, 0x1);
    }
    /* BUGFIX: close the file on every path, not only when a write op
     * existed (the original leaked fp otherwise). */
    filp_close(fp, NULL);
    set_fs(vdecoldfs);
}
#endif
#if (POWER_TEST_CASE == POWER_TEST_DCM)
// *********************************************************************
// Function : void vVerTestDCM(UINT32 u4InstID)
// Description : Test DCM on/off (auto clock adjust or not)
// Parameter : None
// Return : None
// *********************************************************************
// *********************************************************************
// Function    : void vVerTestDCM(UINT32 u4InstID)
// Description : Test DCM on/off (automatic clock gating) by toggling
//               the GCON enable/disable registers and reading back.
//               Register map: VDEC on GCON_0[0]=1 / off GCON_1[0]=1,
//                             LARB on GCON_2[0]=1 / off GCON_3[0]=1
// Parameter   : u4InstID - decoder instance ID (unused by the register
//               accessors here; kept for call-site symmetry)
// Return      : None
// *********************************************************************
void vVerTestDCM(UINT32 u4InstID)
{
    UINT32 u4StatusVdec = 0;
    UINT32 u4StatusLarb = 0;

    /* Snapshot current DCM status for both VDEC and LARB. */
    u4StatusVdec = u4ReadGconReg(0x0);
    u4StatusLarb = u4ReadGconReg(0x8);
    printk("[PWR_TEST] DCM status 0x0 = 0x%x, 0x8 = 0x%x\n", u4StatusVdec, u4StatusLarb);

    /* Disable DCM (write the "off" registers), then read back. */
    vWriteGconReg(0x4, 1);
    vWriteGconReg(0xC, 1);
    u4StatusVdec = u4ReadGconReg(0x0);
    u4StatusLarb = u4ReadGconReg(0x8);
    printk("[PWR_TEST] DCM write off for 10 sec! 0x0 = 0x%x, 0x8 = 0x%x\n", u4StatusVdec, u4StatusLarb);
#if POWER_TEST_MANUAL_CHECK
    msleep(100);
#endif

    /* Re-enable DCM and read back again. */
    vWriteGconReg(0x0, 1);
    vWriteGconReg(0x8, 1);
    u4StatusVdec = u4ReadGconReg(0x0);
    u4StatusLarb = u4ReadGconReg(0x8);
    printk("[PWR_TEST] DCM write on for 10 sec! 0x0 = 0x%x, 0x8 = 0x%x\n", u4StatusVdec, u4StatusLarb);
#if POWER_TEST_MANUAL_CHECK
    msleep(100);
#endif
}
#endif
// *********************************************************************
// Function : void vVerInitVDec(UINT32 u4InstID)
// Description : Dec procedure initilize
// Parameter : None
// Return : None
// *********************************************************************
void vVerInitVDec(UINT32 u4InstID)
{
    /* Per-codec decoder initialization: resets per-instance verification
     * state, programs the video FIFO / barrel shifter, and runs any
     * codec-specific header pre-parse.  Dispatches on _u4CodecVer[]. */
#ifdef REG_LOG_NEW
    _fgRegLogConsole[u4InstID] = false;//TRUE; // disabled for debug
#endif
    if (_u4CodecVer[u4InstID] == VDEC_RM)
    {
        /* RealMedia: dedicated init, plus optional pre-parse of the
         * first I picture when a file count is pending. */
        vRM_VerInitDec(u4InstID);
        //
        if (_u4FileCnt[u4InstID] > 0)
        {
            _u4FileCnt[u4InstID] = u4RM_PreParseIPic(u4InstID, _u4FileCnt[u4InstID]);
        }
    } else if (_u4CodecVer[u4InstID] == VDEC_H265){ //2013/04/03 CM Hung HEVC verification
        int i,j;
        VDEC_INFO_H265_INIT_PRM_T rH265VDecInitPrm;
        VDEC_INFO_H265_BS_INIT_PRM_T rH265BSInitPrm;
        vHEVCInitROM(u4InstID);
        /* HEVC decode-parameter reset (UFO mode is a compile-time choice). */
#ifdef HEVC_UFO_MODE
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bIsUFOMode = 1;
#else
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bIsUFOMode = 0;
#endif
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.ucMaxFBufNum = 0xff;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.rLastInfo.u4LastPicW = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.rLastInfo.u4LastPicH = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4PrePOC = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bFirstSliceInSequence = 1;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4RAPOC = MAX_INT;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.ucPreFBIndex = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.prSEI = &_rH265SEI[u4InstID];
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4ReadPtrOffset = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4PreReadPtr = 0;
        /* Reset per-instance frame/bit counters. */
        _u4TotalDecFrms[u4InstID] = 0;
        _u4PrevPtr[u4InstID] = 0;
        _u4PicCnt[u4InstID] = 0;
        _u4VerBitCount[u4InstID] = 0;
        _u4SkipPicNum[u4InstID] = 0;
        _u4CurrPicStartAddr[1] = 0; //VP mode
        /* Invalidate all cached SPS/PPS entries (32 SPS, 256 PPS). */
        for (i = 0; i<32; i++){
            _rH265SPS[u4InstID][i].bSL_Init = 0;
            _rH265SPS[u4InstID][i].bSPSValid = 0;
            for (j = 0; j<32; j++)
                _rH265SPS[u4InstID][i].pShortTermRefPicSets[j] = NULL;
        }
        for (i = 0; i<256; i++){
            _rH265PPS[u4InstID][i].bSL_Init = 0;
            _rH265PPS[u4InstID][i].bPPSValid = 0;
            for (j = 0; j<MAX_TILES_WITTH_HEIGHT; j++){
                _rH265PPS[u4InstID][i].u4ColumnWidthMinus1[j] = 0;
                _rH265PPS[u4InstID][i].u4RowHeightMinus1[j] = 0;
            }
        }
        vVerifyFlushBufInfo(u4InstID);
        vVerifyFlushAllSetData(u4InstID);
        vSetDecFlag(u4InstID, DEC_FLAG_CHG_FBUF);
        /* NOTE(review): rH265VDecInitPrm is populated here but never
         * passed to i4VDEC_HAL_H265_InitVDecHW() below — confirm whether
         * the HAL picks up FG buffers some other way. */
        rH265VDecInitPrm.u4FGDatabase = (UINT32)_pucFGDatabase[u4InstID];
        rH265VDecInitPrm.u4FGSeedbase = (UINT32)_pucFGSeedbase[u4InstID];
        i4VDEC_HAL_H265_InitVDecHW(u4InstID);
        /* Barrel-shifter window over the preloaded bitstream. */
        rH265BSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rH265BSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rH265BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID];
#ifndef RING_VFIFO_SUPPORT
        rH265BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        /* Ring mode: write pointer alternates between half and full FIFO
         * depending on how many bitstream chunks were loaded. */
        rH265BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
        rH265BSInitPrm.u4PredSa = /*PHYSICAL*/((UINT32)_pucPredSa[u4InstID]);
        i4VDEC_HAL_H265_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rH265BSInitPrm);
        //HW parse NALs test
        //vHEVCParseNALs(u4InstID);
    } else if (_u4CodecVer[u4InstID] == VDEC_H264)
    //if(_u4CodecVer[u4InstID] == VDEC_H264)
    {
        VDEC_INFO_H264_INIT_PRM_T rH264VDecInitPrm;
        VDEC_INFO_H264_BS_INIT_PRM_T rH264BSInitPrm;
        /* H.264 decode-parameter reset (SEI / FGT pointers included). */
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum = 0xff;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastPicW = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastPicH = 0;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSEI = &_rSEI[u4InstID];
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prFGTPrm= &_rFGTPrm[u4InstID];
        _u4TotalDecFrms[u4InstID] = 0;
#if AVC_8320_SUPPORT
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.u4VLDWrapperWrok = (UINT32)_pucVLDWrapperWrok[u4InstID];
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.u4PPWrapperWrok = (UINT32)_pucPPWrapperWork[u4InstID];
#endif
        vOutputPOCData(0xFFFFFFFF);
        vVerifyFlushBufInfo(u4InstID);
        vVerifyFlushAllSetData(u4InstID);
        vSetDecFlag(u4InstID, DEC_FLAG_CHG_FBUF);
#ifdef VERIFICATION_FGT
        vAllocFGTTable(u4InstID);
#endif
#ifdef BARREL2_THREAD_SUPPORT
        /* Serialize against the second barrel-shifter thread. */
        VERIFY (x_sema_lock(_ahVDecEndSema[u4InstID], X_SEMA_OPTION_WAIT) == OSR_OK);
#endif
        rH264VDecInitPrm.u4FGDatabase = (UINT32)_pucFGDatabase[u4InstID];
        rH264VDecInitPrm.u4CompModelValue = (UINT32)(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSEI->pucCompModelValue);
        rH264VDecInitPrm.u4FGSeedbase = (UINT32)_pucFGSeedbase[u4InstID];
        i4VDEC_HAL_H264_InitVDecHW(u4InstID, &rH264VDecInitPrm);
        rH264BSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rH264BSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rH264BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID];
        /* NOTE(review): this assignment is redundant — it is always
         * overwritten by one of the two branches just below. */
        rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#ifndef RING_VFIFO_SUPPORT
        rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
        rH264BSInitPrm.u4PredSa = /*PHYSICAL*/((UINT32)_pucPredSa[u4InstID]);
        i4VDEC_HAL_H264_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rH264BSInitPrm);
#ifdef BARREL2_THREAD_SUPPORT
        VERIFY (x_sema_unlock(_ahVDecEndSema[u4InstID]) == OSR_OK);
#endif
    }
    else if(_u4CodecVer[u4InstID] == VDEC_WMV)
    {
        /* WMV/VC-1: parser-state init, then (for non-VC-1 RCV streams)
         * pre-parse the RCV file header and sequence (VOL) header so the
         * byte position of the first picture is known. */
        vVerifyInitVParserWMV(u4InstID);
        vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
        if(_i4CodecVersion[u4InstID] != VDEC_VC1)
        {
            vRCVFileHeader(u4InstID);
            if(_i4CodecVersion[u4InstID] == VDEC_WMV3)
            {
                _u4VprErr[u4InstID] = u4DecodeVOLHead_WMV3(u4InstID);
            }
            else if((_i4CodecVersion[u4InstID] == VDEC_WMV1) || (_i4CodecVersion[u4InstID] == VDEC_WMV2))
            {
                _u4VprErr[u4InstID] = u4DecodeVOLHead_WMV12(u4InstID);
            }
            _u4WMVBitCount[u4InstID] = pic_hdr_bitcount(u4InstID);
            _i4HeaderLen[u4InstID] = _u4WMVBitCount[u4InstID] / 8;
            _iSetPos[u4InstID] = _i4HeaderLen[u4InstID];
        }
        _u4PicHdrBits[u4InstID] = 0;
        _fgCounting[u4InstID] = FALSE;
    }
    else if(_u4CodecVer[u4InstID] == VDEC_VP6)
    {
        vVerInitVP6(u4InstID);
    }
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
    else if(_u4CodecVer[u4InstID] == VDEC_VP8)
    {
        vVerInitVP8(u4InstID);
    }
#endif
    else if(_u4CodecVer[u4InstID] == VDEC_AVS)
    {
        vVerInitAVS(u4InstID);
    }
    else
    {
        /* Fallback: MPEG-1/2/4, H.263, Sorenson, DivX3 family. */
        VDEC_INFO_MPEG_VFIFO_PRM_T rMPEGVDecInitPrm;
        VDEC_INFO_MPEG_BS_INIT_PRM_T rMPEGBSInitPrm;
        vInitVParserMPEG(u4InstID);
        rMPEGVDecInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMPEGVDecInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rMPEGBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMPEGBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rMPEGBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID];
        rMPEGVDecInitPrm.u4CodeType = _u4CodecVer[u4InstID];
#ifndef RING_VFIFO_SUPPORT
        rMPEGBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rMPEGBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rMPEGBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
        i4VDEC_HAL_MPEG_InitVDecHW(u4InstID, &rMPEGVDecInitPrm);
        i4VDEC_HAL_MPEG_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rMPEGBSInitPrm);
        // Restore Quantization Matrix
        if(_fgVerLoadIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, TRUE);
        }
        if(_fgVerLoadNonIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, FALSE);
        }
        if(_u4CodecVer[u4InstID] == VDEC_MPEG2)
        {
            /* MPEG-1/2: pre-parse sequence headers with CRC enabled. */
            vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
            vVDec_HAL_CRC_Enable(u4InstID, 1);
            u4VParserMPEG12(u4InstID, TRUE);
            vDEC_HAL_COMMON_SetVLDPower(u4InstID,OFF);
        }
        else if((_u4CodecVer[u4InstID] == VDEC_MPEG4) || (_u4CodecVer[u4InstID] == VDEC_H263))
        {
            /* MPEG-4/H.263: pre-parse VOL headers unless short-header mode. */
            if(!_fgShortHeader[u4InstID])
            {
                vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
                u4VParserMPEG4(u4InstID, TRUE);
                vDEC_HAL_COMMON_SetVLDPower(u4InstID,OFF);
            }
        }
        else if(_u4CodecVer[u4InstID] == VDEC_DIVX3)
        {
            /* DivX3: width/height live byte-swapped in the stream header. */
            UINT32 temp;
            _u4Divx3SetPos[u4InstID] += 8;
            // skip Compression and "SizeImage"
            temp = u4VDEC_HAL_MPEG_GetBitStreamShift(_u4BSID[u4InstID], u4InstID, 32);
            _u4DIVX3Width[u4InstID] = (((temp&0x00ff0000)>>16)<<8) + ((temp & 0xff000000)>>24);
            temp = u4VDEC_HAL_MPEG_GetBitStreamShift(_u4BSID[u4InstID], u4InstID, 32);
            _u4DIVX3Height[u4InstID] = (((temp&0x00ff0000)>>16)<<8) + ((temp & 0xff000000)>>24);
            vVerifySetUpParm(u4InstID, _u4DIVX3Width[u4InstID], _u4DIVX3Height[u4InstID], 4 /*FRC_29_97*/, FALSE, FALSE);
        }
        /* Publish parsed picture geometry for the verification layer. */
        _tVerPic[u4InstID].u4W = _u4HSize[u4InstID];
        _tVerPic[u4InstID].u4H = _u4VSize[u4InstID];
        _tVerPic[u4InstID].ucMpegVer = _ucMpegVer[u4InstID];
    }
    vVerifyVDecIsrInit(u4InstID);
}
// for AVI or QT, we know frame rate from system layer
// for M4V, maybe set it to 4 (29.976Hz)
// Latch caller-supplied (system-layer) picture parameters into the
// per-instance verification globals.  dwFrmRatCod uses the MPEG frame
// rate codes (e.g. 4 for 29.97 Hz).
void vVerifySetUpParm(UINT32 u4InstID, UINT32 dwPicW, UINT32 dwPicH, UINT32 dwFrmRatCod, BOOL fgDivXM4v, BOOL fgDx4M4v)
{
    /* Picture geometry: both the "real" and the user-visible copies. */
    _u4UPicW[u4InstID]       = dwPicW;
    _u4RealHSize[u4InstID]   = dwPicW;
    _u4UPicH[u4InstID]       = dwPicH;
    _u4RealVSize[u4InstID]   = dwPicH;
    /* Frame rate code and MPEG-4 container flavor flags. */
    _u4UFrmRatCod[u4InstID]  = dwFrmRatCod;
    _fgVerUDivXM4v[u4InstID] = fgDivXM4v;
    _fgVerUDx4M4v[u4InstID]  = fgDx4M4v;
}
// Register the VDEC interrupt handler.  Only instance 0 has a hardware
// IRQ line hooked up here; any other instance is intentionally a no-op.
// Compiled out entirely when IRQ_DISABLE is defined.
void vVerifyVDecIsrInit(UINT32 u4InstID)
{
#ifndef IRQ_DISABLE
    if (u4InstID != 0)
    {
        return;
    }
    /* reg ISR: a non-zero return from request_irq() is fatal here */
    if (request_irq(MT8320_VDEC_IRQ, vVDec0IrqProc, 0, "VDEC0_VT", NULL))
    {
        ASSERT(0);
    }
#endif
}
// Stub: ISR deregistration.  All of the original deregistration logic is
// commented out, so this function currently has no effect for any
// instance ID; the structure is kept as a placeholder.
void vVerifyVDecIsrStop(UINT32 u4InstID)
{
#ifndef IRQ_DISABLE
    // x_os_isr_fct pfnOldIsr;
    if(u4InstID)
    {
        // dereg ISR
        // if (x_reg_isr(VECTOR_VDFUL, NULL, &pfnOldIsr) != OSR_OK)
        {
            // ASSERT(0);
        }
    }
    else
    {
        // dereg ISR
    }
#endif
}
// Reset all per-instance WMV/VC-1 parser state: sequence-header (SPS),
// entry-point (EPS), picture-header (PPS) and intensity-compensation
// parameter structures, plus bit/byte counters and reference-buffer
// assignments.  Must be called before parsing a new WMV bitstream.
void vVerifyInitVParserWMV(UINT32 u4InstID)
{
    VDEC_INFO_WMV_SEQ_PRM_T *prWMVSPS = &_rWMVSPS[u4InstID];
    VDEC_INFO_WMV_ETRY_PRM_T *prWMVEPS = &_rWMVEPS[u4InstID];
    VDEC_INFO_WMV_PIC_PRM_T *prWMVPPS = &_rWMVPPS[u4InstID];
    VDEC_INFO_WMV_ICOMP_PRM_T *prWMVICOMPPS = &_rWMVICOMPPS[u4InstID];
    _u4WMVBitCount[u4InstID] = 0;
    _u4WMVByteCount[u4InstID] = 0;
    reset_pic_hdr_bits(u4InstID);
    _u4DispBufIdx[u4InstID] = 1;
    /* NOTE(review): _u4DispBufIdx was just set to 1, so this condition is
     * always true and the else branch is dead — presumably leftover from
     * a configurable display-buffer scheme. */
    if(_u4DispBufIdx[u4InstID] == 1)
    {
        vSetVerFRefBuf(u4InstID, 0);
        vSetVerBRefBuf(u4InstID, 1);
    }
    else
    {
        vSetVerFRefBuf(u4InstID, 1);
        vSetVerBRefBuf(u4InstID, 0);
    }
    _u4WMVDecPicNo[u4InstID] = 0;
    //Sequence Header variables initialization
    //Advanced
    prWMVSPS->fgBroadcastFlags = FALSE;
    prWMVSPS->fgInterlacedSource = FALSE;
    prWMVSPS->fgTemporalFrmCntr = FALSE;
    prWMVSPS->fgSeqFrameInterpolation = FALSE;
    prWMVSPS->fgHRDPrmFlag = FALSE;
    prWMVSPS->i4HRDNumLeakyBuckets = 0;
    prWMVPPS->fgTopFieldFirst = TRUE; //ming
    //Simple & Main
    prWMVSPS->fgXintra8Switch = FALSE;
    prWMVSPS->fgMultiresEnabled = FALSE;
    prWMVSPS->i4ResIndex = 0;
    prWMVSPS->fgDCTTableMBEnabled = FALSE;
    prWMVSPS->fgPreProcRange = FALSE;
    prWMVSPS->i4NumBFrames = 1;
    prWMVSPS->fgRotatedIdct = FALSE;
    prWMVPPS->ucFrameCodingMode = PROGRESSIVE; //ming
    // WMV7 & WMV8
    prWMVSPS->fgMixedPel = FALSE;
    prWMVSPS->fgFrmHybridMVOn = FALSE;
    prWMVSPS->fgXintra8 = FALSE;
    //End of Sequence
    prWMVPPS->fgPostRC1 = TRUE;
    //EntryPoint Header variables initialization
    prWMVEPS->fgBrokenLink = FALSE;
    prWMVEPS->fgClosedEntryPoint = FALSE;
    prWMVEPS->fgPanScanPresent = FALSE;
    prWMVEPS->fgRefDistPresent = FALSE;
    prWMVEPS->fgLoopFilter = FALSE;
    prWMVEPS->fgUVHpelBilinear = FALSE;
    prWMVEPS->i4RangeState = 0;
    prWMVEPS->i4ReconRangeState = 0;
    prWMVEPS->fgExtendedMvMode = FALSE;
    prWMVEPS->i4MVRangeIndex = 0;
    prWMVEPS->i4DQuantCodingOn = 0;
    prWMVEPS->fgXformSwitch = FALSE;
    prWMVEPS->fgSequenceOverlap = FALSE;
    //Quant related
    prWMVEPS->fgExplicitSeqQuantizer = FALSE;
    prWMVEPS->fgExplicitFrameQuantizer = FALSE;
    prWMVEPS->fgExplicitQuantizer = FALSE;
    prWMVPPS->fgUse3QPDZQuantizer = FALSE;
    prWMVPPS->fgHalfStep = FALSE;
    prWMVEPS->fgExtendedDeltaMvMode = FALSE;
    prWMVEPS->i4DeltaMVRangeIndex = 0;
    prWMVEPS->i4ExtendedDMVX = 0;
    prWMVEPS->i4ExtendedDMVY = 0;
    prWMVEPS->i4RefFrameDistance = 0;
    prWMVPPS->i4BNumerator = 0;
    /* NOTE(review): i4NumBFrames was already set to 1 above — this second
     * assignment is redundant. */
    prWMVSPS->i4NumBFrames = 1;
    prWMVPPS->i4DCStepSize = 0;
    prWMVPPS->i4X9MVMode = 0;
    prWMVPPS->fgMBXformSwitching = FALSE;
    prWMVPPS->i4FrameXformMode = 0;
    prWMVEPS->fgRangeRedYFlag = FALSE;
    prWMVEPS->fgRangeRedUVFlag = FALSE;
    //End of EntryPoint
    prWMVPPS->u4ForwardRefPicType = 0;
    prWMVPPS->u4BackwardRefPicType = 0;
    // Picture Header
    //WMV7 & WMV8
    prWMVPPS->fgDCPredIMBInPFrame = FALSE;
    //for field pictures
    prWMVPPS->fgTopFieldFirst = TRUE;
    prWMVPPS->fgRepeatFirstField = FALSE;
    //FALSE for PROGRESSIVE.
    prWMVPPS->fgInterlaceV2 = FALSE;
    prWMVPPS->fgFieldMode = FALSE;
    prWMVPPS->i4CurrentField = 0; // 0:TOP, 1:BOTTOM field
    prWMVPPS->i4CurrentTemporalField = 0; // 0:1st field or frame picture, 1: 2nd field
    /* Field-picture motion-vector scaling state (forward direction). */
    prWMVPPS->i4MaxZone1ScaledFarMVX = 0;
    prWMVPPS->i4MaxZone1ScaledFarMVY = 0;
    prWMVPPS->i4Zone1OffsetScaledFarMVX = 0;
    prWMVPPS->i4Zone1OffsetScaledFarMVY = 0;
    prWMVPPS->i4FarFieldScale1 = 0;
    prWMVPPS->i4FarFieldScale2 = 0;
    prWMVPPS->i4NearFieldScale = 0;
    /* Field-picture motion-vector scaling state (backward direction). */
    prWMVPPS->i4MaxZone1ScaledFarBackMVX = 0;
    prWMVPPS->i4MaxZone1ScaledFarBackMVY = 0;
    prWMVPPS->i4Zone1OffsetScaledFarBackMVX = 0;
    prWMVPPS->i4Zone1OffsetScaledFarBackMVY = 0;
    prWMVPPS->i4FarFieldScaleBack1 = 0;
    prWMVPPS->i4FarFieldScaleBack2 = 0;
    prWMVPPS->i4NearFieldScaleBack = 0;
    prWMVPPS->fgTwoRefPictures = TRUE;
    prWMVPPS->fgUseOppFieldForRef = TRUE;
    prWMVPPS->fgUseSameFieldForRef = TRUE;
    //Robert TODO: 0511
    prWMVPPS->fgBackRefUsedHalfPel = FALSE;
    prWMVPPS->fgBackRefTopFieldHalfPelMode = FALSE;
    prWMVPPS->fgBackRefBottomFieldHalfPelMode = FALSE;
    prWMVPPS->fgMvResolution = FALSE;
    prWMVPPS->i4Overlap = 0;
    prWMVPPS->i4MvTable = 0;
    prWMVPPS->i4CBPTable = 0;
    prWMVPPS->i4MBModeTable = 0;
    prWMVPPS->i42MVBPTable = 0;
    prWMVPPS->i44MVBPTable = 0;
    //!WMVA profile
    /* NOTE(review): fgPreProcRange was already cleared above — this
     * second assignment is redundant. */
    prWMVSPS->fgPreProcRange = FALSE;
    prWMVEPS->fgNewDCQuant = FALSE;
    prWMVPPS->fgDCTTableMB = FALSE;
    // WMV7 & WMV8
    //Robert TODO:
    if(_i4CodecVersion[u4InstID] == VDEC_WMV2)
    {
        prWMVSPS->fgSkipBitCoding = TRUE;
        prWMVSPS->fgNewPcbPcyTable = TRUE;
    }
    else
    {
        prWMVSPS->fgSkipBitCoding = FALSE;
        prWMVSPS->fgNewPcbPcyTable = FALSE;
    }
    prWMVSPS->fgCODFlagOn = TRUE;
    /* WMV3 and newer use the new DC quantizer. */
    if(_i4CodecVersion[u4InstID] >= VDEC_WMV3)
    {
        prWMVEPS->fgNewDCQuant = TRUE;
    }
    ComputeDQuantDecParam(u4InstID);
    prWMVPPS->ucDiffQtProfile = 0;
    _iSeqHdrData1[u4InstID] = 0;
    _iSeqHdrData2[u4InstID] = 0;
    prWMVSPS->i4SkipBitModeV87 = 0;
    prWMVSPS->i4Wmv8BpMode = 0;
    //NEEDS to initialize
    _new_entry_point[u4InstID] = 0;
    prWMVSPS->fgPostProcInfoPresent = FALSE;
    prWMVSPS->fgYUV411 = FALSE;
    prWMVSPS->fgSpriteMode = FALSE;
    prWMVEPS->i4RangeRedY = 0;
    prWMVEPS->i4RangeMapUV = 0;
    prWMVPPS->ucRepeatFrameCount = 0;
    prWMVPPS->ucDQuantBiLevelStepSize = 0;
    prWMVPPS->fgDQuantOn = FALSE;
    prWMVPPS->i4Panning = 0;
    prWMVPPS->fgDQuantBiLevel = FALSE;
    prWMVICOMPPS->i4ResetMvDram = 0;
    prWMVICOMPPS->i4SecondFieldParity = 0;
    prWMVICOMPPS->i4BoundaryUMVIcomp = 0;
    /* Hook the freshly reset structures into the decode-parameter set. */
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.prSPS = &_rWMVSPS[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.prEPS = &_rWMVEPS[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.prPPS = &_rWMVPPS[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.prICOMPS = &_rWMVICOMPPS[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.fgWmvMode = _u4WmvMode[u4InstID];
#if (WMV_8320_SUPPORT)
#if WMV_LOG_TMP
    printk("vVerifyInitVParserWMV, u4VLDWrapperWrok = 0x%x\n", _pucVLDWrapperWrok[u4InstID]);
    printk("vVerifyInitVParserWMV, u4PPWrapperWrok = 0x%x\n", _pucPPWrapperWork[u4InstID]);
#endif
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4VLDWrapperWrok = (UINT32)_pucVLDWrapperWrok[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4PPWrapperWrok = (UINT32)_pucPPWrapperWork[u4InstID];
#endif
}
// Reset all per-instance MPEG-1/2/4 / H.263 / DivX3 parser state:
// sequence/picture header fields, f-codes, quant-matrix flags and the
// MPEG-4 VOL/VOP parameter hookups.  Called once per bitstream from
// vVerInitVDec() before the header pre-parse.
void vInitVParserMPEG(UINT32 u4InstID)
{
    _u4Divx3SetPos[u4InstID] = 0;
    _u4BrokenLink[u4InstID] = 2;
    _u4DispBufIdx[u4InstID] = 1;
    /* NOTE(review): _u4DispBufIdx was just set to 1, so this condition is
     * always true and the else branch is dead (same pattern as in
     * vVerifyInitVParserWMV). */
    if(_u4DispBufIdx[u4InstID] == 1)
    {
        vSetVerFRefBuf(u4InstID, 0);
        vSetVerBRefBuf(u4InstID, 1);
    }
    else
    {
        vSetVerFRefBuf(u4InstID, 1);
        vSetVerBRefBuf(u4InstID, 0);
    }
    _u4Datain[u4InstID] = 0; // for dwGetBitStream() return value
    _u4BitCount[u4InstID] = 0; // for dwGetBitStream() byte aligned
    _fgVerSeqHdr[u4InstID]=0; // sequence header process
    _ucMpegVer[u4InstID] = VDEC_UNKNOWN;
    _u4HSize[u4InstID]=0; // horizontal size = horizontal size value +
    // horizontal size extension << 12
    _u4VSize[u4InstID]=0; // vertical size = vertical size value +
    // vertical size extension << 12
    _ucParW[u4InstID] = 1;  // pixel aspect ratio width
    _ucParH[u4InstID] = 1;  // pixel aspect ratio height
    _u4HSizeVal[u4InstID]=0; // horizontal size value
    _u4VSizeVal[u4InstID]=0; // vertical size value
    _fgVerProgressiveSeq[u4InstID]=0; //progressive_sequence
    _u4PicWidthMB[u4InstID]=0;
    _u4BPicIniFlag[u4InstID]=0;
    _u4BPicIniFlag0[u4InstID]=0;
    //_ucHSizeExt=0; // horizontal size extension
    //_ucVSizeExt=0; // vertical size extension
    _ucCrmaFmt[u4InstID]=0; //chroma_format;
    _ucFullPelFordVec[u4InstID]=0; // full_pel_forward_vector;
    _ucFordFCode[u4InstID]=0; // forward_f_code;
    _ucFullPelBackVec[u4InstID]=0; // full_pel_backward_vector;
    _ucBackFCode[u4InstID]=0; // backward_f_code;
    _ucIntraDcPre[u4InstID]=0; // intra_dc_precision;
    _fgVerAltScan[u4InstID]=0; // alternate_scan
    _fgVerQScaleType[u4InstID]=0; // q_scale_type;
    _fgVerFrmPredFrmDct[u4InstID]=0; // frame_pred_frame_dct;
    _fgVerIntraVlcFmt[u4InstID]=0; // intra_vlc_format
    _fgVerConcealMotionVec[u4InstID]=0; // concealment_motion_vectors;
    /* MPEG-2 f_code[s][t]: s = forward/backward, t = horizontal/vertical */
    _pucfcode[u4InstID][0][0]=0;
    _pucfcode[u4InstID][0][1]=0;
    _pucfcode[u4InstID][1][0]=0;
    _pucfcode[u4InstID][1][1]=0;
    _u4PicPSXOff[u4InstID] = 0xFFFFFFFF;
    //_dwOldPicPSXSkip = 0xFFFFFFFF;
    _fgVerLoadIntraMatrix[u4InstID] = 0;
    _fgVerLoadNonIntraMatrix[u4InstID] = 0;
    //_bLastPicBBufMd=0; // 1 for 8-line mode, 0 for 16-line mode
    _fgVerBrokenLink[u4InstID] = FALSE;
    _fgVerClosedGop[u4InstID] = FALSE;
    _u4UserDataCodecVersion[u4InstID] = 0;
    _u4UserDataBuildNumber[u4InstID] = 0;
    _u4TimeBase[u4InstID] = 0;
    /* MPEG-4 / Sorenson specifics */
    _fgVerShortVideoHeader[u4InstID] = FALSE;
    _fgSorenson[u4InstID] = FALSE;
    _ucSourceFormat[u4InstID] = 0;
    _ucVisualObjectVerid[u4InstID] = 1;
    _fgVerQuarterSample[u4InstID] = FALSE;
    _fgVerReversibleVlc[u4InstID] = FALSE;
    _fgVerReducedResolutionVopEnable[u4InstID] = FALSE;
    _rDirMode[u4InstID].u4TFrm = 0xffffffff;
    _fgVerTopFldFirst[u4InstID] = FALSE;
    /* Wire direct-mode / GMC / VOL / VOP structures into the MPEG-4
     * decode-parameter set. */
    _rMPEG4VopPrm[u4InstID].prDirMd = &_rDirMode[u4InstID];
    _rMPEG4VopPrm[u4InstID].prGmcPrm = &_rMPEG4GmcPrm[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVol = &_rMPEG4VolPrm[u4InstID];
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVop = &_rMPEG4VopPrm[u4InstID];
}
// Fill the WMV-specific decode parameters: codec version, work-buffer
// start addresses (legacy vs. new-style set chosen by fgWmvMode),
// reconstructed picture planes, post-processing buffers and the
// current decode / forward-reference frame-buffer indices.
void vSetWMVDecParam( UINT32 u4InstID, VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm)
{
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.i4CodecVersion = _i4CodecVersion[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.i4MemBase = 0;

    if (tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.fgWmvMode)
    {
        /* New-style (WMV-mode) work buffers */
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4DcacNewSa = (UINT32)_pucDcacNew[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4MvNewSa = (UINT32)_pucMvNew[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp0NewSa = (UINT32)_pucBp0New[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp1NewSa = (UINT32)_pucBp1New[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp2NewSa = (UINT32)_pucBp2New[u4InstID];
    }
    else
    {
        /* Legacy work buffers: bit-plane, DC/AC prediction and MV stores */
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp1Sa = (UINT32)_pucBp_1[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp2Sa = (UINT32)_pucBp_2[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp3Sa = (UINT32)_pucBp_3[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Bp4Sa = (UINT32)_pucBp_4[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4DcacSa = (UINT32)_pucDcac[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Dcac2Sa = (UINT32)_pucDcac_2[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Mv1Sa = (UINT32)_pucMv_1[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Mv2Sa = (UINT32)_pucMv_2[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Mv3Sa = (UINT32)_pucMv_3[u4InstID];
        tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Mv12Sa = (UINT32)_pucMv_1_2[u4InstID];
    }
    //#endif
    /* Reconstructed picture planes (Y then C per buffer) — both modes. */
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic0YSa = (UINT32)_pucPic0Y[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic0CSa = (UINT32)_pucPic0C[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic1YSa = (UINT32)_pucPic1Y[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic1CSa = (UINT32)_pucPic1C[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic2YSa = (UINT32)_pucPic2Y[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pic2CSa = (UINT32)_pucPic2C[u4InstID];
    /* Post-processing buffers. */
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pp1Sa = (UINT32)_pucPp_1[u4InstID];
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4Pp2Sa = (UINT32)_pucPp_2[u4InstID];
    /* Current decode target and forward reference indices. */
    tVerMpvDecPrm->ucDecFBufIdx = BYTE0(_u4DecBufIdx[u4InstID]);
    tVerMpvDecPrm->SpecDecPrm.rVDecWMVDecPrm.u4FRefBufIdx = _u4FRefBufIdx[u4InstID];
}
// Trigger one WMV/VC-1 picture decode on the hardware.  Builds the
// decode-parameter set, optionally re-initializes barrel shifter 0 from
// shifter 1's current read position, then starts the HW decoder.
// Returns TRUE unconditionally (trigger issued).
BOOL fgVDecProcWMV(UINT32 u4InstID)
{
    UINT32 u4Bytes,u4Bits;
    VDEC_INFO_WMV_BS_INIT_PRM_T rWmvBSInitPrm;
    VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
    tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
    vSetWMVDecParam(u4InstID, tVerMpvDecPrm);
    //Log Input Window before Trigger VDec
    {
        UINT32 u4InputWindow = 0;
        u4InputWindow = u4VDecReadVLD(u4InstID, 0xf0);
        printk("fgVDecProcWMV, Input Window %x \n", u4InputWindow);
    }
#ifdef VERIFICATION_DOWN_SCALE
    /* Down-scaler: enabled or bypass depending on build configuration. */
#ifdef DOWN_SCALE_SUPPORT
    //vSetDownScaleParam(u4InstID, TRUE, &_tDownScalerPrm[u4InstID]);
    vSetDownScaleParam(u4InstID, TRUE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#else
    //vSetDownScaleParam(u4InstID, FALSE, &_tDownScalerPrm[u4InstID]);
    vSetDownScaleParam(u4InstID, FALSE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
    //vDEC_HAL_COMMON_SetDownScaler(u4InstID, &_tDownScalerPrm[u4InstID]);
    //vVDECSetDownScalerPrm(u4InstID, &_tDownScalerPrm[u4InstID]);
    vVDECSetDownScalerPrm(u4InstID, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
    if(_u4BSID[u4InstID] == 1)
    {
        /* When parsing ran on barrel shifter 1, copy its byte/bit read
         * position over to shifter 0 (the one the decode uses) and
         * re-align it with a bit shift. */
        u4Bytes = u4VDEC_HAL_WMV_ReadRdPtr(1, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
        rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rWmvBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID] + u4Bytes;
#ifndef RING_VFIFO_SUPPORT
        rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
        if (_i4CodecVersion[u4InstID] == VDEC_VC1)
        {
            i4VDEC_HAL_WMV_InitBarrelShifter(0, u4InstID, &rWmvBSInitPrm, TRUE);
        }
        else
        {
            i4VDEC_HAL_WMV_InitBarrelShifter(0, u4InstID, &rWmvBSInitPrm, FALSE);
        }
        u4VDEC_HAL_WMV_ShiftGetBitStream(0, u4InstID, u4Bits);
    }
    if(_i4CodecVersion[u4InstID] == VDEC_VC1)
    {
        i4VDEC_HAL_WMV_DecStart(u4InstID, tVerMpvDecPrm); //umv_from_mb = 0 for WMVA
        /* Interlaced-field pictures only count as a decoded picture once
         * the second field has been triggered. */
        if((_rWMVPPS[u4InstID].ucFrameCodingMode != INTERLACEFIELD) || (_rWMVPPS[u4InstID].i4CurrentTemporalField == 1))
        {
            _u4WMVDecPicNo[u4InstID]++;
        }
    }
    else
    {
        i4VDEC_HAL_WMV_DecStart(u4InstID, tVerMpvDecPrm); //umv_from_mb = 1 for !WMVA
        _u4WMVDecPicNo[u4InstID]++;
    }
    return (TRUE);
}
// Map a WMV-family FOURCC (upper- or lower-case variant) to the internal
// codec version stored in _i4CodecVersion[].  WVC1 is treated as WMVA
// (Advanced Profile) with the VC-1 flag raised in the sequence params.
void vCodecVersion(UINT32 u4InstID, UINT32 u4CodecFOURCC)
{
    VDEC_INFO_WMV_SEQ_PRM_T *prWMVSPS = &_rWMVSPS[u4InstID];

    prWMVSPS->fgVC1 = FALSE;
    if (u4CodecFOURCC == FOURCC_WVC1_WMV)
    {
        /* VC-1 elementary stream: decode as Advanced Profile */
        u4CodecFOURCC = FOURCC_WMVA_WMV;
        prWMVSPS->fgVC1 = TRUE;
    }

    switch (u4CodecFOURCC)
    {
    case FOURCC_WMV1_WMV:
    case FOURCC_wmv1_WMV:
        _i4CodecVersion[u4InstID] = VDEC_WMV1;
        break;
    case FOURCC_WMV2_WMV:
    case FOURCC_wmv2_WMV:
        _i4CodecVersion[u4InstID] = VDEC_WMV2;
        break;
    case FOURCC_WMV3_WMV:
    case FOURCC_wmv3_WMV:
        _i4CodecVersion[u4InstID] = VDEC_WMV3;
        break;
    case FOURCC_WMVA_WMV:
    case FOURCC_wmva_WMV:
        _i4CodecVersion[u4InstID] = VDEC_VC1;
        break;
    default:
        /* unknown FOURCC: leave _i4CodecVersion untouched, just report */
        vVDecOutputDebugString("WMV Codec Error\n");
        break;
    }
}
// Fill the film-grain-technology (FGT) parameter block from the current
// H.264 SEI film-grain characteristics message.  When FGT_SUPPORT is not
// compiled in, the FGT unit is simply disabled (ucDataScr = 0).
void vSetFGTParam(UINT32 u4InstID, VDEC_INFO_H264_FGT_PRM_T *prFGTPrm)
{
#ifdef FGT_SUPPORT
    VDEC_INFO_DEC_PRM_T *ptVerMpvDecPrm;
    ptVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
    prFGTPrm->ucDataScr = FGT_EN | FGT_SCR_PP; // If Cancel flag is TRUE, it will be bypass mode
#ifdef DOWN_SCALE_SUPPORT
    prFGTPrm->ucDataScr |= FGT_VDSCL_BUSY_EN;
#endif
    /* Source = decode work buffer, target = dedicated FGT buffer; the
     * chroma plane follows the luma plane at u4DramPicSize offset. */
    prFGTPrm->pucFGTScrYAddr = _pucDecWorkBuf[u4InstID];
    prFGTPrm->pucFGTScrCAddr = _pucDecWorkBuf[u4InstID] + _ptCurrFBufInfo[u4InstID]->u4DramPicSize;
    prFGTPrm->pucFGTTrgYAddr = _pucFGTBuf[u4InstID];
    prFGTPrm->pucFGTTrgCAddr = _pucFGTBuf[u4InstID] + _ptCurrFBufInfo[u4InstID]->u4DramPicSize;
    /* Macroblock grid: height is halved for field pictures. */
    prFGTPrm->ucMBXSize = ((_ptCurrFBufInfo[u4InstID]->u4W + 15)>> 4);
    prFGTPrm->ucMBYSize = (((_ptCurrFBufInfo[u4InstID]->u4H >> (1-(fgIsFrmPic(u4InstID)))) + 15)>> 4);
    /* Control word packs the SEI film-grain fields.  BUGFIX: the original
     * OR'ed the component-2 num_model_values term twice (copy-paste);
     * since (x | x) == x the register value is unchanged — the duplicate
     * is simply removed. */
    prFGTPrm->u4Ctrl = ((ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->u4NumModelValuesMinus1[0] & 0x3) << 0) |
        ((ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->u4NumModelValuesMinus1[1] & 0x3) << 2) |
        ((ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->u4NumModelValuesMinus1[2] & 0x3) << 4) |
        (ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->fgFilmGrainCharacteristicsCancelFlag << 8) |
        (ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->fgCompModelPresentFlag[0] << 9) |
        (ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->fgCompModelPresentFlag[1] << 10) |
        (ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->fgCompModelPresentFlag[2] << 11) |
        ((ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSEI->u4Log2ScaleFactor&0xfff) << 12) |
        ((_ptCurrFBufInfo[u4InstID]->i4POC & 0xff) << 16) |
        (((ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4IdrPicId) & 0x7) << 24);
#else
    prFGTPrm->ucDataScr = 0;
#endif
}
// Snapshot the caller's down-scaler settings into the per-instance decode
// parameter block.  Only the scaler-relevant fields are copied; the
// remaining members of rDownScalerPrm are left as they were.
void vSaveDownScaleParam(UINT32 u4InstID, VDEC_INFO_VDSCL_PRM_T *prDownScalerPrm)
{
    VDEC_INFO_VDSCL_PRM_T *prDst = &_tVerMpvDecPrm[u4InstID].rDownScalerPrm;

    prDst->ucPicStruct  = prDownScalerPrm->ucPicStruct;
    prDst->ucScanType   = prDownScalerPrm->ucScanType;
    prDst->ucScrAgent   = prDownScalerPrm->ucScrAgent;
    prDst->ucSpectType  = prDownScalerPrm->ucSpectType;
    prDst->ucVdoFmt     = prDownScalerPrm->ucVdoFmt;
    prDst->u4DispW      = prDownScalerPrm->u4DispW;
    prDst->u4SrcHeight  = prDownScalerPrm->u4SrcHeight;
    prDst->u4SrcWidth   = prDownScalerPrm->u4SrcWidth;
    prDst->u4TrgHeight  = prDownScalerPrm->u4TrgHeight;
    prDst->u4TrgWidth   = prDownScalerPrm->u4TrgWidth;
    prDst->u4TrgOffH    = prDownScalerPrm->u4TrgOffH;
    prDst->u4TrgOffV    = prDownScalerPrm->u4TrgOffV;
    prDst->u4TrgYAddr   = prDownScalerPrm->u4TrgYAddr;
    prDst->u4TrgCAddr   = prDownScalerPrm->u4TrgCAddr;
    prDst->u4WorkAddr   = prDownScalerPrm->u4WorkAddr;
}
// ---------------------------------------------------------------------
// vSetDownScaleParam : configure the video down-scaler (VDSCL) for one
// picture of one decoder instance (verification builds only).
//
// Parameters : u4InstID        - decoder instance ID
//              fgEnable        - TRUE to enable down-scaling for this
//                                picture, FALSE to disable it
//              prDownScalerPrm - scaler parameter block to fill (written)
// Return     : None
//
// Source geometry comes from _tVerMpvDecPrm; the target geometry is
// either randomized (_fgVDSCLEnableRandomTest) or fixed to the decoded
// picture size.  NOTE: the full body is compiled out unless
// VERIFICATION_DOWN_SCALE is defined.  Statement order matters here:
// rand() is called in a fixed sequence, so the code is documented in
// place rather than restructured.
// ---------------------------------------------------------------------
void vSetDownScaleParam(UINT32 u4InstID, BOOL fgEnable, VDEC_INFO_VDSCL_PRM_T *prDownScalerPrm)
{
#ifdef VERIFICATION_DOWN_SCALE
UINT32 dwPicWidthDec,dwPicHeightDec,u4DramPicSize;
VDEC_INFO_DEC_PRM_T *ptVerMpvDecPrm;
ptVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
// Decoded picture dimensions drive every source/target size choice below.
dwPicWidthDec = _tVerMpvDecPrm[u4InstID].u4PicW;
dwPicHeightDec = _tVerMpvDecPrm[u4InstID].u4PicH;
if(fgEnable)
{
prDownScalerPrm->fgMbaff = FALSE;
prDownScalerPrm->fgDSCLEn = TRUE;
// Newer chips XOR a bit into the instance swap mode; older ones disable it.
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8530)
prDownScalerPrm->ucAddrSwapMode = _tVerMpvDecPrm[u4InstID].ucAddrSwapMode ^ 0x4;
#else
prDownScalerPrm->ucAddrSwapMode = ADDRSWAP_OFF;
#endif
prDownScalerPrm->fgLumaKeyEn = _fgVDSCLEnableLumaKeyTest[u4InstID];
prDownScalerPrm->u2LumaKeyValue= (UINT16) (((UINT32) rand())%256);
// --- Per-codec picture structure and scaler source agent selection ---
if(_u4CodecVer[u4InstID] == VDEC_WMV)
{
if(_rWMVPPS[u4InstID].ucFrameCodingMode == INTERLACEFIELD)
{
if(_rWMVPPS[u4InstID].i4CurrentField == 1)
{
prDownScalerPrm->ucPicStruct = BOTTOM_FIELD;
}
else
{
prDownScalerPrm->ucPicStruct = TOP_FIELD;
}
}
else if(_rWMVPPS[u4InstID].ucFrameCodingMode == INTERLACEFRAME)
{
prDownScalerPrm->ucPicStruct = TOP_BOTTOM_FIELD;
}
else
{
prDownScalerPrm->ucPicStruct = FRAME;
}
// With loop-filter/overlap the MC output is filtered again, so the
// scaler must read from PP output (unless luma-key forces MC).
if ((_rWMVEPS[u4InstID].fgLoopFilter == 1) || (_rWMVPPS[u4InstID].i4Overlap & 1))
{
if (prDownScalerPrm->fgLumaKeyEn)
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_MC >> 2; //WMV+Luma_Key Only support MC out
else
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_PP >> 2;
prDownScalerPrm->fgEnColorCvt = FALSE;
}
else
{
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_MC >> 2;
prDownScalerPrm->fgEnColorCvt = (BOOL) (((UINT32) rand())&0x1);//random(2);
}
}
else if(_u4CodecVer[u4InstID] == VDEC_H264)
{
prDownScalerPrm->ucPicStruct = ptVerMpvDecPrm->ucPicStruct;
prDownScalerPrm->fgMbaff = ptVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSPS->fgMbAdaptiveFrameFieldFlag;
}
else
{
// MPEG-style codecs: progressive content always reports frame (3).
if(_fgVerProgressiveFrm[u4InstID] || _fgVerProgressiveSeq[u4InstID])
{
prDownScalerPrm->ucPicStruct = 3;
}
else
{
prDownScalerPrm->ucPicStruct = (ptVerMpvDecPrm->ucPicStruct > 3) ?
(ptVerMpvDecPrm->ucPicStruct - 3) : ptVerMpvDecPrm->ucPicStruct;
}
}
// Field pictures only carry half the frame height.
if((prDownScalerPrm->ucPicStruct == TOP_FIELD) || (prDownScalerPrm->ucPicStruct == BOTTOM_FIELD))
{
dwPicHeightDec = (dwPicHeightDec >> 1);
}
// --- Full (re)configuration only on the first field / new frame buffer;
// the second field of a pair reuses the existing setup. ---
if(((_u4CodecVer[u4InstID] == VDEC_WMV)&&(_rWMVPPS[u4InstID].i4CurrentTemporalField==0))||
((_u4CodecVer[u4InstID] == VDEC_H264)&&(fgIsDecFlagSet(u4InstID, DEC_FLAG_CHG_FBUF)))||
((!_fgDec2ndFldPic[u4InstID])&&(_u4CodecVer[u4InstID] != VDEC_WMV)&&(_u4CodecVer[u4InstID] != VDEC_H264)))
{
if(_u4CodecVer[u4InstID] == VDEC_WMV)
{
prDownScalerPrm->ucSpectType = RW_VDSCL_SPEC_WMV >> 5;
prDownScalerPrm->fgYOnly = 0;
if ((_rWMVEPS[u4InstID].fgLoopFilter == 1) || (_rWMVPPS[u4InstID].i4Overlap & 1))
{
if (prDownScalerPrm->fgLumaKeyEn)
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_MC >> 2; //WMV+Luma_Key Only support MC out
else
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_PP >> 2;
prDownScalerPrm->fgEnColorCvt = FALSE;
}
else
{
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_MC >> 2;
prDownScalerPrm->fgEnColorCvt = (BOOL) (((UINT32) rand())&0x1);//random(2);
}
}
else if(_u4CodecVer[u4InstID] == VDEC_H264)
{
prDownScalerPrm->ucSpectType = RW_VDSCL_SPEC_264 >> 5;
prDownScalerPrm->fgYOnly = (fgIsMonoPic(u4InstID)? (RW_VDSCL_Y_ONLY>>7): 0);
// With film-grain enabled the scaler reads the FGT output instead of PP.
#ifdef FGT_SUPPORT
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_FG >> 2;
#else
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_PP >> 2;
#endif
prDownScalerPrm->fgEnColorCvt = FALSE;
}
else
{
prDownScalerPrm->ucSpectType = RW_VDSCL_SPEC_MPEG >> 5;
prDownScalerPrm->fgYOnly = 0;
if((_u4CodecVer[u4InstID] == VDEC_MPEG2) && _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.fgPpEnable)
{
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_PP >> 2;
prDownScalerPrm->fgEnColorCvt = FALSE;
}
else
{
prDownScalerPrm->ucScrAgent = RW_VDSCL_SRC_MC >> 2;
prDownScalerPrm->fgEnColorCvt = (BOOL) (((UINT32) rand())&0x1);//random(2);
}
}
prDownScalerPrm->u4SrcHeight = dwPicHeightDec;
prDownScalerPrm->u4SrcWidth = dwPicWidthDec;
prDownScalerPrm->u4SrcYOffH = 0;
prDownScalerPrm->u4SrcYOffV = 0;
prDownScalerPrm->u4SrcCOffH = 0;
prDownScalerPrm->u4SrcCOffV = 0;
prDownScalerPrm->u4SclYOffH = 0;
prDownScalerPrm->u4SclYOffV = 0;
prDownScalerPrm->u4SclCOffH = 0;
prDownScalerPrm->u4SclCOffV = 0;
// --- Random-test mode: draw target size/offsets until a legal
// combination (even width, height multiple of 4, within source) is hit.
if(_fgVDSCLEnableRandomTest[u4InstID])
{
prDownScalerPrm->ucScanType = (UCHAR) (((UINT32) rand())&0x1);//random(2);
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
prDownScalerPrm->ucVdoFmt = 0;
#else
prDownScalerPrm->ucVdoFmt = (UCHAR) (((UINT32) rand())&0x1);//random(2);
#endif
while(TRUE)
{
// MT8550+ caps the random target width at 960.
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
if (dwPicWidthDec > 960)
{
prDownScalerPrm->u4TrgWidth = (UINT32) (((UINT32) rand())%960)+0x10;
if (prDownScalerPrm->u4TrgWidth > 960)
prDownScalerPrm->u4TrgWidth = 960;
}
else
#endif
{
prDownScalerPrm->u4TrgWidth = (UINT32) (((UINT32) rand())%dwPicWidthDec)+0x10;
}
prDownScalerPrm->u4TrgHeight = (UINT32) (((UINT32) rand())%dwPicHeightDec)+0x10;
if(((prDownScalerPrm->u4TrgWidth%2)==0)
&&((prDownScalerPrm->u4TrgHeight%4)==0)
&&(prDownScalerPrm->u4TrgWidth <= dwPicWidthDec)
&&(prDownScalerPrm->u4TrgHeight <= (dwPicHeightDec>>(prDownScalerPrm->ucVdoFmt)))
)
{
if(prDownScalerPrm->u4TrgWidth == dwPicWidthDec)
{
prDownScalerPrm->u4TrgOffH = 0;
}
else
{
prDownScalerPrm->u4TrgOffH = (((((UINT32) rand())%(dwPicWidthDec-prDownScalerPrm->u4TrgWidth))>>1)<<1);
}
if(prDownScalerPrm->u4TrgHeight == (dwPicHeightDec>>(prDownScalerPrm->ucVdoFmt)))
{
prDownScalerPrm->u4TrgOffV = 0;
}
else
{
prDownScalerPrm->u4TrgOffV = (((((UINT32) rand())%((dwPicHeightDec-prDownScalerPrm->u4TrgHeight)))>>2)<<2);
}
break;
}
}
// Random source/scaler crop offsets; legal ranges differ per spec type
// and per source agent (PP vs MC output).
if(prDownScalerPrm->ucScrAgent == (RW_VDSCL_SRC_PP >> 2))
{
if(prDownScalerPrm->ucSpectType == (RW_VDSCL_SPEC_WMV >> 5))
{
#if 0
if((prDownScalerPrm->u4SrcWidth>>1) >= prDownScalerPrm->u4TrgWidth)
{
prDownScalerPrm->u4SrcYOffH = (UINT32)(rand()%7);
prDownScalerPrm->u4SrcCOffH = (UINT32)(rand()%4);
}
#endif
if(prDownScalerPrm->ucPicStruct == TOP_BOTTOM_FIELD)
{
prDownScalerPrm->u4SrcYOffV = (UINT32)((((UINT32) rand())%3)*2);
prDownScalerPrm->u4SrcCOffV = (UINT32)((((UINT32) rand())%3)*2);
}
else
{
prDownScalerPrm->u4SrcYOffV = (UINT32)(((UINT32) rand())%5);
prDownScalerPrm->u4SrcCOffV = (UINT32)(((UINT32) rand())%5);
}
}
else//h264
{
#if 0
if((prDownScalerPrm->u4SrcWidth>>1) >= prDownScalerPrm->u4TrgWidth)
{
prDownScalerPrm->u4SrcYOffH = (UINT32)(rand()%9);
prDownScalerPrm->u4SrcCOffH = (UINT32)(rand()%5);
}
#endif
if(prDownScalerPrm->ucPicStruct == TOP_BOTTOM_FIELD)
{
prDownScalerPrm->u4SrcYOffV = (UINT32)((((UINT32) rand())%4)*2);
prDownScalerPrm->u4SrcCOffV = (UINT32)((((UINT32) rand())%2)*2);
}
else
{
prDownScalerPrm->u4SrcYOffV = (UINT32)(((UINT32) rand())%7);
prDownScalerPrm->u4SrcCOffV = (UINT32)(((UINT32) rand())%4);
}
}
}
else
{
#if 0
if((prDownScalerPrm->u4SrcWidth>>1) >= prDownScalerPrm->u4TrgWidth)
{
prDownScalerPrm->u4SrcYOffH = (UINT32)(rand()%7);
prDownScalerPrm->u4SrcCOffH = (UINT32)(rand()%4);
}
#endif
if(prDownScalerPrm->ucPicStruct == TOP_BOTTOM_FIELD)
{
prDownScalerPrm->u4SrcYOffV = (UINT32)((((UINT32) rand())%5)*2);
prDownScalerPrm->u4SrcCOffV = (UINT32)((((UINT32) rand())%3)*2);
}
else
{
prDownScalerPrm->u4SrcYOffV = (UINT32)(((UINT32) rand())%9);
prDownScalerPrm->u4SrcCOffV = (UINT32)(((UINT32) rand())%5);
}
}
// Color conversion needs even offsets; MBAFF allows no source offset.
if(prDownScalerPrm->fgEnColorCvt)
{
prDownScalerPrm->u4SrcYOffH = prDownScalerPrm->u4SrcYOffH - (prDownScalerPrm->u4SrcYOffH%2);
prDownScalerPrm->u4SrcYOffV = prDownScalerPrm->u4SrcYOffV - (prDownScalerPrm->u4SrcYOffV%2);
prDownScalerPrm->u4SrcCOffH = prDownScalerPrm->u4SrcCOffH - (prDownScalerPrm->u4SrcCOffH%2);
prDownScalerPrm->u4SrcCOffV = prDownScalerPrm->u4SrcCOffV - (prDownScalerPrm->u4SrcCOffV%2);
}
if(prDownScalerPrm->fgMbaff)
{
prDownScalerPrm->u4SrcYOffH = 0;
prDownScalerPrm->u4SrcYOffV = 0;
prDownScalerPrm->u4SrcCOffH = 0;
prDownScalerPrm->u4SrcCOffV = 0;
}
// Random display width: at least target+offset, 16-aligned, max 1920.
if((prDownScalerPrm->u4TrgWidth + prDownScalerPrm->u4TrgOffH) < 1920 )
{
prDownScalerPrm->u4DispW = ((((prDownScalerPrm->u4TrgWidth + prDownScalerPrm->u4TrgOffH + (((UINT32) rand())%(1920 - prDownScalerPrm->u4TrgWidth - prDownScalerPrm->u4TrgOffH )))+15)>>4)<<4);
}
else
{
prDownScalerPrm->u4DispW = 1920;
}
prDownScalerPrm->u4TrgBufLen = prDownScalerPrm->u4DispW;
}
else
{
// --- Fixed mode: target equals the decoded size (1:1 "scale"). ---
//srand(IO_READ32(PARSER_BASE,0x4C)&0xFFFF);
prDownScalerPrm->ucScanType = 0;//random(2);
prDownScalerPrm->ucVdoFmt = 0;//random(2);
//prDownScalerPrm->u4DispW = (((dwPicWidthDec + 15) >> 4) << 4);
//prDownScalerPrm->u4TrgBufLen = prDownScalerPrm->u4DispW;
while(TRUE)
{
prDownScalerPrm->u4TrgWidth = dwPicWidthDec; //random(dwPicWidthDec) + 0x40;
prDownScalerPrm->u4TrgHeight = dwPicHeightDec;//random(dwPicHeightDec) + 0x30;
if((prDownScalerPrm->u4TrgHeight>=8)&&(prDownScalerPrm->u4TrgWidth>=8)&&((prDownScalerPrm->u4TrgWidth%2)==0)&&((prDownScalerPrm->u4TrgHeight%4)==0)
&&(prDownScalerPrm->u4TrgWidth <= dwPicWidthDec)&&(prDownScalerPrm->u4TrgHeight <= (dwPicHeightDec>>(prDownScalerPrm->ucVdoFmt))))
{
if(prDownScalerPrm->u4TrgWidth == dwPicWidthDec)
{
prDownScalerPrm->u4TrgOffH = 0;
}
else
{
prDownScalerPrm->u4TrgOffH = (((((UINT32) rand())%(dwPicWidthDec-prDownScalerPrm->u4TrgWidth))>>1)<<1);
}
if(prDownScalerPrm->u4TrgHeight == (dwPicHeightDec>>(prDownScalerPrm->ucVdoFmt)))
{
prDownScalerPrm->u4TrgOffV = 0;
}
else
{
prDownScalerPrm->u4TrgOffV = (((((UINT32) rand())%((dwPicHeightDec-prDownScalerPrm->u4TrgHeight)))>>2)<<2);
}
break;
}
}
if((prDownScalerPrm->u4TrgWidth + prDownScalerPrm->u4TrgOffH) < 1920 )
{
prDownScalerPrm->u4DispW = ((((prDownScalerPrm->u4TrgWidth + prDownScalerPrm->u4TrgOffH + (((UINT32) rand())%(1920 - prDownScalerPrm->u4TrgWidth - prDownScalerPrm->u4TrgOffH )))+15)>>4)<<4);
}
else
{
prDownScalerPrm->u4DispW = 1920;
}
// NOTE(review): the assignment below unconditionally overrides the
// u4DispW chosen by the if/else just above, making that computation
// dead code (though it still consumes one rand() call on the <1920
// path) — confirm which value is intended.
prDownScalerPrm->u4DispW = (((dwPicWidthDec + 15) >> 4) << 4);
prDownScalerPrm->u4TrgBufLen = prDownScalerPrm->u4DispW;
#if 0
prDownScalerPrm->u4TrgHeight = dwPicHeightDec;
prDownScalerPrm->u4TrgWidth = dwPicWidthDec;
prDownScalerPrm->u4TrgOffH = 0;
prDownScalerPrm->u4TrgOffV = 0;
#endif
}
// Target buffers: chroma follows luma by a fixed 1920x1088 picture size.
//prDownScalerPrm->pucTrgYAddr = _pucVDSCLBuf[u4InstID];
prDownScalerPrm->u4TrgYAddr = (UINT32)(_pucVDSCLBuf[u4InstID]);
u4DramPicSize = 0x1FE000;//1920*1088//((((_tVerMpvDecPrm.u4PicW + 15) >> 4) * ((_tVerMpvDecPrm.u4PicH + 31) >> 5)) << 9);
//prDownScalerPrm->u4TrgCAddr = *(UINT32*)(_pucVDSCLBuf[u4InstID] + u4DramPicSize);
prDownScalerPrm->u4TrgCAddr = prDownScalerPrm->u4TrgYAddr + u4DramPicSize;
//prDownScalerPrm->u4WorkAddr = *(UINT32*)_pucVDSCLWorkBuf[u4InstID];
prDownScalerPrm->u4WorkAddr = (UINT32)(_pucVDSCLWorkBuf[u4InstID]);
// Pre-fill the target buffer so stale data cannot pass a compare check.
vFilledFBuf(u4InstID, _pucVDSCLBuf[u4InstID], _ptCurrFBufInfo[u4InstID]->u4DramPicSize);
}
//vVDecOutputDebugString("Vdo=%d PicHeight= %d TargHeight=%d DispW=%d\n",prDownScalerPrm->ucVdoFmt,
//dwPicHeightDec, prDownScalerPrm->u4TrgHeight,prDownScalerPrm->u4DispW);
}
else
{
// Down-scaling disabled for this picture.
prDownScalerPrm->fgDSCLEn = FALSE;
}
#if 0
if((_u4CodecVer == WMV)&&(_rWMVPPS.i4CurrentTemporalField==0)&&(prDownScalerPrm->fgDSCLEn==TRUE))
{
vFilledFBuf(_pucVDSCLBuf, _ptCurrFBufInfo->u4DramPicSize);
}
else if((_u4CodecVer != H264)&&(!_fgDec2ndFldPic)&&(prDownScalerPrm->fgDSCLEn==TRUE))
{
vFilledFBuf(_pucVDSCLBuf, _ptCurrFBufInfo->u4DramPicSize);
}
#endif
#endif
}
// Pre-compute the WMV/VC-1 dequantisation tables for every picture
// quantiser step size (1..62), for both the 3-QP and 5-QP deadzone
// variants.  Results are stored in this instance's picture parameters.
void ComputeDQuantDecParam(UINT32 u4InstID)
{
    VDEC_INFO_WMV_ETRY_PRM_T *prEntryPrm = &_rWMVEPS[u4InstID];
    VDEC_INFO_WMV_PIC_PRM_T *prPicPrm = &_rWMVPPS[u4InstID];
    INT32 i4Step;

    /* 3-QP deadzone table: symmetric deadzone, no even-step correction. */
    for (i4Step = 1; i4Step < 63; i4Step++)
    {
        VDEC_INFO_WMV_DQUANT_PRM_T *pDQ = &prPicPrm->rDQuantParam3QPDeadzone[i4Step];
        INT32 i4Double = i4Step + 1;
        INT32 i4DC = (i4Step + 1) >> 1;

        pDQ->i4DoubleStepSize = i4Double;
        pDQ->i4StepMinusStepIsEven = 0;
        pDQ->i4DoublePlusStepSize = i4Double;
        pDQ->i4DoublePlusStepSizeNeg = -pDQ->i4DoublePlusStepSize;

        /* DC step size: clamped to 8 for small steps, except that the
         * "new DC quant" mode uses 2*step when step <= 2. */
        if (i4DC > 4)
        {
            pDQ->i4DCStepSize = i4DC / 2 + 6;
        }
        else if (prEntryPrm->fgNewDCQuant && (i4DC <= 2))
        {
            pDQ->i4DCStepSize = 2 * i4DC;
        }
        else
        {
            pDQ->i4DCStepSize = 8;
        }
    }

    /* 5-QP deadzone table: adds an even-step correction term whose form
     * depends on the codec generation (WMV3/VC-1 vs older WMV). */
    for (i4Step = 1; i4Step < 63; i4Step++)
    {
        VDEC_INFO_WMV_DQUANT_PRM_T *pDQ = &prPicPrm->rDQuantParam5QPDeadzone[i4Step];
        INT32 i4Double = i4Step + 1;
        INT32 i4Half = (i4Step + 1) >> 1;
        INT32 i4DC = i4Half;

        pDQ->i4DoubleStepSize = i4Double;
        if (_i4CodecVersion[u4InstID] >= VDEC_WMV3)
        {
            pDQ->i4StepMinusStepIsEven = i4Half;
        }
        else
        {
            /* Older codecs subtract 1 when the halved step is even. */
            pDQ->i4StepMinusStepIsEven = i4Half - ((i4Half & 1) == 0);
        }
        pDQ->i4DoublePlusStepSize = i4Double + pDQ->i4StepMinusStepIsEven;
        pDQ->i4DoublePlusStepSizeNeg = -pDQ->i4DoublePlusStepSize;

        if (i4DC > 4)
        {
            pDQ->i4DCStepSize = i4DC / 2 + 6;
        }
        else if (prEntryPrm->fgNewDCQuant && (i4DC <= 2))
        {
            pDQ->i4DCStepSize = 2 * i4DC;
        }
        else
        {
            pDQ->i4DCStepSize = 8;
        }
    }
}
// *********************************************************************
// Function : void vVParserProc(UINT32 u4InstID)
// Description : Video parser procedure
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
// Per-picture header-parsing dispatcher: runs the codec-specific parser
// for the instance's active codec (_u4CodecVer) and advances the decode
// state machine (_tVerDec[].ucState).  Most codecs go straight to
// DEC_NORM_WAIT_TO_DEC; parse failures or skipped pictures move to
// DEC_NORM_WAIT_DECODE or stay in DEC_NORM_VPARSER instead.
void vVParserProc(UINT32 u4InstID)
{
UINT32 u4VldByte,u4VldBit,u4RetVal;
char strMessage[512];
// --- RealMedia: dedicated parser, at-speed variant when enabled. ---
if (_u4CodecVer[u4InstID] == VDEC_RM)
{
#ifdef RM_ATSPEED_TEST_ENABLE
vRM_VParserEx(u4InstID);
#else //RM_ATSPEED_TEST_ENABLE
vRM_VParser(u4InstID);
#endif //RM_ATSPEED_TEST_ENABLE
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
// --- HEVC: search for the next decodable picture; classify the result. ---
else if(_u4CodecVer[u4InstID] == VDEC_H265)
{
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bNoDecode = 0;
u4RetVal = vHEVCSearchRealPic(u4InstID);
if (NOT_SUPPORT==u4RetVal){
// Unsupported stream: terminate this file and skip decoding.
_u4VerBitCount[u4InstID] = 0xFFFFFFFF;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bNoDecode = 1;
_tVerDec[u4InstID].ucState = DEC_NORM_VPARSER; //skip decode
} else if ( PARSE_OK != u4RetVal){
if (SLICE_SYNTAX_ERROR==u4RetVal){
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bNoDecode = 1;
_u4PicCnt[u4InstID]++; //skip decode
}
_tVerDec[u4InstID].ucState = DEC_NORM_VPARSER; //keep header parsing
} else {
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
}
// --- H.264 / MVC: for a dependent view, wait for the base view first. ---
else if(_u4CodecVer[u4InstID] == VDEC_H264)
//if(_u4CodecVer[u4InstID] == VDEC_H264)
{
#if VDEC_MVC_SUPPORT
// Busy-wait (with sleep) until the MVC base view signals readiness.
while(_ucMVCType[u4InstID] && (_fgMVCReady[u4InstID] == FALSE))
{
#if VDEC_8320_SUPPORT
msleep(5);
_fgMVCBaseGo = TRUE;
#else
x_thread_delay(5);
#endif
}
#endif
vSearchRealPic(u4InstID);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
// --- WMV / VC-1: parse headers and handle skipped frames specially. ---
else if(_u4CodecVer[u4InstID] == VDEC_WMV)
{
VDEC_INFO_WMV_SEQ_PRM_T *prWMVSPS = &_rWMVSPS[u4InstID];
VDEC_INFO_WMV_PIC_PRM_T *prWMVPPS = &_rWMVPPS[u4InstID];
#if (WMV_8320_SUPPORT)
#if WMV_LOG_TMP
printk("vVParserProc, u4VLDWrapperWrok = 0x%x\n", _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4VLDWrapperWrok);
printk("vVParserProc, u4PPWrapperWrok = 0x%x\n", _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4PPWrapperWrok);
#endif
// Program the VLD/PP wrapper work-buffer physical addresses.
vVDecWriteMC(u4InstID, RW_MC_VLD_WRAPPER, PHYSICAL(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4VLDWrapperWrok));
vVDecWriteMC(u4InstID, RW_MC_PP_WRAPPER, PHYSICAL(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecWMVDecPrm.rWmvWorkBufSa.u4PPWrapperWrok));
#endif
// Remember where this picture starts in the video FIFO.
_u4CurrPicStartAddr[u4InstID] = u4VDEC_HAL_WMV_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
// _u4CurrPicStartAddr[u4InstID]
#if WMV_LOG_TMP
printk("vVParserProc, rd:0x%x\n",
_u4CurrPicStartAddr[u4InstID]);
#endif
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8530)
#if WMV_EC_IMPROVE_SUPPORT
//Search Slice Start Code
if(_i4CodecVersion[u4InstID] == VDEC_VC1)
{
vWMVSearchSliceStartCode(u4InstID);
}
#endif
#endif
if(fgVParserProcWMV(u4InstID))
{
// X8-intra pictures are counted and deferred without decoding.
if(prWMVSPS->fgXintra8)
{
_u4WMVDecPicNo[u4InstID]++;
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
return;
}
UpdateVopheaderParam(u4InstID);
if(prWMVPPS->ucPicType == SKIPFRAME)
{
// Skipped frame: no decode; re-initialise the video FIFO and the
// barrel shifter so parsing resumes at the right read pointer.
VDEC_INFO_WMV_VFIFO_PRM_T rWmvVFifoInitPrm;
VDEC_INFO_WMV_BS_INIT_PRM_T rWmvBSInitPrm;
_u4SkipFrameCnt[u4InstID]++;
_u4WMVDecPicNo[u4InstID]++;
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
if(_i4CodecVersion[u4InstID] != VDEC_VC1)
{
rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
rWmvVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
i4VDEC_HAL_WMV_InitVDecHW(u4InstID,&rWmvVFifoInitPrm);
// Wrap the saved position back into the FIFO window if needed.
if(_iSetPos[u4InstID] >= V_FIFO_SZ)
{
_iSetPos[u4InstID] = _iSetPos[u4InstID] - V_FIFO_SZ;
}
rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + _iSetPos[u4InstID];
#ifndef RING_VFIFO_SUPPORT
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// Ring mode: write pointer alternates between half and full FIFO
// depending on which half was loaded last.
// rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, FALSE);
}
else
{
vVDEC_HAL_WMV_AlignRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], BYTE_ALIGN); //in order to use fgNextStartCode().
u4VldByte = u4VDEC_HAL_WMV_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
rWmvVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
i4VDEC_HAL_WMV_InitVDecHW(u4InstID,&rWmvVFifoInitPrm);
rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + u4VldByte;
#ifndef RING_VFIFO_SUPPORT
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, TRUE);
}
}
else
{
u4VldByte = u4VDEC_HAL_WMV_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
//vVDecOutputDebugString("BYTE = %d and Bit = %d after parsing\n",u4VldByte,u4VldBit);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
}
else
{
// Parse failure: EOF is benign, anything else is reported to the PC.
if(_u4VprErr[u4InstID] == END_OF_FILE)
{
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else
{
strcpy(_tFileListRecInfo[u4InstID].bFileName,_FileList_Rec[u4InstID]);
sprintf(strMessage,"Parsing header error : 0x%.8x\n",_u4VprErr[u4InstID]);
fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
}
}
#if VDEC_DDR3_SUPPORT
/* if (_rWMVSPS[u4InstID].u4PicWidthSrc > 720|| _rWMVSPS[u4InstID].u4PicHeightSrc> 576)
{
printk("DDR3 Not Support Size over HD\n");
strcpy(_tFileListRecInfo[u4InstID].bFileName,_FileList_Rec[u4InstID]);
sprintf(strMessage,"DDR3 Size Over HD : Not Support in FPGA\n");
fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
_u4VerBitCount[u4InstID] = 0xFFFFFFFF;
}*/
#endif
}
// --- MPEG-1/2: parse, then rotate reference buffers by picture type. ---
else if(_u4CodecVer[u4InstID] == VDEC_MPEG2)
{
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
_u4CurrPicStartAddr[u4InstID] = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
printk("<vdec> _u4CurrPicStartAddr[%u]=0x%x (%u)\n", u4InstID, _u4CurrPicStartAddr[u4InstID], _u4CurrPicStartAddr[u4InstID]);
u4VParserMPEG12(u4InstID, FALSE);
vDEC_HAL_COMMON_SetVLDPower(u4InstID,OFF);
switch(_u4PicCdTp[u4InstID])
{
case I_TYPE:
case P_TYPE:
vVPrsMPEGIPProc(u4InstID);
break;
case B_TYPE:
vVPrsMPEGBProc(u4InstID);
break;
default:
break;
}
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
// --- DivX3 / MPEG-4 / H.263: like MPEG-2, plus vop_coded==0 handling. ---
else if((_u4CodecVer[u4InstID] == VDEC_DIVX3) || (_u4CodecVer[u4InstID] == VDEC_MPEG4) || (_u4CodecVer[u4InstID] == VDEC_H263))
{
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
_u4CurrPicStartAddr[u4InstID] = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
u4VParserMPEG4(u4InstID, FALSE);
vDEC_HAL_COMMON_SetVLDPower(u4InstID,OFF);
if(_fgVerVopCoded0[u4InstID])
{
// VOP not coded: nothing to decode for this picture.
#ifndef VPMODE
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
#else
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;//qiguo 8/6
#endif
}
else
{
switch(_u4PicCdTp[u4InstID])
{
case I_TYPE:
case P_TYPE:
vVPrsMPEGIPProc(u4InstID);
break;
case B_TYPE:
vVPrsMPEGBProc(u4InstID);
break;
default:
break;
}
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
// Record the parsed geometry for the verification picture descriptor.
_tVerPic[u4InstID].u4W = _u4HSize[u4InstID];
_tVerPic[u4InstID].u4H = _u4VSize[u4InstID];
_tVerPic[u4InstID].ucMpegVer = _ucMpegVer[u4InstID];
u4FilePicCont_noVOP++;
}
// --- VP6 / VP8 / AVS / DV: thin wrappers around their own parsers. ---
else if(_u4CodecVer[u4InstID] == VDEC_VP6)
{
u4VerVParserVP6(u4InstID, FALSE);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
else if(_u4CodecVer[u4InstID] == VDEC_VP8)
{
u4VerVParserVP8(u4InstID, FALSE);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
#endif
else if(_u4CodecVer[u4InstID] == VDEC_AVS)
{
u4VerVParserAVS(u4InstID, FALSE);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
#ifdef VERIFY_DV_SUPPORT
else if(_u4CodecVer[u4InstID] == DV)
{
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
}
#endif
}
// Reference/decode buffer management for an MPEG I- or P-picture after
// header parsing: for a frame picture (or the first field of a pair)
// rotate the forward/backward reference buffers and pick the decode
// target; the second field of a pair reuses the same decode buffer.
void vVPrsMPEGIPProc(UINT32 u4InstID)
{
    if (_u4PicStruct[u4InstID] == FRM_PIC)
    {
        // Frame picture.
        if (_u4BrokenLink[u4InstID] > 0)
        {
            _u4BrokenLink[u4InstID]--;
        }
        _fgDec2ndFldPic[u4InstID] = 0;
        vSetVerFRefBuf(u4InstID, _u4BRefBufIdx[u4InstID]);
        vSetVerBRefBuf(u4InstID, 1 - _u4FRefBufIdx[u4InstID]);
        vSetVerDecBuf(u4InstID, _u4BRefBufIdx[u4InstID]);
        return;
    }

    // Field picture.  A preceding B-picture resets the field-pair state.
    if (_fgVerPrevBPic[u4InstID])
    {
        _fgDec2ndFldPic[u4InstID] = 0;
    }

    if (_fgDec2ndFldPic[u4InstID])
    {
        // Second field: decode into the same buffer as the first field.
        vSetVerDecBuf(u4InstID, _u4BRefBufIdx[u4InstID]);
    }
    else
    {
        // First field: rotate references and mark the field-pair order.
        if (_u4BrokenLink[u4InstID] > 0)
        {
            _u4BrokenLink[u4InstID]--;
        }
        vSetVerFRefBuf(u4InstID, _u4BRefBufIdx[u4InstID]);
        vSetVerBRefBuf(u4InstID, 1 - _u4FRefBufIdx[u4InstID]);
        vSetVerDecBuf(u4InstID, _u4BRefBufIdx[u4InstID]);
        _u4PicStruct[u4InstID] = (_u4PicStruct[u4InstID] == TOP_FLD_PIC) ? TWO_FLDPIC_TOPFIRST : TWO_FLDPIC_BTMFIRST;
    }
}
// Buffer management for an MPEG B-picture: B-pictures always decode into
// buffer 2 regardless of frame/field structure (the original code issued
// the same vSetVerDecBuf(u4InstID, 2) call on both branches).  The
// second-field flag is cleared for frame pictures and for the first
// field of a field pair.
void vVPrsMPEGBProc(UINT32 u4InstID)
{
    vSetVerDecBuf(u4InstID, 2);

    if (_u4PicStruct[u4InstID] == FRM_PIC)
    {
        _fgDec2ndFldPic[u4InstID] = FALSE;
    }
    else if (!_fgVerPrevBPic[u4InstID])
    {
        // Field picture whose predecessor was not a B-picture: this is
        // the first field of the pair.
        _fgDec2ndFldPic[u4InstID] = FALSE;
    }
}
// *********************************************************************
// Function : void vVDecProc(UINT32 u4InstID)
// Description : Set VDec related parameters and trigger decode
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
// Global flag: request (re-)initialisation of the bitstream interface
// before decoding; not referenced in this chunk — presumably consumed by
// the bitstream-loading path. TODO(review): confirm consumer.
BOOL _fgReInitBS=TRUE;
void vVDecProc(UINT32 u4InstID)
{
_fgVDecComplete[u4InstID] = FALSE;
#if VDEC_DRAM_BUSY_TEST
vDrmaBusySet (u4InstID);
#endif
#if VDEC_DDR3_SUPPORT
_tVerMpvDecPrm[u4InstID].ucAddrSwapMode = ADDRSWAP_DDR3;
#else
_tVerMpvDecPrm[u4InstID].ucAddrSwapMode = _u2AddressSwapMode[u4InstID];
#endif
if (_u4CodecVer[u4InstID] == VDEC_RM)
{
vRM_TriggerDecode(u4InstID, &_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecRMDecPrm.rRMParsPicInfo);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else if(_u4CodecVer[u4InstID] == VDEC_H265)
{
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4NuhTemporalId = _u4NuhTemporalId[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.prCurrFBufInfo = _ptH265CurrFBufInfo[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.prCurrFBufInfo->u4YStartAddr = /*PHYSICAL*/((UINT32)_pucDecWorkBuf[u4InstID]);
#ifdef VDEC_SIM_DUMP
printk("[INFO] PP_OUT u4YStartAddr: 0x%08X, u4CStartAddr: 0x%08X\n", PHYSICAL(_ptH265CurrFBufInfo[u4InstID]->u4YStartAddr),
PHYSICAL(_ptH265CurrFBufInfo[u4InstID]->u4YStartAddr) +_ptH265CurrFBufInfo[u4InstID]->u4CAddrOffset );
printk ("[INFO] Trigger Decode!!!!\n");
#endif
if (_u4PicCnt[u4InstID] == _u4DumpRegPicNum[u4InstID]){
vVDEC_HAL_H265_VDec_DumpReg(u4InstID, 0);
}
// Dump Dram
if (_u4PicCnt[u4InstID] == _u4DumpRegPicNum[u4InstID] && _u4EndCompPicNum[u4InstID]==_u4DumpRegPicNum[u4InstID] ){
Dump_Dram0x49B_0x4FB();
}
#ifdef HEVC_DUMP_BITSTREAM_INFO
UINT32 u4Bits, u4FrameSPtr;
UCHAR pcFilename_info[200] = {0};
UCHAR pcInfo_data[200] = {0};
u4FrameSPtr = u4VDEC_HAL_H265_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
if (u4FrameSPtr<_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4PreReadPtr){
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4ReadPtrOffset++;
}
strncpy (pcFilename_info , _bFileStr1[u4InstID][1], (strlen(_bFileStr1[u4InstID][1]) -26));
strcat (pcFilename_info, "_bitstream.info");
sprintf(pcInfo_data, "frame %d 0x%x ", _u4PicCnt[u4InstID], _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4ReadPtrOffset*V_FIFO_SZ+u4FrameSPtr);
fgWrData2PC(pcInfo_data, strlen(pcInfo_data), 7, pcFilename_info);
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4PreReadPtr = u4FrameSPtr;
#endif
i4VDEC_HAL_H265_DecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else if(_u4CodecVer[u4InstID] == VDEC_H264)
//if(_u4CodecVer[u4InstID] == VDEC_H264)
{
#ifdef REDEC
_u4VLDPosByte[u4InstID] = u4VDEC_HAL_H264_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
if( _u4ReDecCnt[u4InstID] > 0)
{
vVerifyVDecSetPicInfo(u4InstID, &_tVerMpvDecPrm[u4InstID]);
}
#endif
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.ucNalRefIdc = _u4NalRefIdc[u4InstID];
if(_ucMVCType[u4InstID] == 2)
{
//Dep View
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.fgIsAllegMvcCfg = (_u4NalRefIdc[0] > 0)? 1: 0;
}
else
{
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.fgIsAllegMvcCfg = 0;
}
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.fgIsFrmPic = fgIsFrmPic(u4InstID);
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.fgIsIDRPic = fgIsIDRPic(u4InstID);
//_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.u4DecWorkBuf = (UINT32)_pucDecWorkBuf[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prCurrFBufInfo = _ptCurrFBufInfo[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prCurrFBufInfo->u4YStartAddr = /*PHYSICAL*/((UINT32)_pucDecWorkBuf[u4InstID]);
#ifdef VERIFICATION_DOWN_SCALE
#ifdef DOWN_SCALE_SUPPORT
//vSetDownScaleParam(u4InstID, TRUE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, TRUE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#else
//vSetDownScaleParam(u4InstID, FALSE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, FALSE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
//vDEC_HAL_COMMON_SetDownScaler(u4InstID, &_tDownScalerPrm[u4InstID]);
//vVDECSetDownScalerPrm(u4InstID, &_tDownScalerPrm[u4InstID]);
vVDECSetDownScalerPrm(u4InstID, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
#ifdef FGT_SUPPORT
vSetFGTParam(&_rFGTPrm[u4InstID]);
i4VDEC_HAL_H264_FGTSetting(u4InstID, &_rFGTPrm[u4InstID]);
#endif
if(_ucMVCType[u4InstID] == 2)
_fgVDecComplete[0] = FALSE;
#ifdef LETTERBOX_SUPPORT
vLBDParaParsing(u4InstID);
vVDECSetLetetrBoxDetPrm(u4InstID, &_rLBDPrm[u4InstID]);
#endif
// BSP_InvDCacheRange((UINT32)_pucDPB[u4InstID],DPB_SZ);
DBG_H264_PRINTF("[Info] >>> Trigger Decode BEGIN \n");
i4VDEC_HAL_H264_DecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
DBG_H264_PRINTF("[Info] >>> Trigger Decode END \n");
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else if(_u4CodecVer[u4InstID] == VDEC_WMV)
{
#ifdef REDEC
_u4VLDPosByte[u4InstID] = u4VDEC_HAL_WMV_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
#endif
if(fgVDecProcWMV(u4InstID))
{
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
}
else if(_u4CodecVer[u4InstID] == VDEC_VP6)
{
vVDecEnableCRC(u4InstID, 1, 1);
vVerifyVDecSetVP6Info(u4InstID);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
else if(_u4CodecVer[u4InstID] == VDEC_VP8)
{
vVerifyVDecSetVP8Info(u4InstID);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
#endif
else if(_u4CodecVer[u4InstID] == VDEC_AVS)
{
vVerifyVDecSetAVSInfo(u4InstID);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else
{
vVDecEnableCRC(u4InstID, 1, (VDEC_PP_ENABLE == FALSE)); // MPEG2 crc from MC
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.fgDec2ndFld = _fgDec2ndFldPic[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic0CSa = (UINT32)_pucPic0C[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic0YSa = (UINT32)_pucPic0Y[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic1CSa = (UINT32)_pucPic1C[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic1YSa = (UINT32)_pucPic1Y[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic2CSa = (UINT32)_pucPic2C[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic2YSa = (UINT32)_pucPic2Y[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.fgPpEnable = VDEC_PP_ENABLE;
#if VDEC_UFO_ENABLE
UINT32 u4DramPicSize;
UINT32 u4DramPicArea;
u4PIC_WIDTH[u4InstID] = ((_tVerMpvDecPrm[u4InstID].u4PicW + 15)>>4)<<4;
u4PIC_HEIGHT[u4InstID] = ((_tVerMpvDecPrm[u4InstID].u4PicH + 31)>>5)<<5;
u4PIC_SIZE_Y[u4InstID] = ((u4PIC_WIDTH[u4InstID] * u4PIC_HEIGHT[u4InstID] + 511)>>9)<<9;
u4PIC_SIZE[u4InstID] = (((u4PIC_SIZE_Y[u4InstID] + u4PIC_SIZE_Y[u4InstID]>>1) + 511)>>9)<<9;
u4UFO_LEN_SIZE_Y[u4InstID] = ((((u4PIC_SIZE_Y[u4InstID] +255)>>8)+63+(16*8))>>6)<<6;
u4UFO_LEN_SIZE_C[u4InstID] = (((u4UFO_LEN_SIZE_Y[u4InstID]>>1)+15+(16*8))>>4)<<4;
u4PIC_SIZE_Y_BS[u4InstID] = ((u4PIC_SIZE_Y[u4InstID] + 4095)>>12)<<12;
u4PIC_SIZE_BS[u4InstID] = ((u4PIC_SIZE_Y_BS[u4InstID] + (u4PIC_SIZE_Y[u4InstID]>>1)+511)>>9)<<9;
u4PIC_SIZE_REF[u4InstID] = ((u4PIC_SIZE_BS[u4InstID] + (u4UFO_LEN_SIZE_Y[u4InstID]<<1) + 4095)>>12)<<12;
_pucPic0C[u4InstID] = _pucPic0Y[u4InstID]+u4PIC_SIZE_Y_BS[u4InstID];
_pucPic1C[u4InstID] = _pucPic1Y[u4InstID]+u4PIC_SIZE_Y_BS[u4InstID];
_pucPic2C[u4InstID] = _pucPic2Y[u4InstID]+u4PIC_SIZE_Y_BS[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.fgDec2ndFld = _fgDec2ndFldPic[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic0CSa = (UINT32)_pucPic0C[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic1CSa = (UINT32)_pucPic1C[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegFrameBufSa.u4Pic2CSa = (UINT32)_pucPic2C[u4InstID];
_tVerMpvDecPrm[u4InstID].u4PIC_SIZE_BS = u4PIC_SIZE_BS[u4InstID];
_tVerMpvDecPrm[u4InstID].u4UFO_LEN_SIZE_Y = u4UFO_LEN_SIZE_Y[u4InstID];
_tVerMpvDecPrm[u4InstID].u4PIC_SIZE_Y_BS = u4PIC_SIZE_Y_BS[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DramPicSize = u4PIC_SIZE_Y_BS[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DramPicArea = ((u4PIC_SIZE_REF[u4InstID] + (u4PIC_SIZE_Y[u4InstID]>>4)+4095)>>12)<<12;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpYBufSa = (UINT32)_pucPpYSa[u4InstID];
_pucPpCSa[u4InstID] = _pucPpYSa[u4InstID]+u4PIC_SIZE_Y_BS[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpCBufSa = (UINT32)_pucPpCSa[u4InstID];
#else
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpYBufSa = (UINT32)_pucPpYSa[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpCBufSa = (UINT32)_pucPpCSa[u4InstID];
#endif
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecH = ((_tVerMpvDecPrm[u4InstID].u4PicH + 15) >> 4 ) << 4;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecW = ((_tVerMpvDecPrm[u4InstID].u4PicW + 15) >> 4 ) << 4;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecXOff = 0;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecYOff = 0;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4FRefBufIdx = _u4FRefBufIdx[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4MaxMbl = ((_tVerMpvDecPrm[u4InstID].u4PicH + 15) >> 4 ) - 1;
_tVerMpvDecPrm[u4InstID].ucDecFBufIdx = BYTE0(_u4DecBufIdx[u4InstID]);
_tVerMpvDecPrm[u4InstID].ucPicStruct = BYTE0(_u4PicStruct[u4InstID]);
_tVerMpvDecPrm[u4InstID].ucPicType = BYTE0(_u4PicCdTp[u4InstID]);
if(_u4CodecVer[u4InstID] == VDEC_MPEG2)
{
#ifdef REDEC
_u4VLDPosByte[u4InstID] = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
#endif
#ifdef VERIFICATION_DOWN_SCALE
#ifdef DOWN_SCALE_SUPPORT
//vSetDownScaleParam(u4InstID, TRUE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, TRUE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#else
//vSetDownScaleParam(u4InstID, FALSE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, FALSE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
//vDEC_HAL_COMMON_SetDownScaler(u4InstID, &_tDownScalerPrm[u4InstID]);
//vVDECSetDownScalerPrm(u4InstID, &_tDownScalerPrm[u4InstID]);
//vVDECSetDownScalerPrm(u4InstID, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
if(_u4BSID[u4InstID] == 1)
{
UINT32 u4Bytes,u4Bits;
VDEC_INFO_MPEG_BS_INIT_PRM_T rMpegBSInitPrm;
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
u4Bytes = u4VDEC_HAL_MPEG_ReadRdPtr(1, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rMpegBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID] + u4Bytes;
#ifndef RING_VFIFO_SUPPORT
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
i4VDEC_HAL_MPEG_InitBarrelShifter(0, u4InstID, &rMpegBSInitPrm);
u4VDEC_HAL_MPEG_ShiftGetBitStream(0, u4InstID, u4Bits);
}
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
vVDEC_HAL_MPEG2_DisableMVOverflowDetection(u4InstID);
printk("<vdec> disable mv overflow\n");
#endif
_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 1;
i4VDEC_HAL_MPEG12_DecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
printk("@(posedge `VDEC_INTERRUPT);\n");
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else if(_u4CodecVer[u4InstID] == VDEC_DIVX3)
{
#ifdef REDEC
_u4VLDPosByte[u4InstID] = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
#endif
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4BcodeSa = (UINT32)_pucMp4Bcode[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4Bmb1Sa = (UINT32)_pucMp4Bmb1[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4Bmb2Sa = (UINT32)_pucMp4Bmb2[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4DcacSa = (UINT32)_pucMp4Dcac[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4MvecSa = (UINT32)_pucMp4Mvec[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4VldWrapperSa = (UINT32)_pucVLDWrapperWrok[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4PPWrapperSa = (UINT32)_pucPPWrapperWork[u4InstID];
//6589NEW 2.4, 2.5, 4.1
#if (MPEG4_6589_SUPPORT)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4DataPartitionSa= (UINT32)_pucMp4DataPartition[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4NotCodedSa = (UINT32)_pucMp4NotCoded[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4MvDirectSa = (UINT32)_pucMp4MvDirect[u4InstID];
#endif
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4BcodeSize = BCODE_SZ;//count in 16 byte
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4DcacSize = DCAC_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MVSize= VER_MVEC_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MB1Size = VER_BMB1_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MB2Size = VER_BMB2_SZ;
//6589NEW 2.4, 2.5, 4.1(MV Direct size not required)
#if (MPEG4_6589_SUPPORT)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4DataPartitionSize = DATA_PARTITION_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4NotCodedSize = NOT_CODED_SZ;
#endif
#endif
#if (MPEG4_6589_SUPPORT)
vSetDx3SliceBoundary(u4InstID, &(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm));
#endif
_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 3;//divx mode //_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 3;//divx mode
#ifdef VERIFICATION_DOWN_SCALE
#ifdef DOWN_SCALE_SUPPORT
//vSetDownScaleParam(u4InstID, TRUE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, TRUE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#else
//vSetDownScaleParam(u4InstID, FALSE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, FALSE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
//vDEC_HAL_COMMON_SetDownScaler(u4InstID, &_tDownScalerPrm[u4InstID]);
//vVDECSetDownScalerPrm(u4InstID, &_tDownScalerPrm[u4InstID]);
vVDECSetDownScalerPrm(u4InstID, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
if(_u4BSID[u4InstID] == 1)
{
UINT32 u4Bytes,u4Bits;
VDEC_INFO_MPEG_BS_INIT_PRM_T rMpegBSInitPrm;
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
u4Bytes = u4VDEC_HAL_MPEG_ReadRdPtr(1, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rMpegBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID] + u4Bytes;
#ifndef RING_VFIFO_SUPPORT
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
i4VDEC_HAL_MPEG_InitBarrelShifter(0, u4InstID, &rMpegBSInitPrm);
u4VDEC_HAL_MPEG_ShiftGetBitStream(0, u4InstID, u4Bits);
}
i4VDEC_HAL_DIVX3_DecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
else if(_u4CodecVer[u4InstID] == VDEC_MPEG4 || _u4CodecVer[u4InstID] == VDEC_H263)
{
#ifdef REDEC
_u4VLDPosByte[u4InstID] = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
#endif
//PANDA H263 Deblocking test
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.fgPpEnable = VDEC_PP_ENABLE;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpYBufSa = (UINT32)_pucPpYSa[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rMpegPpInfo.u4PpCBufSa = (UINT32)_pucPpCSa[u4InstID];
//~PANDA
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVol = &_rMPEG4VolPrm[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVop = &_rMPEG4VopPrm[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVop->prDirMd = &_rDirMode[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rDep.rM4vDecPrm.prVop->prGmcPrm = &_rMPEG4GmcPrm[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4BcodeSa = (UINT32)_pucMp4Bcode[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4Bmb1Sa = (UINT32)_pucMp4Bmb1[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4Bmb2Sa = (UINT32)_pucMp4Bmb2[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4DcacSa = (UINT32)_pucMp4Dcac[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4MvecSa = (UINT32)_pucMp4Mvec[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4VldWrapperSa = (UINT32)_pucVLDWrapperWrok[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4PPWrapperSa = (UINT32)_pucPPWrapperWork[u4InstID];
//6589NEW 2.4, 2.5, 4.1
#if (MPEG4_6589_SUPPORT)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4DataPartitionSa= (UINT32)_pucMp4DataPartition[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4NotCodedSa = (UINT32)_pucMp4NotCoded[u4InstID];
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSa.u4MvDirectSa = (UINT32)_pucMp4MvDirect[u4InstID];
#endif
//_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 3;//divx mode //_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 3;//divx mode
_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 2;//mpeg4 mode //_tVerMpvDecPrm[u4InstID].ucMpegSpecType = 3;//divx mode
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4BcodeSize = BCODE_SZ;//count in 16 byte
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4DcacSize = DCAC_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MVSize= VER_MVEC_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MB1Size = VER_BMB1_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4MB2Size = VER_BMB2_SZ;
//6589NEW 2.4, 2.5, 4.1(MV Direct size not required)
#if (MPEG4_6589_SUPPORT)
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4DataPartitionSize = DATA_PARTITION_SZ;
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.rPicLayer.rMp4DecPrm.rMpeg4WorkBufSize.u4NotCodedSize = NOT_CODED_SZ;
#endif
#endif
#ifdef VERIFICATION_DOWN_SCALE
#ifdef DOWN_SCALE_SUPPORT
//vSetDownScaleParam(u4InstID, TRUE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, TRUE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#else
//vSetDownScaleParam(u4InstID, FALSE, &_tDownScalerPrm[u4InstID]);
vSetDownScaleParam(u4InstID, FALSE, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
//vDEC_HAL_COMMON_SetDownScaler(u4InstID, &_tDownScalerPrm[u4InstID]);
//vVDECSetDownScalerPrm(u4InstID, &_tDownScalerPrm[u4InstID]);
vVDECSetDownScalerPrm(u4InstID, &(_tVerMpvDecPrm[u4InstID].rDownScalerPrm));
#endif
if(_u4BSID[u4InstID] == 1)
{
UINT32 u4Bytes,u4Bits;
VDEC_INFO_MPEG_BS_INIT_PRM_T rMpegBSInitPrm;
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
u4Bytes = u4VDEC_HAL_MPEG_ReadRdPtr(1, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rMpegBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID] + u4Bytes;
#ifndef RING_VFIFO_SUPPORT
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
i4VDEC_HAL_MPEG_InitBarrelShifter(0, u4InstID, &rMpegBSInitPrm);
u4VDEC_HAL_MPEG_ShiftGetBitStream(0, u4InstID, u4Bits);
}
if(0)//(_fgReInitBS)
{
UINT32 u4Bytes,u4Bits;
VDEC_INFO_MPEG_BS_INIT_PRM_T rMpegBSInitPrm;
vDEC_HAL_COMMON_SetVLDPower(u4InstID,ON);
u4Bytes = u4VDEC_HAL_MPEG_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rMpegBSInitPrm.u4ReadPointer= (UINT32)_pucVFifo[u4InstID] + u4Bytes;
#ifndef RING_VFIFO_SUPPORT
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
i4VDEC_HAL_MPEG_InitBarrelShifter(0, u4InstID, &rMpegBSInitPrm);
u4VDEC_HAL_MPEG_ShiftGetBitStream(0, u4InstID, u4Bits);
}
vVDecEnableCRC(u4InstID,1,1);
#ifdef VPMODE
if(_fgVerVopCoded0[u4InstID])//qiguo
{
vVDEC_HAL_MPEG_SetMPEG4Flag(u4InstID, FALSE);
_u4FileCnt[u4InstID] = _u4FileCnt[u4InstID] -1;
i4VPModeDecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
}
else
#endif
{
i4VDEC_HAL_MPEG4_DecStart(u4InstID, &_tVerMpvDecPrm[u4InstID]);
}
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_DECODE;
}
}
}
/* Update the VC-1/WMV reconstruction range-reduction state after a picture
 * has been decoded. Only meaningful when sequence-level pre-processing range
 * reduction is enabled, and only for I / BI pictures. */
void PostAdjustReconRange(UINT32 u4InstID)
{
    VDEC_INFO_WMV_SEQ_PRM_T *prSeq = &_rWMVSPS[u4InstID];
    VDEC_INFO_WMV_ETRY_PRM_T *prEntry = &_rWMVEPS[u4InstID];
    VDEC_INFO_WMV_PIC_PRM_T *prPic = &_rWMVPPS[u4InstID];

    if (!prSeq->fgPreProcRange)
    {
        return;
    }
    if ((prPic->ucPicType != IVOP) && (prPic->ucPicType != BIVOP))
    {
        return;
    }
    if (prSeq->i4NumBFrames == 0)
    {
        /* No B frames in the stream: the reconstruction range simply
         * tracks the current entry-point range state. */
        prEntry->i4ReconRangeState = prEntry->i4RangeState;
    }
    else if (prPic->ucPicType != BVOP)
    {
        /* NOTE(review): within this branch ucPicType is already IVOP or
         * BIVOP, so the "!= BVOP" test looks always-true — preserved as-is;
         * confirm against the original range-adjust spec. */
        AdjustReconRange(u4InstID);
    }
}
/* Point this instance's decode work buffers (luma and chroma planes) at the
 * frame buffer selected by u4BufIdx (valid values: 0..2). The picture buffer
 * arrays are indexed through _u1AlphaDecPrmIdx[u4InstID].
 *
 * Fix: the original switch had no default case, so an out-of-range index
 * silently left the work-buffer pointers at whatever they held before. The
 * behavior is kept (pointers unchanged) but the ignored case is now explicit.
 */
void vVerifySetVSyncPrmBufPtr(UINT32 u4InstID, UINT32 u4BufIdx)
{
    switch(u4BufIdx)
    {
    case 0:
        _pucDecWorkBuf[u4InstID] = (UCHAR *) _pucPic0Y[_u1AlphaDecPrmIdx[u4InstID]];
        _pucDecCWorkBuf[u4InstID] = (UCHAR *) _pucPic0C[_u1AlphaDecPrmIdx[u4InstID]];
        break;
    case 1:
        _pucDecWorkBuf[u4InstID] = (UCHAR *) _pucPic1Y[_u1AlphaDecPrmIdx[u4InstID]];
        _pucDecCWorkBuf[u4InstID] = (UCHAR *) _pucPic1C[_u1AlphaDecPrmIdx[u4InstID]];
        break;
    case 2:
        _pucDecWorkBuf[u4InstID] = (UCHAR *) _pucPic2Y[_u1AlphaDecPrmIdx[u4InstID]];
        _pucDecCWorkBuf[u4InstID] = (UCHAR *) _pucPic2C[_u1AlphaDecPrmIdx[u4InstID]];
        break;
    default:
        /* Invalid buffer index: keep the previous pointers (same as the
         * original silent fall-through). */
        break;
    }
}
// Finish one WMV/VC-1 picture:
//  - latch the HW VLD read pointer and dump / LBD-check the decoded frame,
//  - (REDEC builds) rewind the barrel shifter so the same picture is decoded again,
//  - otherwise re-init the VDec HW and barrel shifter for the next picture,
//  - (RING_VFIFO builds) refill the drained half of the video FIFO from file,
//  - finally return this instance's state machine to DEC_NORM_VPARSER.
void vWMVVDecEnd(UINT32 u4InstID)
{
//VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
//VDEC_INFO_H264_FBUF_INFO_T *tFBufInfo;
VDEC_INFO_WMV_VFIFO_PRM_T rWmvVFifoInitPrm;
VDEC_INFO_WMV_BS_INIT_PRM_T rWmvBSInitPrm;
UINT32 u4VldByte,u4VldBit;
VDEC_INFO_WMV_PIC_PRM_T *prWMVPPS = &_rWMVPPS[u4InstID];
//tFBufInfo = _ptCurrFBufInfo[u4InstID];
//tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
// Byte (and residual bit) position the VLD has consumed, relative to VFIFO start.
u4VldByte = u4VDEC_HAL_WMV_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
_u4WMVByteCount[u4InstID] = u4VldByte;
///u4VldByte
#if WMV_LOG_TMP
printk("vWMVVDecEnd, rd:0x%x\n", u4VldByte);
#endif
#ifdef LETTERBOX_DETECTION_ONLY
vCheckLBDResult(u4InstID);
#else
// Dump the reconstructed picture to PC; size = MB-aligned W/16 * 32-aligned H/32 * 512 bytes.
vWMVWrData2PC(u4InstID, _pucDumpYBuf[u4InstID], ((((_tVerPic[u4InstID].u4W + 15) >> 4) * ((_tVerPic[u4InstID].u4H + 31) >> 5)) << 9));
#endif
// reset HW
#ifdef REDEC
// Re-decode path: roll back the picture counter and restore the barrel
// shifter to the byte/bit position saved before this decode, so the same
// picture is parsed again. Skips the normal end-of-picture handling below.
if(_u4ReDecCnt[u4InstID] > 0)
{
_u4WMVDecPicNo[u4InstID]--;
rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
rWmvVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
i4VDEC_HAL_WMV_InitVDecHW(u4InstID,&rWmvVFifoInitPrm);
rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + _u4VLDPosByte[u4InstID];
#ifndef RING_VFIFO_SUPPORT
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
// VC-1 (advanced profile) uses the emulation-prevention-aware shifter mode.
if (_i4CodecVersion[u4InstID] == VDEC_VC1)
{
i4VDEC_HAL_WMV_InitBarrelShifter(0, u4InstID, &rWmvBSInitPrm, TRUE);
}
else
{
i4VDEC_HAL_WMV_InitBarrelShifter(0, u4InstID, &rWmvBSInitPrm, FALSE);
}
u4VDEC_HAL_WMV_ShiftGetBitStream(0, u4InstID, _u4VLDPosBit[u4InstID]);
_tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
return;
}
#endif
//ming modify@2006/4/12
// Interlaced-field pictures decode one field per pass: toggle field parity.
if(prWMVPPS->ucFrameCodingMode == INTERLACEFIELD)
{
prWMVPPS->i4CurrentTemporalField ^= 1; //toggle field
prWMVPPS->i4CurrentField ^= 1;
}
// Re-init HW + barrel shifter for the next picture. Non-VC-1 codecs restart
// from the software-tracked _iSetPos; VC-1 restarts from the HW read pointer.
if(_i4CodecVersion[u4InstID] != VDEC_VC1)
{
rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
rWmvVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
i4VDEC_HAL_WMV_InitVDecHW(u4InstID,&rWmvVFifoInitPrm);
// Wrap the software position back into the circular VFIFO.
if(_iSetPos[u4InstID] >= V_FIFO_SZ)
{
_iSetPos[u4InstID] = _iSetPos[u4InstID] - V_FIFO_SZ;
}
rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + _iSetPos[u4InstID];
#ifndef RING_VFIFO_SUPPORT
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, FALSE);
}
else // WMVA
{
rWmvVFifoInitPrm.u4CodeType = _i4CodecVersion[u4InstID];
rWmvVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
i4VDEC_HAL_WMV_InitVDecHW(u4InstID,&rWmvVFifoInitPrm);
rWmvBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
rWmvBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
rWmvBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + u4VldByte;
#ifndef RING_VFIFO_SUPPORT
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
// rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
rWmvBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
i4VDEC_HAL_WMV_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rWmvBSInitPrm, TRUE);
}
if(_rWMVPPS[u4InstID].ucPicType != SKIPFRAME)
{
//update _iReconRangeState
PostAdjustReconRange(u4InstID);
}
#ifndef INTERGRATION_WITH_DEMUX
#ifdef RING_VFIFO_SUPPORT
// Double-buffered ring refill: when the read pointer has moved past the
// half of the VFIFO that pairs with the current (odd/even) load count,
// reload that half with the next file chunk and bump the load counter.
if((_u4LoadBitstreamCnt[u4InstID]&0x1) && (rWmvBSInitPrm.u4ReadPointer >
((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
{
_tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
_tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID];
_tInFileInfo[u4InstID].u4FileOffset = (V_FIFO_SZ * ((_u4LoadBitstreamCnt[u4InstID]+ 1)/2));
_tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
_tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
// Prefer the HDD source; fall back to the PC file server if it fails.
if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
{
fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
}
#else
fgOpenFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
_u4LoadBitstreamCnt[u4InstID]++;
}
else if((!(_u4LoadBitstreamCnt[u4InstID]&0x1)) && (rWmvBSInitPrm.u4ReadPointer <
((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
{
_tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
_tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID] + (V_FIFO_SZ/2);
_tInFileInfo[u4InstID].u4FileOffset = ((V_FIFO_SZ * (_u4LoadBitstreamCnt[u4InstID]+ 1)) /2);
_tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
_tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
{
fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
}
#else
fgOpenFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
_u4LoadBitstreamCnt[u4InstID]++;
}
#endif
#endif
_tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;
}
// *********************************************************************
// Function : BOOL fgIsWMVVDecComplete(UINT32 u4InstID)
// Description : Check whether the WMV/VC-1 hardware decode of the current
// picture has completed (decode-done flag plus final MB position)
// Parameter : u4InstID - decoder instance ID
// Return : TRUE if the picture is fully decoded, FALSE otherwise
// *********************************************************************
BOOL fgIsWMVVDecComplete(UINT32 u4InstID)
{
    UINT32 u4MbX = 0;
    UINT32 u4MbY = 0;
    UINT32 u4LastMbX;
    UINT32 u4LastMbY;
    VDEC_INFO_WMV_SEQ_PRM_T *prSeq = &_rWMVSPS[u4InstID];
    VDEC_INFO_WMV_PIC_PRM_T *prPic = &_rWMVPPS[u4InstID];

    printk("fgIsWMVVDecComplete\n");
    /* The interrupt/poll path must have reported decode-done first. */
    if (!_fgVDecComplete[u4InstID])
    {
        return FALSE;
    }
    /* The HW is only truly finished when the MB cursor has reached the
     * last macroblock of the picture. */
    vVDEC_HAL_WMV_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
    u4LastMbX = (prSeq->u4PicWidthDec >> 4) - 1;
    if (prPic->ucFrameCodingMode != INTERLACEFIELD)
    {
        u4LastMbY = (prSeq->u4PicHeightDec >> 4) - 1;
    }
    else
    {
        /* Field picture: each field covers half the frame height (>>5). */
        u4LastMbY = (prSeq->u4PicHeightDec >> 5) - 1;
    }
    if ((u4MbX < u4LastMbX) || (u4MbY < u4LastMbY))
    {
        return FALSE;
    }
    return TRUE;
}
/* End-of-decode handling for one WMV/VC-1 picture: poll until the HW reports
 * completion (or the MB cursor stalls DEC_RETRY_NUM times), report any decode
 * error/timeout to the PC log, run the HAL end-of-decode sequence, byte-align
 * the read pointer, and chain into vWMVVDecEnd() for the next picture.
 *
 * Fix: u4MbX/u4MbY were read uninitialized on the first poll iteration
 * ("u4MbX_last = u4MbX;" before any vVDEC_HAL_WMV_GetMbxMby call) — undefined
 * behavior. They are now zero-initialized, matching vH264DecEndProc which
 * fetches the MB position before its loop.
 */
void vWMVDecEndProc(UINT32 u4InstID)
{
    UINT32 u4Cnt;
    UINT32 u4CntTimeChk;
    UINT32 u4MbX = 0;   /* zero-init: read as "last position" before first HW query */
    UINT32 u4MbY = 0;
    char strMessage[256];
    UINT32 u4MbX_last;
    UINT32 u4MbY_last;
    UINT32 u4mvErrType;
    VDEC_INFO_WMV_ERR_INFO_T prWmvErrInfo;
    u4Cnt=0;
    u4CntTimeChk = 0;
    _fgVDecErr[u4InstID] = FALSE;
    /* Skipped frames produce no HW activity — nothing to poll for. */
    if(_rWMVPPS[u4InstID].ucPicType != SKIPFRAME)
    {
        /* Poll: every 64 spins, check completion; if the MB cursor has not
         * advanced since the last check, count toward the timeout. */
        while(u4CntTimeChk < DEC_RETRY_NUM)
        {
            u4Cnt ++;
            if((u4Cnt & 0x3f)== 0x3f)
            {
#ifndef IRQ_DISABLE
#else
                /* Polling build: read the finish flag directly. */
                if(u4VDEC_HAL_WMV_VDec_ReadFinishFlag(u4InstID) & 0x1)
                {
                    _fgVDecComplete[u4InstID] = TRUE;
                    /*  if(u4InstID == 0)
                        {
                            BIM_ClearIrq(VECTOR_VDFUL);
                        }
                        else
                        {
                            BIM_ClearIrq(VECTOR_VDLIT);
                        }
                    */
                }
#endif
                if(fgIsWMVVDecComplete(u4InstID))
                {
#ifdef CAPTURE_ESA_LOG
                    vWrite2PC(u4InstID, 17, (UCHAR*)_pucESALog[u4InstID]);
#endif
                    u4CntTimeChk = 0;
                    break;
                }
                else
                {
                    u4MbX_last = u4MbX;
                    u4MbY_last = u4MbY;
                    vVDEC_HAL_WMV_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
                    if((u4MbX == u4MbX_last) && (u4MbY == u4MbY_last))
                    {
                        u4CntTimeChk ++;   /* cursor stalled */
                    }
                    else
                    {
                        u4CntTimeChk =0;   /* still making progress */
                    }
                }
                u4Cnt = 0;
            }
        }
        u4mvErrType = u4VDEC_HAL_WMV_GetErrType(u4InstID);
        vVDEC_HAL_WMV_GetErrInfo(u4InstID, &prWmvErrInfo);
        /* Report timeout and/or HW-detected decode errors to the PC log. */
        if((u4CntTimeChk == DEC_RETRY_NUM) ||
            (u4mvErrType!= 0) || (prWmvErrInfo.u4WmvErrCnt != 0))
        {
#ifndef INTERGRATION_WITH_DEMUX
#ifdef EXT_COMPARE
            _fgVDecErr[u4InstID] = TRUE;
#endif
            if(u4CntTimeChk == DEC_RETRY_NUM)
            {
                vVDecOutputDebugString("\n!!!!!!!!! Decoding Timeout !!!!!!!\n");
                sprintf(strMessage, "%s", "\n!!!!!!!!! Decoding Timeout !!!!!!!");
                fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
                //vDumpReg();
            }
            vVDEC_HAL_WMV_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
            vVDecOutputDebugString("\n!!!!!!!!! Decoding Error 0x%.8x!!!!!!!\n", prWmvErrInfo.u4WmvErrType);
            sprintf(strMessage,"\n!!!!!!!!! Decoding Error 0x%.8x at MC (x,y)=(%d/%d, %d/%d) !!!!!!!\n", u4mvErrType,
                u4MbX, ((_tVerPic[u4InstID].u4W + 15)>> 4) - 1, u4MbY, (((_tVerPic[u4InstID].u4H >> (1-(fgWMVIsFrmPic(u4InstID)))) + 15)>> 4) - 1);
            fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            sprintf(strMessage,"the length is %d (0x%.8x)\n", _tInFileInfo[u4InstID].u4FileLength, _tInFileInfo[u4InstID].u4FileLength);
            fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            vReadWMVChkSumGolden(u4InstID);
            vWrite2PC(u4InstID, 1, _pucVFifo[u4InstID]);
            //vDumpReg();
#endif
        }
        //vWMVDumpReg();
        vVDEC_HAL_WMV_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
        sprintf(strMessage,"\n!!!!!!!!! Decoding result at MC (x,y)=(%d/%d, %d/%d) !!!!!!!\n",
            u4MbX, ((_tVerPic[u4InstID].u4W + 15)>> 4) - 1, u4MbY, (((_tVerPic[u4InstID].u4H >> (1-(fgWMVIsFrmPic(u4InstID)))) + 15)>> 4) - 1);
        fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
        vVDEC_HAL_WMV_DecEndProcess(u4InstID);
        vVDEC_HAL_WMV_AlignRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], BYTE_ALIGN);
        vVerifySetVSyncPrmBufPtr(u4InstID, _u4DecBufIdx[u4InstID]);
        vReadWMVChkSumGolden(u4InstID);
    }
#if VDEC_DRAM_BUSY_TEST
    vDrmaBusyOff (u4InstID);
#endif
#if 0//(CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
    UINT32 u4MbnReg474, u4MbnReg476, u4MbnReg477;
    u4MbnReg474 = u4VDecReadMC(u4InstID, (474<<2));
    u4MbnReg476 = u4VDecReadMC(u4InstID, (476<<2));
    u4MbnReg477 = u4VDecReadMC(u4InstID, (477<<2));
    sprintf(strMessage, "\nMBN LOG_474 = 0x%.8x!!!!!!!\n", u4MbnReg474);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
    sprintf(strMessage, "\nMBN LOG_476 = 0x%.8x!!!!!!!\n", u4MbnReg476);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
    sprintf(strMessage, "\nMBN LOG_477 = 0x%.8x!!!!!!!\n", u4MbnReg477);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
#endif
    vWMVVDecEnd(u4InstID);
}
/* End-of-decode handling for one H.265/HEVC picture: wait (with a jiffies
 * timeout) for the hardware decode-done flag, emit optional ESA/NBM and
 * MC/VLD debug logs, update the current frame-buffer bookkeeping (POC, pic
 * count, picture geometry), then chain into vH265VDecEnd() and return the
 * state machine to DEC_NORM_VPARSER.
 *
 * Fixes:
 *  - the file-name trim "strlen(...) - 38" could underflow for short names,
 *    turning the memcpy length into a huge value — now guarded;
 *  - the DPB-dump printk format strings used "%0x08X" (prints hex followed
 *    by a literal "08X") — corrected to "0x%08X";
 *  - removed dead locals u4Cnt / u4CntTimeChk / u4RetRegValue (assigned,
 *    never read).
 */
void vH265DecEndProc(UINT32 u4InstID, unsigned long start_time)
{
    UINT32 u4RetryDelay = 100000;
    BOOL bIsTimeOut = 0;
    H265_SPS_Data* prSPS;
    char bitstream_name[200] = {0};
    UINT32 u4NameLen;
    int i;
    prSPS = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.prSPS;
    /* Strip a fixed 12-char path prefix and 26-char suffix (12 + 26 == 38)
     * from the bitstream file name; skip if the name is too short. */
    u4NameLen = strlen(_bFileStr1[u4InstID][1]);
    if (u4NameLen >= 38)
    {
        memcpy (bitstream_name , _bFileStr1[u4InstID][1]+12 , (u4NameLen -38) );
        bitstream_name[u4NameLen -38] = '\0';
    }
    if ( _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bNoDecode ){
        printk("[INFO] Syntax Error No Decode!!\n");
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bNoDecode = 0;
    } else {
        /* Busy-wait + 1 ms sleeps until the HW decode-done flag is set,
         * giving up after ~1700 jiffies. */
        while ( u4VDEC_HAL_H265_VDec_ReadFinishFlag(u4InstID) != 1){
            if ( ( jiffies -start_time > 1700 ) ){
                printk("\n!!!!!!Decode Polling int timeout !!!!!!\n\n");
                bIsTimeOut = 1;
                break;
            }
            while (u4RetryDelay>0 ){
                u4RetryDelay--;
            }
            u4RetryDelay = 100000;
            msleep(1);
        }
#ifdef VDEC_SIM_DUMP
        printk("[INFO] Decode done!!\n");
#endif
        if (!bIsTimeOut) {
#ifdef HEVC_ESA_NBM_LOG
            vH265Dump_ESA_NBM_performane_log( u4InstID, bitstream_name, _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bIsUFOMode);
#endif
#ifdef HEVC_DUMP_MC_VLDTOP_LOG
            vH265DumpInfo(u4InstID);
#endif
        }
        u4VDEC_HAL_H265_VDec_ClearInt(u4InstID);
        _rH265PicInfo[u4InstID].u4SliceCnt = 0;
#ifdef VDEC_SIM_DUMP
        printk("[INFO] Update Current (pic_cnt=%d POC=%d) FB index %d\n", _u4PicCnt[u4InstID],_rH265SliceHdr[u4InstID].i4POC, _tVerMpvDecPrm[u4InstID].ucDecFBufIdx );
#endif
        /* Record this picture's identity/geometry on the current frame buffer. */
        _ptH265CurrFBufInfo[u4InstID]->u4PicCnt = _u4PicCnt[u4InstID];
        _ptH265CurrFBufInfo[u4InstID]->i4POC = _rH265SliceHdr[u4InstID].i4POC;
        _ptH265CurrFBufInfo[u4InstID]->u4POCBits = (prSPS->u4Log2MaxPicOrderCntLsbMinus4+4);
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.rLastInfo.u4LastPicW = _ptH265CurrFBufInfo[u4InstID]->u4W;
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.rLastInfo.u4LastPicH = _ptH265CurrFBufInfo[u4InstID]->u4H;
        _ptH265CurrFBufInfo[u4InstID]->ucFBufStatus = FRAME;
        _ptH265CurrFBufInfo[u4InstID]->bFirstSliceReferenced = 1;
        if ( bIsTimeOut ){
            /* Dump registers and poison the bit count so comparison fails loudly. */
            vVDEC_HAL_H265_VDec_DumpReg(u4InstID, 1);
            _u4VerBitCount[u4InstID] = 0xFFFFFFFF;
        }
    }
#ifdef HEVC_DPB_INFO_DUMP
    //Dump DPB info
    for (i=0; i<_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.ucMaxFBufNum; i++){
        printk("[INFO] FB index %d =============\n", i);
        printk("[INFO] ucFBufStatus: %d (NO_PIC 0; FRAME 3)\n", _ptH265FBufInfo[u4InstID][i].ucFBufStatus );
        printk("[INFO] ucFBufRefType: %d (NREF_PIC 0; SREF_PIC 1; LREF_PIC 2)\n", _ptH265FBufInfo[u4InstID][i].ucFBufRefType );
        printk("[INFO] i4POC: %d; u4PicCnt: %d\n", _ptH265FBufInfo[u4InstID][i].i4POC, _ptH265FBufInfo[u4InstID][i].u4PicCnt );
        printk("[INFO] u4YStartAddr: 0x%08X\n", _ptH265FBufInfo[u4InstID][i].u4YStartAddr );
        printk("[INFO] u4CAddrOffset: 0x%08X\n", _ptH265FBufInfo[u4InstID][i].u4CAddrOffset );
        printk("[INFO] u4MvStartAddr: 0x%08X\n", _ptH265FBufInfo[u4InstID][i].u4MvStartAddr );
    }
#endif
#if 0
    if ( _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.bPrevRAPisBLA &&
         _ptH265CurrFBufInfo[u4InstID]->i4POC <_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4CRAPOC &&
        (_ucNalUnitType[u4InstID] == NAL_UNIT_CODED_SLICE_RASL_N ||_ucNalUnitType[u4InstID] == NAL_UNIT_CODED_SLICE_TFD )){
        printk("\n======== Frame %d (POC %d) Dropped !! i4CRAPOC %d ========\n\n", _u4PicCnt[u4InstID], _ptH265CurrFBufInfo[u4InstID]->i4POC , _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4CRAPOC);
    } else if ( _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4RAPOC != MAX_INT &&
        _ptH265CurrFBufInfo[u4InstID]->i4POC <_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4RAPOC &&
        (_ucNalUnitType[u4InstID] == NAL_UNIT_CODED_SLICE_RASL_N ||_ucNalUnitType[u4InstID] == NAL_UNIT_CODED_SLICE_TFD )){
        printk("\n======== Frame %d (POC %d) Dropped !! i4RAPOC %d ========\n\n", _u4PicCnt[u4InstID], _ptH265CurrFBufInfo[u4InstID]->i4POC , _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.i4RAPOC);
    } else {
        vH265VDecEnd(u4InstID);
    }
#else
    vH265VDecEnd(u4InstID);
#endif
    //printk("[INFO] debug ucPreFBIndex %d; FB index %d\n", _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.ucPreFBIndex , _tVerMpvDecPrm[u4InstID].ucDecFBufIdx );
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.ucPreFBIndex = _tVerMpvDecPrm[u4InstID].ucDecFBufIdx;
    _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;
}
#define ChunChia_LOG 0
// *********************************************************************
// Function : void vH264DecEndProc(UINT32 u4InstID)
// Description : End-of-picture handling for the H.264 core: poll until
//               the picture completes (or the MB position stalls for
//               DEC_RETRY_NUM consecutive checks), power the core down,
//               report timeout/error details to the PC log, compare
//               checksum goldens and detect a VFIFO read-pointer wrap.
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
void vH264DecEndProc(UINT32 u4InstID)
{
    UINT32 u4Cnt;           // coarse poll divider (real work every 64th pass)
    UINT32 u4CntTimeChk;    // consecutive polls with no MB progress
    UINT32 u4Bit,u4CurrentPtr;
    //BOOL fgWaitChk;
    UINT32 u4MbX;
    UINT32 u4MbY;
    UINT32 u4MbX_last;
    UINT32 u4MbY_last;
    char strMessage[256];
#if ChunChia_LOG
    UINT32 u4Mc770, u4Mc774, u4Mc778, u4Mc8B8;
#endif
    // Prime the MB position so the first stall comparison below is valid
    vVDEC_HAL_H264_GetMbxMby(u4InstID,&u4MbX, &u4MbY);
    _fgVDecErr[u4InstID] = FALSE;
    u4Cnt=0;
    u4CntTimeChk = 0;
    // Poll for completion; an unchanged MB position counts toward timeout
    while(u4CntTimeChk < DEC_RETRY_NUM)
    {
        u4Cnt ++;
        if((u4Cnt & 0x3f)== 0x3f)
        {
#ifndef IRQ_DISABLE
#else
            // Polling mode (no decode IRQ): sample the HW finish flag here
            if(u4VDEC_HAL_H264_VDec_ReadFinishFlag(u4InstID))
            {
                _fgVDecComplete[u4InstID] = TRUE;
                /* if(u4InstID == 0)
                {
                BIM_ClearIrq(VECTOR_VDFUL);
                }
                else
                {
                BIM_ClearIrq(VECTOR_VDLIT);
                }*/
            }
#endif
            if(fgIsH264VDecComplete(u4InstID))
            {
                //printk("[H264] vH264DecEndProc, dec complete, cnt:%d \n", u4CntTimeChk);
#ifdef CAPTURE_ESA_LOG //fantasia H264 enable ESA log
                vWrite2PC(u4InstID, 17, (UCHAR*)_pucESALog[u4InstID]);
#endif
                u4CntTimeChk = 0;
                break;
            }
            else
            {
                // Not done: compare current MB position with the previous
                // sample; no movement increments the stall counter
                u4MbX_last = u4MbX;
                u4MbY_last = u4MbY;
                vVDEC_HAL_H264_GetMbxMby(u4InstID,&u4MbX, &u4MbY);
                //vVDecOutputDebugString("\nMbX = %d, MbY = %d\n", u4MbX,u4MbY);
                if((u4MbX == u4MbX_last) && (u4MbY == u4MbY_last))
                {
                    u4CntTimeChk ++;
                }
                else
                {
                    u4CntTimeChk =0;
                }
            }
            u4Cnt = 0;
        }
    }
    vVDEC_HAL_H264_VDec_PowerDown(u4InstID);
    // Fail on timeout, or on any HW error message except codes 8/0x40
    // when the picture nevertheless completed (treated as benign here)
    if((u4CntTimeChk == DEC_RETRY_NUM) ||
        ((u4VDEC_HAL_H264_GetErrMsg(u4InstID) != 0)
        && (!(((u4VDEC_HAL_H264_GetErrMsg(u4InstID) == 8) || (u4VDEC_HAL_H264_GetErrMsg(u4InstID) == 0x40)) && (fgVDEC_HAL_H264_DecPicComplete(u4InstID))))))
    {
#ifndef INTERGRATION_WITH_DEMUX
#ifdef EXT_COMPARE
        _fgVDecErr[u4InstID] = TRUE;
#endif
        if(u4CntTimeChk == DEC_RETRY_NUM)
        {
            vVDecOutputDebugString("\n!!!!!!!!! Decoding Timeout !!!!!!!\n");
            sprintf(strMessage, "%s", "\n!!!!!!!!! Decoding Timeout !!!!!!!\n");
            fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            vVDEC_HAL_H264_VDec_DumpReg(u4InstID);
        }
        // Log error code plus the MB coordinates where decode stopped
        vVDEC_HAL_H264_GetMbxMby(u4InstID,&u4MbX, &u4MbY);
        vVDecOutputDebugString("\n!!!!!!!!! Decoding Error 0x%.8x in pic %d (frm %d) !!!!!!!\n", u4VDEC_HAL_H264_GetErrMsg(u4InstID), _u4PicCnt[u4InstID], _u4FileCnt[u4InstID]);
        sprintf(strMessage,"\n!!!!!!!!! Decoding Error 0x%.8x at MC (x,y)=(%d/%d, %d/%d) in pic %d (frm %d) !!!!!!!\n", u4VDEC_HAL_H264_GetErrMsg(u4InstID),
        u4MbX, ((_ptCurrFBufInfo[u4InstID]->u4W + 15)>> 4) - 1, u4MbY, (((_ptCurrFBufInfo[u4InstID]->u4H >> (1-(fgIsFrmPic(u4InstID)))) + 15)>> 4) - 1, _u4PicCnt[u4InstID], _u4FileCnt[u4InstID]);
        fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
        sprintf(strMessage,"the length is %d (0x%.8x)\n", _tInFileInfo[u4InstID].u4FileLength, _tInFileInfo[u4InstID].u4FileLength);
        fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
        //vWrite2PC(u4InstID, 1, _pucVFifo[u4InstID]);
        vVDEC_HAL_H264_VDec_DumpReg(u4InstID);
        //fgWaitChk = TRUE;
        //while(fgWaitChk);
#endif
    }
    vReadH264ChkSumGolden(u4InstID);
    // @@ fantasia -> 2012-02-24 don't show the registers value
    //vH264ChkSumDump(u4InstID);
    //Print LOG
#if ChunChia_LOG
    // Optional MC performance counters (compiled out by default)
    u4Mc770 = u4VDecReadMC(u4InstID, 0x770);
    u4Mc774 = u4VDecReadMC(u4InstID, 0x774);
    u4Mc778 = u4VDecReadMC(u4InstID, 0x778);
    u4Mc8B8 = u4VDecReadMC(u4InstID, 0x8B8);
    sprintf(strMessage,"======\n");
    printk("%s", strMessage);
    sprintf(strMessage,"(dram_dle_cnt: 0x%x, mc_dle_cnt: 0x%x, cycle_cnt: 0x%x, dram_dle_by_preq: 0x%x)\n", u4Mc770, u4Mc774, u4Mc778, u4Mc8B8);
    printk("%s", strMessage);
#endif
    // Read pointer below the previous value means the ring VFIFO wrapped
    u4CurrentPtr = u4VDEC_HAL_H264_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bit);
    if(u4CurrentPtr < _u4PrevPtr[u4InstID])//HW is ring,so read fifo overflow
    {
        printk("HW decode overflow ........!u4CurrentPtr = 0x%x,VFIFO = 0x%x\n",u4CurrentPtr,V_FIFO_SZ);
    }
    _u4PrevPtr[u4InstID] = u4CurrentPtr;
    vH264VDecEnd(u4InstID);
}
// Dump the H.264 checksum registers (VLD, MC, AVCMV, VLDTOP, AVCVLD)
// to the kernel log.  Register index, byte offset and value are printed
// for each entry, in the same order as the original implementation.
void vH264ChkSumDump(UINT32 u4InstID)
{
    static const UINT32 au4McSingles[4] = {483, 571, 498, 446};
    UINT32 u4Reg;
    UINT32 u4Pos;

    printk("H264 Check sum!\n");
    /* VLD checksum registers 72..75 */
    for (u4Reg = 72; u4Reg < 76; u4Reg++)
    {
        printk("VLD #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadVLD(u4InstID, u4Reg << 2));
    }
    /* MC checksum register ranges */
    for (u4Reg = 378; u4Reg < 386; u4Reg++)
    {
        printk("MC #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadMC(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 388; u4Reg < 398; u4Reg++)
    {
        printk("MC #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadMC(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 479; u4Reg < 482; u4Reg++)
    {
        printk("MC #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadMC(u4InstID, u4Reg << 2));
    }
    /* scattered individual MC registers, kept in the original print order */
    for (u4Pos = 0; u4Pos < 4; u4Pos++)
    {
        u4Reg = au4McSingles[u4Pos];
        printk("MC #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadMC(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 147; u4Reg < 152; u4Reg++)
    {
        printk("AVCMV #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadAVCMV(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 41; u4Reg < 45; u4Reg++)
    {
        printk("VLDTOP #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadVLDTOP(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 165; u4Reg < 169; u4Reg++)
    {
        printk("AVCVLD #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadAVCVLD(u4InstID, u4Reg << 2));
    }
    for (u4Reg = 175; u4Reg < 177; u4Reg++)
    {
        printk("AVCVLD #%d(0x%x) = 0x%x\n", u4Reg, u4Reg << 2, u4VDecReadAVCVLD(u4InstID, u4Reg << 2));
    }
}
// *********************************************************************
// Function : BOOL fgIsMPEGVDecComplete(UINT32 u4InstID)
// Description : Report whether the MPEG decode of the current picture
//               has finished: the completion flag must be set AND the
//               MB position must have reached the last macroblock of
//               the picture (frame pics use full height, field pics
//               half height).
// Parameter : u4InstID - decoder instance ID
// Return : TRUE when decode is complete, FALSE otherwise
// *********************************************************************
BOOL fgIsMPEGVDecComplete(UINT32 u4InstID)
{
    UINT32 u4MbX;
    UINT32 u4MbY;
    UINT32 u4LastMbX;
    UINT32 u4LastMbY;
    UINT32 u4HShift;

    if (!_fgVDecComplete[u4InstID])
    {
        return FALSE;
    }
    vVDEC_HAL_MPEG_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
    /* last MB column index: ceil((W + XOff) / 16) - 1 */
    u4LastMbX = ((_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecW +
                  _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecXOff + 15) >> 4) - 1;
    /* field pictures cover half the frame height per decode pass (>>5) */
    u4HShift = (_u4PicStruct[u4InstID] == FRM_PIC) ? 4 : 5;
    u4LastMbY = ((_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecH +
                  _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecYOff + 15) >> u4HShift) - 1;
    if ((u4MbX < u4LastMbX) || (u4MbY < u4LastMbY))
    {
        return FALSE;
    }
    return TRUE;
}
//Qing Li add here for dump reg and pic raw data
extern void VDecDumpMP4Register(UINT32 u4VDecID);
extern void VDecDumpMpegRegister(UINT32 u4VDecID,BOOL fgTriggerAB);
// Raw-picture dump controls: pictures with file count in
// [_u4MpvEmuDumpStartPicCount, _u4MpvEmuDumpEndPicCount] are written out
// by vMPEGVDecDumpPic(); (UINT32)(-1) disables the window.
BOOL _fgDumpDeblocky = FALSE;                       // TRUE: dump post-deblocking planes instead of decode buffers
UINT32 _u4MpvEmuDumpCount = 0;                      // number of pictures written so far
UINT32 _u4MpvEmuDumpStartPicCount = (UINT32)(-1);   // first picture index to dump
UINT32 _u4MpvEmuDumpEndPicCount = (UINT32)(-1);     // last picture index to dump
FILE* _pFileHandleY = NULL;                         // Y plane dump file
FILE* _pFileHandleCbcr = NULL;                      // CbCr plane dump file
FILE* _pFileHandleDeblockyY = NULL;                 // deblocked Y plane dump file
FILE* _pFileHandleDeblockyCbcr = NULL;              // deblocked CbCr plane dump file
void vMPEGVDecDumpPic(UINT32 u4InstID)
{
CHAR* pFileNameY = "B:\\dumppic\\YGroupPic.raw";
CHAR* pFileNameCbcr = "B:\\dumppic\\CGroupPic.raw";
CHAR* pFileNameYDeblocky = "B:\\dumppic\\YGroupPic_Deblocky.raw";
CHAR* pFileNameCbcrDeblocky = "B:\\dumppic\\CGroupPic_Deblocky.raw";
FILE* pFileTemp = NULL;
UINT32 u4DataSizeY = 0;
UINT32 u4DataSizeCbcr = 0;
UINT32 u4SizeV = 0;
UINT32 u4SizeH = 0;
if ((_u4FileCnt[u4InstID] < _u4MpvEmuDumpStartPicCount) ||
(_u4FileCnt[u4InstID] > _u4MpvEmuDumpEndPicCount))
{
return;
}
printk("MPV Emu start to dump No.%d pic to PC\n", _u4FileCnt[u4InstID]);
if (0 != (_u4RealHSize[u4InstID] * _u4RealVSize[u4InstID]))
{
u4SizeH = _u4RealHSize[u4InstID];
u4SizeV = _u4RealVSize[u4InstID];
}
else
{
u4SizeH = _u4HSizeVal[u4InstID];
u4SizeV = _u4VSizeVal[u4InstID];
}
u4DataSizeY = (((u4SizeH + 15)>>4)<<4) * (((u4SizeV + 31)>>5)<<5);
u4DataSizeCbcr = u4DataSizeY / 2;
printk("MPV Emu H size %d\n", (((u4SizeH + 15)>>4)<<4));
printk("MPV Emu V size %d\n", (((u4SizeV + 31)>>5)<<5));
if (!_fgDumpDeblocky)
{
/* Dump Y */
do
{
if (NULL == _pFileHandleY)
{
pFileTemp = linux_fopen(pFileNameY, "wb");
if (NULL == pFileTemp)
{
printk("MPV Emu Create %s fail\n", pFileNameY);
break;
}
_pFileHandleY = pFileTemp;
printk("MPV Emu create %s success\n", pFileNameY);
}
if (u4DataSizeY != linux_fwrite ((char* )(_pucDecWorkBuf[u4InstID]), 1, u4DataSizeY, _pFileHandleY))
{
printk("MPV Emu Write to %s fail\n", pFileNameY);
printk("MPV Emu need to Write data count %d\n", u4DataSizeY);
linux_fclose(_pFileHandleY);
_pFileHandleY = NULL;
printk("MPV Emu close file %s\n", pFileNameY);
}
printk("MPV Emu Write to %s success\n", pFileNameY);
} while(0);
/* Dump Cbcr */
do
{
if (NULL == _pFileHandleCbcr)
{
pFileTemp = linux_fopen(pFileNameCbcr, "wb");
if (NULL == pFileTemp)
{
printk("MPV Emu Create %s fail\n", pFileNameCbcr);
break;
}
_pFileHandleCbcr = pFileTemp;
printk("MPV Emu create %s success\n", pFileNameCbcr);
}
if (u4DataSizeCbcr != linux_fwrite ((char* )(_pucDecCWorkBuf[u4InstID]), 1, u4DataSizeCbcr, _pFileHandleCbcr))
{
printk("MPV Emu Write to %s fail\n", pFileNameCbcr);
printk("MPV Emu need to Write data count %d\n", u4DataSizeCbcr);
linux_fclose(_pFileHandleCbcr);
_pFileHandleCbcr = NULL;
printk("MPV Emu close file %s\n", pFileNameCbcr);
}
printk("MPV Emu Write to %s success\n", pFileNameCbcr);
} while(0);
}
else
{
/* Dump Deblocking Y */
do
{
if (NULL == _pFileHandleDeblockyY)
{
pFileTemp = linux_fopen(pFileNameYDeblocky, "wb");
if (NULL == pFileTemp)
{
printk("MPV Emu Create %s fail\n", pFileNameYDeblocky);
break;
}
_pFileHandleDeblockyY = pFileTemp;
printk("MPV Emu create %s success\n", pFileNameYDeblocky);
}
if (u4DataSizeY != linux_fwrite ((char* )(_pucPpYSa[u4InstID]), 1, u4DataSizeY, _pFileHandleDeblockyY))
{
printk("MPV Emu Write to %s fail\n", pFileNameYDeblocky);
printk("MPV Emu need to Write data count %d\n", u4DataSizeY);
linux_fclose(_pFileHandleDeblockyY);
_pFileHandleDeblockyY = NULL;
printk("MPV Emu close file %s\n", pFileNameYDeblocky);
}
printk("MPV Emu Write to %s success\n", pFileNameYDeblocky);
} while(0);
/* Dump Deblocking Cbcr */
do
{
if (NULL == _pFileHandleDeblockyCbcr)
{
pFileTemp = linux_fopen(pFileNameCbcrDeblocky, "wb");
if (NULL == pFileTemp)
{
printk("MPV Emu Create %s fail\n", pFileNameCbcrDeblocky);
break;
}
_pFileHandleDeblockyCbcr = pFileTemp;
printk("MPV Emu create %s success\n", pFileNameCbcrDeblocky);
}
if (u4DataSizeCbcr != linux_fwrite ((char* )(_pucPpCSa[u4InstID]), 1, u4DataSizeCbcr, _pFileHandleDeblockyCbcr))
{
printk("MPV Emu Write to %s fail\n", pFileNameCbcrDeblocky);
printk("MPV Emu need to Write data count %d\n", u4DataSizeCbcr);
linux_fclose(_pFileHandleDeblockyCbcr);
_pFileHandleDeblockyCbcr = NULL;
printk("MPV Emu close file %s\n", pFileNameCbcrDeblocky);
}
printk("MPV Emu Write to %s success\n", pFileNameCbcrDeblocky);
} while(0);
}
_u4MpvEmuDumpCount ++;
printk("MPV Emu Has written %d pic to PC\n", _u4MpvEmuDumpCount);
// close all files after completing writing raw data to these files
if (_u4FileCnt[u4InstID] == _u4MpvEmuDumpEndPicCount)
{
if (_pFileHandleY)
{
linux_fclose(_pFileHandleY);
_pFileHandleY = NULL;
printk("MPV Emu close file %s\n", pFileNameY);
}
if (_pFileHandleCbcr)
{
linux_fclose(_pFileHandleCbcr);
_pFileHandleCbcr = NULL;
printk("MPV Emu close file %s\n", pFileNameCbcr);
}
if (_pFileHandleDeblockyY)
{
linux_fclose(_pFileHandleDeblockyY);
_pFileHandleDeblockyY = NULL;
printk("MPV Emu close file %s\n", pFileNameYDeblocky);
}
if (_pFileHandleDeblockyCbcr)
{
linux_fclose(_pFileHandleDeblockyCbcr);
_pFileHandleDeblockyCbcr = NULL;
printk("MPV Emu close file %s\n", pFileNameCbcrDeblocky);
}
_fgDumpDeblocky = FALSE;
_u4MpvEmuDumpStartPicCount = (UINT32)(-1);
_u4MpvEmuDumpEndPicCount = (UINT32)(-1);
_u4MpvEmuDumpCount = 0;
}
}
// When TRUE, enables the (currently commented-out) post-decode register
// dump in vMPEGVDecEnd()
BOOL _fgDumpReg = FALSE;
// *********************************************************************
// Function : void vMPEGVDecEnd(UINT32 u4InstID)
// Description : Post-decode wrap-up for the MPEG-family codecs: record
//               the VLD read position, run the per-codec compare/dump
//               path, re-initialize the decode HW and barrel shifter
//               for the next picture, restore the quantization matrices
//               and (ring-VFIFO builds) refill the consumed FIFO half.
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
void vMPEGVDecEnd(UINT32 u4InstID)
{
    //VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
    //VDEC_INFO_H264_FBUF_INFO_T *tFBufInfo;
    VDEC_INFO_MPEG_VFIFO_PRM_T rMpegVFifoInitPrm;
    VDEC_INFO_MPEG_BS_INIT_PRM_T rMpegBSInitPrm;
    UINT32 u4VldByte,u4VldBit;
    rMpegVFifoInitPrm.u4CodeType = _u4CodecVer[u4InstID];
    //tFBufInfo = _ptCurrFBufInfo[u4InstID];
    //tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
    // Snapshot the VLD byte position.  NOTE(review): the +/-4 adjustment
    // around a not-coded VOP looks like barrel-shifter prefetch
    // compensation -- confirm against the HAL documentation.
    if(_fgVerVopCoded0[u4InstID])
    {
        u4VldByte = u4VDEC_HAL_MPEG_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit) + 4;
    }
    else
    {
        u4VldByte = u4VDEC_HAL_MPEG_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit) - 4;
    }
    _u4WMVByteCount[u4InstID] = u4VldByte;
#ifdef LETTERBOX_DETECTION_ONLY
    vCheckLBDResult(u4InstID);
#else
    // MPEG4 compares via CRC (unless built for picture dump); all other
    // codecs upload the reconstructed picture to the PC for comparison
    if (_u4CodecVer[u4InstID] == VDEC_MPEG4)
    //if (1) //Cheng-Jung 20120305 Use CRC comparison for MPEG4, H263 and DIVX3
    {
#ifdef MPEG4_CRC_CMP
#ifndef VPMODE
        if(!_fgVerVopCoded0[u4InstID])
#endif
        {
            vMPEG4CrcCmp(u4InstID,NULL,0);
        }
#else
        vMPEGWrData2PC(u4InstID, _pucDumpYBuf[u4InstID], ((((_u4RealHSize[u4InstID] + 15) >> 4) * ((_u4RealVSize[u4InstID] + 31) >> 5)) << 9));
#endif
    }
    else
    {
        vMPEGWrData2PC(u4InstID, _pucDumpYBuf[u4InstID], ((((_u4RealHSize[u4InstID] + 15) >> 4) * ((_u4RealVSize[u4InstID] + 31) >> 5)) << 9));
    }
#endif
    /*
    if (_fgDumpReg)
    {
        printk("MPV Dump register after decode\n");
        VDecDumpMpegRegister(u4InstID,1);
        printk("\n MPV Dump register end \n");
    }
    */
    // reset HW
#ifdef REDEC
    // Re-decode path: rewind the barrel shifter to the saved VLD position
    // and wait to decode the same picture again
    if(_u4ReDecCnt[u4InstID] > 0)
    {
        rMpegVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        i4VDEC_HAL_MPEG_InitVDecHW(u4InstID,&rMpegVFifoInitPrm);
        rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rMpegBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + _u4VLDPosByte[u4InstID];
#ifndef RING_VFIFO_SUPPORT
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
        i4VDEC_HAL_MPEG_InitBarrelShifter(0, u4InstID, &rMpegBSInitPrm);
        u4VDEC_HAL_MPEG_ShiftGetBitStream(0, u4InstID, _u4VLDPosBit[u4InstID]);
        // Restore Quantization Matrix
        if(_fgVerLoadIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, TRUE);
        }
        if(_fgVerLoadNonIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, FALSE);
        }
        _tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
        return;
    }
#endif
    // Field pictures: toggle the first/second-field flag
    if(!fgMPEGIsFrmPic(u4InstID))
    {
        _fgDec2ndFldPic[u4InstID] = 1 - _fgDec2ndFldPic[u4InstID];//vToggleDecFlag(DEC_FLG_2ND_FLD_PIC);
    }
    _u4MpegDecPicNo[u4InstID]++;
    if(_u4CodecVer[u4InstID] == VDEC_DIVX3)
    {
        // DIVX3: next picture starts at the externally tracked position
        // (_u4Divx3SetPos), wrapped into the ring VFIFO
        rMpegVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        i4VDEC_HAL_MPEG_InitVDecHW(u4InstID,&rMpegVFifoInitPrm);
        if(_u4Divx3SetPos[u4InstID] >= V_FIFO_SZ)
        {
            _u4Divx3SetPos[u4InstID] = _u4Divx3SetPos[u4InstID] - V_FIFO_SZ;
        }
        rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rMpegBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + _u4Divx3SetPos[u4InstID];
#ifndef RING_VFIFO_SUPPORT
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
        i4VDEC_HAL_MPEG_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rMpegBSInitPrm);
        // Restore Quantization Matrix
        if(_fgVerLoadIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, TRUE);
        }
        if(_fgVerLoadNonIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, FALSE);
        }
    }
    else // MPEG
    {
        // MPEG1/2/4: next picture starts at the VLD position read above
        rMpegVFifoInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegVFifoInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        i4VDEC_HAL_MPEG_InitVDecHW(u4InstID,&rMpegVFifoInitPrm);
        rMpegBSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rMpegBSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rMpegBSInitPrm.u4ReadPointer = (UINT32)_pucVFifo[u4InstID] + u4VldByte;
#ifndef RING_VFIFO_SUPPORT
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rMpegBSInitPrm.u4WritePointer = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));//mtk40343
#endif
        i4VDEC_HAL_MPEG_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rMpegBSInitPrm);
        // Restore Quantization Matrix
        if(_fgVerLoadIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, TRUE);
        }
        if(_fgVerLoadNonIntraMatrix[u4InstID])
        {
            vVDEC_HAL_MPEG_ReLoadQuantMatrix(u4InstID, FALSE);
        }
    }
    //6589NEW (4) Error concealment end of bitstream workaround
#ifdef MPEG4_6589_ERROR_CONCEAL
    //printk("<vdec> Bitstream pos: %d, total len: %d\n", u4VldByte, _u4TotalBitstreamLen[u4InstID]);
    if (u4VldByte >= _u4TotalBitstreamLen[u4InstID] - 20)
    {
        _u4VerBitCount[u4InstID] = 0xffffffff;
    }
#endif
#ifndef INTERGRATION_WITH_DEMUX
#ifdef RING_VFIFO_SUPPORT
    // Ring-VFIFO refill: once the read pointer crosses into the other
    // half of the FIFO, reload the half just freed from the input file
    if((_u4LoadBitstreamCnt[u4InstID]&0x1) && (rMpegBSInitPrm.u4ReadPointer >
        ((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
    {
        _tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
        _tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID];
        _tInFileInfo[u4InstID].u4FileOffset = (V_FIFO_SZ * ((_u4LoadBitstreamCnt[u4InstID]+ 1)/2));
        _tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
        if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
        {
            fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
        }
#else
        fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
        _u4LoadBitstreamCnt[u4InstID]++;
    }
    else if((!(_u4LoadBitstreamCnt[u4InstID]&0x1)) && (rMpegBSInitPrm.u4ReadPointer <
        ((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
    {
        _tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
        _tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID] + (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileOffset = ((V_FIFO_SZ * (_u4LoadBitstreamCnt[u4InstID]+ 1)) /2);
        _tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
        if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
        {
            fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
        }
#else
        fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
        _u4LoadBitstreamCnt[u4InstID]++;
    }
#endif
#endif
    _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;
}
// *********************************************************************
// Function : void vMPEGDecEndProc(UINT32 u4InstID)
// Description : End-of-picture handling for the MPEG-family codecs:
//               poll until the picture completes (or the MB position
//               stalls for DEC_RETRY_NUM checks), report timeout/error
//               details, run per-codec fix-ups (DIVX3 suffix check,
//               MPEG4 B-code workaround), align the read pointer,
//               compare checksum goldens, optionally dump the picture
//               and registers, then hand off to vMPEGVDecEnd().
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
void vMPEGDecEndProc(UINT32 u4InstID)
{
#ifdef IRQ_DISABLE
    BOOL fgMpeg4;
#endif
    UINT32 u4Cnt;           // coarse poll divider (real work every 64th pass)
    UINT32 u4CntTimeChk;    // consecutive polls with no MB progress
    UINT32 u4MbX;
    UINT32 u4MbY;
    char strMessage[256];
    UINT32 u4MbX_last;
    UINT32 u4MbY_last;
    UINT32 u4MpegErrType = 0;
    VDEC_INFO_MPEG_ERR_INFO_T prMpegErrInfo;
    UINT32 u4RegVal;        // referenced only by the commented-out dump below
    INT32 i;                // referenced only by the commented-out dump below
    u4Cnt=0;
    u4CntTimeChk = 0;
    _fgVDecErr[u4InstID] = FALSE;
    // Everything up to the final dump/end calls is skipped for a
    // not-coded VOP (nothing was decoded) unless built for VPMODE
#ifndef VPMODE
    if(!_fgVerVopCoded0[u4InstID])
#endif
    {
#ifdef IRQ_DISABLE
        fgMpeg4 = (_u4CodecVer[u4InstID] != VDEC_MPEG2)? TRUE : FALSE;
#endif
        // Bug fix: prime the MB position before entering the poll loop.
        // The loop compares u4MbX/u4MbY against their previous samples,
        // and they were read uninitialized on the first pass (undefined
        // behavior).  Same priming as vH264DecEndProc.
        vVDEC_HAL_MPEG_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
        while(u4CntTimeChk < DEC_RETRY_NUM)
        {
            u4Cnt ++;
            if((u4Cnt & 0x3f)== 0x3f)
            {
#ifndef IRQ_DISABLE
#else
                // Polling mode (no decode IRQ): sample the finish flag here
                if(u4VDEC_HAL_MPEG_VDec_ReadFinishFlag(u4InstID, fgMpeg4))
                {
                    //UINT32 u4VldBit;
#ifdef REG_LOG_NEW
#ifndef MPEG4_6589_ERROR_CONCEAL
#if (DUMP_ERROR == 0)
                    printk("[VDEC] End decoding _u4FileCnt = %d, dump = %d\n", _u4FileCnt[u4InstID], _u4DumpRegPicNum[u4InstID]);
                    if (_u4FileCnt[u4InstID] == _u4DumpRegPicNum[u4InstID])
#endif
                    //if (u4VDEC_HAL_MPEG_VDec_ReadErrorFlag(u4InstID)) // REMOVE
                    {
                        _fgRegLogConsole[u4InstID] = FALSE;
                        VDecDumpMP4Register(u4InstID);
                        fgWrData2PC(_pucRegisterLog[u4InstID],_u4RegisterLogLen[u4InstID],7,_RegFileName[u4InstID]);
                        _u4RegisterLogLen[u4InstID] = 0;
                        _fgRegLogConsole[u4InstID] = TRUE;
                    }
#else
                    /*
                    if (u4VDEC_HAL_MPEG_VDec_ReadErrorFlag(u4InstID))
                    {
                        _fgRegLogConsole[u4InstID] = FALSE;
                        VDecDumpMP4Register(u4InstID);
                        fgWrData2PC(_pucRegisterLog[u4InstID],_u4RegisterLogLen[u4InstID],7,_RegFileName[u4InstID]);
                        _u4RegisterLogLen[u4InstID] = 0;
                        _fgRegLogConsole[u4InstID] = TRUE;
                    }
                    */
#endif
#endif
                    _fgVDecComplete[u4InstID] = TRUE;
                    //printk("<vdec> got finish flag\n");
                    //u4VDEC_HAL_MPEG_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4VldBit);
#ifdef CAPTURE_ESA_LOG
                    vWrite2PC(u4InstID, 17, (UCHAR*)_pucESALog[u4InstID]);
#endif
                    /* if(u4InstID == 0)
                    {
                    BIM_ClearIrq(VECTOR_VDFUL);
                    }
                    else
                    {
                    BIM_ClearIrq(VECTOR_VDLIT);
                    }*/
                }
#endif
                if(fgIsMPEGVDecComplete(u4InstID))
                {
                    u4CntTimeChk = 0;
                    break;
                }
                else
                {
                    // No completion yet: an unchanged MB position counts
                    // toward the stall/timeout threshold
                    u4MbX_last = u4MbX;
                    u4MbY_last = u4MbY;
                    vVDEC_HAL_MPEG_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
                    if((u4MbX == u4MbX_last) && (u4MbY == u4MbY_last))
                    {
                        u4CntTimeChk ++;
                    }
                    else
                    {
                        u4CntTimeChk =0;
                    }
                }
                u4Cnt = 0;
                if (_fgVDecComplete[u4InstID])
                {
                    break;
                }
            }
        }
        if(_ucMpegVer[u4InstID] != VDEC_MPEG2)
        {
            u4MpegErrType = u4VDEC_HAL_MPEG4_GetErrType(u4InstID);
        }
        vVDEC_HAL_MPEG_GetErrInfo(u4InstID, &prMpegErrInfo);
        // Report a failure on timeout or on any reported HW error
        if((u4CntTimeChk == DEC_RETRY_NUM) ||
            (u4MpegErrType!= 0) || (prMpegErrInfo.u4MpegErrCnt != 0))
        {
#ifndef INTERGRATION_WITH_DEMUX
            //#ifdef EXT_COMPARE
            _fgVDecErr[u4InstID] = TRUE;
            //#endif
            if(u4CntTimeChk == DEC_RETRY_NUM)
            {
                vVDecOutputDebugString("\n<vdec> !!!!!!!!! Decoding Timeout !!!!!!!\n");
                sprintf(strMessage, "%s", "\n<vdec> !!!!!!!!! Decoding Timeout !!!!!!!\n");
                fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
                //vDumpReg();
#ifdef REG_LOG_NEW
#if (DUMP_ERROR == 0)
                printk("[VDEC] End decoding (timeout) _u4FileCnt = %d, dump = %d\n", _u4FileCnt[u4InstID], _u4DumpRegPicNum[u4InstID]);
                if (_u4FileCnt[u4InstID] == _u4DumpRegPicNum[u4InstID])
#endif
                {
                    _fgRegLogConsole[u4InstID] = FALSE;
                    VDecDumpMP4Register(u4InstID);
                    fgWrData2PC(_pucRegisterLog[u4InstID],_u4RegisterLogLen[u4InstID],7,_RegFileName[u4InstID]);
                    _u4RegisterLogLen[u4InstID] = 0;
                    _fgRegLogConsole[u4InstID] = TRUE;
                }
#endif
            }
            // Log error codes plus the MB coordinates where decode stopped
            vVDEC_HAL_MPEG_GetMbxMby(u4InstID, &u4MbX, &u4MbY);
            vVDecOutputDebugString("\n<vdec> !!!!!!!!! Decoding Error 0x%.8x!!!!!!!\n", prMpegErrInfo.u4MpegErrType);
            sprintf(strMessage,"\n//<vdec> !!!!!!!!! Decoding Error 0x%.8x 0x%.8x 0x%.8x at MC (x,y)=(%d/%d, %d/%d) !!!!!!!\n", u4MpegErrType,
            prMpegErrInfo.u4MpegErrType,prMpegErrInfo.u4MpegErrRow,u4MbX, ((_tVerPic[u4InstID].u4W + 15)>> 4) - 1, u4MbY,
            (((_tVerPic[u4InstID].u4H >> (1-(fgMPEGIsFrmPic(u4InstID)))) + 15)>> 4) - 1);
            printk("%s\n", strMessage);
            // fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            sprintf(strMessage,"<vdec> the length is %d (0x%.8x)\n", _tInFileInfo[u4InstID].u4FileLength, _tInFileInfo[u4InstID].u4FileLength);
            // fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            vReadMPEGChkSumGolden(u4InstID);
            // vWrite2PC(u4InstID, 1, _pucVFifo[u4InstID]);
            // vWrite2PC(u4InstID, 12, (UCHAR *)(&_u4DumpChksum[u4InstID][0]));
            //vDumpReg();
#endif
        }
        /*
        if (u4VDEC_HAL_MPEG_VDec_ReadErrorFlag(u4InstID))
        {
            printk("VLD_243[8] = 1, error occurs\n");
            for (i=241; i<246; i++)
            {
                u4RegVal = u4VDecReadVLD(u4InstID, i*4);
                printk("VLD_%d = 0x%08x\n", i, u4RegVal);
            }
        }
        else
        */
        {
            printk("DECODING FINISH WITH NO ERROR!!!\n");
        }
        if(_ucMpegVer[u4InstID] != VDEC_MPEG2)
        {
            vVDEC_HAL_MPEG_VLDVdec2Barl(u4InstID);
        }
        if(_ucMpegVer[u4InstID] == VDEC_DIVX3)
        {
            vVerifyDx3SufxChk(u4InstID);
        }
        if(_u4CodecVer[u4InstID] == VDEC_MPEG4)
        {
            //6589NEW (C)
#if (!MPEG4_6589_SUPPORT)
            // Mpeg4 workaround
            vMp4FixBCode(u4InstID);
#endif
        }
        vVDEC_HAL_MPEG_AlignRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], BYTE_ALIGN);
        vVerifySetVSyncPrmBufPtr(u4InstID, _u4DecBufIdx[u4InstID]);
        vReadMPEGChkSumGolden(u4InstID);
    }
#if 0// Qing Li add here for test speed log
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
    UINT32 u4MbnReg474, u4MbnReg476, u4MbnReg477;
    u4MbnReg474 = u4VDecReadMC(u4InstID, (474<<2));
    u4MbnReg476 = u4VDecReadMC(u4InstID, (476<<2));
    u4MbnReg477 = u4VDecReadMC(u4InstID, (477<<2));
    sprintf(strMessage, "\nMBN LOG_474 = 0x%.8x!!!!!!!\n", u4MbnReg474);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
    sprintf(strMessage, "\nMBN LOG_476 = 0x%.8x!!!!!!!\n", u4MbnReg476);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
    sprintf(strMessage, "\nMBN LOG_477 = 0x%.8x!!!!!!!\n", u4MbnReg477);
    fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
#endif
#endif
    vMPEGVDecDumpPic(u4InstID);
#if 0 //mc performance
    {
        UINT32 u4CodeType,u4CycleDram;
        UCHAR u4TypeCode[2];
        u4CodeType = u4VDecReadVLD(u4InstID,RW_VLD_VOP_TYPE)&0x0f;
        switch(u4CodeType)
        {
        case 0:
            u4TypeCode[0] = 'I';
            break;
        case 2:
            u4TypeCode[0] = 'P';
            break;
        case 4:
            u4TypeCode[0] = 'B';
            break;
        case 8:
            u4TypeCode[0] = 'S';
            break;
        }
        u4TypeCode[1] = '\0';
        u4CycleDram = u4VDecReadMC(u4InstID,RO_MC_DRAM_CYCLE);
        printk("VDEC_perf_measure: seq_name=%s pic_idx=%d pic_width=%d pic_height=%d pic_type=%s CYCLE_DRAM=%d\n",_bFileStr1[u4InstID][8],
        _u4FileCnt[u4InstID],_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecW,_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecMPEGDecPrm.u4DecH,
        u4TypeCode,u4CycleDram);
        printk("DRAM BEHAVIOR SETTING 0x%x\n",u4VDecReadMC(u4InstID,RW_MC_PARA_BEHAVIOR));
    }
#endif
#if 0
    if(_u4FileCnt[u4InstID] == 5)
    {
        printk("WorkAround before 4139 = 0x%x,4140 = 0x%x\n",u4ReadSram(u4InstID,4139),u4ReadSram(u4InstID,4140));
        vWriteSram(u4InstID,4139,u4ReadSram(u4InstID,4140));
        printk("WorkAround after 4139 = 0x%x,4140 = 0x%x\n",u4ReadSram(u4InstID,4139),u4ReadSram(u4InstID,4140));
    }
#endif
    //dump sram ==>dcac and bcode data
#if 0
    vDumpSram(u4InstID);
#endif
#if 0
    {
        UINT i;
        for(i = 2; i < 10; i++)
        {
            printk("CRC %d == 0x%x\n",i,u4VDecReadCRC(u4InstID,i<<2));
        }
    }
#endif
#if CONFIG_DRV_VERIFY_SUPPORT
#if (DUMP_ERROR == 0)
    if (_u4FileCnt[u4InstID] == _u4DumpRegPicNum[u4InstID])
#endif
    {
        //vVDEC_HAL_MPEG_VDec_DumpReg(u4VDecID, TRUE);
        VDecDumpMpegRegister(u4InstID,1);
    }
#endif
    vMPEGVDecEnd(u4InstID);
}
#define VDEC_BCODE_SRAM_ADDR 8192
// Mpeg4 B-code workaround for P-VOPs: when the total macroblock count is
// an exact multiple of 32, the B-code word at index (MBs/32) is copied
// down one slot to index (MBs/32 - 1), either in decoder SRAM or in the
// DRAM copy of the table depending on the build.
// NOTE(review): presumably compensates for the HW writing the final word
// one entry too far -- confirm against the B-code table layout.
void vMp4FixBCode(UINT32 u4InstID)
{
    UINT32 u4MbW;
    UINT32 u4MbH;
    UINT32 u4MbTotal;
    UINT32 u4WordIdx;
#ifdef VDEC_SRAM
    UINT32 u4SramAddr;
#else
    UINT32 *pu4BCode;
#endif

    if (_u4PicCdTp[u4InstID] != P_TYPE)
    {
        return;
    }
    u4MbW = (_u4RealHSize[u4InstID] + 15) / 16;
    u4MbH = (_u4RealVSize[u4InstID] + 15) / 16;
    u4MbTotal = u4MbW * u4MbH;
    if ((u4MbTotal % 32) != 0)
    {
        return;
    }
    u4WordIdx = u4MbTotal / 32;
#ifdef VDEC_SRAM
    u4SramAddr = VDEC_BCODE_SRAM_ADDR + u4WordIdx;
    vWriteSram(u4InstID, (u4SramAddr - 1), u4ReadSram(u4InstID, u4SramAddr));
    // printk("<vdec>u4ReadSram = 0x%x\n",u4ReadSram(u4InstID,(u4SramAddr-1)));
#else
    pu4BCode = (UINT32 *)_pucMp4Bcode[u4InstID];
    pu4BCode[u4WordIdx - 1] = pu4BCode[u4WordIdx];
#endif
}
// DV decode-complete check.  The original status-register polling logic
// is retained below in comments; the function is currently stubbed to
// report "complete" unconditionally.
// Parameter : u4InstID - decoder instance ID
// Return : always TRUE
BOOL fgIsDvDecComplete(UINT32 u4InstID)
{
    /*
    //UINT32 u4MbX;
    //UINT32 u4MbY;
    UINT32 dwTmp;
    dwTmp = dReadDV_8520(RO_DV_ST);
    //if(dwTmp && 0x1)
    if(dwTmp & 0x1)
    {
    dwDVDecodeDone_8520++;
    }
    //else if(dwTmp==0x2)
    else if(dwTmp & 0x2)
    {
    dwDVDecodeTimeOut_8520++;
    }
    else
    {
    dwDVDecodeOther_8520++;
    }
    if(_fgVDecComplete[u4InstID])
    {
    _fgVDecComplete[u4InstID] = FALSE;
    return TRUE;
    }
    return FALSE;
    */
    return TRUE;
}
#if VP8_MB_ROW_MODE_SUPPORT_ME2_INTEGRATION
extern void vVerVP8DecEndProc_MB_ROW_End(UINT32 u4InstID);
// End-of-row handling for WebP/VP8 MB-row decode mode: run the row-end
// procedure, return the verify state machine to the parser state and
// count the finished picture.
void vChkVDec_Webp_Row_Mode(UINT32 u4InstID)
{
    //vVerVP8DecEndProc(u4InstID);
    vVerVP8DecEndProc_MB_ROW_End(u4InstID);
    _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;//DEC_NORM_DEC_END;
    _u4PicCnt[u4InstID] ++;
}
#endif
// *********************************************************************
// Function : void vChkVDec(UINT32 u4InstID)
// Description : Check if decoded complete & related settings - routes
//               end-of-decode handling to the handler for the active
//               codec, then advances the picture counter (unless this
//               is a re-decode pass).
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
void vChkVDec(UINT32 u4InstID)
{
    char strMessage[512];

#ifdef VDEC_BREAK_EN
    if (!fgBreakVDec(u4InstID))
    {
        printk("VDEC Break Time Out\n");
    }
#endif
    /* Dispatch the codec-specific end-of-decode handler */
    switch (_u4CodecVer[u4InstID])
    {
    case VDEC_RM:
        vRM_VDecDecEndProc(u4InstID);
        break;
    case VDEC_H265:
        vH265DecEndProc(u4InstID, jiffies);
        break;
    case VDEC_H264:
        vH264DecEndProc(u4InstID);
        break;
    case VDEC_WMV:
        if (_u4VprErr[u4InstID] == END_OF_FILE)
        {
            /* end of stream: record the final compare result */
            msleep(1000);
            printk("=====>end of file. \n");
            sprintf(strMessage," Compare Finish==> %s Pic count to [%d] \n",_bFileStr1[u4InstID][0], _u4FileCnt[u4InstID] - 1);
            msleep(1000);
            strcpy(_tFileListRecInfo[u4InstID].bFileName,_FileList_Rec[u4InstID]);
            fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            _u4VerBitCount[u4InstID] = 0xffffffff;
        }
        else if (_rWMVSPS[u4InstID].fgXintra8)
        {
            /* unsupported Xintra8 stream: stop and record the result */
            printk("=====>fgxintra8. \n");
            sprintf(strMessage," Compare Finish==> Pic count to [%d] \n", _u4FileCnt[u4InstID] - 1);
            strcpy(_tFileListRecInfo[u4InstID].bFileName,_FileList_Rec[u4InstID]);
            fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
            _u4VerBitCount[u4InstID] = 0xffffffff;
        }
        else
        {
            vWMVDecEndProc(u4InstID);
        }
        break;
    case VDEC_VP6:
        vVerVP6DecEndProc(u4InstID);
        _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;//DEC_NORM_DEC_END;
        break;
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
    case VDEC_VP8:
        vVerVP8DecEndProc(u4InstID);
        _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;//DEC_NORM_DEC_END;
        break;
#endif
    case VDEC_AVS:
        vVerAVSDecEndProc(u4InstID);
        _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;//DEC_NORM_DEC_END;
        break;
    default:
        /* remaining codecs fall through to the MPEG handler */
        vMPEGDecEndProc(u4InstID);
        break;
    }
#ifdef REDEC
    if (_u4ReDecCnt[u4InstID] == 0)
#endif
    {
        _u4PicCnt[u4InstID] ++;
    }
#ifdef REDEC
    else
    {
        /* re-decode pass: log the repeated picture number instead */
        sprintf(strMessage,"[%d], ", _u4PicCnt[u4InstID]);
        strcpy(_tFileListRecInfo[u4InstID].bFileName,_FileList_Rec[u4InstID]);
        fgWrMsg2PC(strMessage,strlen(strMessage),8,&_tFileListRecInfo[u4InstID]);
    }
#endif
}
// DIVX3 suffix check: after an I-VOP, measure the bit gap between the
// current barrel-shifter position and the recorded start of the next
// picture; if at least 17 bits remain, peek the stream and latch bit 15
// as the rounding-switch flag for verification.
void vVerifyDx3SufxChk(UINT32 u4InstID)
{
    UINT32 u4ByteOfs;
    UINT32 u4BitPos;
    UINT32 u4GapBits;
    UINT32 u4NextPicBits;

    if (_ucVopCdTp[u4InstID] != VCT_I)
    {
        return;
    }
    /* absolute bit position of the read pointer inside the VFIFO */
    u4ByteOfs = u4VDEC_HAL_MPEG_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4BitPos);
    u4BitPos += (u4ByteOfs * 8);
    /* wrap the recorded next-picture offset into the ring FIFO */
    if (_u4Divx3SetPos[u4InstID] >= V_FIFO_SZ)
    {
        _u4Divx3SetPos[u4InstID] -= V_FIFO_SZ;
    }
    u4NextPicBits = _u4Divx3SetPos[u4InstID] * 8;
    u4GapBits = (u4NextPicBits >= u4BitPos)
        ? (u4NextPicBits - u4BitPos)
        : ((u4NextPicBits + (V_FIFO_SZ * 8)) - u4BitPos);
    _fgVerSwitchRounding[u4InstID] = FALSE;
    if (u4GapBits >= 17)
    {
        _fgVerSwitchRounding[u4InstID] = (u4VDEC_HAL_MPEG_ShiftGetBitStream(_u4BSID[u4InstID], u4InstID, 0) >> 15) & 0x1;
    }
}
// Compute DIVX3 slice boundary rows for the picture.
// ucFrameMode encodes the slice count as (ucFrameMode - 22); mode 22
// means no slicing.  The first MB row of each slice is recorded in
// ucSliceBoundary[0..4]; boundaries beyond the fifth are dropped.
// NOTE(review): memset clears 5*sizeof(UINT32) bytes while the field is
// named ucSliceBoundary -- verify the declared element type is UINT32,
// otherwise this over-/under-clears the array.
void vSetDx3SliceBoundary(UINT32 u4InstID, VDEC_INFO_MPEG_DEC_PRM_T *prVDecMPEGDecPrm)
{
    UINT32 u4MbH = prVDecMPEGDecPrm->u4DecH >> 4;   // picture height in MB rows
    UINT32 u4SliceSize = 0;                         // MB rows per slice
    UINT32 i = 0;
    UINT32 j = 0;                                   // boundaries recorded so far
    memset(prVDecMPEGDecPrm->rPicLayer.rMp4DecPrm.rDep.rDx3DecPrm.ucSliceBoundary, 0, 5*sizeof(UINT32));
    if (prVDecMPEGDecPrm->rPicLayer.rMp4DecPrm.rDep.rDx3DecPrm.ucFrameMode != 22)
    {
        // NOTE(review): for ucFrameMode < 22 the divisor is negative and the
        // unsigned division yields 0, so the loop below is skipped -- confirm
        // such modes cannot reach this point.
        u4SliceSize = u4MbH / (prVDecMPEGDecPrm->rPicLayer.rMp4DecPrm.rDep.rDx3DecPrm.ucFrameMode - 22);
        if (u4SliceSize != 0)
        {
            for (i=0; i<u4MbH; i++)
            {
                // every u4SliceSize-th row starts a new slice
                if (!(i%u4SliceSize))
                {
                    if (j < 5)
                    {
                        prVDecMPEGDecPrm->rPicLayer.rMp4DecPrm.rDep.rDx3DecPrm.ucSliceBoundary[j] = i;
                    }
                    j++;
                }
            }
        }
    }
}
// *********************************************************************
// Function : void vVerifyFlushAllSetData(UINT32 u4InstID)
// Description : Invalidate every cached H.264 parameter set for the
//               instance (32 SPS slots, 256 PPS slots).
// Parameter : u4InstID - decoder instance ID
// Return : None
// *********************************************************************
void vVerifyFlushAllSetData(UINT32 u4InstID)
{
    UINT32 u4Idx;

    u4Idx = 32;
    while (u4Idx-- > 0)
    {
        _rH264SPS[u4InstID][u4Idx].fgSPSValid = FALSE;
    }
    u4Idx = 256;
    while (u4Idx-- > 0)
    {
        _rH264PPS[u4InstID][u4Idx].fgPPSValid = FALSE;
    }
}
// *********************************************************************
// Function : void vVerifyFlushBufInfo(UINT32 u4InstID)
// Description : flush DPB info
// Parameter None
// Return : None
// *********************************************************************
/* Reset the decoded-picture-buffer bookkeeping for this instance:
 * clears the H.264 max long-term frame index, zeroes the decode-order
 * counters of all 25 H.264/H.265 frame-buffer slots (and clears each
 * slot via vVerifyClrFBufInfo), and empties all 6 reference lists. */
void vVerifyFlushBufInfo(UINT32 u4InstID)
{
    UINT32 u4Idx;

    /* 0xffffffff == "no long-term reference index in use". */
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.u4MaxLongTermFrameIdx = 0xffffffff;
    for (u4Idx = 0; u4Idx < 25; u4Idx++)
    {
        _ptFBufInfo[u4InstID][u4Idx].u4DecOrder = 0;
        _ptH265FBufInfo[u4InstID][u4Idx].u4DecOrder = 0;
        vVerifyClrFBufInfo(u4InstID, u4Idx);
    }
    for (u4Idx = 0; u4Idx < 6; u4Idx++)
    {
        _ptRefPicList[u4InstID][u4Idx].u4RefPicCnt = 0;
    }
}
extern void Margin_padding(UINT32 Ptr_output_Y, UINT32 Ptr_output_C, UINT32 PIC_SIZE_Y );
// *********************************************************************
// Function : void vH265VDecEnd(UINT32 u4InstID)
// Description : VDec complete related setting
// Parameter : None
// Return : None
// *********************************************************************
/* Post-decode handling for one H.265/HEVC picture: reads back the CRC and
 * VLD error status, advances the bitstream read pointer, optionally dumps
 * the picture bitstream / offset info (build-time options), runs the
 * golden/CRC comparison inside the configured picture range, dumps the YUV
 * of error pictures, and decides when verification should stop
 * (_u4VerBitCount set to 0xFFFFFFFF). */
void vH265VDecEnd(UINT32 u4InstID)
{
    UINT32 u4ErrType = 0;
    UINT32 u4Bits = 0;
    UINT32 u4CRC[8] = {0};
    int isFail = 0;
    // Get CRC and accumulated VLD error status for the finished picture
    vVDEC_HAL_H265_VDec_GetYCbCrCRC(u4InstID, u4CRC);
    u4ErrType = u4VDEC_HAL_H265_GetErrMsg(u4InstID);
    //u4ErrType = 1; //test for error bitstream no golden compare
    if (u4ErrType != 0){
        printk("[INFO] Error detected!! HEVCVLD_57: 0x%08X; HEVCVLD_58: 0x%08X\n", u4VDecReadHEVCVLD(u4InstID, RO_HEVLD_ERR_TYPE), u4VDecReadHEVCVLD(u4InstID, RO_HEVLD_ERR_ACCUMULATOR));
    }
    // update next picture start addr (byte offset into the video FIFO)
    _u4CurrPicStartAddr[1] = u4VDEC_HAL_H265_ReadRdPtr(_u4BSID[u4InstID], u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
#ifdef HEVC_DUMP_PIC_BITSTREAM
    UCHAR pcFilename_bitstream[200] = {0};
    UCHAR pcFilePrefix[200] = {0};
    /* FIX: build the dump filename via a separate prefix buffer. The
     * original code passed pcFilename_bitstream both as the sprintf()
     * destination and as its "%s" source argument — sprintf's arguments
     * are restrict-qualified, so overlapping them is undefined behavior. */
    strncpy(pcFilePrefix, _bFileStr1[u4InstID][1], (strlen(_bFileStr1[u4InstID][1]) - 4));
    sprintf(pcFilename_bitstream, "%s_bitstream_f%d.dat", pcFilePrefix, _u4PicCnt[u4InstID]);
    if (_testCnt>_u4CurrPicStartAddr[1]){ //ring FIFO wrapped: dump tail, then head
        fgWrData2PC(_pucVFifo[u4InstID]+_testCnt, V_FIFO_SZ -_testCnt+1, 7, pcFilename_bitstream);
        fgWrData2PC(_pucVFifo[u4InstID], _u4CurrPicStartAddr[1], 7, pcFilename_bitstream);
    }else{
        fgWrData2PC(_pucVFifo[u4InstID]+_testCnt, _u4CurrPicStartAddr[1]-_testCnt+1, 7, pcFilename_bitstream);
    }
#endif
#ifdef HEVC_DUMP_BITSTREAM_INFO
    UCHAR pcFilename_info[200] = {0};
    UCHAR pcInfo_data[200] = {0};
    /* Count ring-FIFO wrap-arounds so the logged absolute offset stays monotonic. */
    if (_u4CurrPicStartAddr[1]<_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4PreReadPtr){
        _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4ReadPtrOffset++;
    }
    strncpy (pcFilename_info , _bFileStr1[u4InstID][1], (strlen(_bFileStr1[u4InstID][1]) -26));
    strcat (pcFilename_info, "_bitstream.info");
    sprintf(pcInfo_data, "0x%x\n", _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4ReadPtrOffset*V_FIFO_SZ+_u4CurrPicStartAddr[1] );
    fgWrData2PC(pcInfo_data, strlen(pcInfo_data), 7, pcFilename_info);
    _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH265DecPrm.u4PreReadPtr = _u4CurrPicStartAddr[1];
#endif
    if ( _u4PicCnt[u4InstID] >= _u4StartCompPicNum[u4InstID] ) // test compare range
    //if ( _u4PicCnt[u4InstID] >= 244 ) // debug test compare range
    {
        if ( u4ErrType== 0 ){
#ifdef HEVC_CRC_CMP
            /* Cheap CRC check first; fall back to the full golden compare
             * only when the CRC mismatches. */
            isFail = vH265CRCComparison(u4InstID, u4CRC);
            if (isFail) {
#endif
            /* Pad picture margins, then compare decoded pixels/MV data
             * against the golden reference. */
            Margin_padding(_ptH265CurrFBufInfo[u4InstID]->u4YStartAddr,
                _ptH265CurrFBufInfo[u4InstID]->u4YStartAddr +_ptH265CurrFBufInfo[u4InstID]->u4CAddrOffset,
                _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize );
            isFail = vH265GoldenComparison( u4InstID, _u4PicCnt[u4InstID],
                _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize,
                _ptH265CurrFBufInfo[u4InstID]->u4YStartAddr,
                _ptH265CurrFBufInfo[u4InstID]->u4YStartAddr +_ptH265CurrFBufInfo[u4InstID]->u4CAddrOffset ,
                _ptH265CurrFBufInfo[u4InstID]->u4DramMvSize, 1 ,
                _ptH265CurrFBufInfo[u4InstID]->u4YLenStartAddr,
                _ptH265CurrFBufInfo[u4InstID]->u4CLenStartAddr,
                _ptH265CurrFBufInfo[u4InstID]->u4UFOLenYsize,
                _ptH265CurrFBufInfo[u4InstID]->u4UFOLenCsize);
#ifdef HEVC_CRC_CMP
            }
#endif
        }
        /* Dump registers on the first failure, or on the configured picture. */
        if ( isFail ) {
            vVDEC_HAL_H265_VDec_DumpReg(u4InstID, isFail);
            _u4VerBitCount[u4InstID] = 0xFFFFFFFF;   /* stop verification */
        } else if (_u4PicCnt[u4InstID] == _u4DumpRegPicNum[u4InstID]){
            vVDEC_HAL_H265_VDec_DumpReg(u4InstID, 1);
        }
    }
    vH265RingFIFO_read(u4InstID, 1);
#ifdef HEVC_VP_MODE
    isFail = vVDEC_HAL_H265_VDec_VPmode(u4InstID);
#else
    _u4CurrPicStartAddr[1] = 0;
#endif
    // Error bitstream: dump the decoded (erroneous) YUV for offline analysis
    if ( u4ErrType!= 0 ){
        char file_name[200] = {0};
        char bitstream_name[200] = {0};
        VDEC_INFO_VERIFY_FILE_INFO_T rReadFileInfo;
        //dump error YUV
        printk("[INFO] Error bitstream!! dump YUV; Pic# %d\n", _u4PicCnt[u4InstID]);
        vH265DumpYUV(u4InstID, _ptH265CurrFBufInfo[u4InstID]->u4YStartAddr,
            _ptH265CurrFBufInfo[u4InstID]->u4YStartAddr +_ptH265CurrFBufInfo[u4InstID]->u4CAddrOffset,
            _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize, "er");
#if 0
        //dump golden YUV
        strncpy (bitstream_name , _bFileStr1[u4InstID][1], (strlen(_bFileStr1[u4InstID][1]) -21));
#ifdef HEVC_SDCARD_VFY
        sprintf(file_name, "%ssao_pat/frame_%d_Y.dat", bitstream_name, _u4PicCnt[u4InstID]); //SDcard
#else
        sprintf(file_name, "%ssao_pat\\frame_%d_Y.dat", bitstream_name, _u4PicCnt[u4InstID]); //USB
#endif
        memset ( _pucDumpYBuf[u4InstID] , 0 ,_ptH265CurrFBufInfo[u4InstID]->u4DramPicSize );
        rReadFileInfo.fgGetFileInfo = TRUE;
        rReadFileInfo.pucTargetAddr = _pucDumpYBuf[u4InstID];
        rReadFileInfo.u4FileOffset = 0;
        rReadFileInfo.u4TargetSz = _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize;
        if(!fgOpenHDDFile(u4InstID, file_name, "r+b", &rReadFileInfo)){
            printk("[ERROR] Miss file: %s!!!!!!!!!!!!!\n", file_name);
        }
#ifdef HEVC_SDCARD_VFY
        sprintf(file_name, "%ssao_pat/frame_%d_C.dat", bitstream_name, _u4PicCnt[u4InstID]); //SDcard
#else
        sprintf(file_name, "%ssao_pat\\frame_%d_C.dat", bitstream_name, _u4PicCnt[u4InstID]); //USB
#endif
        memset ( _pucDumpCBuf[u4InstID] , 0 ,_ptH265CurrFBufInfo[u4InstID]->u4DramPicSize>>1 );
        rReadFileInfo.fgGetFileInfo = TRUE;
        rReadFileInfo.pucTargetAddr = _pucDumpCBuf[u4InstID];
        rReadFileInfo.u4FileOffset = 0;
        rReadFileInfo.u4TargetSz = _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize>>1;
        if(!fgOpenHDDFile(u4InstID, file_name, "r+b", &rReadFileInfo)){
            printk("[ERROR] Miss file: %s!!!!!!!!!!!!!\n", file_name);
        }
        printk("[INFO] Dump Golden YUV; Pic# %d\n", _u4PicCnt[u4InstID]);
        vH265DumpYUV(u4InstID, _pucDumpYBuf[u4InstID],
            _pucDumpCBuf[u4InstID],
            _ptH265CurrFBufInfo[u4InstID]->u4DramPicSize, "gold");
#endif
        if ( isFail ) {
            vVDEC_HAL_H265_VDec_DumpReg(u4InstID, isFail);
            _u4VerBitCount[u4InstID] = 0xFFFFFFFF;
        } else if (_u4PicCnt[u4InstID] == _u4DumpRegPicNum[u4InstID]){
            vVDEC_HAL_H265_VDec_DumpReg(u4InstID, 1);
        }
    }
    /* Single-picture runs (start == dump == end) terminate immediately. */
    if (_u4PicCnt[u4InstID] == _u4DumpRegPicNum[u4InstID] && _u4StartCompPicNum[u4InstID]==_u4DumpRegPicNum[u4InstID] && _u4EndCompPicNum[u4InstID]==_u4StartCompPicNum[u4InstID] ){
        _u4VerBitCount[u4InstID] = 0xFFFFFFFF;
    }
    if ( _u4PicCnt[u4InstID] >= _u4EndCompPicNum[u4InstID] ) { // stop verification
        _u4VerBitCount[u4InstID] = 0xFFFFFFFF;
    }
}
// *********************************************************************
// Function : void vH264VDecEnd(UINT32 u4InstID)
// Description : VDec complete related setting
// Parameter : None
// Return : None
// *********************************************************************
/* Post-decode handling for one H.264 picture: optional CRC / golden-dump
 * comparison (or letterbox detection only), REDEC/SW_RESET re-initialization
 * of the decoder HW and barrel shifter, H.264 reference-picture marking
 * (IDR / adaptive MMCO / sliding window), last-picture bookkeeping for POC
 * derivation, DPB status update and output, ring-VFIFO refill, and the
 * MVC base/dependent-view handshake. */
void vH264VDecEnd(UINT32 u4InstID)
{
    VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
    VDEC_INFO_H264_FBUF_INFO_T *tFBufInfo;
#if defined(SW_RESET) || defined(REDEC)
    VDEC_INFO_H264_INIT_PRM_T rH264VDecInitPrm;
    VDEC_INFO_H264_BS_INIT_PRM_T rH264BSInitPrm;
#endif
#ifdef SW_RESET
    UINT32 u4Bits;
#endif
    //UINT32 u4RegVal;
    //INT32 i;
#if VDEC_VER_COMPARE_CRC
#ifndef LETTERBOX_DETECTION_ONLY
    BOOL fgCRCPass = FALSE;
#endif
#endif
    /* FIX: the two settling delays originally straddled the declaration
     * list ("msleep(5); <declarations> msleep(5);"), which is a
     * declaration-after-statement violation in C90 and fails
     * -Wdeclaration-after-statement builds.  Both delays are now issued
     * after the declarations; the total 10 ms wait is unchanged. */
    msleep(5);
    msleep(5);
    tFBufInfo = _ptCurrFBufInfo[u4InstID];
    tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
#ifdef LETTERBOX_DETECTION_ONLY
    vCheckLBDResult(u4InstID);
#else
#if VDEC_VER_COMPARE_CRC
    fgCRCPass = vH264_CheckCRCResult(u4InstID);
    //printk("[H264] vH264VDecEnd, after CRC check \n");
    if (fgCRCPass == FALSE)
#endif
    {
        /*kevin add for debug*/
#ifdef VDEC_SIM_DUMP
        {
            fgWrData2PC(_pucRegisterLog[u4InstID],_u4RegisterLogLen[u4InstID],7,_RegFileName[u4InstID]);
            _u4RegisterLogLen[u4InstID] = 0;
        }
#endif
        /* CRC failed (or CRC compare disabled): write the decoded picture
         * back to the host for golden comparison. */
        vH264WrData2PC(u4InstID, _pucDumpYBuf[u4InstID], tFBufInfo->u4DramPicSize);
        printk("[H264] @@ do golden compare InstID %d, DramPicSize %d\n", u4InstID, tFBufInfo->u4DramPicSize);
        DBG_H264_PRINTF("[H264] @@ do golden compare InstID %d, DramPicSize %d\n", u4InstID, tFBufInfo->u4DramPicSize);
    }
    /* NOTE(review): "#if" here vs "#ifdef" above for the same VDEC_SIM_DUMP
     * symbol — if the macro is ever defined without a value, this "#if"
     * form becomes a preprocessor error.  Confirm the intended usage. */
#if VDEC_SIM_DUMP //kevin test yuv dump
    printk("[INFO][k] dump YUV; Pic# %d\n", _u4PicCnt[u4InstID]);
    vH264DumpYUV(u4InstID, _ptCurrFBufInfo[u4InstID]->u4YStartAddr,
        _ptCurrFBufInfo[u4InstID]->u4YStartAddr +_ptCurrFBufInfo[u4InstID]->u4CAddrOffset,
        _ptCurrFBufInfo[u4InstID]->u4DramPicSize);
    printk("[INFO][k] dump YUV end\n");
#endif
    /*
    u4RegVal = u4VDecReadVLD(u4InstID, 161*4);
    printk("[H264] golden mismatch, VLD_161 = 0x%08x\n", u4RegVal);
    DBG_H264_PRINTF("[H264] golden mismatch, VLD_161 = 0x%08x\n", u4RegVal);
    u4RegVal = u4VDecReadVLD(u4InstID, 251*4);
    printk("[H264] golden mismatch, VLD_251 = 0x%08x\n", u4RegVal);
    DBG_H264_PRINTF("[H264] golden mismatch, VLD_251 = 0x%08x\n", u4RegVal);
    for (i=378; i<398; i++)
    {
        u4RegVal = u4VDecReadMC(u4InstID, i*4);
        printk("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=470; i<473; i++)
    {
        u4RegVal = u4VDecReadMC(u4InstID, i*4);
        printk("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=479; i<486; i++)
    {
        u4RegVal = u4VDecReadMC(u4InstID, i*4);
        printk("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=527; i<529; i++)
    {
        u4RegVal = u4VDecReadMC(u4InstID, i*4);
        printk("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, MC_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=147; i<153; i++)
    {
        u4RegVal = u4VDecReadAVCMV(u4InstID, i*4);
        printk("[H264] golden mismatch, MV_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, MV_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=41; i<76; i++)
    {
        u4RegVal = u4VDecReadVLDTOP(u4InstID, i*4);
        printk("[H264] golden mismatch, VLD_TOP_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, VLD_TOP_%d = 0x%08x\n", i, u4RegVal);
    }
    for (i=64; i<81; i++)
    {
        u4RegVal = u4VDecReadPP(u4InstID, i*4);
        printk("[H264] golden mismatch, PP_%d = 0x%08x\n", i, u4RegVal);
        DBG_H264_PRINTF("[H264] golden mismatch, PP_%d = 0x%08x\n", i, u4RegVal);
    }
    */
#endif
#ifdef REDEC
    /* Re-decode mode: re-initialize the decoder HW and barrel shifter at
     * the saved VLD position, then go straight back to the decode state. */
    if(_u4ReDecCnt[u4InstID] > 0)
    {
#ifdef BARREL2_THREAD_SUPPORT
        VERIFY (x_sema_lock(_ahVDecEndSema[u4InstID], X_SEMA_OPTION_WAIT) == OSR_OK);
#endif
        _u4FileOffset[u4InstID] = _u4VLDPosByte[u4InstID];
        rH264VDecInitPrm.u4FGDatabase = (UINT32)_pucFGDatabase[u4InstID];
        rH264VDecInitPrm.u4CompModelValue = (UINT32)(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSEI->pucCompModelValue);
        rH264VDecInitPrm.u4FGSeedbase = (UINT32)_pucFGSeedbase[u4InstID];
        i4VDEC_HAL_H264_InitVDecHW(u4InstID,&rH264VDecInitPrm);
        rH264BSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
        rH264BSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
        rH264BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID] + _u4FileOffset[u4InstID];
#ifndef RING_VFIFO_SUPPORT
        rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
        // rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
        rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
        rH264BSInitPrm.u4PredSa = /*PHYSICAL*/((UINT32)_pucPredSa[u4InstID]);
        i4VDEC_HAL_H264_InitBarrelShifter(0, u4InstID, &rH264BSInitPrm);
        u4VDEC_HAL_H264_ShiftGetBitStream(0, u4InstID, _u4VLDPosBit[u4InstID]);
#ifdef BARREL2_THREAD_SUPPORT
        VERIFY (x_sema_unlock(_ahVDecEndSema[u4InstID]) == OSR_OK);
#endif
        _u4VLDPosByte[u4InstID] = u4VDEC_HAL_H264_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &_u4VLDPosBit[u4InstID]);
        _tVerDec[u4InstID].ucState = DEC_NORM_WAIT_TO_DEC;
        return;
    }
#endif
    /* Remember the size of the last decoded picture. */
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastPicW = tFBufInfo->u4W;
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastPicH = tFBufInfo->u4H;
    printk("[H264] @@ Last Pic W %d, H %d\n", tFBufInfo->u4W, tFBufInfo->u4H);
    DBG_H264_PRINTF("[H264] @@ Last Pic W %d, H %d\n", tFBufInfo->u4W, tFBufInfo->u4H);
    //Marking procedure (H.264 reference picture marking)
    if(fgIsRefPic(u4InstID))
    {
        if(fgIsIDRPic(u4InstID)) // IDR pic
        {
            if(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->fgLongTermReferenceFlag)
            {
                vVerifySetPicRefType(u4InstID, tVerMpvDecPrm->ucPicStruct, LREF_PIC);
                tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.u4MaxLongTermFrameIdx = 0;
                tFBufInfo->u4LongTermFrameIdx = 0;
                tFBufInfo->u4TFldLongTermFrameIdx = 0;
                tFBufInfo->u4BFldLongTermFrameIdx = 0;
            }
            else
            {
                tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.u4MaxLongTermFrameIdx = 0xffffffff;
                vVerifySetPicRefType(u4InstID, tVerMpvDecPrm->ucPicStruct, SREF_PIC);
            }
        }
        else // !IDR pic
        {
            if(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->fgAdaptiveRefPicMarkingModeFlag)
            {
                vVerifyAdapRefPicmarkingProce(u4InstID);
            }
            else
            {
                vVerifySlidingWindowProce(u4InstID);
            }
            /* MMCO may already have made the current pic long-term;
             * otherwise it becomes a short-term reference. */
            if(bGetPicRefType(u4InstID, tVerMpvDecPrm->ucPicStruct) != LREF_PIC)
            {
                vVerifySetPicRefType(u4InstID, tVerMpvDecPrm->ucPicStruct, SREF_PIC);
            }
        }
    }
    /* Save last-picture state used by next picture's frame_num / POC derivation. */
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.fgLastMmco5 = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->fgMmco5;
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.ucLastPicStruct = tVerMpvDecPrm->ucPicStruct;
    if(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.fgLastMmco5)
    {
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastFrameNum = 0;
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastFrameNumOffset = 0;
    }
    else
    {
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastFrameNum = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4FrameNum;
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastFrameNumOffset = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.i4FrmNumOffset;
    }
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastPOC = tFBufInfo->i4POC;
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastTFldPOC = tFBufInfo->i4TFldPOC;
    tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastBFldPOC = tFBufInfo->i4BFldPOC;
    if(fgIsRefPic(u4InstID))
    {
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastRefPOC = tFBufInfo->i4POC;
        // tVerMpvDecPrm->rLastInfo.iLastRefPOCCntLsb = tVerMpvDecPrm->prSliceHdr->i4PicOrderCntLsb;
        // tVerMpvDecPrm->rLastInfo.iLastRefPOCCntMsb = tVerMpvDecPrm->prSliceHdr->i4PicOrderCntMsb;
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastRefPOCLsb = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->i4PicOrderCntLsb;
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.i4LastRefPOCMsb = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->i4PicOrderCntMsb;
    }
#if VDEC_MVC_SUPPORT
    if(_ucMVCType[u4InstID] != 0)
    {
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.ucLastDpbId = tVerMpvDecPrm->ucDecFBufIdx;
        tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rLastInfo.u4LastViewId = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.rMvcExtInfo.u4ViewId;
        memcpy(&_rH264PrevFbInfo[u4InstID], _ptCurrFBufInfo[u4InstID], sizeof(VDEC_INFO_H264_FBUF_INFO_T));
    }
#endif
    /* Full frame decoded (or both fields done) -> mark DECODED and bump
     * the decode-order counter; otherwise only one field is in so far. */
    if(fgIsDecFlagSet(u4InstID, DEC_FLAG_CHG_FBUF))
    {
        _ptCurrFBufInfo[u4InstID]->eH264DpbStatus = H264_DPB_STATUS_DECODED;
        _ptCurrFBufInfo[u4InstID]->u4DecOrder = _u4TotalDecFrms[u4InstID];
        vChkOutputFBuf(u4InstID);
        _u4TotalDecFrms[u4InstID] ++;
    }
    else
    {
        _ptCurrFBufInfo[u4InstID]->eH264DpbStatus = H264_DPB_STATUS_FLD_DECODED;
    }
    vAdd2RefPicList(u4InstID);
#ifdef SW_RESET
    /* SW reset path: re-init decoder HW / barrel shifter at the current
     * read pointer (swapped between instances in MVC mode). */
#ifdef BARREL2_THREAD_SUPPORT
    VERIFY (x_sema_lock(_ahVDecEndSema[u4InstID], X_SEMA_OPTION_WAIT) == OSR_OK);
#endif
    _u4FileOffset[u4InstID] = u4VDEC_HAL_H264_ReadRdPtr(0, u4InstID, (UINT32)_pucVFifo[u4InstID], &u4Bits);
    rH264VDecInitPrm.u4FGDatabase = (UINT32)_pucFGDatabase[u4InstID];
    rH264VDecInitPrm.u4CompModelValue = (UINT32)(_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSEI->pucCompModelValue);
    rH264VDecInitPrm.u4FGSeedbase = (UINT32)_pucFGSeedbase[u4InstID];
    i4VDEC_HAL_H264_InitVDecHW(u4InstID,&rH264VDecInitPrm);
    rH264BSInitPrm.u4VFifoSa = (UINT32)_pucVFifo[u4InstID];
    rH264BSInitPrm.u4VFifoEa = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
    rH264BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID] + _u4FileOffset[u4InstID];
    if (_ucMVCType[u4InstID] > 0)
    {
        if (u4InstID == 0)
            rH264BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID] + _u4FileOffset[1];
        else if (u4InstID == 1)
            rH264BSInitPrm.u4VLDRdPtr = (UINT32)_pucVFifo[u4InstID] + _u4FileOffset[0];
    }
#ifndef RING_VFIFO_SUPPORT
    rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + V_FIFO_SZ;
#else
    // rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ*(0.5 + 0.5 *(_u4LoadBitstreamCnt[u4InstID]%2)));
    rH264BSInitPrm.u4VLDWrPtr = (UINT32)_pucVFifo[u4InstID] + ((_u4LoadBitstreamCnt[u4InstID]%2)?(V_FIFO_SZ):(V_FIFO_SZ>>1));
#endif
    rH264BSInitPrm.u4PredSa = /*PHYSICAL*/((UINT32)_pucPredSa[u4InstID]);
    i4VDEC_HAL_H264_InitBarrelShifter(_u4BSID[u4InstID], u4InstID, &rH264BSInitPrm);
    u4VDEC_HAL_H264_ShiftGetBitStream(_u4BSID[u4InstID], u4InstID, u4Bits);
#ifdef BARREL2_THREAD_SUPPORT
    VERIFY (x_sema_unlock(_ahVDecEndSema[u4InstID]) == OSR_OK);
#endif
#endif
#if 0
    if(dwGetBitStream(0) == 0x0000010B)
    {
        vFlushDPB(tVerMpvDecPrm, TRUE);
    }
#endif
#ifndef INTERGRATION_WITH_DEMUX
#ifdef RING_VFIFO_SUPPORT
    /* Ring-VFIFO refill: when the read pointer has crossed into the other
     * half of the FIFO, reload the now-free half from the input file. */
    if((_u4LoadBitstreamCnt[u4InstID]&0x1) && (rH264BSInitPrm.u4VLDRdPtr >
        ((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
    {
        _tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
        _tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID];
        _tInFileInfo[u4InstID].u4FileOffset = (V_FIFO_SZ * ((_u4LoadBitstreamCnt[u4InstID]+ 1)/2));
        _tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
        if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
        {
            fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
        }
#elif defined(IDE_READ_SUPPORT)
        fgOpenIdeFile(_bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#else
        fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
        _u4LoadBitstreamCnt[u4InstID]++;
    }
    else if((!(_u4LoadBitstreamCnt[u4InstID]&0x1)) && (rH264BSInitPrm.u4VLDRdPtr <
        ((UINT32)_pucVFifo[u4InstID] + (V_FIFO_SZ/2))))
    {
        _tInFileInfo[u4InstID].fgGetFileInfo = TRUE;
        _tInFileInfo[u4InstID].pucTargetAddr = _pucVFifo[u4InstID] + (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileOffset = ((V_FIFO_SZ * (_u4LoadBitstreamCnt[u4InstID]+ 1)) /2);//(V_FIFO_SZ * ((_u4LoadBitstreamCnt[u4InstID]+ 1)/2));
        _tInFileInfo[u4InstID].u4TargetSz = (V_FIFO_SZ/2);
        _tInFileInfo[u4InstID].u4FileLength = 0;
#ifdef SATA_HDD_READ_SUPPORT
        if(!fgOpenHDDFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]))
        {
            fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
        }
#elif defined(IDE_READ_SUPPORT)
        fgOpenIdeFile(_bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#else
        fgOpenPCFile(u4InstID, _bFileStr1[u4InstID][1],"r+b", &_tInFileInfo[u4InstID]);
#endif
        _u4LoadBitstreamCnt[u4InstID]++;
    }
    /* End of file (short read): append a 00 00 01 start code after the
     * loaded data, wrapping at the FIFO end, so the VLD terminates cleanly. */
    if((0 ==_tInFileInfo[u4InstID].u4RealGetBytes) ||
        (V_FIFO_SZ/2 != _tInFileInfo[u4InstID].u4RealGetBytes) )
    {
        //vAddStartCode2Dram(_pucVFifo+_tInFileInfo.u4FileLength);
        UCHAR *pbDramAddr = _tInFileInfo[u4InstID].pucTargetAddr+_tInFileInfo[u4InstID].u4RealGetBytes;
        pbDramAddr[0] = 0x00; pbDramAddr++;
        if((UINT32)(_pucVFifo[u4InstID] + V_FIFO_SZ) <= (UINT32)pbDramAddr)
        {
            pbDramAddr = _pucVFifo[u4InstID];
        }
        pbDramAddr[0] = 0x00; pbDramAddr++;
        if((UINT32)(_pucVFifo[u4InstID] + V_FIFO_SZ) <= (UINT32)pbDramAddr)
        {
            pbDramAddr = _pucVFifo[u4InstID];
        }
        pbDramAddr[0] = 0x01;
    }
#endif
#endif
    _tVerDec[u4InstID].ucState = DEC_NORM_VPARSER;
    /* MVC handshake: base view (inst 0) hands over to the dependent view
     * (inst 1) and, at end of stream, waits for it to finish. */
    if((u4InstID == 0) && _ucMVCType[0])
    {
        _fgMVCReady[0] = FALSE;
        _fgMVCReady[1] = TRUE;
#if 0
        if(_u4VerBitCount[u4InstID] == 0xffffffff)
        {
            while((_fgMVCReady[0] == FALSE) || (_fgMVCReady[1] == TRUE))
            {
                udelay(5);
            }
            udelay(10);
        }
#else
        if(_u4VerBitCount[u4InstID] == 0xffffffff)
        {
            while((_fgMVCReady[0] == FALSE) || (_fgMVCReady[1] == TRUE))
            {
                msleep(5);
            }
            msleep(10);
        }
#endif
    }
    if((u4InstID == 1) && _ucMVCType[1] && (_u4VerBitCount[1] != 0xffffffff))
    {
        _fgMVCReady[0] = TRUE;
        _fgMVCReady[1] = FALSE;
    }
    /*
    UINT32 i;
    for (i = 0; i < 17; i++) {
        printk("_ptFBufInfo[%d][%d] -> POC %d, TFPOC %d, BFPOC %d, (%d) \n", u4InstID, i, _ptFBufInfo[u4InstID][i].i4POC, _ptFBufInfo[u4InstID][i].i4TFldPOC, _ptFBufInfo[u4InstID][i].i4BFldPOC, _ptFBufInfo[u4InstID][i].ucPicStruct);
    }
    for (i = 0; i < 17; i++) {
        printk("_ptFBufInfo[%d][%d] -> top %d, bottom %d\n", u4InstID, i, _ptFBufInfo[u4InstID][i].ucTFldRefType, _ptFBufInfo[u4InstID][i].ucBFldRefType);
    }
    */
}
// *********************************************************************
// Function : void vVerifySetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct, ucPicStruct ucRefType)
// Description : set pic ref type
// Parameter : UCHAR ucPicType: pic struct : FRAME, TOP_FIELD, BOTTOM_FIELD
// UCHAR ucRefType: pic ref type: NREF_PIC, SREF_PIC, LREF_PIC
// Return : None
// *********************************************************************
/* Tag the current frame buffer's reference type.
 * ucPicStruct selects the field(s) this picture occupies (TOP_FIELD /
 * BOTTOM_FIELD bits, both set for FRAME); each selected field gets
 * ucRefType (NREF_PIC / SREF_PIC / LREF_PIC).  The frame-level type is
 * always set as well — the original FRAME-only restriction was
 * deliberately commented out upstream and this keeps that behavior. */
void vVerifySetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct, UCHAR ucRefType)
{
    VDEC_INFO_H264_FBUF_INFO_T *prFBuf = _ptCurrFBufInfo[u4InstID];

    if (ucPicStruct & TOP_FIELD)
    {
        prFBuf->ucTFldRefType = ucRefType;
    }
    if (ucPicStruct & BOTTOM_FIELD)
    {
        prFBuf->ucBFldRefType = ucRefType;
    }
    /* Unconditional, regardless of pic struct. */
    prFBuf->ucFBufRefType = ucRefType;
}
// *********************************************************************
// Function : void vVerifyAdapRefPicmarkingProce(UINT32 u4InstID)
// Description : marking the decoded ref pic with adaptive method
// Parameter : None
// Return : None
// *********************************************************************
// Walk the slice header's packed memory-management-control-operation (MMCO)
// list and apply each operation to the DPB bookkeeping.  Each list entry
// packs the MMCO opcode in its low 8 bits and the operand(s) in the upper
// bits; the list is terminated by an entry whose opcode is 0.
// NOTE(review): u4PicNumX is unsigned but is compared against the signed
// i4*PicNum fields below — presumably relies on the usual integer
// promotions matching; confirm against the parser that fills those fields.
void vVerifyAdapRefPicmarkingProce(UINT32 u4InstID)
{
VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
UINT32 u4PicNumX;
UINT32 u4Cnt;
INT32 i;
tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
u4Cnt = 0;
while(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] != 0)
{
switch(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt]&0xff)
{
case 0:
break;
// MMCO 1: mark a short-term reference picture (identified by picNumX,
// derived from difference_of_pic_nums_minus1) as unused for reference.
case 1:
// picNumX
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4DifferencOfPicNumsMinus1 = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 8;
//if(fgIsFrmPic(_u4VDecID))
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4PicNum - tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
#if 0
else if(tVerMpvDecPrm->ucPicStruct & TOP_FIELD)
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4TFldPicNum - tVerMpvDecPrm->prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
else if(tVerMpvDecPrm->ucPicStruct & BOTTOM_FIELD)
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4BFldPicNum - tVerMpvDecPrm->prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
#endif
// Scan the DPB for the matching short-term frame (or field) and clear it;
// "i = ucMaxFBufNum" is this file's idiom for breaking out of the loop.
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(fgIsFrmPic(u4InstID) && (_ptFBufInfo[u4InstID][i].i4PicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucFBufRefType == SREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, FRAME, i);
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
else if((!fgIsFrmPic(u4InstID)) &&
(((_ptFBufInfo[u4InstID][i].i4TFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == SREF_PIC))
|| ((_ptFBufInfo[u4InstID][i].i4BFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == SREF_PIC))))
{
if((_ptFBufInfo[u4InstID][i].i4TFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == SREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, TOP_FIELD, i);
}
if((_ptFBufInfo[u4InstID][i].i4BFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == SREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, BOTTOM_FIELD, i);
}
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
}
break;
// MMCO 2: mark a long-term reference picture (identified by
// long_term_pic_num) as unused for reference.
case 2:
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermPicNum = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 8;
u4PicNumX = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermPicNum;
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(fgIsFrmPic(u4InstID) && (_ptFBufInfo[u4InstID][i].i4LongTermPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucFBufRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, FRAME, i);
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
else if((!fgIsFrmPic(u4InstID)) &&
(((_ptFBufInfo[u4InstID][i].i4TFldLongTermPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
|| ((_ptFBufInfo[u4InstID][i].i4BFldLongTermPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))))
{
if((_ptFBufInfo[u4InstID][i].i4TFldLongTermPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, TOP_FIELD, i);
}
if((_ptFBufInfo[u4InstID][i].i4BFldLongTermPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, BOTTOM_FIELD, i);
}
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
}
break;
// MMCO 3: assign long_term_frame_idx to a short-term picture, converting
// it to long-term.  First any existing long-term picture holding that
// index (other than the current buffer) is cleared, then the short-term
// picture matching picNumX is promoted and its long-term indices/nums set.
case 3:
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4DifferencOfPicNumsMinus1 = (tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 8) & 0xff;
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 16;
u4PicNumX = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(i != tVerMpvDecPrm->ucDecFBufIdx)
{
if((_ptFBufInfo[u4InstID][i].ucFBufStatus == FRAME) && (_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucFBufRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, FRAME, i);
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
else if((_ptFBufInfo[u4InstID][i].ucFBufStatus != FRAME) &&
(((_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
|| ((_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))))
{
if((_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, TOP_FIELD, i);
}
if((_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, BOTTOM_FIELD, i);
}
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
}
}
// picNumX
//if(fgIsFrmPic(_u4VDecID))
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4PicNum - tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
#if 0
else if(tVerMpvDecPrm->ucPicStruct & TOP_FIELD)
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4TFldPicNum - tVerMpvDecPrm->prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
else if(tVerMpvDecPrm->ucPicStruct & BOTTOM_FIELD)
{
u4PicNumX = _ptCurrFBufInfo[u4InstID]->i4BFldPicNum - tVerMpvDecPrm->prSliceHdr->u4DifferencOfPicNumsMinus1 - 1;
}
#endif
// Promote the matching short-term (non-"non-existing") frame or field(s)
// to long-term with the new index.
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(fgIsFrmPic(u4InstID) && (_ptFBufInfo[u4InstID][i].i4PicNum == u4PicNumX)
&& (_ptFBufInfo[u4InstID][i].ucFBufRefType == SREF_PIC) && (!_ptFBufInfo[u4InstID][i].fgNonExisting))
{
_ptFBufInfo[u4InstID][i].ucFBufRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].ucTFldRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].ucBFldRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4LongTermPicNum = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4TFldLongTermPicNum = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4BFldLongTermPicNum = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
else if((!fgIsFrmPic(u4InstID)) &&
(((_ptFBufInfo[u4InstID][i].i4TFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == SREF_PIC) && (!_ptFBufInfo[u4InstID][i].fgNonExisting))
|| ((_ptFBufInfo[u4InstID][i].i4BFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == SREF_PIC) && (!_ptFBufInfo[u4InstID][i].fgNonExisting))))
{
if((_ptFBufInfo[u4InstID][i].i4TFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == SREF_PIC) && (!_ptFBufInfo[u4InstID][i].fgNonExisting))
{
_ptFBufInfo[u4InstID][i].ucTFldRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4TFldLongTermPicNum = (tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx<<1) + ((_tVerMpvDecPrm[u4InstID].ucPicStruct == TOP_FIELD)? 1: 0);
if(_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC)
{
// Both fields long-term -> the whole frame becomes long-term.
_ptFBufInfo[u4InstID][i].ucFBufRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4LongTermPicNum = _ptFBufInfo[u4InstID][i].u4LongTermFrameIdx;
}
}
if((_ptFBufInfo[u4InstID][i].i4BFldPicNum == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == SREF_PIC) && (!_ptFBufInfo[u4InstID][i].fgNonExisting))
{
_ptFBufInfo[u4InstID][i].ucBFldRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4BFldLongTermPicNum = (tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx<<1) + ((_tVerMpvDecPrm[u4InstID].ucPicStruct == BOTTOM_FIELD)? 1: 0);
if(_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC)
{
_ptFBufInfo[u4InstID][i].ucFBufRefType = LREF_PIC;
_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptFBufInfo[u4InstID][i].i4LongTermPicNum = _ptFBufInfo[u4InstID][i].u4LongTermFrameIdx;
}
}
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
}
break;
// MMCO 4: set max_long_term_frame_idx from max_long_term_frame_idx_plus1.
// Any long-term picture whose index is >= plus1 (i.e. > the new max) is
// marked unused; plus1 == 0 means "no long-term references allowed".
case 4:
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MaxLongTermFrameIdxPlus1 = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 8;
if(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MaxLongTermFrameIdxPlus1 == 0)
{
_tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.u4MaxLongTermFrameIdx = 0xffffffff;
u4PicNumX = 0;
}
else
{
u4PicNumX =(tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MaxLongTermFrameIdxPlus1);
}
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if((_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx >= u4PicNumX) &&
((_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC) || (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC)))
{
_ptFBufInfo[u4InstID][i].ucTFldRefType = NREF_PIC;
_ptFBufInfo[u4InstID][i].ucBFldRefType = NREF_PIC;
_ptFBufInfo[u4InstID][i].ucFBufRefType = NREF_PIC;
}
}
break;
// MMCO 5: mark all reference pictures unused, and reset the current
// picture's frame_num / pic nums to 0 and re-base its POC values.
case 5:
_ptCurrFBufInfo[u4InstID]->u4FrameNum = 0;
_ptCurrFBufInfo[u4InstID]->i4PicNum = 0;
_ptCurrFBufInfo[u4InstID]->i4TFldPicNum = 0;
_ptCurrFBufInfo[u4InstID]->i4BFldPicNum = 0;
if(tVerMpvDecPrm->ucPicStruct == TOP_FIELD)
{
_ptCurrFBufInfo[u4InstID]->i4TFldPOC = 0;
}
else if(tVerMpvDecPrm->ucPicStruct == BOTTOM_FIELD)
{
_ptCurrFBufInfo[u4InstID]->i4BFldPOC = 0;
}
else if(tVerMpvDecPrm->ucPicStruct == FRAME)
{
// Shift both field POCs down by the frame POC, then recompute the
// frame POC as the smaller of the two.
_ptCurrFBufInfo[u4InstID]->i4TFldPOC -= _ptCurrFBufInfo[u4InstID]->i4POC;
_ptCurrFBufInfo[u4InstID]->i4BFldPOC -= _ptCurrFBufInfo[u4InstID]->i4POC;
_ptCurrFBufInfo[u4InstID]->i4POC = (_ptCurrFBufInfo[u4InstID]->i4TFldPOC < _ptCurrFBufInfo[u4InstID]->i4BFldPOC)?
_ptCurrFBufInfo[u4InstID]->i4TFldPOC: _ptCurrFBufInfo[u4InstID]->i4BFldPOC;
}
vVerifyFlushBufRefInfo(u4InstID);
break;
// MMCO 6: assign long_term_frame_idx to the CURRENT picture.  Any other
// buffer already holding that index is cleared first; then the current
// frame (or the just-decoded field) is marked long-term.
case 6:
tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4MemoryManagementControlOperation[u4Cnt] >> 8;
u4PicNumX = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
for(i=0; i < tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(i != tVerMpvDecPrm->ucDecFBufIdx)
{
if((_ptFBufInfo[u4InstID][i].ucFBufStatus == FRAME) && (_ptFBufInfo[u4InstID][i].u4LongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucFBufRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, FRAME, i);
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
else if((_ptFBufInfo[u4InstID][i].ucFBufStatus != FRAME) &&
(((_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
|| ((_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))))
{
if((_ptFBufInfo[u4InstID][i].u4TFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucTFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, TOP_FIELD, i);
}
if((_ptFBufInfo[u4InstID][i].u4BFldLongTermFrameIdx == u4PicNumX) && (_ptFBufInfo[u4InstID][i].ucBFldRefType == LREF_PIC))
{
vVerifyClrPicRefInfo(u4InstID, BOTTOM_FIELD, i);
}
i = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; // break
}
}
}
if(fgIsFrmPic(u4InstID)) // 2 flds decoded
{
_ptCurrFBufInfo[u4InstID]->ucFBufRefType = LREF_PIC;
_ptCurrFBufInfo[u4InstID]->ucTFldRefType = LREF_PIC;
_ptCurrFBufInfo[u4InstID]->ucBFldRefType = LREF_PIC;
_ptCurrFBufInfo[u4InstID]->u4LongTermFrameIdx = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptCurrFBufInfo[u4InstID]->u4TFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptCurrFBufInfo[u4InstID]->u4BFldLongTermFrameIdx = tVerMpvDecPrm->SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
}
else if(tVerMpvDecPrm->ucPicStruct & TOP_FIELD) // 1 fld decoded
{
_ptCurrFBufInfo[u4InstID]->ucTFldRefType = LREF_PIC;
_ptCurrFBufInfo[u4InstID]->u4LongTermFrameIdx = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptCurrFBufInfo[u4InstID]->u4TFldLongTermFrameIdx = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
if(_ptCurrFBufInfo[u4InstID]->ucBFldRefType == LREF_PIC)
{
_ptCurrFBufInfo[u4InstID]->ucFBufRefType = LREF_PIC;
}
}
else if(tVerMpvDecPrm->ucPicStruct & BOTTOM_FIELD) // 1 fld decoded
{
_ptCurrFBufInfo[u4InstID]->ucBFldRefType = LREF_PIC;
_ptCurrFBufInfo[u4InstID]->u4LongTermFrameIdx = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
_ptCurrFBufInfo[u4InstID]->u4BFldLongTermFrameIdx = _tVerMpvDecPrm[u4InstID].SpecDecPrm.rVDecH264DecPrm.prSliceHdr->u4LongTermFrameIdx;
if(_ptCurrFBufInfo[u4InstID]->ucTFldRefType == LREF_PIC)
{
_ptCurrFBufInfo[u4InstID]->ucFBufRefType = LREF_PIC;
}
}
break;
}
u4Cnt ++;
}
}
// *********************************************************************
// Function : UCHAR bGetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct)
// Description : get pic ref type
// Parameter : UCHAR ucPicType: pic struct : FRAME, TOP_FIELD, BOTTOM_FIELD
// Return : UCHAR ucRefType: pic ref type: NREF_PIC, SREF_PIC, LREF_PIC
// *********************************************************************
UCHAR bGetPicRefType(UINT32 u4InstID, UCHAR ucPicStruct)
{
    // Select the reference type matching the requested picture structure:
    // TOP_FIELD / BOTTOM_FIELD report the per-field type, anything else
    // (i.e. FRAME) reports the frame-level type.
    UCHAR ucRefType;

    if (ucPicStruct == TOP_FIELD)
        ucRefType = _ptCurrFBufInfo[u4InstID]->ucTFldRefType;
    else if (ucPicStruct == BOTTOM_FIELD)
        ucRefType = _ptCurrFBufInfo[u4InstID]->ucBFldRefType;
    else /* FRAME */
        ucRefType = _ptCurrFBufInfo[u4InstID]->ucFBufRefType;

    return ucRefType;
}
// *********************************************************************
// Function : void vChkOutputFBuf(UINT32 u4InstID)
// Description : Output 1 frm buff in DPB when DPB full
// Parameter :
// Return : None
// *********************************************************************
void vChkOutputFBuf(UINT32 u4InstID)
{
// Drain the DPB: repeatedly ask for the frame buffer holding the minimum
// POC and mark it as output, until no further candidate is returned.
UINT32 u4MinPOCFBufIdx;
VDEC_INFO_DEC_PRM_T *tVerMpvDecPrm;
tVerMpvDecPrm = &_tVerMpvDecPrm[u4InstID];
// needs to output
do
{
// 0xFF from ucVDecGetMinPOCFBuf() means "no candidate found".
u4MinPOCFBufIdx = ucVDecGetMinPOCFBuf(u4InstID, tVerMpvDecPrm, TRUE);
if((u4MinPOCFBufIdx != 0xFF)
&& (_ptFBufInfo[u4InstID][u4MinPOCFBufIdx].eH264DpbStatus != H264_DPB_STATUS_EMPTY))
{
// Mark the buffer as output and emit its POC data in decode order.
_ptFBufInfo[u4InstID][u4MinPOCFBufIdx].eH264DpbStatus = H264_DPB_STATUS_OUTPUTTED;
vOutputPOCData(_ptFBufInfo[u4InstID][u4MinPOCFBufIdx].u4DecOrder);
#if 0
u4MinPOCFBufIdx = ucVDecGetMinPOCFBuf(u4InstID, tMpvDecPrm, TRUE);
if((u4MinPOCFBufIdx != 0xff)
&& (_ptFBufInfo[u4MinPOCFBufIdx].eH264DpbStatus != H264_DPB_STATUS_EMPTY))
{
//prH264DrvInfo->ucH264DpbOutputFbId = u4MinPOCFBufIdx;
}
else
{
u4MinPOCFBufIdx = 0xFF;
}
#endif
// check in next entry
}
else if((u4MinPOCFBufIdx != 0xFF)
&& (_ptFBufInfo[u4InstID][u4MinPOCFBufIdx].eH264DpbStatus == H264_DPB_STATUS_EMPTY))
{
// Candidate buffer is empty: nothing left to output, stop looping.
u4MinPOCFBufIdx = 0xff;
}
}while(u4MinPOCFBufIdx != 0xff);
#if 0
// Check if DPB full
iMinPOC = 0x7fffffff;
for(i=0; i<_tVerMpvDecPrm.SpecDecPrm.rVDecH264DecPrm.ucMaxFBufNum; i++)
{
if(_ptFBufInfo[i].ucFBufStatus == NO_PIC)
{
iMinPOC = 0x7fffffff;
u4MinPOCFBufIdx = i;
break;
}
// miew: need to take care of field empty
else if((iMinPOC > _ptFBufInfo[i].i4POC) && fgIsNonRefFBuf(i))
{
iMinPOC = _ptFBufInfo[i].i4POC;
u4MinPOCFBufIdx = i;
}
}
// No free FBuf, output 1 fbuf is needed
if(_ptFBufInfo[u4MinPOCFBufIdx].ucFBufStatus != NO_PIC)
{
vVerifyClrFBufInfo(u4MinPOCFBufIdx);
}
#endif
}
// *********************************************************************
// Function : void vAdd2RefPicList(UINT32 u4InstID)
// Description : Add the current pic info to Ref Pic List
// Parameter : None
// Return : None
// *********************************************************************
void vAdd2RefPicList(UINT32 u4InstID)
{
// Empty stub -- intentional no-op. NOTE(review): presumably reference
// list bookkeeping is handled by the marking code above; confirm this
// hook is genuinely unused before removing it.
}
// *********************************************************************
// Function : void vVerifyClrPicRefInfo(UINT32 u4InstID, UCHAR ucPicType, UCHAR ucFBufIdx)
// Description : Clear picture info in frame buffer
// Parameter : None
// Return : None
// *********************************************************************
void vVerifyClrPicRefInfo(UINT32 u4InstID, UCHAR ucPicType, UCHAR ucFBufIdx)
{
    // Unmark the field(s) selected by the ucPicType bit-mask
    // (TOP_FIELD / BOTTOM_FIELD) as references.
    if (ucPicType & TOP_FIELD)
        _ptFBufInfo[u4InstID][ucFBufIdx].ucTFldRefType = NREF_PIC;

    if (ucPicType & BOTTOM_FIELD)
        _ptFBufInfo[u4InstID][ucFBufIdx].ucBFldRefType = NREF_PIC;

    // The frame-level reference marking is always cleared, regardless of
    // which field(s) were requested (matches the original behaviour).
    _ptFBufInfo[u4InstID][ucFBufIdx].ucFBufRefType = NREF_PIC;
}
// *********************************************************************
// Function : BOOL fgIsH264VDecComplete(UINT32 u4InstID)
// Description : Check if VDec complete with interrupt
// Parameter : None
// Return : None
// *********************************************************************
BOOL fgIsH264VDecComplete(UINT32 u4InstID)
{
    // Decode is complete when the hardware reports completion (for MVC
    // dependent views, instance 0's flag counts too) AND the current
    // macroblock position has reached the last MB of the picture.
    UINT32 u4MbX;
    UINT32 u4MbY;
    UINT32 u4LastMbX;
    UINT32 u4LastMbY;

    if (!_fgVDecComplete[u4InstID] &&
        !((_ucMVCType[u4InstID] == 2) && _fgVDecComplete[0]))
    {
        return FALSE;
    }

    vVDEC_HAL_H264_GetMbxMby(u4InstID, &u4MbX, &u4MbY);

    u4LastMbX = (_ptCurrFBufInfo[u4InstID]->u4W >> 4) - 1;
    if (fgIsFrmPic(u4InstID))
    {
        // Frame picture: full height in 16-pixel MB rows.
        u4LastMbY = (_ptCurrFBufInfo[u4InstID]->u4H >> 4) - 1;
    }
    else
    {
        // Field picture: half the frame height (>> 5).
        u4LastMbY = (_ptCurrFBufInfo[u4InstID]->u4H >> 5) - 1;
    }

    return ((u4MbX >= u4LastMbX) && (u4MbY >= u4LastMbY)) ? TRUE : FALSE;
}
// *********************************************************************
// Function : void vReadH264ChkSumGolden(UINT32 u4InstID)
// Description : read the H.264 hardware checksum registers into the
//               per-instance dump buffer (golden reference data)
// Parameter : u4InstID : decoder instance id
// Return : None
// *********************************************************************
void vReadH264ChkSumGolden(UINT32 u4InstID)
{
vVDEC_HAL_H264_VDec_ReadCheckSum(u4InstID, &_u4DumpChksum[u4InstID][0]);
}
// *********************************************************************
// Function : void vReadWMVChkSumGolden(UINT32 u4InstID)
// Description : read the WMV hardware checksum registers into the
//               per-instance dump buffer (golden reference data)
// Parameter : u4InstID : decoder instance id
// Return : None
// *********************************************************************
void vReadWMVChkSumGolden(UINT32 u4InstID)
{
vVDEC_HAL_WMV_VDec_ReadCheckSum(u4InstID, &_u4DumpChksum[u4InstID][0]);
}
// *********************************************************************
// Function : void vReadMPEGChkSumGolden(UINT32 u4InstID)
// Description : read the MPEG hardware checksum registers into the
//               per-instance dump buffer (golden reference data)
// Parameter : u4InstID : decoder instance id
// Return : None
// *********************************************************************
void vReadMPEGChkSumGolden(UINT32 u4InstID)
{
vVDEC_HAL_MPEG_VDec_ReadCheckSum(u4InstID, &_u4DumpChksum[u4InstID][0]);
}
// Read the DV/AVC hardware checksum registers into the per-instance
// dump buffer, then zero-pad the remainder up to MAX_CHKSUM_NUM words.
void vReadDvChkSumGolden(UINT32 u4InstID)
{
UINT32 u4Temp,u4Cnt;
UINT32 u4VDecID;
UINT32 *pu4CheckSum;
u4VDecID = u4InstID;
pu4CheckSum = &_u4DumpChksum[u4InstID][0];
// u4Temp counts the number of words written so far.
u4Temp = 0;
*pu4CheckSum = u4VDecReadMC(u4VDecID, 0x5f4);
pu4CheckSum ++;
u4Temp ++;
*pu4CheckSum = u4VDecReadMC(u4VDecID, 0x5f8);
pu4CheckSum ++;
u4Temp ++;
*pu4CheckSum = u4VDecReadMC(u4VDecID, 0x608);
pu4CheckSum ++;
u4Temp ++;
*pu4CheckSum = u4VDecReadMC(u4VDecID, 0x60c);
pu4CheckSum ++;
u4Temp ++;
//MC 378~397
for(u4Cnt=378; u4Cnt<=397; u4Cnt++)
{
*pu4CheckSum = u4VDecReadMC(u4VDecID, (u4Cnt<<2));
pu4CheckSum ++;
u4Temp ++;
}
//AVC VLD 165~179
for(u4Cnt=165; u4Cnt<=179; u4Cnt++)
{
*pu4CheckSum = u4VDecReadAVCVLD(u4VDecID, (u4Cnt<<2));
pu4CheckSum ++;
u4Temp ++;
}
//MV 147~151
for(u4Cnt=147; u4Cnt<=151; u4Cnt++)
{
*pu4CheckSum = u4VDecReadAVCMV(u4VDecID, (u4Cnt<<2));
pu4CheckSum ++;
u4Temp ++;
}
//IP 212
*pu4CheckSum = u4VDecReadAVCMV(u4VDecID, (212 << 2));
pu4CheckSum ++;
u4Temp ++;
//IQ 235~239
// NOTE(review): the comment above says IQ 235~239, but the loop below
// reads registers 241~245 -- identical to the IS loop that follows.
// Confirm against the hardware register map whether 235~239 was intended.
for(u4Cnt=241; u4Cnt<=245; u4Cnt++)
{
*pu4CheckSum = u4VDecReadAVCMV(u4VDecID, (u4Cnt<<2));
pu4CheckSum ++;
u4Temp ++;
}
//IS 241~245
for(u4Cnt=241; u4Cnt<=245; u4Cnt++)
{
*pu4CheckSum = u4VDecReadAVCMV(u4VDecID, (u4Cnt<<2));
pu4CheckSum ++;
u4Temp ++;
}
// Zero-fill the unused tail of the checksum buffer.
while(u4Temp < MAX_CHKSUM_NUM)
{
*pu4CheckSum = 0;
pu4CheckSum ++;
u4Temp ++;
}
}
// Trigger the DV result comparison path: hand the per-instance Y dump
// buffer to vDvWrData2PC() (writes decoded data to the host PC side).
void vDvCompare(UINT32 u4InstID)
{
vDvWrData2PC(u4InstID, _pucDumpYBuf[u4InstID]);
}
#ifdef VPMODE
// Program the decoder hardware for one VP-mode decode pass: selects the
// decode/reference frame buffers, writes the MC buffer base addresses,
// configures optional post-processing, and sets the picture geometry.
// Returns HAL_HANDLE_OK on completion (no failure paths).
INT32 i4VPModeDecStart(UINT32 u4VDecID,VDEC_INFO_DEC_PRM_T *prDecPrm)
{
// The MPEG parameter block lives in a different place depending on the
// build configuration (verify/FPGA builds embed it in SpecDecPrm).
#if ((CONFIG_DRV_VERIFY_SUPPORT) ||(CONFIG_DRV_FPGA_BOARD) && (!VDEC_DRV_PARSER))
VDEC_INFO_MPEG_DEC_PRM_T *prMpegDecPrm = (VDEC_INFO_MPEG_DEC_PRM_T *) &(prDecPrm->SpecDecPrm.rVDecMPEGDecPrm);
#else
VDEC_INFO_MPEG_DEC_PRM_T *prMpegDecPrm = (VDEC_INFO_MPEG_DEC_PRM_T *)prDecPrm->prVDecCodecHalPrm;
#endif
//VDEC_INFO_MPEG_DEC_PRM_T *prMpegDecPrm = (VDEC_INFO_MPEG_DEC_PRM_T *)prDecPrm->prVDecCodecHalPrm;
#if VDEC_DDR3_SUPPORT
UINT32 u4DDR3_PicWdith = 0;
#endif
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
UINT32 u4PicW,u4PicH,u4WidthMB,u4HeightMB;
#endif
#if (CONFIG_DRV_LINUX_DATA_CONSISTENCY)
// HalFlushInvalidateDCache();
#endif
// ucDecFBufIdx == 2 means the scratch buffer (Pic2) was decoded into
// last time: promote it to a reference slot and flip dec/ref indices.
if (prDecPrm->ucDecFBufIdx == 2)
{
if(prMpegDecPrm->u4FRefBufIdx == 0)
{
// Cheng-Jung 20120322 [
prMpegDecPrm->rMpegFrameBufSa.u4Pic1YSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa;
prMpegDecPrm->rMpegFrameBufSa.u4Pic1CSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa;
//prMpegDecPrm->rMpegFrameBufSa.u4Pic0YSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa;
//prMpegDecPrm->rMpegFrameBufSa.u4Pic0CSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa;
// ]
prMpegDecPrm->u4FRefBufIdx = 1;
prDecPrm->ucDecFBufIdx = 0;
_u4DecBufIdx[u4VDecID] = 0;
_u4FRefBufIdx[u4VDecID] = 1;
}
else
{
// Cheng-Jung 20120322 [
prMpegDecPrm->rMpegFrameBufSa.u4Pic0YSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa;
prMpegDecPrm->rMpegFrameBufSa.u4Pic0CSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa;
//prMpegDecPrm->rMpegFrameBufSa.u4Pic1YSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa;
//prMpegDecPrm->rMpegFrameBufSa.u4Pic1CSa = prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa;
// ]
prMpegDecPrm->u4FRefBufIdx = 0;
prDecPrm->ucDecFBufIdx = 1;
_u4DecBufIdx[u4VDecID] = 1;
_u4FRefBufIdx[u4VDecID] = 0;
}
}
// vVDECSetDownScalerPrm(u4VDecID, &prDecPrm->rDownScalerPrm);
// Program the MC frame-buffer base addresses. Note the differing shift
// amounts: each register expects its address in units of its own
// alignment (512/256/128 bytes as noted per line).
vVDecWriteMC(u4VDecID, RW_MC_R1Y, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic0YSa)) >> 9); // div 512
vVDecWriteMC(u4VDecID, RW_MC_R1C, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic0CSa)) >> 8); // div 256
vVDecWriteMC(u4VDecID, RW_MC_R2Y, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic1YSa)) >> 9); // div 512
vVDecWriteMC(u4VDecID, RW_MC_R2C, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic1CSa)) >> 8); // div 256
vVDecWriteMC(u4VDecID, RW_MC_BY, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa)) >> 8); // div 256
vVDecWriteMC(u4VDecID, RW_MC_BC, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa)) >> 7); // div 128
vVDecWriteMC(u4VDecID, RW_MC_BY1, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic2YSa)) >> 9); // div 256
vVDecWriteMC(u4VDecID, RW_MC_BC1, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic2CSa)) >> 8); // div 128
vVDecWriteMC(u4VDecID, RW_MC_DIGY, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic0YSa)) >> 9); // div 512
vVDecWriteMC(u4VDecID, RW_MC_DIGC, (u4AbsDramANc(prMpegDecPrm->rMpegFrameBufSa.u4Pic0CSa)) >> 8); // div 256
vMCSetOutputBuf(u4VDecID, (UINT32)prDecPrm->ucDecFBufIdx, prMpegDecPrm->u4FRefBufIdx);
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8530)
#if VDEC_DDR3_SUPPORT
u4DDR3_PicWdith = (((prDecPrm->u4PicBW + 63) >> 6) << 2);
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
vVDecWriteMC(u4VDecID, RW_MC_PIC_W_MB, u4DDR3_PicWdith);
#else
vVDecWriteVLD(u4VDecID, RW_VLD_PIC_W_MB, u4DDR3_PicWdith);
#endif
#else
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
vVDecWriteMC(u4VDecID, RW_MC_PIC_W_MB, ((prDecPrm->u4PicBW + 15)>> 4));
vVDecWriteMC(u4VDecID, RW_MC_DDR3_EN, (u4VDecReadMC(u4VDecID, RW_MC_DDR3_EN) & 0xFFFFFFFE));
#else
vVDecWriteVLD(u4VDecID, RW_VLD_PIC_W_MB, ((prDecPrm->u4PicBW + 15)>> 4));
#endif
#endif
#endif
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8530)
vVDecWriteMC(0, 0x5E4, (u4VDecReadMC(0, 0x5E4) |(0x1 <<12)) );
//vVDecWriteMC(0, 0x660, (u4VDecReadMC(0, 0x660) |(0x80000000)) );
#ifndef VDEC_PIP_WITH_ONE_HW
vVDecWriteMC(1, 0x5E4, (u4VDecReadMC(1, 0x5E4) |(0x1 <<12)) );
//vVDecWriteMC(1, 0x660, (u4VDecReadMC(1, 0x660) |(0x80000000)) );
#endif
#endif
// Optional post-processing (deblocking) configuration.
if (prMpegDecPrm->rMpegPpInfo.fgPpEnable)
{
UINT32 u4MBqp = 0;
vVDecWriteMC(u4VDecID, RW_MC_PP_ENABLE, 1);
vVDecWriteMC(u4VDecID, RW_MC_PP_Y_ADDR, u4AbsDramANc(prMpegDecPrm->rMpegPpInfo.u4PpYBufSa) >> 9);
vVDecWriteMC(u4VDecID, RW_MC_PP_C_ADDR, u4AbsDramANc(prMpegDecPrm->rMpegPpInfo.u4PpCBufSa) >> 8);
vVDecWriteMC(u4VDecID, RW_MC_PP_MB_WIDTH, (prDecPrm->u4PicW + 15) >> 4);
// Pack the four 5-bit MB QP values into one register word.
u4MBqp = (prMpegDecPrm->rMpegPpInfo.au1MBqp[0] & 0x1F) | ((UINT32)(prMpegDecPrm->rMpegPpInfo.au1MBqp[1] & 0x1F) << 8) \
| ((UINT32)(prMpegDecPrm->rMpegPpInfo.au1MBqp[2] & 0x1F) << 16) | ((UINT32)(prMpegDecPrm->rMpegPpInfo.au1MBqp[3] & 0x1F) << 24);
vVDecWriteMC(u4VDecID, RW_MC_PP_QP_TYPE, u4MBqp);
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
//vVDecWriteMC(u4VDecID, RW_MC_PP_DBLK_MODE, DBLK_Y+DBLK_C);
vVDecWriteMC(u4VDecID, RW_MC_PP_DBLK_MODE, 0);
vVDecWriteMC(u4VDecID, RW_MC_PP_WB_BY_POST, 0); // wirte MC out and PP out
if (prMpegDecPrm->rMpegPpInfo.fgPpDemoEn)
{
vVDecWriteMC(u4VDecID, 0x658, ((u4VDecReadMC(u4VDecID, 0x658)&0xFFFFFFFE)|0x1)); // partial deblocking
vVDecWriteMC(u4VDecID, 0x65C, ((((prDecPrm->u4PicH + 15) >> 4) - 1) << 24) | ((((prDecPrm->u4PicW + 15) >> 5) - 1) << 8)); // XY end MB
}
else
{
vVDecWriteMC(u4VDecID, 0x658, (u4VDecReadMC(u4VDecID, 0x658)&0xFFFFFFFE));
}
#else
vVDecWriteMC(u4VDecID, RW_MC_PP_DBLK_MODE, DBLK_Y+DBLK_C);
//vVDecWriteMC(u4VDecID, RW_MC_PP_WB_BY_POST, 0); // wirte MC out and PP out
#endif
//vVDecWriteMC(u4VDecID, RW_MC_PP_QP_TYPE, 0x00000114);
//vVDecWriteMC(u4VDecID, RW_MC_PP_WB_BY_POST, 0); // wirte MC out and PP out
vVDecWriteMC(u4VDecID, RW_MC_PP_X_RANGE, ((prDecPrm->u4PicW + 15) >> 4) - 1);
// Y range halved for field pictures (ucPicStruct != 3).
vVDecWriteMC(u4VDecID, RW_MC_PP_Y_RANGE, (((prDecPrm->u4PicH + 15) >> 4) >> (prDecPrm->ucPicStruct != 3)) - 1);
//vVDecWriteAVCVLD(u4VDecID, RW_AVLD_SHDR_2, 0x6E00);
//vVDecWriteMC(u4VDecID, RW_MC_PP_MODE, H264_MODE);
}
else
{
vVDecWriteMC(u4VDecID, RW_MC_PP_ENABLE, 0);
}
#if !MPEG4_6582_SUPPORT
// MT6582 no longer need [
vVDecWriteVLD(u4VDecID, RW_VLD_PSUPCTR, ((prDecPrm->u4PicW * prDecPrm->u4PicH) >> 8) + 1);
vVDecWriteVLD(u4VDecID, RW_VLD_PARA, 0xC0500000); //Frame Picture + VP ???
// ]
#endif
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8580)
// vVDecWriteVLD(u4VDecID, RW_VLD_PICSZ, ((prDecPrm->u4PicH) << 16) + (prDecPrm->u4PicW >> 4));
// Round picture dimensions up to whole macroblocks.
u4PicW = ((prDecPrm->u4PicW +15)>>4)<<4;
u4PicH = ((prDecPrm->u4PicH + 15)>>4)<<4;
u4WidthMB = ((prDecPrm->u4PicW +15)>>4);
u4HeightMB = ((prDecPrm->u4PicH +15)>>4);
//vVDecWriteTopVLD(u4VDecID,RW_TOPVLD_WMV_PICSIZE,u4PicH<<16|u4PicW);
vVDecWriteVLDTOP(u4VDecID,RW_TOPVLD_WMV_PICSIZE,u4PicH<<16|u4PicW);
#if MPEG4_6582_SUPPORT
vVDecWriteVLDTOP(u4VDecID,RW_TOPVLD_WMV_PICSIZE_MB,(u4HeightMB-1)<<16|(u4WidthMB-1));
#else
vVDecWriteVLD(u4VDecID, RW_VLD_PICSZ, (prDecPrm->u4PicH)<<16);
vVDecWriteVLD(u4VDecID, RW_VLD_DIGMBSA, u4WidthMB);
// vVDecWriteTopVLD(u4VDecID,RW_TOPVLD_WMV_PICSIZE_MB,(((prDecPrm->u4PicW+ 15)>>4) -1) | ((((prDecPrm->u4PicH + 15)>>4) - 1)<<16));
vVDecWriteVLD(u4VDecID, RW_VLD_MBROWPRM, 0x1ff );
#endif // MPEG4_6582_SUPPORT
#else
vVDecWriteVLD(u4VDecID, RW_VLD_PICSZ, ((prDecPrm->u4PicH + 15) << 16) + (prDecPrm->u4PicW >> 4));
vVDecWriteVLD(u4VDecID, RW_VLD_MBROWPRM, ( ((prDecPrm->u4PicH + 15) >> 4 ) - 1) << 16);
#endif
// addr swap mode
vVDecWriteMC(u4VDecID, RW_MC_ADDRSWAP, prDecPrm->ucAddrSwapMode);
#if (CONFIG_CHIP_VER_CURR >= CONFIG_CHIP_VER_MT8550)
vVDecWriteMC(u4VDecID, RW_MC_NBM_CTRL,
((u4VDecReadMC(u4VDecID, RW_MC_NBM_CTRL) & 0xFFFFFFF8) |prDecPrm->ucAddrSwapMode));
#endif
/*
vVDecWriteMC(u4VDecID, RW_MC_HREFP, 0);
vVDecWriteMC(u4VDecID, RW_MC_DIGWD, ((prDecPrm->u4PicW + 15) >> 4));
vVDecWriteVLD(u4VDecID, RW_VLD_DIGMBSA, 0);
vVDecWriteVLD(u4VDecID, RW_VLD_SCALE, 0);//(random(3)<<24) |(random(3)<<16));
vVDecWriteVLD(u4VDecID, RW_VLD_DIGMBYOFF, 0);
*/
vVDecWriteMC(u4VDecID, RW_MC_UMV_PIC_WIDTH, prDecPrm->u4PicW);
vVDecWriteMC(u4VDecID, RW_MC_UMV_PIC_HEIGHT, prDecPrm->u4PicH);
#if MPEG4_6582_SUPPORT
vVDecWriteVLDTOP(u4VDecID, 0x90, 2);
vVDecWriteVLDTOP(u4VDecID, 0x90, 3);
#else
// Kick off decode: error-tolerant + hw picture decode + superpacket.
vVDecWriteVLD(u4VDecID, RW_VLD_PROC, VLD_RTERR + VLD_PDHW + VLD_PSUP +
(prDecPrm->u4PicW >> 4));
#endif
return HAL_HANDLE_OK;
}
#endif
void vAVCDumpChkSum(void)
{
    // Dump the AVC checksum registers of hardware instance 0 to the
    // kernel log. The register ranges mirror vReadDvChkSumGolden():
    // AVC VLD 165~179, AVC MV 147~151, MC 378~397 (all inclusive).
    UINT32 i, u4Val, u4VDecID = 0;

    printk("read AVCVLD \n");
    for (i = 165; i <= 179; i++)
    {
        u4Val = u4VDecReadAVCVLD(u4VDecID, i << 2);
        printk("%d (0x%x) = 0x%4x\n", i, (i << 2), u4Val);
    }
    printk("read AVC MC \n");
    for (i = 147; i <= 151; i++)
    {
        u4Val = u4VDecReadAVCMV(u4VDecID, i << 2);
        printk("%d (0x%x) = 0x%4x\n", i, (i << 2), u4Val);
    }
    printk("read AVC MC \n");
    // Fix: was "i < 397", which skipped register 397 even though the
    // golden checksum in vReadDvChkSumGolden() reads MC 378~397 inclusive.
    for (i = 378; i <= 397; i++)
    {
        u4Val = u4VDecReadMC(u4VDecID, i << 2);
        printk("%d (0x%x) = 0x%4x\n", i, (i << 2), u4Val);
    }
}
// Dump the snapshot of decoder registers stored in _pucRegister[] to the
// kernel log. With fgTAB set the "after decode" snapshot (instance 1) is
// printed, otherwise the "before decode" snapshot (instance 0).
// Note: the passed-in u4InstID is overwritten by fgTAB below.
void vPrintDumpReg(UINT32 u4InstID,UINT32 fgTAB)
{
UINT32 u4Val,u4Cnt;
// UINT32 u4InstID = 0;
// printk("Before Decode!\n");
#ifndef REG_LOG_NEW
if(fgTAB)
{
u4InstID = 1;
printk("After Decode!\n");
}
else
{
u4InstID = 0;
printk("Before Decode!\n");
}
// VLD register words (selected ranges).
for(u4Cnt = 33; u4Cnt < 40; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
for(u4Cnt = 42; u4Cnt < 71; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
for(u4Cnt = 112; u4Cnt < 131; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
for(u4Cnt = 131; u4Cnt < 192; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
for(u4Cnt = 192; u4Cnt < 256; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
// NOTE(review): the base offsets below (0x100, then decimal 1000, 1100,
// 1200, 2000) index UINT32 words into the register snapshot; the mix of
// hex and decimal looks suspicious -- confirm against the snapshot layout.
printk("MC register data \n");
for(u4Cnt = 0; u4Cnt < 700; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt + 0x100] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
printk("IS Settings\n");
for(u4Cnt = 128; u4Cnt < 192; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt + 1000] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
printk("IQ Settings\n");
for(u4Cnt = 320; u4Cnt < 384; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt + 1100] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
printk("IT Settings\n");
for(u4Cnt = 576; u4Cnt < 640; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt + 1200] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
for(u4Cnt = 0; u4Cnt < 65; u4Cnt++)
{
u4Val = ((UINT32 *)(_pucRegister[u4InstID]))[u4Cnt + 2000] ;
printk("%d (0x%x) = 0x%4x\n",u4Cnt, (u4Cnt<<2),u4Val);
}
printk("Dump end!\n");
#endif
}
#ifdef VDEC_SRAM
#define SRAMSZ 52*1024
#define SRAMWRTCMD (1 << 16)
// Write one 32-bit word into decoder SRAM through the MC indirect access
// window: 0x93C carries the control/address word, 0x940 the write data.
void vWriteSram(UINT32 u4InstID,UINT32 u4SramAddr,UINT32 u4SramValue)
{
UINT32 u4Temp;
#if 0
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)|(1<<0));//enable of sram and cs of sram
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(0x3fff<<12)));//set sram addr
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C) | (u4SramAddr<<12));//set sram addr
vVDecWriteMC(u4InstID,0x940,u4SramValue);//set sram data
u4Temp = (1<<4)|(1<<8);
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C) | u4Temp);//enable write
vVDecWriteMC(u4InstID,0x93C,0);//clear all
#endif
// Keep the control bits above the 14-bit address field and assert the
// write-command bit, then merge in the target SRAM address.
u4Temp = (u4VDecReadMC(u4InstID,0x93C) & 0xffffc000)|SRAMWRTCMD;
u4Temp |= u4SramAddr;
printk("<vdec>write addr = %d,value = 0x%x\n",u4Temp,u4SramValue);
vVDecWriteMC(u4InstID,0x93C,u4Temp);
vVDecWriteMC(u4InstID,0x940,u4SramValue);
}
// Read one 32-bit word from decoder SRAM through the MC indirect access
// window (0x93C = control/address with the write bit cleared, 0x940 = data).
UINT32 u4ReadSram(UINT32 u4InstID,UINT32 u4SramAddr)
{
UINT32 u4RegVal,u4Temp;
#if 0
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)|(1<<0)|(1<<4));//enable of sram and cs of sram
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(0x3fff<<12)));//set sram addr
vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C) | u4SramAddr<<12);//set sram addr
u4RegVal = u4VDecReadMC(u4InstID,0x944);
// vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(1<<0))&(~(1<<4)));//disable sram read
vVDecWriteMC(u4InstID,0x93C,0);
#endif
// Clear the write-command bit, preserve control bits above the 14-bit
// address field, then merge in the source SRAM address.
u4Temp = (u4VDecReadMC(u4InstID,0x93C) & (~(SRAMWRTCMD))) & 0xffffc000;
u4Temp |= u4SramAddr;
vVDecWriteMC(u4InstID,0x93C,u4Temp);
u4RegVal = u4VDecReadMC(u4InstID,0x940);
printk("<vdec>sram addr = %d,data is 0x%x\n",u4SramAddr,u4RegVal);
return u4RegVal;
}
void vDumpSram(UINT32 u4InstID)
{
    // Read the decoder SRAM (SRAMSZ bytes) word-by-word through the MC
    // indirect access window (0x93C control/address, 0x944 read data)
    // into _pucDumpSRAMBuf, then save the buffer to a numbered file.
    UINT32 u4Mcstart, u4SramAddr, u4RegVal, u4ReadSize, u4temp;
    UCHAR fpDumpFile[100] = "d:\\ChkFolder\\sram";
    UCHAR *fpRear = ".bin";
    FILE *pFile = NULL;

    // Compose "<base><per-instance file count>.bin".
    u4temp = strlen(fpDumpFile);
    u4temp += sprintf(fpDumpFile+u4temp,"%d",_u4FileCnt[u4InstID]);
    u4temp += sprintf(fpDumpFile+u4temp,"%s",fpRear);

    // Enable SRAM access (bit 0) and chip-select (bit 4).
    vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)|(1<<0));
    vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)|(1<<4));
    printk("<vdec>Before read SRAM MC 0x93C = 0x%x \n",u4VDecReadMC(u4InstID,0x93C));
    for(u4Mcstart = 0; u4Mcstart <13312; u4Mcstart++)
    {
        // Address occupies bits [25:12] of the control register.
        u4SramAddr = (u4Mcstart) << 12;
        vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(0x3fff<<12)));//clear sram addr field
        vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C) | u4SramAddr);//set sram addr
        u4RegVal = u4VDecReadMC(u4InstID,0x944);
        ((UINT32*)(_pucDumpSRAMBuf[u4InstID]))[u4Mcstart] = u4RegVal;
    }
    // Disable chip-select, then SRAM access.
    vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(1<<4)));
    vVDecWriteMC(u4InstID,0x93C,u4VDecReadMC(u4InstID,0x93C)&(~(1<<0)));
    printk("<vdec>After read SRAM MC 0x93C = 0x%x \n",u4VDecReadMC(u4InstID,0x93C));

    pFile = fopen(fpDumpFile,"w+");
    if(pFile == NULL)
    {
        // Fix: previously only logged the failure and then dereferenced
        // the NULL FILE* in fwrite()/fclose(). Bail out instead.
        printk("Create file error !\n");
        return;
    }
    u4ReadSize = fwrite ((char* )(_pucDumpSRAMBuf[u4InstID]), 1, SRAMSZ, pFile);
    printk("read file len = %d \n",u4ReadSize);
    fclose(pFile);
}
#endif
| gpl-2.0 |
FilipBE/qtextended | src/libraries/qtopiaphone/qphonecallmanager.cpp | 8 | 18280 | /****************************************************************************
**
** This file is part of the Qt Extended Opensource Package.
**
** Copyright (C) 2009 Trolltech ASA.
**
** Contact: Qt Extended Information (info@qtextended.org)
**
** This file may be used under the terms of the GNU General Public License
** version 2.0 as published by the Free Software Foundation and appearing
** in the file LICENSE.GPL included in the packaging of this file.
**
** Please review the following information to ensure GNU General Public
** Licensing requirements will be met:
** http://www.fsf.org/licensing/licenses/info/GPLv2.html.
**
**
****************************************************************************/
#include <qphonecallmanager.h>
#include "qphonecallmanager_p.h"
#include "qphonecall_p.h"
#include <qglobal.h>
#include <quuid.h>
#include <qtimer.h>
/*!
\class QPhoneCallManager
\inpublicgroup QtTelephonyModule
\brief The QPhoneCallManager class provides access to the phone's call list.
\ingroup telephony
The current list of calls in the system can be inspected with QPhoneCallManager,
and new outgoing calls can be created with create().
The calls() method returns a list of all active calls in the system. Active
calls are those that are dialing, incoming, connected, or on hold. Idle and
dropped calls are not included in the list.
New calls are added to the call list whenever a client application
creates a call with create(), or when an incoming call is detected
by the back-end telephony services. The newCall() signal is
emitted when either of these conditions occurs.
As calls change state, the statesChanged() signal is emitted,
providing a list of all calls affected by the state change.
More than one call may be affected by a state change in cases where
an entire group of calls are hung up, put on hold, or swapped with
an active call group.
State changes on individual calls can be tracked using QPhoneCall::connectStateChanged(),
but this does not allow group operations to be tracked atomically. The statesChanged()
signal allows atomic tracking of group operations.
The methods callTypes() and services() can be used to find the telephony
handlers in the system, so that higher-level applications can choose
the most appropriate type of call.
For an example that demonstrates the use of QPhoneCallManager, see
\l {Tutorial: Making Phone Calls}{Making Phone Calls}. Also see the
documentation for QPhoneCall.
\sa QPhoneCall
*/
// Shared back-end for all QPhoneCallManager instances: watches the value
// space for available call types and talks to the phone call providers
// over ipc (request channel out, response channel in).
QPhoneCallManagerPrivate::QPhoneCallManagerPrivate( QObject *parent )
: QObject( parent )
{
// Hook onto the value space to find the supported call types.
// The items under "/Communications/QPhoneCallProvider/CallTypes"
// are the service names, each containing a QStringList of supported
// call types. e.g:
//
// modem = { Voice, Data, Fax, Video }
// voip = { VoIP }
//
item = new QValueSpaceItem
( "/Communications/QPhoneCallProvider/CallTypes", this );
connect( item, SIGNAL(contentsChanged()), this, SLOT(contentsChanged()) );
// Hook onto the ipc channels for talking to the phone call providers.
request = new QtopiaIpcAdaptor
( "QPE/Communications/QPhoneCallProvider/Request", this );
response = new QtopiaIpcAdaptor
( "QPE/Communications/QPhoneCallProvider/Response", this );
QtopiaIpcAdaptor::connect
( response, MESSAGE(stateChanged(QString,QPhoneCall::State,QString,QString,QString,int)),
this, SLOT(callStateChanged(QString,QPhoneCall::State,QString,QString,QString,int)) );
QtopiaIpcAdaptor::connect
( response, MESSAGE(stateTransaction(QByteArray)),
this, SLOT(callStateTransaction(QByteArray)) );
// Load the initial call type list.
loadCallTypes();
// Send a message to the providers to load the current call list.
request->send( MESSAGE(listAllCalls()) );
}
QPhoneCallManagerPrivate::~QPhoneCallManagerPrivate()
{
// Nothing to do here at present.
}
// Process-wide singleton: one shared call list and one set of ipc hooks
// serve every QPhoneCallManager in the process.
Q_GLOBAL_STATIC(QPhoneCallManagerPrivate, callManager);
// Returns the shared private instance used by all QPhoneCallManager objects.
QPhoneCallManagerPrivate *QPhoneCallManagerPrivate::instance()
{
return callManager();
}
// Build a new call object for the given service/call type pair with a
// freshly generated unique identifier, wiring it into per-call state
// tracking before handing it to the caller.
QPhoneCall QPhoneCallManagerPrivate::create
( const QString& service, const QString& callType )
{
    QPhoneCallPrivate *callPriv = new QPhoneCallPrivate
        ( this, service, callType, QUuid::createUuid().toString() );
    connect( callPriv, SIGNAL(stateChanged(QPhoneCall)),
             this, SLOT(trackStateChanged(QPhoneCall)) );
    return QPhoneCall( callPriv );
}
// Handles a single-call state report from a provider. Only calls we have
// never seen before are registered here (known calls are ignored -- their
// updates arrive via callStateTransaction()); already-terminated states
// (>= HangupLocal) are also ignored.
void QPhoneCallManagerPrivate::callStateChanged
( const QString& identifier, QPhoneCall::State state,
const QString& number, const QString& service,
const QString& callType, int actions )
{
// Look for new calls from the phone call providers.
if ( state < QPhoneCall::HangupLocal ) {
QList<QPhoneCall>::Iterator it;
for ( it = calls.begin(); it != calls.end(); ++it ) {
if ( (*it).identifier() == identifier )
return; // We already know about this call.
}
// This is a new call that we didn't create ourselves.
QPhoneCallPrivate *priv = new QPhoneCallPrivate
( this, service, callType, identifier );
connect( priv, SIGNAL(stateChanged(QPhoneCall)),
this, SLOT(trackStateChanged(QPhoneCall)) );
priv->state = state;
priv->actions = (QPhoneCallImpl::Actions)actions;
priv->number = number;
if ( priv->state == QPhoneCall::Incoming ||
priv->state == QPhoneCall::Dialing ||
priv->state == QPhoneCall::Alerting ) {
// Incoming call, or call created by another process.
// Set the initial start time correctly.
priv->startTime = QDateTime::currentDateTime();
}
QPhoneCall call( priv );
calls.append( call );
// Announce the new call, then report its state as a one-element group.
emit newCall( call );
QList<QPhoneCall> list;
list.append( call );
emit statesChanged( list );
}
}
// Applies an atomic group state update from a provider. The serialized
// transaction holds a count followed by per-call records. All call states
// are updated first, and only then are the individual stateChanged(),
// newCall() and the group statesChanged() signals emitted, so observers
// always see a consistent snapshot of the whole group.
void QPhoneCallManagerPrivate::callStateTransaction
( const QByteArray& transaction )
{
int count, actions, dataPort;
QString identifier, number, service, callType;
QPhoneCall::State state;
QList<QPhoneCall> changedCalls;
QList<QPhoneCall> newCalls;
QList<QPhoneCall>::Iterator it;
QPhoneCall call;
QPhoneCallPrivate *priv;
// Process the transaction.
QDataStream stream( transaction );
stream >> count;
while ( count-- > 0 ) {
// Get the information about the next call in the transaction.
stream >> identifier;
stream >> state;
stream >> number;
stream >> service;
stream >> callType;
stream >> actions;
stream >> dataPort;
// Find the call.
call = QPhoneCall();
for ( it = calls.begin(); it != calls.end(); ++it ) {
if ( (*it).identifier() == identifier ) {
call = *it;
break;
}
}
// Existing or new call?
if ( !call.isNull() ) {
// Existing call - update its properties, but don't
// emit the stateChanged() signal just yet.
priv = call.d;
priv->state = state;
priv->actions = (QPhoneCallImpl::Actions)actions;
if ( priv->number.isEmpty() )
priv->number = number;
if ( dataPort != -1 && state == QPhoneCall::Connected )
priv->dataPort = dataPort;
changedCalls.append( call );
} else if ( state < QPhoneCall::HangupLocal ) {
// This is a new call that we didn't create ourselves.
priv = new QPhoneCallPrivate
( this, service, callType, identifier );
connect( priv, SIGNAL(stateChanged(QPhoneCall)),
this, SLOT(trackStateChanged(QPhoneCall)) );
priv->state = state;
priv->actions = (QPhoneCallImpl::Actions)actions;
priv->number = number;
if ( state == QPhoneCall::Connected )
priv->dataPort = dataPort;
call = QPhoneCall( priv );
calls.append( call );
newCalls.append( call );
}
}
// Emit the signals corresponding to the changes we just made.
for ( it = changedCalls.begin(); it != changedCalls.end(); ++it ) {
(*it).d->emitStateChanged();
}
for ( it = newCalls.begin(); it != newCalls.end(); ++it ) {
emit newCall( *it );
}
// Advertise the changes that were made during the transaction.
emit statesChanged( changedCalls + newCalls );
}
// Mirror per-call state changes into the managed call list: dropped
// calls leave the list, active calls not yet tracked join it.
void QPhoneCallManagerPrivate::trackStateChanged( const QPhoneCall& call )
{
    if ( call.dropped() ) {
        calls.removeAll( call );
        return;
    }
    if ( call.idle() || calls.contains( call ) )
        return;
    calls.append( call );
}
// Rebuild the call type list and the type -> services map from the value
// space, then schedule an abort for any tracked call whose service has
// disappeared.
void QPhoneCallManagerPrivate::loadCallTypes()
{
QStringList services = item->subPaths();
QStringList serviceTypes;
callTypes.clear();
callTypeMap.clear();
// Invert the value-space layout (service -> types) into type -> services.
foreach ( QString service, services ) {
serviceTypes = item->value( service ).toStringList();
foreach ( QString type, serviceTypes ) {
if ( !callTypes.contains( type ) ) {
callTypes += type;
callTypeMap[type] = QStringList( service );
} else {
callTypeMap[type] += service;
}
}
}
// Abort any calls that relate to services that are no longer active.
// We abort upon the next entry to the event loop to prevent the
// "calls" list from being side-effected while we are scanning it.
QList<QPhoneCall>::ConstIterator it;
QPhoneCallPrivate *priv;
for ( it = calls.begin(); it != calls.end(); ++it ) {
priv = (*it).d;
if ( !services.contains(priv->service) )
QTimer::singleShot(0, priv, SLOT(abortCall()));
}
}
// Value-space change handler: reload the call type map and notify clients.
void QPhoneCallManagerPrivate::contentsChanged()
{
loadCallTypes();
emit callTypesChanged();
}
/*!
Creates a new phone call manager and attaches it to \a parent.
*/
QPhoneCallManager::QPhoneCallManager( QObject *parent )
: QObject( parent )
{
d = QPhoneCallManagerPrivate::instance();
connect( d, SIGNAL(newCall(QPhoneCall)),
this, SIGNAL(newCall(QPhoneCall)) );
connect( d, SIGNAL(callTypesChanged()), this, SIGNAL(callTypesChanged()) );
connect( d, SIGNAL(statesChanged(QList<QPhoneCall>)),
this, SIGNAL(statesChanged(QList<QPhoneCall>)) );
}
/*!
    Destroys this phone call manager.
*/
QPhoneCallManager::~QPhoneCallManager()
{
    // Nothing to do here - the QPhoneCallManagerPrivate instance is shared
    // between all QPhoneCallManager instances, both current and future.
}
/*!
    Returns a list of all active calls in the system.  Active calls are
    those that are dialing, incoming, connected, or on hold.  Idle and
    dropped calls are not included in this list.

    New calls will be added to this list whenever a client application
    creates a call with create(), or when an incoming call is detected
    by the back-end telephony services.  The newCall() signal is
    emitted when either of these conditions occurs.

    As calls change state, the statesChanged() signal is emitted,
    providing a list of all calls affected by the state change.

    \sa create(), newCall(), statesChanged()
*/
QList<QPhoneCall> QPhoneCallManager::calls() const
{
    // The shared private instance maintains the list (see trackStateChanged()).
    return d->calls;
}
/*!
    Creates a call with the specified call \a type.  If more than one
    service supports \a type, the first one found is used.  Returns a
    null QPhoneCall if no services support \a type.

    The \a type is usually one of \c Voice, \c VoIP, \c Data, \c Fax,
    \c Video, etc.  Use callTypes() to obtain the complete list of
    supported call types.

    The new call starts in the \c Idle state, ready to dial, and does
    not appear in calls() until it is actually dialed.

    \sa callTypes()
*/
QPhoneCall QPhoneCallManager::create( const QString& type )
{
    // Delegate to the two-argument form using the first service that
    // advertises support for this call type.
    QStringList candidates = services( type );
    if ( candidates.isEmpty() )
        return QPhoneCall();
    return create( type, candidates.first() );
}
/*!
    Create a call with the specified call \a type on \a service.
    Returns a null QPhoneCall if the combination of \a type and
    \a service is invalid.

    The \a type is usually one of \c Voice, \c VoIP, \c Data, \c Fax,
    \c Video, etc.  Use callTypes() to get a complete list of
    call types that are supported by the system.

    The \a service is a telephony service name such as \c modem or
    \c voip.  Use services() to get a complete list of service names
    that are supported by the system.

    The new call will initially be in the "idle" state, ready to dial.
    It will not appear in the calls() list until it is actually dialed.

    \sa QTelephonyService, callTypes(), services()
*/
QPhoneCall QPhoneCallManager::create
        ( const QString& type, const QString& service )
{
    // QMap::value() yields an empty list for unknown types, so a single
    // lookup rejects both "unknown type" and "wrong service for type".
    if ( !d->callTypeMap.value( type ).contains( service ) )
        return QPhoneCall();

    // Arguments are deliberately reversed for the private factory.
    return d->create( service, type );
}
/*!
    Returns a list of all services that provide phone call functionality
    within the system.  The returned list will contain strings
    such as \c{modem}, \c{voip}, etc, indicating the name of the
    associated telephony service.

    \sa QTelephonyService, callTypes()
*/
QStringList QPhoneCallManager::services() const
{
    // Each valuespace subpath corresponds to one registered service.
    return d->item->subPaths();
}
/*!
    Returns a list of all services that provide phone call functionality
    for calls of \a type within the system.

    \sa QTelephonyService, callTypes()
*/
QStringList QPhoneCallManager::services( const QString& type ) const
{
    // QMap::value() returns a default-constructed (empty) list when the
    // type is absent, matching the explicit find()/end() logic.
    return d->callTypeMap.value( type );
}
/*!
    Returns a list of all call types that are supported by the system at present.

    The returned list will contain strings such as \c{Voice}, \c{VoIP},
    \c{Data}, etc, indicating the valid call types that can be
    created with create().  For example, the following can be used to
    determine if \c{VoIP} calls are possible:

    \code
    QPhoneCallManager mgr;
    if (mgr.callTypes().contains("VoIP"))
        ...
    \endcode

    Call types are added to this list whenever new telephony services
    are registered with the system, and removed when the telephony
    services are deregistered.  The callTypesChanged() signal can be
    used to track changes to the call type list.

    \sa create(), callTypesChanged(), services()
*/
QStringList QPhoneCallManager::callTypes() const
{
    // Cached by loadCallTypes() whenever the valuespace contents change.
    return d->callTypes;
}
/*!
    Returns a list of all call types that are supported by \a service.

    The returned list will contain strings such as \c{Voice}, \c{VoIP},
    \c{Data}, etc, indicating the valid call types that can be
    created with create() when \a service is supplied as its
    second parameter.

    For example, the following can be used to determine if the \c{modem}
    service can create \c{Data} calls, irrespective of whether other
    services can create \c{Data} calls:

    \code
    QPhoneCallManager mgr;
    if (mgr.callTypes("modem").contains("Data"))
        ...
    \endcode

    Call types are added to this list when the specified telephony
    \a service is registered, and removed when the specified telephony
    \a service is deregistered and there are no other services that
    provide the same call type.  The callTypesChanged() signal can be
    used to track changes to the call type list.

    \sa create(), callTypesChanged(), services()
*/
QStringList QPhoneCallManager::callTypes( const QString& service ) const
{
    // Read the service's advertised types straight from the valuespace.
    return d->item->value( service ).toStringList();
}
/*!
    Returns the phone call associated with modem identifier \a id.
    Returns a null QPhoneCall object if there are no phone calls currently
    associated with that modem identifier.

    This function is intended for use with GSM-style key sequences
    such as \c{1x SEND} and \c{2x SEND}, which affect a specific call.

    \sa QPhoneCall::modemIdentifier()
*/
QPhoneCall QPhoneCallManager::fromModemIdentifier( int id ) const
{
    // Linear scan is fine: the active call list is always small.
    for ( int index = 0; index < d->calls.count(); ++index ) {
        const QPhoneCall &candidate = d->calls.at( index );
        if ( candidate.modemIdentifier() == id )
            return candidate;
    }
    return QPhoneCall();
}
/*!
\fn void QPhoneCallManager::newCall( const QPhoneCall& call )
Provides notification that a new \a call has been added to the calls() list.
New calls are added to the call list whenever a client application
creates a call with create(), or when an incoming call is detected
by the back-end telephony services.
\sa calls(), create(), statesChanged()
*/
/*!
\fn void QPhoneCallManager::callTypesChanged()
Signal that is emitted when the list of call types that are supported
by the system changes, or if the services that implement the call types
changes. This is usually an indication that a new telephony handler
has been created, or an existing telephony handler has shut down.
\sa callTypes(), services()
*/
/*!
\fn void QPhoneCallManager::statesChanged( const QList<QPhoneCall>& calls )
Signal that is emitted when the states within \a calls change at once
during an atomic operation; e.g. swapping held and active calls.
State changes that affect only a single call will also be reported
via this signal.
This signal will be sent after the individual state changes have been
reported via newCall() and QPhoneCall::connectStateChanged().
\sa QPhoneCall::connectStateChanged(), newCall()
*/
| gpl-2.0 |
TeamLGOG/android_kernel_lge_d800 | drivers/net/wireless/wl1251/main.c | 8 | 33065 | /*
* This file is part of wl1251
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "wl1251.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
#include "cmd.h"
#include "event.h"
#include "tx.h"
#include "rx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "boot.h"
/* Unmask the chip interrupt line via the bus glue (SPI/SDIO if_ops). */
void wl1251_enable_interrupts(struct wl1251 *wl)
{
	wl->if_ops->enable_irq(wl);
}
/* Mask the chip interrupt line via the bus glue (SPI/SDIO if_ops). */
void wl1251_disable_interrupts(struct wl1251 *wl)
{
	wl->if_ops->disable_irq(wl);
}
/* Cut power to the chip; returns the bus glue's status code. */
static int wl1251_power_off(struct wl1251 *wl)
{
	return wl->if_ops->power(wl, false);
}
/* Apply power to the chip; returns the bus glue's status code. */
static int wl1251_power_on(struct wl1251 *wl)
{
	return wl->if_ops->power(wl, true);
}
/*
 * Load the firmware image from userspace into wl->fw / wl->fw_len.
 * The image must be a whole number of 32-bit words.  Returns 0 on
 * success or a negative errno; wl->fw is freed elsewhere on teardown.
 */
static int wl1251_fetch_firmware(struct wl1251 *wl)
{
	struct device *dev = wiphy_dev(wl->hw->wiphy);
	const struct firmware *fw_entry;
	int ret;

	ret = request_firmware(&fw_entry, WL1251_FW_NAME, dev);
	if (ret < 0) {
		wl1251_error("could not get firmware: %d", ret);
		return ret;
	}

	if (fw_entry->size % 4) {
		wl1251_error("firmware size is not multiple of 32 bits: %zu",
			     fw_entry->size);
		ret = -EILSEQ;
		goto out;
	}

	wl->fw_len = fw_entry->size;
	/* Firmware images are large; use vmalloc rather than kmalloc. */
	wl->fw = vmalloc(wl->fw_len);
	if (wl->fw == NULL) {
		wl1251_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw_entry->data, wl->fw_len);
	ret = 0;

out:
	release_firmware(fw_entry);
	return ret;
}
static int wl1251_fetch_nvs(struct wl1251 *wl)
{
const struct firmware *fw;
struct device *dev = wiphy_dev(wl->hw->wiphy);
int ret;
ret = request_firmware(&fw, WL1251_NVS_NAME, dev);
if (ret < 0) {
wl1251_error("could not get nvs file: %d", ret);
return ret;
}
if (fw->size % 4) {
wl1251_error("nvs size is not multiple of 32 bits: %zu",
fw->size);
ret = -EILSEQ;
goto out;
}
wl->nvs_len = fw->size;
wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL);
if (!wl->nvs) {
wl1251_error("could not allocate memory for the nvs file");
ret = -ENOMEM;
goto out;
}
ret = 0;
out:
release_firmware(fw);
return ret;
}
/*
 * Kick the chip out of ELP (extreme low power) by writing the wake-up
 * bit, then read back the control register to confirm the WLAN core
 * reports ready.  Only warns on failure; callers proceed regardless.
 */
static void wl1251_fw_wakeup(struct wl1251 *wl)
{
	u32 elp_reg;
	elp_reg = ELPCTRL_WAKE_UP;
	wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
	elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
	if (!(elp_reg & ELPCTRL_WLAN_READY))
		wl1251_warning("WLAN not ready");
}
/*
 * Power the chip on, identify it, and make sure the firmware and NVS
 * images are cached in memory.  Returns 0 on success or a negative
 * errno.  NOTE(review): on failure the chip is left powered; the only
 * caller (wl1251_op_start) powers it off again when ret < 0.
 */
static int wl1251_chip_wakeup(struct wl1251 *wl)
{
	int ret;
	ret = wl1251_power_on(wl);
	if (ret < 0)
		return ret;
	msleep(WL1251_POWER_ON_SLEEP);
	wl->if_ops->reset(wl);
	/*
	 * Map the boot-time register window so the registers below are
	 * reachable before any firmware is running.
	 */
	wl1251_set_partition(wl,
			     0x00000000,
			     0x00000000,
			     REGISTERS_BASE,
			     REGISTERS_DOWN_SIZE);
	/* Bring the chip out of ELP low-power state. */
	wl1251_fw_wakeup(wl);
	/* Read and validate the chip identity register. */
	wl->chip_id = wl1251_reg_read32(wl, CHIP_ID_B);
	/* Only PG11 and PG12 silicon revisions are supported. */
	switch (wl->chip_id) {
	case CHIP_ID_1251_PG12:
		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
			     wl->chip_id);
		break;
	case CHIP_ID_1251_PG11:
		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
			     wl->chip_id);
		break;
	case CHIP_ID_1251_PG10:
	default:
		wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
		ret = -ENODEV;
		goto out;
	}
	if (wl->fw == NULL) {
		ret = wl1251_fetch_firmware(wl);
		if (ret < 0)
			goto out;
	}
	if (wl->nvs == NULL && !wl->use_eeprom) {
		/* NVS is only needed when not reading config from EEPROM. */
		ret = wl1251_fetch_nvs(wl);
		if (ret < 0)
			goto out;
	}
out:
	return ret;
}
/* Bound on how many interrupt causes we drain per work invocation. */
#define WL1251_IRQ_LOOP_COUNT 10
/*
 * Bottom half of the chip interrupt: ack the interrupt causes, work
 * out how many RX buffers the firmware has filled, and dispatch RX,
 * TX-complete and event handling.  Loops until no causes remain or
 * WL1251_IRQ_LOOP_COUNT iterations have run.
 */
static void wl1251_irq_work(struct work_struct *work)
{
	u32 intr, ctr = WL1251_IRQ_LOOP_COUNT;
	struct wl1251 *wl =
		container_of(work, struct wl1251, irq_work);
	int ret;
	mutex_lock(&wl->mutex);
	wl1251_debug(DEBUG_IRQ, "IRQ work");
	if (wl->state == WL1251_STATE_OFF)
		goto out;
	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;
	/* Mask all causes while we process, then read-and-clear them. */
	wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1251_ACX_INTR_ALL);
	intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
	wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr);
	do {
		if (wl->data_path) {
			wl->rx_counter = wl1251_mem_read32(
				wl, wl->data_path->rx_control_addr);
			/*
			 * Derive the RX0/RX1 pending bits from how far the
			 * firmware's RX counter is ahead of ours (mod 16);
			 * the double-buffered RX path can be 0-2 ahead.
			 */
			switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
			case 0:
				wl1251_debug(DEBUG_IRQ,
					     "RX: FW and host in sync");
				intr &= ~WL1251_ACX_INTR_RX0_DATA;
				intr &= ~WL1251_ACX_INTR_RX1_DATA;
				break;
			case 1:
				wl1251_debug(DEBUG_IRQ, "RX: FW +1");
				intr |= WL1251_ACX_INTR_RX0_DATA;
				intr &= ~WL1251_ACX_INTR_RX1_DATA;
				break;
			case 2:
				wl1251_debug(DEBUG_IRQ, "RX: FW +2");
				intr |= WL1251_ACX_INTR_RX0_DATA;
				intr |= WL1251_ACX_INTR_RX1_DATA;
				break;
			default:
				wl1251_warning(
					"RX: FW and host out of sync: %d",
					wl->rx_counter - wl->rx_handled);
				break;
			}
			wl->rx_handled = wl->rx_counter;
			wl1251_debug(DEBUG_IRQ, "RX counter: %d",
				     wl->rx_counter);
		}
		intr &= wl->intr_mask;
		if (intr == 0) {
			wl1251_debug(DEBUG_IRQ, "INTR is 0");
			goto out_sleep;
		}
		if (intr & WL1251_ACX_INTR_RX0_DATA) {
			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
			wl1251_rx(wl);
		}
		if (intr & WL1251_ACX_INTR_RX1_DATA) {
			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
			wl1251_rx(wl);
		}
		if (intr & WL1251_ACX_INTR_TX_RESULT) {
			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
			wl1251_tx_complete(wl);
		}
		if (intr & WL1251_ACX_INTR_EVENT_A) {
			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_A");
			wl1251_event_handle(wl, 0);
		}
		if (intr & WL1251_ACX_INTR_EVENT_B) {
			wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_B");
			wl1251_event_handle(wl, 1);
		}
		if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
			wl1251_debug(DEBUG_IRQ,
				     "WL1251_ACX_INTR_INIT_COMPLETE");
		if (--ctr == 0)
			break;
		intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
	} while (intr);
out_sleep:
	/* Re-enable only the causes we care about, then allow ELP sleep. */
	wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * Issue a JOIN to the firmware: program the management frame rates,
 * send the join command, and wait (up to 100 ms) for the join-complete
 * event.  Argument order is (bss_type, channel, beacon_interval,
 * dtim_period) - callers must match it.  Returns 0 or negative errno;
 * a join timeout only warns and still returns the event-wait status.
 */
static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
		       u16 beacon_interval, u8 dtim_period)
{
	int ret;
	ret = wl1251_acx_frame_rates(wl, DEFAULT_HW_GEN_TX_RATE,
				     DEFAULT_HW_GEN_MODULATION_TYPE,
				     wl->tx_mgmt_frm_rate,
				     wl->tx_mgmt_frm_mod);
	if (ret < 0)
		goto out;
	ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
			      dtim_period);
	if (ret < 0)
		goto out;
	ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
	if (ret < 0)
		wl1251_warning("join timeout");
out:
	return ret;
}
/*
 * Deferred work scheduled when RX filters change: re-issue the JOIN so
 * the firmware picks up the updated filter configuration.
 */
static void wl1251_filter_work(struct work_struct *work)
{
	struct wl1251 *wl = container_of(work, struct wl1251, filter_work);
	int ret;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1251_STATE_OFF)
		goto out;

	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* Best effort: sleep again whether or not the join succeeded. */
	wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
		    wl->dtim_period);

	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 .tx callback: queue the frame and kick the TX work item.
 * Runs in atomic context, so the actual transfer is deferred.
 */
static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1251 *wl = hw->priv;
	unsigned long flags;
	skb_queue_tail(&wl->tx_queue, skb);
	/*
	 * Schedule the work item that drains tx_queue to the firmware.
	 */
	ieee80211_queue_work(wl->hw, &wl->tx_work);
	/*
	 * Apply backpressure: once the queue reaches the high watermark,
	 * stop mac80211's queues until TX completion drains it.
	 */
	if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
		wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_stop_queues(wl->hw);
		wl->tx_queue_stopped = true;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}
}
/*
 * mac80211 .start callback: power on the chip, boot the firmware and
 * run the post-boot hardware init.  On any failure the chip is powered
 * off again before returning the error.
 */
static int wl1251_op_start(struct ieee80211_hw *hw)
{
	struct wl1251 *wl = hw->priv;
	struct wiphy *wiphy = hw->wiphy;
	int ret = 0;
	wl1251_debug(DEBUG_MAC80211, "mac80211 start");
	mutex_lock(&wl->mutex);
	if (wl->state != WL1251_STATE_OFF) {
		wl1251_error("cannot start because not in off state: %d",
			     wl->state);
		ret = -EBUSY;
		goto out;
	}
	ret = wl1251_chip_wakeup(wl);
	if (ret < 0)
		goto out;
	ret = wl1251_boot(wl);
	if (ret < 0)
		goto out;
	ret = wl1251_hw_init(wl);
	if (ret < 0)
		goto out;
	ret = wl1251_acx_station_id(wl);
	if (ret < 0)
		goto out;
	wl->state = WL1251_STATE_ON;
	wl1251_info("firmware booted (%s)", wl->fw_ver);
	/* Expose chip/firmware versions through ethtool. */
	wiphy->hw_version = wl->chip_id;
	/* NOTE(review): strncpy does not guarantee NUL termination if
	 * fw_ver can fill wiphy->fw_version completely - verify sizes. */
	strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version));
out:
	if (ret < 0)
		wl1251_power_off(wl);
	mutex_unlock(&wl->mutex);
	return ret;
}
/*
 * mac80211 .stop callback: tear the device down.  Ordering matters:
 * mark the state OFF and mask interrupts under the mutex, then cancel
 * all work items with the mutex released (they take it themselves),
 * and finally flush TX, cut power and reset the soft state.
 */
static void wl1251_op_stop(struct ieee80211_hw *hw)
{
	struct wl1251 *wl = hw->priv;
	wl1251_info("down");
	wl1251_debug(DEBUG_MAC80211, "mac80211 stop");
	mutex_lock(&wl->mutex);
	WARN_ON(wl->state != WL1251_STATE_ON);
	if (wl->scanning) {
		/* Tell mac80211 the pending scan was aborted. */
		ieee80211_scan_completed(wl->hw, true);
		wl->scanning = false;
	}
	wl->state = WL1251_STATE_OFF;
	wl1251_disable_interrupts(wl);
	mutex_unlock(&wl->mutex);
	/* Must not hold the mutex here: the work handlers acquire it. */
	cancel_work_sync(&wl->irq_work);
	cancel_work_sync(&wl->tx_work);
	cancel_work_sync(&wl->filter_work);
	cancel_delayed_work_sync(&wl->elp_work);
	mutex_lock(&wl->mutex);
	/* Drop any frames still queued for transmission. */
	wl1251_tx_flush(wl);
	wl1251_power_off(wl);
	memset(wl->bssid, 0, ETH_ALEN);
	wl->listen_int = 1;
	wl->bss_type = MAX_BSS_TYPE;
	wl->data_in_count = 0;
	wl->rx_counter = 0;
	wl->rx_handled = 0;
	wl->rx_current_buffer = 0;
	wl->rx_last_id = 0;
	wl->next_tx_complete = 0;
	wl->elp = false;
	wl->station_mode = STATION_ACTIVE_MODE;
	wl->tx_queue_stopped = false;
	wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
	wl->rssi_thold = 0;
	wl->channel = WL1251_DEFAULT_CHANNEL;
	wl1251_debugfs_reset(wl);
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 .add_interface callback.  Only one virtual interface is
 * supported at a time; accepts station and ad-hoc types and programs
 * the MAC address into the firmware if it changed.
 */
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1251 *wl = hw->priv;
	int ret = 0;

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     vif->type, vif->addr);

	mutex_lock(&wl->mutex);

	/* A single vif only: reject if one is already attached. */
	if (wl->vif != NULL) {
		ret = -EBUSY;
		goto out;
	}

	wl->vif = vif;

	if (vif->type == NL80211_IFTYPE_STATION) {
		wl->bss_type = BSS_TYPE_STA_BSS;
	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
		wl->bss_type = BSS_TYPE_IBSS;
	} else {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN) != 0) {
		memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
		SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
		ret = wl1251_acx_station_id(wl);
	}

out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/* mac80211 .remove_interface callback: detach the single vif. */
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1251 *wl = hw->priv;
	mutex_lock(&wl->mutex);
	wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
	wl->vif = NULL;
	mutex_unlock(&wl->mutex);
}
static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
memcpy(template.addr1, wl->bssid, ETH_ALEN);
memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
memcpy(template.addr3, wl->bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_FCTL_TODS);
/* */
template.qos_ctrl = cpu_to_le16(0);
return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
}
/*
 * mac80211 .config callback: apply channel, power-save mode, idle
 * state and TX power changes to the firmware.
 *
 * Fix: the return value of wl1251_acx_wr_tbtt_and_dtim() was ignored
 * and immediately overwritten by the following call, silently losing
 * TBTT/DTIM configuration failures.  It is now checked like every
 * other firmware call in this function.
 */
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1251 *wl = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);

	wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
		     channel,
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level);

	mutex_lock(&wl->mutex);

	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* Channel change requires a fresh join. */
	if (channel != wl->channel) {
		wl->channel = channel;
		ret = wl1251_join(wl, wl->bss_type, wl->channel,
				  wl->beacon_int, wl->dtim_period);
		if (ret < 0)
			goto out_sleep;
	}

	if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
		wl1251_debug(DEBUG_PSM, "psm enabled");
		wl->psm_requested = true;
		wl->dtim_period = conf->ps_dtim_period;
		/* Program beacon/DTIM timing before entering PS mode. */
		ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
						  wl->dtim_period);
		if (ret < 0)
			goto out_sleep;
		/*
		 * Enter firmware power-save mode now that timing is set.
		 */
		ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
		if (ret < 0)
			goto out_sleep;
	} else if (!(conf->flags & IEEE80211_CONF_PS) &&
		   wl->psm_requested) {
		wl1251_debug(DEBUG_PSM, "psm disabled");
		wl->psm_requested = false;
		/* Only leave PS if the firmware is not already active. */
		if (wl->station_mode != STATION_ACTIVE_MODE) {
			ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
			if (ret < 0)
				goto out_sleep;
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		if (conf->flags & IEEE80211_CONF_IDLE) {
			ret = wl1251_ps_set_mode(wl, STATION_IDLE);
			if (ret < 0)
				goto out_sleep;
		} else {
			ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
			if (ret < 0)
				goto out_sleep;
			/* Leaving idle: rejoin the BSS. */
			ret = wl1251_join(wl, wl->bss_type, wl->channel,
					  wl->beacon_int, wl->dtim_period);
			if (ret < 0)
				goto out_sleep;
		}
	}

	if (conf->power_level != wl->power_level) {
		ret = wl1251_acx_tx_power(wl, conf->power_level);
		if (ret < 0)
			goto out_sleep;
		wl->power_level = conf->power_level;
	}

out_sleep:
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/* Filter flags this hardware can honour; everything else is clamped. */
#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
/*
 * mac80211 .configure_filter callback: translate mac80211 filter flags
 * into the chip's rx_config/rx_filter words.  Only updates the cached
 * values; the new configuration is applied by other paths (e.g. the
 * next join).  NOTE(review): FIF_OTHER_BSS clears CFG_BSSID_FILTER_EN
 * in rx_filter while the other BSSID bits live in rx_config - confirm
 * this is intentional against the firmware interface docs.
 */
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total,u64 multicast)
{
	struct wl1251 *wl = hw->priv;
	wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
	*total &= WL1251_SUPPORTED_FILTERS;
	changed &= WL1251_SUPPORTED_FILTERS;
	if (changed == 0)
		/* No supported flag changed; nothing to recompute. */
		return;
	/* Start from the defaults and apply each requested relaxation. */
	wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
	wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
	if (*total & FIF_PROMISC_IN_BSS) {
		wl->rx_config |= CFG_BSSID_FILTER_EN;
		wl->rx_config |= CFG_RX_ALL_GOOD;
	}
	if (*total & FIF_ALLMULTI)
		/*
		 * CFG_MC_FILTER_EN in rx_config is probably not related to
		 * the multicast filtering; disable it anyway.
		 */
		wl->rx_config &= ~CFG_MC_FILTER_EN;
	if (*total & FIF_FCSFAIL)
		wl->rx_filter |= CFG_RX_FCS_ERROR;
	if (*total & FIF_BCN_PRBRESP_PROMISC) {
		wl->rx_config &= ~CFG_BSSID_FILTER_EN;
		wl->rx_config &= ~CFG_SSID_FILTER_EN;
	}
	if (*total & FIF_CONTROL)
		wl->rx_filter |= CFG_RX_CTL_EN;
	if (*total & FIF_OTHER_BSS)
		wl->rx_filter &= ~CFG_BSSID_FILTER_EN;
	/*
	 * The cached rx_config/rx_filter values are pushed to the
	 * firmware elsewhere; this callback must not sleep on I/O.
	 */
}
/*
 * Map a mac80211 cipher plus key scope (group vs. pairwise, decided by
 * whether the peer address is broadcast) onto the firmware key type.
 * WEP/TKIP keys get a hw_key_idx; CCMP asks mac80211 to generate IVs.
 * Returns 0 or -EOPNOTSUPP for an unknown cipher.
 */
static int wl1251_set_key_type(struct wl1251 *wl,
			       struct wl1251_cmd_set_keys *key,
			       enum set_key_cmd cmd,
			       struct ieee80211_key_conf *mac80211_key,
			       const u8 *addr)
{
	bool group_key = is_broadcast_ether_addr(addr);

	switch (mac80211_key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key->key_type = group_key ? KEY_WEP_DEFAULT : KEY_WEP_ADDR;
		mac80211_key->hw_key_idx = mac80211_key->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key->key_type = group_key ? KEY_TKIP_MIC_GROUP :
					    KEY_TKIP_MIC_PAIRWISE;
		mac80211_key->hw_key_idx = mac80211_key->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key->key_type = group_key ? KEY_AES_GROUP : KEY_AES_PAIRWISE;
		mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		break;
	default:
		wl1251_error("Unknown key cipher 0x%x", mac80211_key->cipher);
		return -EOPNOTSUPP;
	}

	return 0;
}
/*
 * mac80211 .set_key callback: build a SET_KEYS firmware command from
 * the mac80211 key material and send it.  Group keys use a broadcast
 * peer address; TKIP key material is re-ordered to the firmware's
 * layout (TK, then RX MIC, then TX MIC).
 */
static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key)
{
	struct wl1251 *wl = hw->priv;
	struct wl1251_cmd_set_keys *wl_cmd;
	const u8 *addr;
	int ret;
	static const u8 bcast_addr[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	wl1251_debug(DEBUG_MAC80211, "mac80211 set key");
	wl_cmd = kzalloc(sizeof(*wl_cmd), GFP_KERNEL);
	if (!wl_cmd) {
		ret = -ENOMEM;
		goto out;
	}
	/* No station means a group (broadcast) key. */
	addr = sta ? sta->addr : bcast_addr;
	wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
	wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
	wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key->cipher, key->keyidx, key->keylen, key->flags);
	wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen);
	if (is_zero_ether_addr(addr)) {
		/* A zero peer address is never valid for keying. */
		ret = -EOPNOTSUPP;
		goto out;
	}
	mutex_lock(&wl->mutex);
	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;
	switch (cmd) {
	case SET_KEY:
		wl_cmd->key_action = KEY_ADD_OR_REPLACE;
		break;
	case DISABLE_KEY:
		wl_cmd->key_action = KEY_REMOVE;
		break;
	default:
		/* NOTE(review): falls through with key_action left zeroed. */
		wl1251_error("Unsupported key cmd 0x%x", cmd);
		break;
	}
	ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
	if (ret < 0) {
		wl1251_error("Set KEY type failed");
		goto out_sleep;
	}
	/* Default WEP keys are not tied to a peer address. */
	if (wl_cmd->key_type != KEY_WEP_DEFAULT)
		memcpy(wl_cmd->addr, addr, ETH_ALEN);
	if ((wl_cmd->key_type == KEY_TKIP_MIC_GROUP) ||
	    (wl_cmd->key_type == KEY_TKIP_MIC_PAIRWISE)) {
		/*
		 * Firmware expects the two TKIP MIC halves in the
		 * opposite order from mac80211's key layout.
		 */
		memcpy(wl_cmd->key, key->key, 16);
		memcpy(wl_cmd->key + 16, key->key + 24, 8);
		memcpy(wl_cmd->key + 24, key->key + 16, 8);
	} else {
		memcpy(wl_cmd->key, key->key, key->keylen);
	}
	wl_cmd->key_size = key->keylen;
	wl_cmd->id = key->keyidx;
	wl_cmd->ssid_profile = 0;
	wl1251_dump(DEBUG_CRYPT, "TARGET KEY: ", wl_cmd, sizeof(*wl_cmd));
	ret = wl1251_cmd_send(wl, CMD_SET_KEYS, wl_cmd, sizeof(*wl_cmd));
	if (ret < 0) {
		wl1251_warning("could not set keys");
		goto out_sleep;
	}
out_sleep:
	wl1251_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);
out:
	kfree(wl_cmd);
	return ret;
}
/*
 * mac80211 .hw_scan callback: upload a probe-request template and
 * start a firmware-driven scan for the first requested SSID (the
 * hardware supports only one; max_scan_ssids is 1).  wl->scanning is
 * cleared again by the scan-complete event or by op_stop.
 * NOTE(review): the !skb error path jumps to "out" and skips
 * wl1251_ps_elp_sleep() after a successful wakeup - verify whether the
 * wakeup/sleep pair must balance here.
 */
static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1251 *wl = hw->priv;
	struct sk_buff *skb;
	size_t ssid_len = 0;
	u8 *ssid = NULL;
	int ret;
	wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		ssid_len = req->ssids[0].ssid_len;
	}
	mutex_lock(&wl->mutex);
	if (wl->scanning) {
		wl1251_debug(DEBUG_SCAN, "scan already in progress");
		ret = -EINVAL;
		goto out;
	}
	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;
	skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
				     req->ie, req->ie_len);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}
	ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
				      skb->len);
	dev_kfree_skb(skb);
	if (ret < 0)
		goto out_sleep;
	ret = wl1251_cmd_trigger_scan_to(wl, 0);
	if (ret < 0)
		goto out_sleep;
	wl->scanning = true;
	ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
			      req->n_channels, WL1251_SCAN_NUM_PROBES);
	if (ret < 0) {
		/* Scan never started; clear the flag so retries work. */
		wl->scanning = false;
		goto out_sleep;
	}
out_sleep:
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/*
 * mac80211 .set_rts_threshold callback: push the (16-bit) threshold
 * to the firmware.  A failed ACX write only warns; the wakeup status
 * or the ACX status is returned.
 */
static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1251 *wl = hw->priv;
	int ret;
	mutex_lock(&wl->mutex);
	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;
	/* Firmware takes a 16-bit threshold; mac80211 hands us a u32. */
	ret = wl1251_acx_rts_threshold(wl, (u16) value);
	if (ret < 0)
		wl1251_warning("wl1251_op_set_rts_threshold failed: %d", ret);
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/*
 * mac80211 .bss_info_changed callback: propagate BSS configuration
 * changes (CQM threshold, BSSID, association, ERP parameters, beacon
 * template) to the firmware.
 *
 * Fixes:
 *  - BSS_CHANGED_BEACON path called wl1251_join() with wl->beacon_int
 *    and wl->channel swapped.  The signature is (wl, bss_type, channel,
 *    beacon_interval, dtim_period) and every other call site passes
 *    channel third; the arguments are now in the correct order.
 *  - Two error paths (CQM config and QoS-null-data template) jumped to
 *    "out" after a successful wl1251_ps_elp_wakeup(), skipping the
 *    matching wl1251_ps_elp_sleep().  They now go to "out_sleep" like
 *    every other post-wakeup error path in this function.
 */
static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1251 *wl = hw->priv;
	struct sk_buff *beacon, *skb;
	int ret;

	wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");

	mutex_lock(&wl->mutex);

	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_CQM) {
		/* Program the low-RSSI trigger used for CQM events. */
		ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
					  WL1251_DEFAULT_LOW_RSSI_WEIGHT,
					  WL1251_DEFAULT_LOW_RSSI_DEPTH,
					  WL1251_ACX_LOW_RSSI_TYPE_EDGE);
		if (ret < 0)
			goto out_sleep;
		wl->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);

		/* Refresh the null-data templates for the new BSSID. */
		skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
		if (!skb)
			goto out_sleep;
		ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
					      skb->data, skb->len);
		dev_kfree_skb(skb);
		if (ret < 0)
			goto out_sleep;

		ret = wl1251_build_qos_null_data(wl);
		if (ret < 0)
			goto out_sleep;

		if (wl->bss_type != BSS_TYPE_IBSS) {
			ret = wl1251_join(wl, wl->bss_type, wl->channel,
					  wl->beacon_int, wl->dtim_period);
			if (ret < 0)
				goto out_sleep;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			wl->beacon_int = bss_conf->beacon_int;

			/* PS-poll template is only needed when associated. */
			skb = ieee80211_pspoll_get(wl->hw, wl->vif);
			if (!skb)
				goto out_sleep;
			ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
						      skb->data,
						      skb->len);
			dev_kfree_skb(skb);
			if (ret < 0)
				goto out_sleep;

			ret = wl1251_acx_aid(wl, bss_conf->aid);
			if (ret < 0)
				goto out_sleep;
		} else {
			/* Disassociated: fall back to default timing. */
			wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
			wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
		}
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1251_acx_slot(wl, SLOT_TIME_SHORT);
		else
			ret = wl1251_acx_slot(wl, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1251_warning("Set slot time failed %d", ret);
			goto out_sleep;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1251_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
		else
			wl1251_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1251_acx_cts_protect(wl, CTSPROTECT_ENABLE);
		else
			ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1251_warning("Set ctsprotect failed %d", ret);
			goto out_sleep;
		}
	}

	if (changed & BSS_CHANGED_BEACON) {
		beacon = ieee80211_beacon_get(hw, vif);
		if (!beacon)
			goto out_sleep;

		ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
					      beacon->len);
		if (ret < 0) {
			dev_kfree_skb(beacon);
			goto out_sleep;
		}

		ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
					      beacon->len);
		dev_kfree_skb(beacon);
		if (ret < 0)
			goto out_sleep;

		/* Argument order fixed: channel before beacon interval. */
		ret = wl1251_join(wl, wl->bss_type, wl->channel,
				  wl->beacon_int, wl->dtim_period);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * Supported bitrates (in 100 kbit/s units) with the firmware's rate
 * bitmask in hw_value/hw_value_short.  Not const - presumably mac80211
 * writes into this table (TODO confirm against mac80211 docs).
 */
static struct ieee80211_rate wl1251_rates[] = {
	{ .bitrate = 10,
	  .hw_value = 0x1,
	  .hw_value_short = 0x1, },
	{ .bitrate = 20,
	  .hw_value = 0x2,
	  .hw_value_short = 0x2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = 0x4,
	  .hw_value_short = 0x4,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = 0x20,
	  .hw_value_short = 0x20,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = 0x8,
	  .hw_value_short = 0x8, },
	{ .bitrate = 90,
	  .hw_value = 0x10,
	  .hw_value_short = 0x10, },
	{ .bitrate = 120,
	  .hw_value = 0x40,
	  .hw_value_short = 0x40, },
	{ .bitrate = 180,
	  .hw_value = 0x80,
	  .hw_value_short = 0x80, },
	{ .bitrate = 240,
	  .hw_value = 0x200,
	  .hw_value_short = 0x200, },
	{ .bitrate = 360,
	  .hw_value = 0x400,
	  .hw_value_short = 0x400, },
	{ .bitrate = 480,
	  .hw_value = 0x800,
	  .hw_value_short = 0x800, },
	{ .bitrate = 540,
	  .hw_value = 0x1000,
	  .hw_value_short = 0x1000, },
};
/*
 * 2.4 GHz channels 1-13.  Not const - presumably mac80211 updates
 * per-channel flags at runtime (TODO confirm).
 */
static struct ieee80211_channel wl1251_channels[] = {
	{ .hw_value = 1, .center_freq = 2412},
	{ .hw_value = 2, .center_freq = 2417},
	{ .hw_value = 3, .center_freq = 2422},
	{ .hw_value = 4, .center_freq = 2427},
	{ .hw_value = 5, .center_freq = 2432},
	{ .hw_value = 6, .center_freq = 2437},
	{ .hw_value = 7, .center_freq = 2442},
	{ .hw_value = 8, .center_freq = 2447},
	{ .hw_value = 9, .center_freq = 2452},
	{ .hw_value = 10, .center_freq = 2457},
	{ .hw_value = 11, .center_freq = 2462},
	{ .hw_value = 12, .center_freq = 2467},
	{ .hw_value = 13, .center_freq = 2472},
};
/*
 * mac80211 .conf_tx callback: program the EDCA access-category
 * parameters and the per-TID configuration for one hardware queue.
 */
static int wl1251_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1251 *wl = hw->priv;
	enum wl1251_acx_ps_scheme scheme;
	int ret;

	mutex_lock(&wl->mutex);

	wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	ret = wl1251_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* mac80211 txop is multiplied by 32 for the firmware. */
	ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop * 32);
	if (ret < 0)
		goto out_sleep;

	scheme = params->uapsd ? WL1251_ACX_PS_SCHEME_UPSD_TRIGGER
			       : WL1251_ACX_PS_SCHEME_LEGACY;

	ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
				 CHANNEL_TYPE_EDCF,
				 wl1251_tx_get_queue(queue), scheme,
				 WL1251_ACX_ACK_POLICY_LEGACY);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1251_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/*
 * mac80211 .get_survey callback: report the cached noise level for the
 * current channel.  Only a single survey entry (idx 0) exists.
 */
static int wl1251_op_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct wl1251 *wl = hw->priv;

	if (idx != 0)
		return -ENOENT;

	survey->channel = hw->conf.channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = wl->noise;

	return 0;
}
/* The single supported band (2.4 GHz); ties channels to rates. */
static struct ieee80211_supported_band wl1251_band_2ghz = {
	.channels = wl1251_channels,
	.n_channels = ARRAY_SIZE(wl1251_channels),
	.bitrates = wl1251_rates,
	.n_bitrates = ARRAY_SIZE(wl1251_rates),
};
/* mac80211 driver entry points implemented by this file. */
static const struct ieee80211_ops wl1251_ops = {
	.start = wl1251_op_start,
	.stop = wl1251_op_stop,
	.add_interface = wl1251_op_add_interface,
	.remove_interface = wl1251_op_remove_interface,
	.config = wl1251_op_config,
	.configure_filter = wl1251_op_configure_filter,
	.tx = wl1251_op_tx,
	.set_key = wl1251_op_set_key,
	.hw_scan = wl1251_op_hw_scan,
	.bss_info_changed = wl1251_op_bss_info_changed,
	.set_rts_threshold = wl1251_op_set_rts_threshold,
	.conf_tx = wl1251_op_conf_tx,
	.get_survey = wl1251_op_get_survey,
};
/*
 * Read a single byte from the on-board EEPROM: latch the address,
 * strobe a read, poll until the controller clears the READ bit (with a
 * 100 ms timeout), then fetch the data register.
 */
static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
{
	unsigned long deadline;

	wl1251_reg_write32(wl, EE_ADDR, offset);
	wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);

	deadline = jiffies + msecs_to_jiffies(100);
	while (wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(1);
	}

	*data = wl1251_reg_read32(wl, EE_DATA);

	return 0;
}
/*
 * Read len bytes from the EEPROM starting at offset, byte by byte.
 * Stops on the first failing byte and returns its error code.
 */
static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
			      u8 *data, size_t len)
{
	size_t i;
	int ret = 0;

	wl1251_reg_write32(wl, EE_START, 0);

	for (i = 0; i < len && ret == 0; i++)
		ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);

	return ret;
}
/*
 * Fetch the permanent MAC address from EEPROM (offset 0x1c) into
 * wl->mac_addr.  Returns 0 on success, negative error on read failure.
 */
static int wl1251_read_eeprom_mac(struct wl1251 *wl)
{
	u8 buf[ETH_ALEN];
	int ret, i;

	wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);

	ret = wl1251_read_eeprom(wl, 0x1c, buf, sizeof(buf));
	if (ret < 0) {
		wl1251_warning("failed to read MAC address from EEPROM");
		return ret;
	}

	/* The address is stored in reverse byte order in the EEPROM. */
	for (i = 0; i < ETH_ALEN; i++)
		wl->mac_addr[i] = buf[ETH_ALEN - 1 - i];

	return 0;
}
/*
 * Register the device with mac80211 (idempotent).  Sets the permanent
 * MAC address first; returns 0 on success or the mac80211 error code.
 */
static int wl1251_register_hw(struct wl1251 *wl)
{
	int ret;

	/* Nothing to do if mac80211 already knows about us. */
	if (wl->mac80211_registered)
		return 0;

	SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1251_error("unable to register mac80211 hw: %d", ret);
		return ret;
	}

	wl->mac80211_registered = true;

	wl1251_notice("loaded");

	return 0;
}
/* Populate the ieee80211_hw capabilities and register with mac80211.
 * Returns 0 on success or the error from wl1251_register_hw(). */
int wl1251_init_ieee80211(struct wl1251 *wl)
{
int ret;
/* Headroom for the HW TX descriptor plus TKIP IV expansion that the
 * driver prepends to every frame handed down by mac80211. */
wl->hw->extra_tx_headroom = sizeof(struct tx_double_buffer_desc)
+ WL1251_TKIP_IV_SPACE;
/* NOTE(review): unit is presumably microseconds, per the mac80211
 * channel_change_time convention -- confirm against mac80211 docs. */
wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
wl->hw->queues = 4;
/* NOTE(review): the return value is deliberately ignored here; on EEPROM
 * failure the random MAC set in wl1251_alloc_hw() remains in effect. */
if (wl->use_eeprom)
wl1251_read_eeprom_mac(wl);
ret = wl1251_register_hw(wl);
if (ret)
goto out;
wl1251_debugfs_init(wl);
wl1251_notice("initialized");
ret = 0;
out:
return ret;
}
/* Allocate the ieee80211_hw and embedded wl1251 state and set every
 * field to its power-on default.  Returns the hw on success or an
 * ERR_PTR(-ENOMEM) on allocation failure. */
struct ieee80211_hw *wl1251_alloc_hw(void)
{
struct ieee80211_hw *hw;
struct wl1251 *wl;
int i;
static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1251_ops);
if (!hw) {
wl1251_error("could not alloc ieee80211_hw");
return ERR_PTR(-ENOMEM);
}
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
wl->hw = hw;
wl->data_in_count = 0;
skb_queue_head_init(&wl->tx_queue);
INIT_WORK(&wl->filter_work, wl1251_filter_work);
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
wl->scanning = false;
wl->default_key = 0;
wl->listen_int = 1;
wl->rx_counter = 0;
wl->rx_handled = 0;
wl->rx_current_buffer = 0;
wl->rx_last_id = 0;
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
wl->psm_requested = false;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
wl->vif = NULL;
for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
wl->tx_frames[i] = NULL;
wl->next_tx_complete = 0;
INIT_WORK(&wl->irq_work, wl1251_irq_work);
INIT_WORK(&wl->tx_work, wl1251_tx_work);
/*
* Default MAC: Nokia OUI plus three random NIC bytes.  It may later be
* overwritten by wl1251_read_eeprom_mac() when use_eeprom is set.
*/
memcpy(wl->mac_addr, nokia_oui, 3);
get_random_bytes(wl->mac_addr + 3, 3);
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
if (!wl->rx_descriptor) {
wl1251_error("could not allocate memory for rx descriptor");
ieee80211_free_hw(hw);
return ERR_PTR(-ENOMEM);
}
return hw;
}
/* Tear down everything wl1251_alloc_hw()/init set up.  Order matters:
 * unregister from mac80211 before freeing any state it may still use,
 * and free the hw last. */
int wl1251_free_hw(struct wl1251 *wl)
{
ieee80211_unregister_hw(wl->hw);
wl1251_debugfs_exit(wl);
kfree(wl->target_mem_map);
kfree(wl->data_path);
/* firmware image was vmalloc'ed, so vfree, not kfree */
vfree(wl->fw);
wl->fw = NULL;
kfree(wl->nvs);
wl->nvs = NULL;
kfree(wl->rx_descriptor);
wl->rx_descriptor = NULL;
ieee80211_free_hw(wl->hw);
return 0;
}
/* Module metadata.  (Fixed typo: "Wireles" -> "Wireless".) */
MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_FIRMWARE(WL1251_FW_NAME);
| gpl-2.0 |
vmora/QGIS | src/app/qgsselectbyformdialog.cpp | 8 | 5171 | /***************************************************************************
qgsselectbyformdialog.cpp
------------------------
Date : June 2016
Copyright : (C) 2016 nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgsselectbyformdialog.h"
#include "qgsattributeform.h"
#include "qgsmapcanvas.h"
#include "qgssettings.h"
#include <QLayout>
QgsSelectByFormDialog::QgsSelectByFormDialog( QgsVectorLayer *layer, const QgsAttributeEditorContext &context, QWidget *parent, Qt::WindowFlags fl )
  : QDialog( parent, fl )
  , mLayer( layer )
{
  // Run the attribute form as a standalone search dialog, without any
  // custom drag-and-drop UI.
  QgsAttributeEditorContext formContext = context;
  formContext.setFormMode( QgsAttributeEditorContext::StandaloneDialog );
  formContext.setAllowCustomUi( false );

  mForm = new QgsAttributeForm( layer, QgsFeature(), formContext, this );
  mForm->setMode( QgsAttributeForm::SearchMode );

  // The form is the only child widget; let it fill the dialog edge to edge.
  QVBoxLayout *layout = new QVBoxLayout();
  layout->setMargin( 0 );
  layout->setContentsMargins( 0, 0, 0, 0 );
  layout->addWidget( mForm );
  setLayout( layout );

  connect( mForm, &QgsAttributeForm::closed, this, &QWidget::close );

  // Restore the window geometry saved by the destructor.
  QgsSettings settings;
  restoreGeometry( settings.value( QStringLiteral( "Windows/SelectByForm/geometry" ) ).toByteArray() );

  setWindowTitle( tr( "Select Features by Value" ) );
}
// Persist the window geometry so the dialog reopens with the same
// size/position (restored in the constructor).
QgsSelectByFormDialog::~QgsSelectByFormDialog()
{
QgsSettings settings;
settings.setValue( QStringLiteral( "Windows/SelectByForm/geometry" ), saveGeometry() );
}
// Store the message bar used for feedback and forward it to the form.
// Ownership is not taken; the caller must keep messageBar alive.
void QgsSelectByFormDialog::setMessageBar( QgsMessageBar *messageBar )
{
mMessageBar = messageBar;
mForm->setMessageBar( messageBar );
}
// Attach the map canvas and hook up the form's zoom/flash requests.
// Ownership is not taken; the canvas must outlive this dialog.
void QgsSelectByFormDialog::setMapCanvas( QgsMapCanvas *canvas )
{
mMapCanvas = canvas;
connect( mForm, &QgsAttributeForm::zoomToFeatures, this, &QgsSelectByFormDialog::zoomToFeatures );
connect( mForm, &QgsAttributeForm::flashFeatures, this, &QgsSelectByFormDialog::flashFeatures );
}
void QgsSelectByFormDialog::zoomToFeatures( const QString &filter )
{
QgsExpressionContext context( QgsExpressionContextUtils::globalProjectLayerScopes( mLayer ) );
QgsFeatureRequest request = QgsFeatureRequest().setFilterExpression( filter )
.setExpressionContext( context )
.setSubsetOfAttributes( QgsAttributeList() );
QgsFeatureIterator features = mLayer->getFeatures( request );
QgsRectangle bbox;
bbox.setMinimal();
QgsFeature feat;
int featureCount = 0;
while ( features.nextFeature( feat ) )
{
QgsGeometry geom = feat.geometry();
if ( geom.isNull() || geom.constGet()->isEmpty() )
continue;
QgsRectangle r = mMapCanvas->mapSettings().layerExtentToOutputExtent( mLayer, geom.boundingBox() );
bbox.combineExtentWith( r );
featureCount++;
}
features.close();
QgsSettings settings;
int timeout = settings.value( QStringLiteral( "qgis/messageTimeout" ), 5 ).toInt();
if ( featureCount > 0 )
{
mMapCanvas->zoomToFeatureExtent( bbox );
if ( mMessageBar )
{
mMessageBar->pushMessage( QString(),
tr( "Zoomed to %n matching feature(s)", "number of matching features", featureCount ),
Qgis::Info,
timeout );
}
}
else if ( mMessageBar )
{
mMessageBar->pushMessage( QString(),
tr( "No matching features found" ),
Qgis::Info,
timeout );
}
}
void QgsSelectByFormDialog::flashFeatures( const QString &filter )
{
QgsExpressionContext context( QgsExpressionContextUtils::globalProjectLayerScopes( mLayer ) );
QgsFeatureRequest request = QgsFeatureRequest().setFilterExpression( filter )
.setExpressionContext( context )
.setSubsetOfAttributes( QgsAttributeList() );
QgsFeatureIterator features = mLayer->getFeatures( request );
QgsFeature feat;
QList< QgsGeometry > geoms;
while ( features.nextFeature( feat ) )
{
if ( feat.hasGeometry() )
geoms << feat.geometry();
}
QgsSettings settings;
int timeout = settings.value( QStringLiteral( "qgis/messageTimeout" ), 5 ).toInt();
if ( !geoms.empty() )
{
mMapCanvas->flashGeometries( geoms, mLayer->crs() );
}
else if ( mMessageBar )
{
mMessageBar->pushMessage( QString(),
tr( "No matching features found" ),
Qgis::Info,
timeout );
}
}
| gpl-2.0 |
myarjunar/QGIS | python/ext-libs/pyspatialite/src/connection.c | 8 | 53631 | /* connection.c - the connection type
*
* Copyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>
*
* This file is part of pysqlite.
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#include "cache.h"
#include "module.h"
#include "structmember.h"
#include "connection.h"
#include "statement.h"
#include "cursor.h"
#include "prepare_protocol.h"
#include "util.h"
#include "spatialite.h"
#include "pythread.h"
#define ACTION_FINALIZE 1
#define ACTION_RESET 2
#if SQLITE_VERSION_NUMBER >= 3003008
#ifndef SQLITE_OMIT_LOAD_EXTENSION
#define HAVE_LOAD_EXTENSION
#endif
#endif
_Py_IDENTIFIER(cursor);
static int pysqlite_connection_set_isolation_level(pysqlite_Connection* self, PyObject* isolation_level);
static void _pysqlite_drop_unused_cursor_references(pysqlite_Connection* self);
/* Report an error from a user-defined callback back to SQLite.  On
 * SQLite < 3.3.3 this instead raises a Python OperationalError, because
 * of the SQLite bug described below. */
static void _sqlite3_result_error(sqlite3_context* ctx, const char* errmsg, int len)
{
/* in older SQLite versions, calling sqlite3_result_error in callbacks
* triggers a bug in SQLite that leads either to irritating results or
* segfaults, depending on the SQLite version */
#if SQLITE_VERSION_NUMBER >= 3003003
sqlite3_result_error(ctx, errmsg, len);
#else
PyErr_SetString(pysqlite_OperationalError, errmsg);
#endif
}
/* Connection.__init__: parse arguments, open the SQLite database (with
 * spatialite initialized), and set up caches, bookkeeping lists and the
 * exception attributes.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
int pysqlite_connection_init(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
static char *kwlist[] = {
"database", "timeout", "detect_types", "isolation_level",
"check_same_thread", "factory", "cached_statements", "uri",
NULL
};
char* database;
int detect_types = 0;
PyObject* isolation_level = NULL;
PyObject* factory = NULL;
int check_same_thread = 1;
int cached_statements = 100;
int uri = 0;
double timeout = 5.0;
int rc;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|diOiOip", kwlist,
&database, &timeout, &detect_types,
&isolation_level, &check_same_thread,
&factory, &cached_statements, &uri))
{
return -1;
}
self->initialized = 1;
self->begin_statement = NULL;
self->statement_cache = NULL;
self->statements = NULL;
self->cursors = NULL;
Py_INCREF(Py_None);
self->row_factory = Py_None;
Py_INCREF(&PyUnicode_Type);
self->text_factory = (PyObject*)&PyUnicode_Type;
/* Release the GIL while opening the database file. */
Py_BEGIN_ALLOW_THREADS
#ifdef SQLITE_OPEN_URI
#if defined(SPATIALITE_HAS_INIT_EX)
self->slconn = spatialite_alloc_connection();
#else
spatialite_init( 0 );
#endif
/* NOTE(review): unlike the branch below, this branch never calls
 * spatialite_init_ex() after opening -- confirm whether spatialite
 * functions are registered when SQLITE_OPEN_URI is defined. */
rc = sqlite3_open_v2(database, &self->db,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
(uri ? SQLITE_OPEN_URI : 0), NULL);
#else
/* NOTE(review): this returns while inside Py_BEGIN_ALLOW_THREADS,
 * i.e. without re-acquiring the GIL -- looks unsafe, verify. */
if (uri) {
PyErr_SetString(pysqlite_NotSupportedError, "URIs not supported");
return -1;
}
#if defined(SPATIALITE_HAS_INIT_EX)
self->slconn = spatialite_alloc_connection();
#else
spatialite_init( 0 );
#endif
rc = sqlite3_open(database, &self->db);
#if defined(SPATIALITE_HAS_INIT_EX)
spatialite_init_ex( self->db, self->slconn, 0 );
#endif
#endif
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, NULL);
return -1;
}
/* Default isolation level is "" (implicit transaction management). */
if (!isolation_level) {
isolation_level = PyUnicode_FromString("");
if (!isolation_level) {
return -1;
}
} else {
Py_INCREF(isolation_level);
}
self->isolation_level = NULL;
if (pysqlite_connection_set_isolation_level(self, isolation_level) < 0) {
Py_DECREF(isolation_level);
return -1;
}
Py_DECREF(isolation_level);
self->statement_cache = (pysqlite_Cache*)PyObject_CallFunction((PyObject*)&pysqlite_CacheType, "Oi", self, cached_statements);
if (PyErr_Occurred()) {
return -1;
}
self->created_statements = 0;
self->created_cursors = 0;
/* Create lists of weak references to statements/cursors */
self->statements = PyList_New(0);
self->cursors = PyList_New(0);
if (!self->statements || !self->cursors) {
return -1;
}
/* By default, the Cache class INCREFs the factory in its initializer, and
* decrefs it in its deallocator method. Since this would create a circular
* reference here, we're breaking it by decrementing self, and telling the
* cache class to not decref the factory (self) in its deallocator.
*/
self->statement_cache->decref_factory = 0;
Py_DECREF(self);
self->inTransaction = 0;
self->detect_types = detect_types;
self->timeout = timeout;
(void)sqlite3_busy_timeout(self->db, (int)(timeout*1000));
#ifdef WITH_THREAD
/* Remember the creating thread so check_same_thread can be enforced. */
self->thread_ident = PyThread_get_thread_ident();
#endif
self->check_same_thread = check_same_thread;
/* function_pinboard keeps Python callbacks alive while SQLite holds
 * raw pointers to them. */
self->function_pinboard = PyDict_New();
if (!self->function_pinboard) {
return -1;
}
self->collations = PyDict_New();
if (!self->collations) {
return -1;
}
/* Expose the DB-API exception classes as connection attributes. */
self->Warning = pysqlite_Warning;
self->Error = pysqlite_Error;
self->InterfaceError = pysqlite_InterfaceError;
self->DatabaseError = pysqlite_DatabaseError;
self->DataError = pysqlite_DataError;
self->OperationalError = pysqlite_OperationalError;
self->IntegrityError = pysqlite_IntegrityError;
self->InternalError = pysqlite_InternalError;
self->ProgrammingError = pysqlite_ProgrammingError;
self->NotSupportedError = pysqlite_NotSupportedError;
return 0;
}
/* Empty the entire statement cache of this connection */
/* Empty the entire statement cache of this connection: finalize every
 * cached statement, then replace the cache with a fresh one (repeating
 * the Py_DECREF(self) cycle-breaking trick from __init__). */
void pysqlite_flush_statement_cache(pysqlite_Connection* self)
{
pysqlite_Node* node;
pysqlite_Statement* statement;
node = self->statement_cache->first;
while (node) {
statement = (pysqlite_Statement*)(node->data);
(void)pysqlite_statement_finalize(statement);
node = node->next;
}
Py_SETREF(self->statement_cache,
(pysqlite_Cache *)PyObject_CallFunction((PyObject *)&pysqlite_CacheType, "O", self));
Py_DECREF(self);
self->statement_cache->decref_factory = 0;
}
/* action in (ACTION_RESET, ACTION_FINALIZE) */
/* Apply `action` (ACTION_RESET or ACTION_FINALIZE) to every live
 * statement tracked by the connection, and optionally flag every live
 * cursor as reset. */
void pysqlite_do_all_statements(pysqlite_Connection* self, int action, int reset_cursors)
{
    int idx;

    for (idx = 0; idx < PyList_Size(self->statements); idx++) {
        PyObject* ref = PyList_GetItem(self->statements, idx);
        PyObject* stmt = PyWeakref_GetObject(ref);

        if (stmt == Py_None)
            continue;   /* referent already gone */

        Py_INCREF(stmt);
        if (action == ACTION_RESET)
            (void)pysqlite_statement_reset((pysqlite_Statement*)stmt);
        else
            (void)pysqlite_statement_finalize((pysqlite_Statement*)stmt);
        Py_DECREF(stmt);
    }

    if (!reset_cursors)
        return;

    for (idx = 0; idx < PyList_Size(self->cursors); idx++) {
        PyObject* ref = PyList_GetItem(self->cursors, idx);
        pysqlite_Cursor* cur = (pysqlite_Cursor*)PyWeakref_GetObject(ref);

        if ((PyObject*)cur != Py_None)
            cur->reset = 1;
    }
}
/* Deallocator: close the database if still open, free the BEGIN
 * statement buffer, drop all owned Python references, then free self. */
void pysqlite_connection_dealloc(pysqlite_Connection* self)
{
Py_XDECREF(self->statement_cache);
/* Clean up if user has not called .close() explicitly. */
if (self->db) {
Py_BEGIN_ALLOW_THREADS
sqlite3_close(self->db);
#if defined(SPATIALITE_HAS_INIT_EX)
spatialite_cleanup_ex( self->slconn );
#endif
Py_END_ALLOW_THREADS
}
/* begin_statement was allocated with PyMem_Malloc elsewhere. */
if (self->begin_statement) {
PyMem_Free(self->begin_statement);
}
Py_XDECREF(self->isolation_level);
Py_XDECREF(self->function_pinboard);
Py_XDECREF(self->row_factory);
Py_XDECREF(self->text_factory);
Py_XDECREF(self->collations);
Py_XDECREF(self->statements);
Py_XDECREF(self->cursors);
Py_TYPE(self)->tp_free((PyObject*)self);
}
/*
 * Registers a cursor with the connection by appending a weak reference
 * to it onto connection->cursors.
 *
 * 0 => error; 1 => ok
 */
int pysqlite_connection_register_cursor(pysqlite_Connection* connection, PyObject* cursor)
{
    PyObject* weakref = PyWeakref_NewRef((PyObject*)cursor, NULL);

    if (!weakref)
        return 0;

    if (PyList_Append(connection->cursors, weakref) != 0) {
        Py_CLEAR(weakref);
        return 0;
    }

    /* The list now owns its own reference to the weakref. */
    Py_DECREF(weakref);
    return 1;
}
/* Connection.cursor([factory]): create a new cursor, using the given
 * factory callable (default: the Cursor type), and propagate the
 * connection's row_factory onto it.  Returns the new cursor or NULL. */
PyObject* pysqlite_connection_cursor(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
static char *kwlist[] = {"factory", NULL, NULL};
PyObject* factory = NULL;
PyObject* cursor;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist,
&factory)) {
return NULL;
}
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (factory == NULL) {
factory = (PyObject*)&pysqlite_CursorType;
}
cursor = PyObject_CallFunction(factory, "O", self);
/* Periodic housekeeping: prune dead cursor weakrefs. */
_pysqlite_drop_unused_cursor_references(self);
if (cursor && self->row_factory != Py_None) {
Py_INCREF(self->row_factory);
Py_SETREF(((pysqlite_Cursor *)cursor)->row_factory, self->row_factory);
}
return cursor;
}
/* Connection.close(): finalize all statements, reset all cursors, then
 * close the SQLite handle.  Returns None, or NULL on error. */
PyObject* pysqlite_connection_close(pysqlite_Connection* self, PyObject* args)
{
int rc;
if (!pysqlite_check_thread(self)) {
return NULL;
}
/* Statements must be finalized before sqlite3_close() can succeed. */
pysqlite_do_all_statements(self, ACTION_FINALIZE, 1);
if (self->db) {
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_close(self->db);
#if defined(SPATIALITE_HAS_INIT_EX)
spatialite_cleanup_ex( self->slconn );
#endif
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, NULL);
return NULL;
} else {
self->db = NULL;
}
}
Py_INCREF(Py_None);
return Py_None;
}
/*
 * Checks if a connection object is usable (i. e. not closed).
 * Sets a ProgrammingError on failure.
 *
 * 0 => error; 1 => ok
 */
int pysqlite_check_connection(pysqlite_Connection* con)
{
    if (!con->initialized) {
        PyErr_SetString(pysqlite_ProgrammingError, "Base Connection.__init__ not called.");
        return 0;
    }

    if (!con->db) {
        PyErr_SetString(pysqlite_ProgrammingError, "Cannot operate on a closed database.");
        return 0;
    }

    return 1;
}
/* Execute the connection's BEGIN statement to open a transaction.
 * Sets inTransaction on success.  Returns None, or NULL with a Python
 * exception set (checked via PyErr_Occurred() at the end). */
PyObject* _pysqlite_connection_begin(pysqlite_Connection* self)
{
int rc;
const char* tail;
sqlite3_stmt* statement;
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_prepare(self->db, self->begin_statement, -1, &statement, &tail);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, statement);
goto error;
}
rc = pysqlite_step(statement, self);
if (rc == SQLITE_DONE) {
self->inTransaction = 1;
} else {
_pysqlite_seterror(self->db, statement);
}
/* Finalize unconditionally; only report its error if none is pending. */
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_finalize(statement);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK && !PyErr_Occurred()) {
_pysqlite_seterror(self->db, NULL);
}
error:
if (PyErr_Occurred()) {
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
/* Connection.commit(): run a literal COMMIT if a transaction is open;
 * a no-op otherwise.  Returns None, or NULL with an exception set. */
PyObject* pysqlite_connection_commit(pysqlite_Connection* self, PyObject* args)
{
int rc;
const char* tail;
sqlite3_stmt* statement;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (self->inTransaction) {
/* Reset statements first so none keeps the database busy. */
pysqlite_do_all_statements(self, ACTION_RESET, 0);
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_prepare(self->db, "COMMIT", -1, &statement, &tail);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, NULL);
goto error;
}
rc = pysqlite_step(statement, self);
if (rc == SQLITE_DONE) {
self->inTransaction = 0;
} else {
_pysqlite_seterror(self->db, statement);
}
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_finalize(statement);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK && !PyErr_Occurred()) {
_pysqlite_seterror(self->db, NULL);
}
}
error:
if (PyErr_Occurred()) {
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
/* Connection.rollback(): run a literal ROLLBACK if a transaction is
 * open; a no-op otherwise.  Unlike commit, cursors are also flagged as
 * reset, since their result sets are invalidated.  Returns None, or
 * NULL with an exception set. */
PyObject* pysqlite_connection_rollback(pysqlite_Connection* self, PyObject* args)
{
int rc;
const char* tail;
sqlite3_stmt* statement;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (self->inTransaction) {
pysqlite_do_all_statements(self, ACTION_RESET, 1);
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_prepare(self->db, "ROLLBACK", -1, &statement, &tail);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, NULL);
goto error;
}
rc = pysqlite_step(statement, self);
if (rc == SQLITE_DONE) {
self->inTransaction = 0;
} else {
_pysqlite_seterror(self->db, statement);
}
Py_BEGIN_ALLOW_THREADS
rc = sqlite3_finalize(statement);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK && !PyErr_Occurred()) {
_pysqlite_seterror(self->db, NULL);
}
}
error:
if (PyErr_Occurred()) {
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
/* Convert a Python value into an SQLite result for a user-defined
 * function/aggregate.  None/NULL -> SQL NULL, int -> int64, float ->
 * double, str -> UTF-8 text, buffer-like -> blob.  Returns 0 on
 * success, -1 for unsupported types or conversion errors. */
static int
_pysqlite_set_result(sqlite3_context* context, PyObject* py_val)
{
if ( !py_val || PyErr_Occurred() || py_val == Py_None ) {
sqlite3_result_null(context);
} else if (PyLong_Check(py_val)) {
sqlite_int64 value = _pysqlite_long_as_int64(py_val);
if (value == -1 && PyErr_Occurred())
return -1;
sqlite3_result_int64(context, value);
#if PY_MAJOR_VERSION < 3
} else if (PyInt_Check(py_val)) {
long longval = PyInt_AsLong(py_val);
sqlite3_result_int64(context, (PY_LONG_LONG)longval);
#endif
} else if (PyFloat_Check(py_val)) {
sqlite3_result_double(context, PyFloat_AsDouble(py_val));
#if PY_MAJOR_VERSION < 3
} else if (PyBuffer_Check(py_val)) {
const char* buffer;
Py_ssize_t buflen;
if (PyObject_AsCharBuffer(py_val, &buffer, &buflen) != 0) {
PyErr_SetString(PyExc_ValueError, "could not convert BLOB to buffer");
} else {
sqlite3_result_blob(context, buffer, buflen, SQLITE_TRANSIENT);
}
} else if (PyString_Check(py_val)) {
sqlite3_result_text(context, PyString_AsString(py_val), -1, SQLITE_TRANSIENT);
#endif
} else if (PyUnicode_Check(py_val)) {
#if PY_MAJOR_VERSION < 3
PyObject *stringval = PyUnicode_AsUTF8String(py_val);
if (stringval) {
sqlite3_result_text(context, PyString_AsString(stringval), -1, SQLITE_TRANSIENT);
Py_DECREF(stringval);
}
#else
const char *str = _PyUnicode_AsString(py_val);
if (str == NULL)
return -1;
sqlite3_result_text(context, str, -1, SQLITE_TRANSIENT);
#endif
} else if (PyObject_CheckBuffer(py_val)) {
Py_buffer view;
if (PyObject_GetBuffer(py_val, &view, PyBUF_SIMPLE) != 0) {
PyErr_SetString(PyExc_ValueError,
"could not convert BLOB to buffer");
return -1;
}
/* sqlite3_result_blob takes an int length, so reject huge blobs. */
if (view.len > INT_MAX) {
PyErr_SetString(PyExc_OverflowError,
"BLOB longer than INT_MAX bytes");
PyBuffer_Release(&view);
return -1;
}
sqlite3_result_blob(context, view.buf, (int)view.len, SQLITE_TRANSIENT);
PyBuffer_Release(&view);
} else {
return -1;
}
return 0;
}
/* Build a Python tuple from the SQLite argument values passed to a
 * user-defined function/aggregate.  Integers -> int, floats -> float,
 * text -> str, blobs -> bytes, NULL -> None.  Returns a new tuple, or
 * NULL on allocation failure. */
PyObject* _pysqlite_build_py_params(sqlite3_context *context, int argc, sqlite3_value** argv)
{
PyObject* args;
int i;
sqlite3_value* cur_value;
PyObject* cur_py_value;
const char* val_str;
Py_ssize_t buflen;
args = PyTuple_New(argc);
if (!args) {
return NULL;
}
for (i = 0; i < argc; i++) {
cur_value = argv[i];
switch (sqlite3_value_type(argv[i])) {
case SQLITE_INTEGER:
cur_py_value = _pysqlite_long_from_int64(sqlite3_value_int64(cur_value));
break;
case SQLITE_FLOAT:
cur_py_value = PyFloat_FromDouble(sqlite3_value_double(cur_value));
break;
case SQLITE_TEXT:
val_str = (const char*)sqlite3_value_text(cur_value);
cur_py_value = PyUnicode_FromString(val_str);
/* TODO: have a way to show errors here */
if (!cur_py_value) {
PyErr_Clear();
Py_INCREF(Py_None);
cur_py_value = Py_None;
}
break;
case SQLITE_BLOB:
buflen = sqlite3_value_bytes(cur_value);
cur_py_value = PyBytes_FromStringAndSize(
sqlite3_value_blob(cur_value), buflen);
break;
case SQLITE_NULL:
default:
Py_INCREF(Py_None);
cur_py_value = Py_None;
}
if (!cur_py_value) {
Py_DECREF(args);
return NULL;
}
/* PyTuple_SetItem steals the reference to cur_py_value. */
PyTuple_SetItem(args, i, cur_py_value);
}
return args;
}
/* SQLite trampoline for user-defined scalar functions: acquire the GIL,
 * call the Python callable stored as user data with the converted
 * arguments, and hand its return value back to SQLite.  Any Python
 * error is reported to SQLite as a function error. */
void _pysqlite_func_callback(sqlite3_context* context, int argc, sqlite3_value** argv)
{
PyObject* args;
PyObject* py_func;
PyObject* py_retval = NULL;
int ok;
#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
#endif
py_func = (PyObject*)sqlite3_user_data(context);
args = _pysqlite_build_py_params(context, argc, argv);
if (args) {
py_retval = PyObject_CallObject(py_func, args);
Py_DECREF(args);
}
ok = 0;
if (py_retval) {
ok = _pysqlite_set_result(context, py_retval) == 0;
Py_DECREF(py_retval);
}
if (!ok) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
_sqlite3_result_error(context, "user-defined function raised exception", -1);
}
#ifdef WITH_THREAD
PyGILState_Release(threadstate);
#endif
}
/* SQLite "step" trampoline for user-defined aggregates: lazily
 * instantiate the aggregate class on the first row (stored in SQLite's
 * aggregate context), then call its step() method with the converted
 * row values.  Errors from __init__ or step() are reported to SQLite. */
static void _pysqlite_step_callback(sqlite3_context *context, int argc, sqlite3_value** params)
{
PyObject* args;
PyObject* function_result = NULL;
PyObject* aggregate_class;
PyObject** aggregate_instance;
PyObject* stepmethod = NULL;
#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
#endif
aggregate_class = (PyObject*)sqlite3_user_data(context);
/* sqlite3_aggregate_context zero-fills the slot on first call. */
aggregate_instance = (PyObject**)sqlite3_aggregate_context(context, sizeof(PyObject*));
if (*aggregate_instance == 0) {
*aggregate_instance = PyObject_CallFunction(aggregate_class, "");
if (PyErr_Occurred()) {
*aggregate_instance = 0;
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
_sqlite3_result_error(context, "user-defined aggregate's '__init__' method raised error", -1);
goto error;
}
}
stepmethod = PyObject_GetAttrString(*aggregate_instance, "step");
if (!stepmethod) {
goto error;
}
args = _pysqlite_build_py_params(context, argc, params);
if (!args) {
goto error;
}
function_result = PyObject_CallObject(stepmethod, args);
Py_DECREF(args);
if (!function_result) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
_sqlite3_result_error(context, "user-defined aggregate's 'step' method raised error", -1);
}
error:
Py_XDECREF(stepmethod);
Py_XDECREF(function_result);
#ifdef WITH_THREAD
PyGILState_Release(threadstate);
#endif
}
/* SQLite "finalize" trampoline for user-defined aggregates: call the
 * instance's finalize() method and hand its result to SQLite, while
 * carefully preserving any exception raised by the last step() call. */
void _pysqlite_final_callback(sqlite3_context* context)
{
PyObject* function_result;
PyObject** aggregate_instance;
#if PY_MAJOR_VERSION >= 3
_Py_IDENTIFIER(finalize);
#endif
int ok;
PyObject *exception, *value, *tb;
int restore;
#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
#endif
aggregate_instance = (PyObject**)sqlite3_aggregate_context(context, sizeof(PyObject*));
if (!*aggregate_instance) {
/* this branch is executed if there was an exception in the aggregate's
* __init__ */
goto error;
}
/* Keep the exception (if any) of the last call to step() */
PyErr_Fetch(&exception, &value, &tb);
restore = 1;
#if PY_MAJOR_VERSION < 3
function_result = PyObject_CallMethod(*aggregate_instance, "finalize", "");
#else
function_result = _PyObject_CallMethodId(*aggregate_instance, &PyId_finalize, "");
#endif
Py_DECREF(*aggregate_instance);
ok = 0;
if (function_result) {
ok = _pysqlite_set_result(context, function_result) == 0;
Py_DECREF(function_result);
}
if (!ok) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
_sqlite3_result_error(context, "user-defined aggregate's 'finalize' method raised error", -1);
#if SQLITE_VERSION_NUMBER < 3003003
/* with old SQLite versions, _sqlite3_result_error() sets a new Python
exception, so don't restore the previous exception */
restore = 0;
#endif
}
if (restore) {
/* Restore the exception (if any) of the last call to step(),
but clear also the current exception if finalize() failed */
PyErr_Restore(exception, value, tb);
}
error:
#ifdef WITH_THREAD
PyGILState_Release(threadstate);
#endif
/* explicit return to avoid a compilation error if WITH_THREAD
is not defined */
return;
}
/* Prune dead weak references from self->statements.  Runs only every
 * 200 created statements to keep the common path cheap. */
static void _pysqlite_drop_unused_statement_references(pysqlite_Connection* self)
{
    PyObject* kept;
    int i;

    /* we only need to do this once in a while */
    if (self->created_statements++ < 200) {
        return;
    }
    self->created_statements = 0;

    kept = PyList_New(0);
    if (!kept) {
        return;
    }

    for (i = 0; i < PyList_Size(self->statements); i++) {
        PyObject* ref = PyList_GetItem(self->statements, i);
        if (PyWeakref_GetObject(ref) == Py_None) {
            continue;   /* referent is gone; drop the stale weakref */
        }
        if (PyList_Append(kept, ref) != 0) {
            Py_DECREF(kept);
            return;
        }
    }

    Py_SETREF(self->statements, kept);
}
/* Prune dead weak references from self->cursors.  Runs only every
 * 200 created cursors to keep the common path cheap. */
static void _pysqlite_drop_unused_cursor_references(pysqlite_Connection* self)
{
    PyObject* kept;
    int i;

    /* we only need to do this once in a while */
    if (self->created_cursors++ < 200) {
        return;
    }
    self->created_cursors = 0;

    kept = PyList_New(0);
    if (!kept) {
        return;
    }

    for (i = 0; i < PyList_Size(self->cursors); i++) {
        PyObject* ref = PyList_GetItem(self->cursors, i);
        if (PyWeakref_GetObject(ref) == Py_None) {
            continue;   /* referent is gone; drop the stale weakref */
        }
        if (PyList_Append(kept, ref) != 0) {
            Py_DECREF(kept);
            return;
        }
    }

    Py_SETREF(self->cursors, kept);
}
/* Connection.create_function(name, narg, func): register a Python
 * callable as an SQLite scalar function.  The callable is pinned in
 * function_pinboard so it outlives the raw pointer SQLite keeps. */
PyObject* pysqlite_connection_create_function(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
static char *kwlist[] = {"name", "narg", "func", NULL, NULL};
PyObject* func;
char* name;
int narg;
int rc;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "siO", kwlist,
&name, &narg, &func))
{
return NULL;
}
rc = sqlite3_create_function(self->db, name, narg, SQLITE_UTF8, (void*)func, _pysqlite_func_callback, NULL, NULL);
if (rc != SQLITE_OK) {
/* Workaround for SQLite bug: no error code or string is available here */
PyErr_SetString(pysqlite_OperationalError, "Error creating function");
return NULL;
} else {
if (PyDict_SetItem(self->function_pinboard, func, Py_None) == -1)
return NULL;
Py_INCREF(Py_None);
return Py_None;
}
}
/* Connection.create_aggregate(name, n_arg, aggregate_class): register a
 * Python class as an SQLite aggregate.  The class is pinned in
 * function_pinboard so it outlives the raw pointer SQLite keeps. */
PyObject* pysqlite_connection_create_aggregate(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
PyObject* aggregate_class;
int n_arg;
char* name;
static char *kwlist[] = { "name", "n_arg", "aggregate_class", NULL };
int rc;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "siO:create_aggregate",
kwlist, &name, &n_arg, &aggregate_class)) {
return NULL;
}
rc = sqlite3_create_function(self->db, name, n_arg, SQLITE_UTF8, (void*)aggregate_class, 0, &_pysqlite_step_callback, &_pysqlite_final_callback);
if (rc != SQLITE_OK) {
/* Workaround for SQLite bug: no error code or string is available here */
PyErr_SetString(pysqlite_OperationalError, "Error creating aggregate");
return NULL;
} else {
if (PyDict_SetItem(self->function_pinboard, aggregate_class, Py_None) == -1)
return NULL;
Py_INCREF(Py_None);
return Py_None;
}
}
/* SQLite authorizer trampoline: call the Python authorizer with the
 * action code and the four context strings, expecting an int result
 * (SQLITE_OK/DENY/IGNORE).  Any error or non-int return maps to
 * SQLITE_DENY. */
static int _authorizer_callback(void* user_arg, int action, const char* arg1, const char* arg2 , const char* dbname, const char* access_attempt_source)
{
PyObject *ret;
int rc;
#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
#endif
ret = PyObject_CallFunction((PyObject*)user_arg, "issss", action, arg1, arg2, dbname, access_attempt_source);
if (ret == NULL) {
if (_enable_callback_tracebacks)
PyErr_Print();
else
PyErr_Clear();
rc = SQLITE_DENY;
}
else {
if (PyLong_Check(ret)) {
rc = _PyLong_AsInt(ret);
if (rc == -1 && PyErr_Occurred()) {
if (_enable_callback_tracebacks)
PyErr_Print();
else
PyErr_Clear();
rc = SQLITE_DENY;
}
}
else {
rc = SQLITE_DENY;
}
Py_DECREF(ret);
}
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
return rc;
}
/* SQLite progress-handler trampoline: call the Python handler with no
 * arguments; a truthy return (or an error) makes SQLite abort the
 * current query. */
static int _progress_handler(void* user_arg)
{
int rc;
PyObject *ret;
#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
#endif
ret = PyObject_CallFunction((PyObject*)user_arg, "");
if (!ret) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
/* abort query if error occurred */
rc = 1;
} else {
rc = (int)PyObject_IsTrue(ret);
Py_DECREF(ret);
}
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
return rc;
}
/* SQLite trace trampoline: decode the executed SQL statement as UTF-8
 * (with replacement for invalid bytes) and pass it to the Python trace
 * callback.  Errors are printed or swallowed, never propagated. */
static void _trace_callback(void* user_arg, const char* statement_string)
{
PyObject *py_statement = NULL;
PyObject *ret = NULL;
#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
#endif
py_statement = PyUnicode_DecodeUTF8(statement_string,
strlen(statement_string), "replace");
if (py_statement) {
ret = PyObject_CallFunctionObjArgs((PyObject*)user_arg, py_statement, NULL);
Py_DECREF(py_statement);
}
if (ret) {
Py_DECREF(ret);
} else {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
}
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
/*
 * Connection.set_authorizer(authorizer_callback)
 *
 * Installs a Python authorizer callback on this connection.  The callback
 * is pinned in self->function_pinboard so it stays alive while installed.
 * Returns None, or NULL with an exception set on failure.
 */
static PyObject* pysqlite_connection_set_authorizer(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
    static char *kwlist[] = { "authorizer_callback", NULL };
    PyObject* authorizer_cb;

    if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self))
        return NULL;
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:set_authorizer",
                                     kwlist, &authorizer_cb))
        return NULL;

    if (sqlite3_set_authorizer(self->db, _authorizer_callback,
                               (void*)authorizer_cb) != SQLITE_OK) {
        PyErr_SetString(pysqlite_OperationalError, "Error setting authorizer callback");
        return NULL;
    }
    /* Keep the callback alive for the lifetime of the connection. */
    if (PyDict_SetItem(self->function_pinboard, authorizer_cb, Py_None) == -1)
        return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Connection.set_progress_handler(progress_handler, n)
 *
 * Installs a Python callable invoked roughly every n SQLite VM opcodes;
 * passing None removes any previously installed handler.  Returns None.
 */
static PyObject* pysqlite_connection_set_progress_handler(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
    static char *kwlist[] = { "progress_handler", "n", NULL };
    PyObject* progress_handler;
    int n;

    if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self))
        return NULL;
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi:set_progress_handler",
                                     kwlist, &progress_handler, &n))
        return NULL;

    if (progress_handler == Py_None) {
        /* None clears the progress handler previously set */
        sqlite3_progress_handler(self->db, 0, 0, (void*)0);
    } else {
        sqlite3_progress_handler(self->db, n, _progress_handler, progress_handler);
        /* Pin the callable so it outlives this call. */
        if (PyDict_SetItem(self->function_pinboard, progress_handler, Py_None) == -1)
            return NULL;
    }
    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Connection.set_trace_callback(trace_callback)
 *
 * Installs a Python callable invoked with the text of each executed SQL
 * statement; passing None removes the callback.  Returns None.
 */
static PyObject* pysqlite_connection_set_trace_callback(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
    static char *kwlist[] = { "trace_callback", NULL };
    PyObject* trace_callback;

    if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self))
        return NULL;
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:set_trace_callback",
                                     kwlist, &trace_callback))
        return NULL;

    if (trace_callback == Py_None) {
        /* None clears the trace callback previously set */
        sqlite3_trace(self->db, 0, (void*)0);
    } else {
        /* Pin the callable first, then install the hook. */
        if (PyDict_SetItem(self->function_pinboard, trace_callback, Py_None) == -1)
            return NULL;
        sqlite3_trace(self->db, _trace_callback, trace_callback);
    }
    Py_INCREF(Py_None);
    return Py_None;
}
#ifdef HAVE_LOAD_EXTENSION
/*
 * Connection.enable_load_extension(onoff)
 *
 * Toggles SQLite's ability to load run-time extensions on this
 * connection.  Returns None, or NULL with OperationalError on failure.
 */
static PyObject* pysqlite_enable_load_extension(pysqlite_Connection* self, PyObject* args)
{
    int onoff;

    if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self))
        return NULL;
    if (!PyArg_ParseTuple(args, "i", &onoff))
        return NULL;

    if (sqlite3_enable_load_extension(self->db, onoff) != SQLITE_OK) {
        PyErr_SetString(pysqlite_OperationalError, "Error enabling load extension");
        return NULL;
    }
    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Connection.load_extension(name)
 *
 * Loads the named SQLite extension into this connection.  Returns None,
 * or NULL with OperationalError carrying SQLite's error message.
 */
static PyObject* pysqlite_load_extension(pysqlite_Connection* self, PyObject* args)
{
    int rc;
    char* extension_name;
    char* errmsg;

    if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
        return NULL;
    }
    if (!PyArg_ParseTuple(args, "s", &extension_name)) {
        return NULL;
    }

    rc = sqlite3_load_extension(self->db, extension_name, 0, &errmsg);
    if (rc != 0) {
        /* errmsg may be NULL per the SQLite docs; guard before use. */
        PyErr_SetString(pysqlite_OperationalError,
                        errmsg ? errmsg : "unable to load extension");
        /* BUGFIX: the message is allocated by SQLite and must be released
         * with sqlite3_free(); the original code leaked it. */
        sqlite3_free(errmsg);
        return NULL;
    } else {
        Py_INCREF(Py_None);
        return Py_None;
    }
}
#endif
/*
 * Verify that the connection is used from the thread that created it
 * (when check_same_thread is enabled).  Returns 1 when usage is OK,
 * otherwise raises ProgrammingError and returns 0.
 */
int pysqlite_check_thread(pysqlite_Connection* self)
{
#ifdef WITH_THREAD
    if (self->check_same_thread
            && PyThread_get_thread_ident() != self->thread_ident) {
        PyErr_Format(pysqlite_ProgrammingError,
                     "SQLite objects created in a thread can only be used in that same thread."
                     "The object was created in thread id %ld and this is thread id %ld",
                     self->thread_ident, PyThread_get_thread_ident());
        return 0;
    }
#endif
    return 1;
}
/* Getter for Connection.isolation_level: returns a new reference. */
static PyObject* pysqlite_connection_get_isolation_level(pysqlite_Connection* self, void* unused)
{
    PyObject* level = self->isolation_level;
    Py_INCREF(level);
    return level;
}
/* Getter for Connection.total_changes: rows modified since the
 * connection was opened, as reported by sqlite3_total_changes(). */
static PyObject* pysqlite_connection_get_total_changes(pysqlite_Connection* self, void* unused)
{
    if (!pysqlite_check_connection(self))
        return NULL;
    return Py_BuildValue("i", sqlite3_total_changes(self->db));
}
/*
 * Setter for Connection.isolation_level.
 *
 * None switches the connection to autocommit mode (committing any
 * pending transaction); a string S installs "BEGIN S" as the statement
 * used to open implicit transactions.  Returns 0 on success, -1 on
 * error with a Python exception set.
 */
static int pysqlite_connection_set_isolation_level(pysqlite_Connection* self, PyObject* isolation_level)
{
    PyObject* res;
    PyObject* begin_statement;
    const char *begin_statement_str;

    Py_XDECREF(self->isolation_level);
    if (self->begin_statement) {
        PyMem_Free(self->begin_statement);
        self->begin_statement = NULL;
    }
    if (isolation_level == Py_None) {
        Py_INCREF(Py_None);
        self->isolation_level = Py_None;

        /* Switching to autocommit commits any pending transaction. */
        res = pysqlite_connection_commit(self, NULL);
        if (!res) {
            return -1;
        }
        Py_DECREF(res);
        self->inTransaction = 0;
    } else {
        PyObject* concatenated;
        Py_ssize_t size;

        Py_INCREF(isolation_level);
        self->isolation_level = isolation_level;

        begin_statement = PyUnicode_FromString("BEGIN ");
        if (!begin_statement) {
            return -1;
        }
        /* BUGFIX: PyUnicode_Concat returns a *new* string object; the
         * original code discarded the result, so the isolation level was
         * never appended and the concatenated string leaked. */
        concatenated = PyUnicode_Concat(begin_statement, isolation_level);
        Py_DECREF(begin_statement);
        begin_statement = concatenated;
        if (!begin_statement) {
            return -1;
        }
#if PY_MAJOR_VERSION < 3
        begin_statement_str = PyString_AsString(begin_statement);
        if(begin_statement_str)
            size = strlen(begin_statement_str);
#else
        begin_statement_str = _PyUnicode_AsStringAndSize(begin_statement, &size);
#endif
        if(!begin_statement_str) {
            Py_DECREF(begin_statement);
            return -1;
        }
        /* size + 2 leaves room for the terminating NUL plus slack. */
        self->begin_statement = PyMem_Malloc(size + 2);
        if (!self->begin_statement) {
            Py_DECREF(begin_statement);
            return -1;
        }
        strcpy(self->begin_statement, begin_statement_str);
        Py_DECREF(begin_statement);
    }
    return 0;
}
/*
 * tp_call slot for Connection: con(sql) prepares and returns a new
 * Statement object for the given SQL string.  The statement is also
 * tracked via a weak reference in self->statements so the connection
 * can reset/invalidate it later.  Returns NULL with an exception set
 * on failure.
 */
PyObject* pysqlite_connection_call(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
{
PyObject* sql;
pysqlite_Statement* statement;
PyObject* weakref;
int rc;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
return NULL;
}
if (!_PyArg_NoKeywords(MODULE_NAME ".Connection()", kwargs))
return NULL;
if (!PyArg_ParseTuple(args, "O", &sql)) {
return NULL;
}
/* Prune dead weakrefs before adding another statement. */
_pysqlite_drop_unused_statement_references(self);
statement = PyObject_New(pysqlite_Statement, &pysqlite_StatementType);
if (!statement) {
return NULL;
}
/* Zero all fields first so the error path can safely dealloc. */
statement->db = NULL;
statement->st = NULL;
statement->sql = NULL;
statement->in_use = 0;
statement->in_weakreflist = NULL;
rc = pysqlite_statement_create(statement, self, sql);
if (rc != SQLITE_OK) {
if (rc == PYSQLITE_TOO_MUCH_SQL) {
PyErr_SetString(pysqlite_Warning, "You can only execute one statement at a time.");
} else if (rc == PYSQLITE_SQL_WRONG_TYPE) {
/* Only override a TypeError; keep any other pending exception. */
if (PyErr_ExceptionMatches(PyExc_TypeError))
PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string.");
} else {
(void)pysqlite_statement_reset(statement);
_pysqlite_seterror(self->db, NULL);
}
goto error;
}
/* Track the statement weakly; the caller owns the strong reference. */
weakref = PyWeakref_NewRef((PyObject*)statement, NULL);
if (weakref == NULL)
goto error;
if (PyList_Append(self->statements, weakref) != 0) {
Py_DECREF(weakref);
goto error;
}
Py_DECREF(weakref);
return (PyObject*)statement;
error:
Py_DECREF(statement);
return NULL;
}
/*
 * Connection.execute(...): convenience shortcut that creates a cursor,
 * calls cursor.execute(*args) on it and returns the cursor (or NULL on
 * any failure).
 */
PyObject* pysqlite_connection_execute(pysqlite_Connection* self, PyObject* args)
{
    PyObject* cursor;
    PyObject* method = NULL;
    PyObject* result = NULL;

#if PY_MAJOR_VERSION < 3
    cursor = PyObject_CallMethod((PyObject*)self, "cursor", "");
#else
    cursor = _PyObject_CallMethodId((PyObject*)self, &PyId_cursor, "");
#endif
    if (cursor != NULL) {
        method = PyObject_GetAttrString(cursor, "execute");
        if (method == NULL) {
            Py_CLEAR(cursor);
        } else {
            result = PyObject_CallObject(method, args);
            if (result == NULL)
                Py_CLEAR(cursor);   /* propagate the failure */
        }
    }
    Py_XDECREF(result);
    Py_XDECREF(method);
    return cursor;
}
/*
 * Connection.executemany(...): convenience shortcut that creates a
 * cursor, calls cursor.executemany(*args) and returns the cursor (or
 * NULL on any failure).
 */
PyObject* pysqlite_connection_executemany(pysqlite_Connection* self, PyObject* args)
{
    PyObject* cursor;
    PyObject* method = NULL;
    PyObject* result = NULL;

#if PY_MAJOR_VERSION < 3
    cursor = PyObject_CallMethod((PyObject*)self, "cursor", "");
#else
    cursor = _PyObject_CallMethodId((PyObject*)self, &PyId_cursor, "");
#endif
    if (cursor != NULL) {
        method = PyObject_GetAttrString(cursor, "executemany");
        if (method == NULL) {
            Py_CLEAR(cursor);
        } else {
            result = PyObject_CallObject(method, args);
            if (result == NULL)
                Py_CLEAR(cursor);   /* propagate the failure */
        }
    }
    Py_XDECREF(result);
    Py_XDECREF(method);
    return cursor;
}
/*
 * Connection.executescript(...): convenience shortcut that creates a
 * cursor, calls cursor.executescript(*args) and returns the cursor (or
 * NULL on any failure).
 */
PyObject* pysqlite_connection_executescript(pysqlite_Connection* self, PyObject* args)
{
    PyObject* cursor;
    PyObject* method = NULL;
    PyObject* result = NULL;

#if PY_MAJOR_VERSION < 3
    cursor = PyObject_CallMethod((PyObject*)self, "cursor", "");
#else
    cursor = _PyObject_CallMethodId((PyObject*)self, &PyId_cursor, "");
#endif
    if (cursor != NULL) {
        method = PyObject_GetAttrString(cursor, "executescript");
        if (method == NULL) {
            Py_CLEAR(cursor);
        } else {
            result = PyObject_CallObject(method, args);
            if (result == NULL)
                Py_CLEAR(cursor);   /* propagate the failure */
        }
    }
    Py_XDECREF(result);
    Py_XDECREF(method);
    return cursor;
}
/* ------------------------- COLLATION CODE ------------------------ */
/*
 * SQLite collation trampoline: decodes both byte buffers as UTF-8,
 * calls the Python comparator, and reduces its integer result to
 * -1/0/1.  On any failure (pending error, allocation failure, callback
 * error, overflow) the comparison returns 0 ("equal") so SQLite keeps
 * running.
 */
static int
pysqlite_collation_callback(
void* context,
int text1_length, const void* text1_data,
int text2_length, const void* text2_data)
{
PyObject* callback = (PyObject*)context;
PyObject* string1 = 0;
PyObject* string2 = 0;
#ifdef WITH_THREAD
PyGILState_STATE gilstate;
#endif
PyObject* retval = NULL;
long longval;
int result = 0;
#ifdef WITH_THREAD
gilstate = PyGILState_Ensure();
#endif
/* Do nothing if an earlier callback already left an exception pending. */
if (PyErr_Occurred()) {
goto finally;
}
string1 = PyUnicode_FromStringAndSize((const char*)text1_data, text1_length);
string2 = PyUnicode_FromStringAndSize((const char*)text2_data, text2_length);
if (!string1 || !string2) {
goto finally; /* failed to allocate strings */
}
retval = PyObject_CallFunctionObjArgs(callback, string1, string2, NULL);
if (!retval) {
/* execution failed */
goto finally;
}
/* NOTE: `result` doubles as the overflow flag here: on overflow it is
 * set to +/-1, which conveniently is also the right sign to return. */
longval = PyLong_AsLongAndOverflow(retval, &result);
if (longval == -1 && PyErr_Occurred()) {
PyErr_Clear();
result = 0;
}
else if (!result) {
/* No overflow: clamp the comparator's value to its sign. */
if (longval > 0)
result = 1;
else if (longval < 0)
result = -1;
}
finally:
Py_XDECREF(string1);
Py_XDECREF(string2);
Py_XDECREF(retval);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
return result;
}
/*
 * Connection.interrupt(): asks SQLite to abort any query currently
 * running on this connection.  Returns None.
 */
static PyObject *
pysqlite_connection_interrupt(pysqlite_Connection* self, PyObject* args)
{
    if (!pysqlite_check_connection(self))
        return NULL;
    sqlite3_interrupt(self->db);
    Py_INCREF(Py_None);
    return Py_None;
}
/* Function author: Paul Kippes <kippesp@gmail.com>
 *
 * Connection.iterdump() -> iterator
 *
 * Delegates to the pure-Python _iterdump() helper in the module's .dump
 * submodule and returns whatever iterator it produces.
 *
 * BUGFIX: the original reused the "args" parameter to hold the freshly
 * built call tuple and Py_XDECREF'd it in the cleanup path.  The method
 * is registered METH_NOARGS (args is NULL on entry) so this happened to
 * work, but decref'ing the parameter slot would corrupt refcounts if
 * the method flags ever changed; a dedicated local is used instead.
 */
static PyObject *
pysqlite_connection_iterdump(pysqlite_Connection* self, PyObject* args)
{
    PyObject* retval = NULL;
    PyObject* module = NULL;
    PyObject* call_args = NULL;
    PyObject* module_dict;
    PyObject* pyfn_iterdump;

    if (!pysqlite_check_connection(self)) {
        goto finally;
    }
    module = PyImport_ImportModule(MODULE_NAME ".dump");
    if (!module) {
        goto finally;
    }
    module_dict = PyModule_GetDict(module);   /* borrowed reference */
    if (!module_dict) {
        goto finally;
    }
    pyfn_iterdump = PyDict_GetItemString(module_dict, "_iterdump");  /* borrowed */
    if (!pyfn_iterdump) {
        PyErr_SetString(pysqlite_OperationalError, "Failed to obtain _iterdump() reference");
        goto finally;
    }
    call_args = PyTuple_New(1);
    if (!call_args) {
        goto finally;
    }
    Py_INCREF(self);
    PyTuple_SetItem(call_args, 0, (PyObject*)self);  /* steals the reference */
    retval = PyObject_CallObject(pyfn_iterdump, call_args);
finally:
    Py_XDECREF(call_args);
    Py_XDECREF(module);
    return retval;
}
/*
 * Connection.create_collation(name, callback)
 *
 * Registers (callback != None) or removes (callback == None) a custom
 * collation under NAME.  The name is upper-cased and restricted to
 * [0-9A-Z_]; the callable is kept alive via self->collations.  Returns
 * None on success, NULL with an exception set on failure.
 */
static PyObject *
pysqlite_connection_create_collation(pysqlite_Connection* self, PyObject* args)
{
PyObject* callable;
PyObject* uppercase_name = 0;
PyObject* name;
PyObject* retval;
Py_ssize_t i, len;
#if PY_MAJOR_VERSION < 3
char* chk;
#else
_Py_IDENTIFIER(upper);
#endif
char *uppercase_name_str;
int rc;
unsigned int kind;
void *data;
if (!pysqlite_check_thread(self) || !pysqlite_check_connection(self)) {
goto finally;
}
if (!PyArg_ParseTuple(args, "O!O:create_collation(name, callback)", &PyUnicode_Type, &name, &callable)) {
goto finally;
}
/* Collation names are case-insensitive; normalize to upper case. */
#if PY_MAJOR_VERSION < 3
uppercase_name = PyObject_CallMethod(name, "upper", "");
#else
uppercase_name = _PyObject_CallMethodId(name, &PyId_upper, "");
#endif
if (!uppercase_name) {
goto finally;
}
/* Reject any character outside [0-9A-Z_]. */
#if PY_MAJOR_VERSION < 3
chk = PyString_AsString(uppercase_name);
while (*chk) {
if ((*chk >= '0' && *chk <= '9')
|| (*chk >= 'A' && *chk <= 'Z')
|| (*chk == '_'))
{
chk++;
} else {
PyErr_SetString(pysqlite_ProgrammingError, "invalid character in collation name");
goto finally;
}
}
uppercase_name_str = PyString_AsString(uppercase_name);
#else
if (PyUnicode_READY(uppercase_name))
goto finally;
len = PyUnicode_GET_LENGTH(uppercase_name);
kind = PyUnicode_KIND(uppercase_name);
data = PyUnicode_DATA(uppercase_name);
for (i=0; i<len; i++) {
Py_UCS4 ch = PyUnicode_READ(kind, data, i);
if ((ch >= '0' && ch <= '9')
|| (ch >= 'A' && ch <= 'Z')
|| (ch == '_'))
{
continue;
} else {
PyErr_SetString(pysqlite_ProgrammingError, "invalid character in collation name");
goto finally;
}
}
uppercase_name_str = _PyUnicode_AsString(uppercase_name);
#endif
if (!uppercase_name_str)
goto finally;
if (callable != Py_None && !PyCallable_Check(callable)) {
PyErr_SetString(PyExc_TypeError, "parameter must be callable");
goto finally;
}
/* Pin (or unpin) the callable in the collations dict so it stays alive
 * while registered with SQLite. */
if (callable != Py_None) {
if (PyDict_SetItem(self->collations, uppercase_name, callable) == -1)
goto finally;
} else {
if (PyDict_DelItem(self->collations, uppercase_name) == -1)
goto finally;
}
rc = sqlite3_create_collation(self->db,
uppercase_name_str,
SQLITE_UTF8,
(callable != Py_None) ? callable : NULL,
(callable != Py_None) ? pysqlite_collation_callback : NULL);
if (rc != SQLITE_OK) {
/* Registration failed: undo the pinboard entry and raise. */
PyDict_DelItem(self->collations, uppercase_name);
_pysqlite_seterror(self->db, NULL);
goto finally;
}
finally:
Py_XDECREF(uppercase_name);
/* Success is signalled by the absence of a pending exception. */
if (PyErr_Occurred()) {
retval = NULL;
} else {
Py_INCREF(Py_None);
retval = Py_None;
}
return retval;
}
/* __enter__ for the context-manager protocol: returns the connection
 * itself (as a new reference) for the caller's convenience. */
static PyObject *
pysqlite_connection_enter(pysqlite_Connection* self, PyObject* args)
{
    PyObject *con = (PyObject *)self;
    Py_INCREF(con);
    return con;
}
/* __exit__ for the context-manager protocol: commits when the with-block
 * exited cleanly, rolls back when an exception is propagating.  Always
 * returns False so exceptions are never suppressed. */
static PyObject *
pysqlite_connection_exit(pysqlite_Connection* self, PyObject* args)
{
    PyObject *exc_type, *exc_value, *exc_tb;
    PyObject *call_result;
    char *method_name;

    if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
        return NULL;
    }

    /* All three None => clean exit => commit; otherwise roll back. */
    method_name = (exc_type == Py_None && exc_value == Py_None
                   && exc_tb == Py_None) ? "commit" : "rollback";

    call_result = PyObject_CallMethod((PyObject*)self, method_name, "");
    if (call_result == NULL) {
        return NULL;
    }
    Py_DECREF(call_result);
    Py_RETURN_FALSE;
}
static char connection_doc[] =
PyDoc_STR("SQLite database connection object.");
/* Property table: isolation_level is read/write, total_changes read-only. */
static PyGetSetDef connection_getset[] = {
{"isolation_level", (getter)pysqlite_connection_get_isolation_level, (setter)pysqlite_connection_set_isolation_level},
{"total_changes", (getter)pysqlite_connection_get_total_changes, (setter)0},
{NULL}
};
/* Method table for the Connection type; load-extension entries are only
 * present when the module is built with HAVE_LOAD_EXTENSION. */
static PyMethodDef connection_methods[] = {
{"cursor", (PyCFunction)pysqlite_connection_cursor, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Return a cursor for the connection.")},
{"close", (PyCFunction)pysqlite_connection_close, METH_NOARGS,
PyDoc_STR("Closes the connection.")},
{"commit", (PyCFunction)pysqlite_connection_commit, METH_NOARGS,
PyDoc_STR("Commit the current transaction.")},
{"rollback", (PyCFunction)pysqlite_connection_rollback, METH_NOARGS,
PyDoc_STR("Roll back the current transaction.")},
{"create_function", (PyCFunction)pysqlite_connection_create_function, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Creates a new function. Non-standard.")},
{"create_aggregate", (PyCFunction)pysqlite_connection_create_aggregate, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Creates a new aggregate. Non-standard.")},
{"set_authorizer", (PyCFunction)pysqlite_connection_set_authorizer, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Sets authorizer callback. Non-standard.")},
#ifdef HAVE_LOAD_EXTENSION
{"enable_load_extension", (PyCFunction)pysqlite_enable_load_extension, METH_VARARGS,
PyDoc_STR("Enable dynamic loading of SQLite extension modules. Non-standard.")},
{"load_extension", (PyCFunction)pysqlite_load_extension, METH_VARARGS,
PyDoc_STR("Load SQLite extension module. Non-standard.")},
#endif
{"set_progress_handler", (PyCFunction)pysqlite_connection_set_progress_handler, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Sets progress handler callback. Non-standard.")},
{"set_trace_callback", (PyCFunction)pysqlite_connection_set_trace_callback, METH_VARARGS|METH_KEYWORDS,
PyDoc_STR("Sets a trace callback called for each SQL statement (passed as unicode). Non-standard.")},
{"execute", (PyCFunction)pysqlite_connection_execute, METH_VARARGS,
PyDoc_STR("Executes a SQL statement. Non-standard.")},
{"executemany", (PyCFunction)pysqlite_connection_executemany, METH_VARARGS,
PyDoc_STR("Repeatedly executes a SQL statement. Non-standard.")},
{"executescript", (PyCFunction)pysqlite_connection_executescript, METH_VARARGS,
PyDoc_STR("Executes a multiple SQL statements at once. Non-standard.")},
{"create_collation", (PyCFunction)pysqlite_connection_create_collation, METH_VARARGS,
PyDoc_STR("Creates a collation function. Non-standard.")},
{"interrupt", (PyCFunction)pysqlite_connection_interrupt, METH_NOARGS,
PyDoc_STR("Abort any pending database operation. Non-standard.")},
{"iterdump", (PyCFunction)pysqlite_connection_iterdump, METH_NOARGS,
PyDoc_STR("Returns iterator to the dump of the database in an SQL text format. Non-standard.")},
{"__enter__", (PyCFunction)pysqlite_connection_enter, METH_NOARGS,
PyDoc_STR("For context manager. Non-standard.")},
{"__exit__", (PyCFunction)pysqlite_connection_exit, METH_VARARGS,
PyDoc_STR("For context manager. Non-standard.")},
{NULL, NULL}
};
/* Attribute table: DB-API exception classes are exposed read-only on the
 * connection; row_factory/text_factory are writable. */
static struct PyMemberDef connection_members[] =
{
{"Warning", T_OBJECT, offsetof(pysqlite_Connection, Warning), READONLY},
{"Error", T_OBJECT, offsetof(pysqlite_Connection, Error), READONLY},
{"InterfaceError", T_OBJECT, offsetof(pysqlite_Connection, InterfaceError), READONLY},
{"DatabaseError", T_OBJECT, offsetof(pysqlite_Connection, DatabaseError), READONLY},
{"DataError", T_OBJECT, offsetof(pysqlite_Connection, DataError), READONLY},
{"OperationalError", T_OBJECT, offsetof(pysqlite_Connection, OperationalError), READONLY},
{"IntegrityError", T_OBJECT, offsetof(pysqlite_Connection, IntegrityError), READONLY},
{"InternalError", T_OBJECT, offsetof(pysqlite_Connection, InternalError), READONLY},
{"ProgrammingError", T_OBJECT, offsetof(pysqlite_Connection, ProgrammingError), READONLY},
{"NotSupportedError", T_OBJECT, offsetof(pysqlite_Connection, NotSupportedError), READONLY},
{"row_factory", T_OBJECT, offsetof(pysqlite_Connection, row_factory)},
{"text_factory", T_OBJECT, offsetof(pysqlite_Connection, text_factory)},
{"in_transaction", T_BOOL, offsetof(pysqlite_Connection, inTransaction), READONLY},
{NULL}
};
/* Type object for sqlite3.Connection.  tp_call makes con(sql) return a
 * prepared Statement; tp_new is filled in by
 * pysqlite_connection_setup_types(). */
PyTypeObject pysqlite_ConnectionType = {
PyVarObject_HEAD_INIT(NULL, 0)
MODULE_NAME ".Connection", /* tp_name */
sizeof(pysqlite_Connection), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)pysqlite_connection_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
(ternaryfunc)pysqlite_connection_call, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
connection_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
connection_methods, /* tp_methods */
connection_members, /* tp_members */
connection_getset, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)pysqlite_connection_init, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0 /* tp_free */
};
extern int pysqlite_connection_setup_types(void)
{
pysqlite_ConnectionType.tp_new = PyType_GenericNew;
return PyType_Ready(&pysqlite_ConnectionType);
}
| gpl-2.0 |
arne182/chdk-eyefi | platform/ixus75_sd750/sub/100b/movie_rec.c | 8 | 13074 | #include "conf.h"
/* CHDK user settings, aliased from the global conf structure so the
 * patched firmware code below can read the live values. */
int *video_quality = &conf.video_quality;
int *video_mode = &conf.video_mode;
/* Firmware default bitrate-decision tables (table2 holds the negative
 * thresholds) and the working copies patched at runtime. */
long def_table1[9]={0x2000,0x38D,0x788,0x5800,0x9C5,0x14B8,0x10000,0x1C6A,0x3C45};
long def_table2[9]={0x1CCD,-0x2E1,-0x579,0x4F33,-0x7EB,-0xF0C,0xE666,-0x170A,-0x2BC6};
long table1[9], table2[9];

/* Rescale both working tables by the ratio a/b (integer arithmetic,
 * truncating toward zero). */
void change_video_tables(int a, int b){
    int idx;
    for (idx = 0; idx < 9; idx++) {
        table1[idx] = (def_table1[idx] * a) / b;
        table2[idx] = (def_table2[idx] * a) / b;
    }
}
long CompressionRateTable[]={0x60, 0x5D, 0x5A, 0x57, 0x54, 0x51, 0x4D, 0x48, 0x42, 0x3B, 0x32, 0x29, 0x22, 0x1D, 0x17, 0x14, 0x10, 0xE, 0xB, 9, 7, 6, 5, 4, 3, 2, 1};
/*
 * CHDK replacement for the firmware's movie_record_task.
 *
 * Verbatim hand-disassembled ARM copy of the original task loop, with
 * one jump-table target (message code 6) redirected into the patched
 * sub_FFB8F038_my so CHDK can hook frame recording.  Do NOT reformat or
 * "clean up" this assembly: register usage, literal pools and branch
 * offsets must match the stock firmware exactly.
 */
void __attribute__((naked,noinline)) movie_record_task(){
asm volatile(
"STMFD SP!, {R4,R5,LR}\n"
"SUB SP, SP, #4\n"
"MOV R5, SP\n"
"MOV R4, #0\n"
// Main loop: block on the movie-record message queue, dispatch by code.
"loc_FFB8E934:\n"
"LDR R3, =0xBBB10\n"
"MOV R2, #0\n"
"LDR R0, [R3]\n"
"MOV R1, R5\n"
"BL sub_FFB1EFB4\n" // ReceiveMessageQueue
"LDR R3, =0xBBC30\n"
"LDR R2, [R3]\n"
"CMP R2, #0\n"
"BNE loc_FFB8E9E0\n"
"LDR R1, [SP]\n" // ,#0x10+var_10
"LDR R3, [R1]\n"
"SUB R3, R3, #2\n"
"MOV R0, R1\n"
"CMP R3, #9\n"
// Jump table for message codes 2..11 (code - 2 indexes the table).
"LDRLS PC, [PC,R3,LSL#2]\n"
"B loc_FFB8E9E4\n"
".long loc_FFB8E9A4\n"
".long loc_FFB8E9BC\n"
".long loc_FFB8E9C4\n"
".long loc_FFB8E9CC\n"
".long loc_FFB8E9AC\n"
".long loc_FFB8E9D4\n"
".long loc_FFB8E9B4\n"
".long loc_FFB8E9E4\n"
".long loc_FFB8E9DC\n"
".long loc_FFB8E99C\n"
"loc_FFB8E99C:\n"
"BL sub_FFB8EA50\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9A4:\n"
"BL unlock_optical_zoom\n"
"BL sub_FFB8EC58\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9AC:\n"
// This is the CHDK patch point: call the hooked frame handler.
"BL sub_FFB8F038_my\n" //----------->
"B loc_FFB8E9E0\n"
"loc_FFB8E9B4:\n"
"BL sub_FFB8F8C8\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9BC:\n"
"BL sub_FFB8F37C\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9C4:\n"
"BL sub_FFB8FA64\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9CC:\n"
"BL sub_FFB8FC44\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9D4:\n"
"BL sub_FFB8F950\n"
"B loc_FFB8E9E0\n"
"loc_FFB8E9DC:\n"
"BL sub_FFB8F3CC\n"
"loc_FFB8E9E0:\n"
"LDR R1, [SP]\n" // ,#0x10+var_10
"loc_FFB8E9E4:\n"
// Return the message buffer to the free queue and loop forever.
"LDR R3, =0xBBB14\n"
"STR R4, [R1]\n"
"LDR R0, [R3]\n"
"MOV R2, R4\n"
"BL sub_FFB1F3CC\n" // PostMessageQueue
"B loc_FFB8E934\n"
);
}
/*
 * Patched copy of firmware sub_FFB8F038 (movie-frame handler).
 *
 * Identical to the stock routine except for the final call, which is
 * redirected to sub_FFAFE538_my (the CHDK bitrate/quality override).
 * Drives JPEG encoding of one video frame, handles the JpegTimeOut /
 * JpegIcError hardware-defect paths, and maintains the frame counters
 * at 0xBBC70/0xBBC74.  Do not modify the instruction sequence.
 */
void __attribute__((naked,noinline)) sub_FFB8F038_my(){
asm volatile(
"STMFD SP!, {R4-R11,LR}\n"
"LDR R7, =0xBBC48\n"
"SUB SP, SP, #0x3C\n"
"LDR R3, [R7]\n"
"MOV R5, #0\n"
"CMP R3, #3\n"
"MOV R4, R0\n"
"STR R5, [SP,#0x34]\n" // #0x60+var_2C
"STR R5, [SP,#0x2C]\n" // #0x60+var_34
"MOV R6, #1\n"
"MOVEQ R3, #4\n"
"STREQ R3, [R7]\n"
"LDR R3, =0xBBCF4\n"
"MOV LR, PC\n"
"LDR PC, [R3]\n"
"LDR R2, [R7]\n"
"CMP R2, #4\n"
"BNE loc_FFB8F294\n"
"LDR R3, =0xBBD36\n"
"LDRH R2, [R3]\n"
"CMP R2, #1\n"
"BNE loc_FFB8F0C8\n"
"LDR R2, =0xBBC58\n"
"LDR R1, =0xBBD38\n"
"LDR R0, [R2]\n"
"LDRH R3, [R1]\n"
"MUL R12, R3, R0\n"
"LDR R2, =0x10624DD3\n"
"UMULL R3, R1, R2, R12\n"
"LDR R3, =0xBBC74\n"
"MOV R1, R1,LSR#6\n"
"LDR R0, [R3]\n"
"BL sub_FF91EA44\n" // __umodsi3
"CMP R0, #0\n"
"MOVNE R6, #0\n"
"MOVEQ R6, #1\n"
"loc_FFB8F0C8:\n"
"CMP R6, #1\n"
"BNE loc_FFB8F0E8\n"
"ADD R0, SP, #0x38\n" // #0x60+var_28
"ADD R1, SP, #0x34\n" // #0x60+var_2C
"ADD R2, SP, #0x30\n" // #0x60+var_30
"ADD R3, SP, #0x2C\n" // #0x60+var_34
"BL sub_FFB90800\n"
"MOV R5, R0\n"
"loc_FFB8F0E8:\n"
"CMP R5, #0\n"
"BNE loc_FFB8F118\n"
"LDR R3, =0xBBC38\n"
"LDR R2, [R3]\n"
"CMP R2, #1\n"
"BNE loc_FFB8F12C\n"
"LDR R2, =0xBBC74\n"
"LDR R1, =0xBBC4C\n"
"LDR R0, [R2]\n"
"LDR R3, [R1]\n"
"CMP R0, R3\n"
"BCC loc_FFB8F12C\n"
"loc_FFB8F118:\n"
"MOV R0, R5\n"
"BL sub_FFB8F2F4\n"
"BL sub_FFB8F888\n"
"MOV R3, #5\n"
"B loc_FFB8F290\n"
"loc_FFB8F12C:\n"
"LDR R9, [SP,#0x34]\n" // #0x60+var_2C
"CMP R9, #0\n"
"BEQ loc_FFB8F214\n"
"LDR R7, =0xBBC90\n"
"LDR R12, =0xBBC7C\n"
"LDMIB R4, {R0-R2}\n"
"LDR R10, [R4,#0x18]\n"
"LDR R6, [R7]\n"
"LDR R7, [R4,#0x14]\n"
"LDR R4, =0xBBC3C\n"
"LDR R8, [R12]\n"
"ADD R5, SP, #0x2C\n" // #0x60+var_34
"LDMIA R5, {R5,LR}\n"
"MOV R11, #1\n"
"LDR R3, [SP,#0x38]\n" // #0x60+var_28
"ADD R12, SP, #0x28\n" // #0x60+var_38
"STR R11, [R4]\n"
"ADD R4, SP, #0x24\n" // #0x60+var_3C
"STMEA SP, {R9,LR}\n"
"STR R5, [SP,#0x8]\n" // #0x60+var_58
"STR R12, [SP,#0xC]\n" // #0x60+var_54
"STR R8, [SP,#0x10]\n" // #0x60+var_50
"STR R6, [SP,#0x14]\n" // #0x60+var_4C
"STR R7, [SP,#0x18]\n" // #0x60+var_48
"STR R10, [SP,#0x1C]\n" // #0x60+var_44
"STR R4, [SP,#0x20]\n" // #0x60+var_40
"BL sub_FFB938AC\n"
"LDR R3, =0xBBB08\n"
"MOV R1, #0x3E8\n"
"LDR R0, [R3]\n"
"BL sub_FFB1FB7C\n"
"CMP R0, #9\n"
"BNE loc_FFB8F1C0\n"
"BL sub_FFB91090\n"
"LDR R3, =0xBBC48\n"
"LDR R0, =0xFFB8F020\n" // aJpegtimeout_5
"B loc_FFB8F1D8\n"
"loc_FFB8F1C0:\n"
"LDR R4, [SP,#0x24]\n" // #0x60+var_3C
"CMP R4, #0\n"
"BEQ loc_FFB8F1E4\n"
"BL sub_FFB91090\n"
"LDR R3, =0xBBC48\n"
"LDR R0, =0xFFB8F02C\n" // aJpegicerror_5
"loc_FFB8F1D8:\n"
"STR R11, [R3]\n"
"BL sub_FFB42B44\n" // HardwareDefect
"B loc_FFB8F294\n"
"loc_FFB8F1E4:\n"
"BL sub_FFB93A28\n"
"LDR R0, [SP,#0x38]\n" // #0x60+var_28
"LDR R1, [SP,#0x28]\n" // #0x60+var_38
"BL sub_FFB90CE8\n"
"LDR R12, =0xBBC70\n"
"LDR R3, [R12]\n"
"ADD R3, R3, #1\n"
"LDR R0, [SP,#0x28]\n" // #0x60+var_38
"LDR R1, =0xBBC90\n"
"MOV R2, R4\n"
"STR R3, [R12]\n"
// CHDK patch point: branch into the hooked quality-control routine.
"BL sub_FFAFE538_my\n" //-------------->
"loc_FFB8F214:\n"
"LDR R4, =0xBBC74\n"
"LDR R2, =0xBBC9C\n"
"LDR R3, [R4]\n"
"LDR R1, [R2]\n"
"LDR R12, =0xBBC98\n"
"ADD R3, R3, #1\n"
"MUL R0, R1, R3\n"
"LDR R1, [R12]\n"
"STR R3, [R4]\n"
"BL sub_FF91E9AC\n" // __udivsi3
"LDR R6, =0xBBC94\n"
"MOV R4, R0\n"
"BL sub_FFB910CC\n"
"LDR R3, [R6]\n"
"CMP R3, R4\n"
"BNE loc_FFB8F268\n"
"LDR R5, =0xBBC40\n"
"LDR R3, [R5]\n"
"CMP R3, #1\n"
"BNE loc_FFB8F288\n"
"B loc_FFB8F26C\n"
"loc_FFB8F268:\n"
"LDR R5, =0xBBC40\n"
"loc_FFB8F26C:\n"
"LDR R2, =0xBBCD8\n"
"MOV R0, R4\n"
"MOV LR, PC\n"
"LDR PC, [R2]\n"
"MOV R3, #0\n"
"STR R3, [R5]\n"
"STR R4, [R6]\n"
"loc_FFB8F288:\n"
"LDR R7, =0xBBC3C\n"
"MOV R3, #0\n"
"loc_FFB8F290:\n"
"STR R3, [R7]\n"
"loc_FFB8F294:\n"
"ADD SP, SP, #0x3C\n"
"LDMFD SP!, {R4-R11,PC}\n"
);
}
/*
 * Patched copy of firmware sub_FFAFE538 (adaptive video quality step).
 *
 * Walks the table1/table2 thresholds to raise or lower the quality
 * index at 0x975BC based on the last frame size, then looks up the
 * quantizer value in CompressionRateTable.  CHDK changes (lines marked
 * "// +"): the upper clamp is raised from 0x16 to 0x1A, and when
 * conf.video_mode == 1 the table value is overridden by the user's
 * conf.video_quality.  Keep the instruction sequence byte-exact.
 */
void __attribute__((naked,noinline)) sub_FFAFE538_my(){
asm volatile(
"STMFD SP!, {R4-R8,LR}\n"
"LDR R12, =0x975B4\n"
"LDR R4, [R12]\n"
"LDR R3, =0x975BC\n"
"CMP R4, #0\n"
"MOV R8, R1\n"
"MOV R7, R0\n"
"LDR R1, [R3]\n"
"BEQ loc_FFAFE570\n"
"LDR R2, =0x975C0\n"
"LDR R3, [R2]\n"
"CMP R3, #1\n"
"BNE loc_FFAFE584\n"
"B loc_FFAFE574\n"
"loc_FFAFE570:\n"
"LDR R2, =0x975C0\n"
"loc_FFAFE574:\n"
"MOV R3, #0\n"
"STR R3, [R2]\n"
"STR R7, [R12]\n"
"B loc_FFAFE63C\n"
"loc_FFAFE584:\n"
// Compare the previous frame size against the (rescaled) thresholds.
"LDR R2, =0x975B8\n"
"LDR R3, [R2]\n"
"LDR R6, =table1\n" // unk_FFAFE43C
"ADD R3, R3, R3,LSL#1\n"
"MOV LR, R3,LSL#2\n"
"LDR R2, [R6,LR]\n"
"LDR R5, =table2\n" // unk_FFAFE460
"RSB R12, R2, R4\n"
"LDR R3, [R5,LR]\n"
"CMP R12, #0\n"
"RSB R0, R3, R4\n"
"BLE loc_FFAFE5E8\n"
"ADD R3, R6, #4\n"
"LDR R2, [R3,LR]\n"
"CMP R2, R12\n"
"ADDGE R1, R1, #1\n"
"BGE loc_FFAFE5DC\n"
"ADD R3, R6, #8\n"
"LDR R2, [R3,LR]\n"
"CMP R2, R12\n"
"ADDGE R1, R1, #2\n"
"ADDLT R1, R1, #3\n"
"loc_FFAFE5DC:\n"
// "CMP R1, #0x16\n"
// "MOVGE R1, #0x16\n"
"CMP R1, #0x1A\n" // +
"MOVGE R1, #0x1A\n" // +
"B loc_FFAFE620\n"
"loc_FFAFE5E8:\n"
"CMP R0, #0\n"
"BGE loc_FFAFE620\n"
"ADD R3, R5, #4\n"
"LDR R2, [R3,LR]\n"
"CMP R2, R0\n"
"SUBLE R1, R1, #1\n"
"BLE loc_FFAFE618\n"
"ADD R3, R5, #8\n"
"LDR R2, [R3,LR]\n"
"CMP R2, R0\n"
"SUBLE R1, R1, #2\n"
"SUBGT R1, R1, #3\n"
"loc_FFAFE618:\n"
"CMP R1, #0\n"
"MOVLT R1, #0\n"
"loc_FFAFE620:\n"
"LDR R0, =0x975BC\n"
"LDR R3, [R0]\n"
"CMP R1, R3\n"
"LDRNE R2, =0x975C0\n"
"MOVNE R3, #1\n"
"STRNE R1, [R0]\n"
"STRNE R3, [R2]\n"
"loc_FFAFE63C:\n"
"LDR R3, =0x975BC\n"
"LDR R2, =CompressionRateTable\n" // unk_FFAFE3E0
"LDR R1, [R3]\n"
"LDR R0, =0x975B4\n"
"LDR R3, [R2,R1,LSL#2]\n"
// CHDK: in video_mode 1, override the table with the user's quality.
"LDR R1, =video_mode\n" // +
"LDR R1, [R1]\n" // +
"LDR R1, [R1]\n" // +
"CMP R1, #1\n" // +
"LDREQ R1, =video_quality\n" // +
"LDREQ R1, [R1]\n" // +
"LDREQ R3, [R1]\n" // +
"STR R7, [R0]\n"
"STR R3, [R8]\n"
"LDMFD SP!, {R4-R8,PC}\n"
);
}
| gpl-2.0 |
omega-roms/G900I_Omega_Kernel_LL_5.0 | sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c | 8 | 35112 | /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/q6audio-v2.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <linux/msm_audio_ion.h>
#include <linux/of_device.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
/* Dual-wave support: synchronized playback/capture timing reported to
 * user space via uevents (helpers implemented elsewhere in the driver). */
#define DUALWAVE_ENABLE
#ifdef DUALWAVE_ENABLE
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
/* uevent payload values for stream-availability changes */
#define SND_PLAYBACK_UNAVAILABLE 0
#define SND_PLAYBACK_AVAILABLE 1
#define SND_CAPTURE_UNAVAILABLE 2
#define SND_CAPTURE_AVAILABLE 3
/* dual-wave state values held in dw_status */
#define DUALWAVE_INACTIVE 0
#define DUALWAVE_PLAYBACK 1
#define DUALWAVE_CAPTURE 2
/* Read CLOCK_REALTIME into tCurTimespec from kernel context; the
 * get_fs/set_fs dance lets sys_clock_gettime accept a kernel pointer. */
#define GET_CUR_TIME_ON(tCurTimespec) \
do { \
long int llErrTime = 0; \
struct timespec tMyTime; \
mm_segment_t tOldfs; \
tOldfs = get_fs(); \
set_fs(KERNEL_DS); \
\
llErrTime = sys_clock_gettime(CLOCK_REALTIME, &tMyTime); \
set_fs(tOldfs); \
\
tCurTimespec = tMyTime; \
}while(0)
static struct timespec res;
extern int send_uevent_wh_timeinfo(const char *szName, struct timespec *ptTime);
extern int send_uevent_snd_avail(int state);
extern int checkDualWaveStatus(void);
/* current DUALWAVE_* state */
static int dw_status = 0;
#endif
/* Wait queues shared by all PCM sessions (read/write/eos waiters). */
static struct audio_locks the_locks;
/* Master volume control: linear TLV range 0..0x2000. */
#define PCM_MASTER_VOL_MAX_STEPS 0x2000
static const DECLARE_TLV_DB_LINEAR(msm_pcm_vol_gain, 0,
PCM_MASTER_VOL_MAX_STEPS);
struct snd_msm {
struct snd_card *card;
struct snd_pcm *pcm;
};
/* Period-count and period-size limits (bytes) advertised to ALSA. */
#define PLAYBACK_MIN_NUM_PERIODS 2
#define PLAYBACK_MAX_NUM_PERIODS 8
#define PLAYBACK_MAX_PERIOD_SIZE 30720
#define PLAYBACK_MIN_PERIOD_SIZE 128
#define CAPTURE_MIN_NUM_PERIODS 2
#define CAPTURE_MAX_NUM_PERIODS 8
#define CAPTURE_MAX_PERIOD_SIZE 4096
#define CAPTURE_MIN_PERIOD_SIZE 320
/* ALSA hardware capabilities for the capture direction: 16/24-bit,
 * 8-48 kHz, up to 4 channels. */
static struct snd_pcm_hardware msm_pcm_hardware_capture = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.rates = SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 4,
.buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
CAPTURE_MAX_PERIOD_SIZE,
.period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
.period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
.periods_min = CAPTURE_MIN_NUM_PERIODS,
.periods_max = CAPTURE_MAX_NUM_PERIODS,
.fifo_size = 0,
};
/* ALSA hardware capabilities for the playback direction: 16/24-bit,
 * 8-192 kHz, up to 8 channels. */
static struct snd_pcm_hardware msm_pcm_hardware_playback = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.rates = SNDRV_PCM_RATE_8000_192000,
.rate_min = 8000,
.rate_max = 192000,
.channels_min = 1,
.channels_max = 8,
.buffer_bytes_max = PLAYBACK_MAX_NUM_PERIODS *
PLAYBACK_MAX_PERIOD_SIZE,
.period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
.period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE,
.periods_min = PLAYBACK_MIN_NUM_PERIODS,
.periods_max = PLAYBACK_MAX_NUM_PERIODS,
.fifo_size = 0,
};
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
88200, 96000, 176400, 192000
};
/* Per-period [bytes, offset] bookkeeping filled in by the read-done event. */
static uint32_t in_frame_info[CAPTURE_MAX_NUM_PERIODS][2];
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
.count = ARRAY_SIZE(supported_sample_rates),
.list = supported_sample_rates,
.mask = 0,
};
/*
 * Routing-event callback registered with the msm-pcm routing driver.
 *
 * On MSM_PCM_RT_EVT_BUF_RECFG the DSP session is paused, flushed and
 * restarted so buffers can be reconfigured; all other events are
 * ignored.
 */
static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event,
void *priv_data)
{
	struct msm_audio *prtd = priv_data;

	BUG_ON(!prtd);
	pr_debug("%s: event %x\n", __func__, event);

	switch (event) {
	case MSM_PCM_RT_EVT_BUF_RECFG:
		q6asm_cmd(prtd->audio_client, CMD_PAUSE);
		q6asm_cmd(prtd->audio_client, CMD_FLUSH);
		q6asm_run(prtd->audio_client, 0, 0, 0);
		break;	/* was an implicit fall-through into default */
	default:
		break;
	}
}
/*
 * ASM session callback.  The DSP raises this for every event on the
 * session: playback write completions, capture read completions, EOS
 * acknowledgement and command responses.  @priv is the struct msm_audio
 * registered at open time.
 */
static void event_handler(uint32_t opcode,
		uint32_t token, uint32_t *payload, void *priv)
{
	struct msm_audio *prtd = priv;
	struct snd_pcm_substream *substream = prtd->substream;
	uint32_t *ptrmem = (uint32_t *)payload;
	uint32_t idx = 0;
	uint32_t size = 0;

	switch (opcode) {
	case ASM_DATA_EVENT_WRITE_DONE_V2: {
		/* DSP consumed one playback period: advance the position,
		 * notify ALSA and release a buffer to the copy path. */
		pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2\n");
		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
		prtd->pcm_irq_pos += prtd->pcm_count;
		if (atomic_read(&prtd->start))
			snd_pcm_period_elapsed(substream);
		atomic_inc(&prtd->out_count);
		wake_up(&the_locks.write_wait);
		if (!atomic_read(&prtd->start))
			break;
		if (!prtd->mmap_flag)
			break;
		/* mmap mode: queue the next available buffer to the DSP
		 * directly (no copy callback will do it for us). */
		if (q6asm_is_cpu_buf_avail_nolock(IN,
				prtd->audio_client,
				&size, &idx)) {
			pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
					__func__, prtd->pcm_count);
			q6asm_write_nolock(prtd->audio_client,
				prtd->pcm_count, 0, 0, NO_TIMESTAMP);
		}
		break;
	}
	case ASM_DATA_EVENT_RENDERED_EOS:
		/* End-of-stream fully rendered: unblock close(). */
		pr_debug("ASM_DATA_EVENT_RENDERED_EOS\n");
		clear_bit(CMD_EOS, &prtd->cmd_pending);
		wake_up(&the_locks.eos_wait);
		break;
	case ASM_DATA_EVENT_READ_DONE_V2: {
		/* Capture buffer filled; token indexes in_frame_info.
		 * payload[4]/[5] carry the data size and offset. */
		pr_debug("ASM_DATA_EVENT_READ_DONE_V2\n");
		pr_debug("token = 0x%08x\n", token);
		in_frame_info[token][0] = payload[4];
		in_frame_info[token][1] = payload[5];
		/* assume data size = 0 during flushing */
		if (in_frame_info[token][0]) {
			prtd->pcm_irq_pos += in_frame_info[token][0];
			pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos);
			if (atomic_read(&prtd->start))
				snd_pcm_period_elapsed(substream);
			if (atomic_read(&prtd->in_count) <= prtd->periods)
				atomic_inc(&prtd->in_count);
			wake_up(&the_locks.read_wait);
			/* mmap mode: immediately re-queue a buffer. */
			if (prtd->mmap_flag &&
			    q6asm_is_cpu_buf_avail_nolock(OUT,
					prtd->audio_client,
					&size, &idx))
				q6asm_read_nolock(prtd->audio_client);
		} else {
			/* Flush in progress: reclaim the buffer and fake
			 * forward progress so readers are not stuck. */
			pr_debug("%s: reclaim flushed buf in_count %x\n",
					__func__, atomic_read(&prtd->in_count));
			prtd->pcm_irq_pos += prtd->pcm_count;
			atomic_inc(&prtd->in_count);
			if (atomic_read(&prtd->in_count) == prtd->periods) {
				pr_info("%s: reclaimed all bufs\n", __func__);
				if (atomic_read(&prtd->start))
					snd_pcm_period_elapsed(substream);
				wake_up(&the_locks.read_wait);
			}
		}
		break;
	}
	case APR_BASIC_RSP_RESULT: {
		switch (payload[0]) {
		case ASM_SESSION_CMD_RUN_V2:
			/* RUN acknowledged.  Capture just flips start; for
			 * playback, push any data queued before RUN. */
			if (substream->stream
				!= SNDRV_PCM_STREAM_PLAYBACK) {
				atomic_set(&prtd->start, 1);
				break;
			}
			if (prtd->mmap_flag) {
				pr_debug("%s:writing %d bytes of buffer to dsp\n",
						__func__,
						prtd->pcm_count);
				q6asm_write_nolock(prtd->audio_client,
						prtd->pcm_count,
						0, 0, NO_TIMESTAMP);
			} else {
				/* Drain every period accumulated by the
				 * copy path while the stream was stopped. */
				while (atomic_read(&prtd->out_needed)) {
					pr_debug("%s:writing %d bytes of buffer to dsp\n",
							__func__,
							prtd->pcm_count);
					q6asm_write_nolock(prtd->audio_client,
							prtd->pcm_count,
							0, 0, NO_TIMESTAMP);
					atomic_dec(&prtd->out_needed);
					wake_up(&the_locks.write_wait);
				};
			}
			atomic_set(&prtd->start, 1);
			break;
		default:
			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
					__func__, payload[0], payload[1]);
			break;
		}
	}
	break;
	default:
		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
		break;
	}
}
/*
 * ALSA prepare callback, playback direction.  On first prepare: opens the
 * ASM write session, registers the stream with the audio routing driver
 * and sends the PCM media format block.  Subsequent prepares (after e.g.
 * a stop/start) only refresh the cached size/rate/channel values.
 *
 * Returns 0 on success, -EINVAL on missing platform data, -ENOMEM if the
 * ASM session could not be opened.
 */
static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
	struct msm_audio *prtd = runtime->private_data;
	struct msm_plat_data *pdata;
	struct snd_pcm_hw_params *params;
	int ret;
	uint16_t bits_per_sample = 16;

	pdata = (struct msm_plat_data *)
		dev_get_drvdata(soc_prtd->platform->dev);
	if (!pdata) {
		pr_err("%s: platform data not populated\n", __func__);
		return -EINVAL;
	}
	params = &soc_prtd->dpcm[substream->stream].hw_params;
	pr_debug("%s\n", __func__);
	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_irq_pos = 0;
	/* rate and channels are sent to audio driver */
	prtd->samp_rate = runtime->rate;
	prtd->channel_mode = runtime->channels;
	/* One-time session setup below; skip it on re-prepare. */
	if (prtd->enabled)
		return 0;
	prtd->audio_client->perf_mode = pdata->perf_mode;
	pr_info("%s: perf: %x\n", __func__, pdata->perf_mode);
	/* Sample width for the session open comes from hw_params. */
	if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE)
		bits_per_sample = 24;
	ret = q6asm_open_write_v2(prtd->audio_client,
			FORMAT_LINEAR_PCM, bits_per_sample);
	if (ret < 0) {
		pr_err("%s: q6asm_open_write_v2 failed\n", __func__);
		q6asm_audio_client_free(prtd->audio_client);
		prtd->audio_client = NULL;
		return -ENOMEM;
	}
	pr_info("%s: session ID %d\n", __func__,
			prtd->audio_client->session);
	prtd->session_id = prtd->audio_client->session;
	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
		prtd->audio_client->perf_mode,
		prtd->session_id, substream->stream);
	/* Re-derive the sample width from runtime->format for the media
	 * format block (normally matches the hw_params value above). */
	switch (runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		bits_per_sample = 16;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		bits_per_sample = 24;
		break;
	}
	ret = q6asm_media_format_block_multi_ch_pcm_v2(
			prtd->audio_client, runtime->rate,
			runtime->channels, !prtd->set_channel_map,
			prtd->channel_map, bits_per_sample);
	if (ret < 0)
		pr_info("%s: CMD Format block failed\n", __func__);
	atomic_set(&prtd->out_count, runtime->periods);
	prtd->enabled = 1;
	prtd->cmd_pending = 0;
	prtd->cmd_interrupt = 0;
	return 0;
}
/*
 * ALSA prepare callback, capture direction.  Opens the ASM read session,
 * registers the stream (with a routing-event callback for buffer
 * reconfiguration), configures the encoder block and pre-queues one read
 * per period so the DSP can start filling buffers.
 *
 * Returns 0 on success, -EINVAL on missing platform data, -ENOMEM if the
 * ASM session could not be opened.
 */
static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
	struct msm_plat_data *pdata;
	struct snd_pcm_hw_params *params;
	struct msm_pcm_routing_evt event;
	int ret = 0;
	int i = 0;
	uint16_t bits_per_sample = 16;

	pdata = (struct msm_plat_data *)
		dev_get_drvdata(soc_prtd->platform->dev);
	if (!pdata) {
		pr_err("%s: platform data not populated\n", __func__);
		return -EINVAL;
	}
	pr_debug("%s\n", __func__);
	params = &soc_prtd->dpcm[substream->stream].hw_params;
	if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE)
		bits_per_sample = 24;
	prtd->audio_client->perf_mode = pdata->perf_mode;
	pr_info("%s: perf_mode: 0x%x\n", __func__, pdata->perf_mode);
	pr_info("%s Opening %d-ch PCM read stream\n",
			__func__, params_channels(params));
	ret = q6asm_open_read_v2(prtd->audio_client, FORMAT_LINEAR_PCM,
			bits_per_sample);
	if (ret < 0) {
		pr_err("%s: q6asm_open_read failed\n", __func__);
		q6asm_audio_client_free(prtd->audio_client);
		prtd->audio_client = NULL;
		return -ENOMEM;
	}
	pr_info("%s: session ID %d\n",
			__func__, prtd->audio_client->session);
	prtd->session_id = prtd->audio_client->session;
	/* Register with routing, including the BUF_RECFG event handler. */
	event.event_func = msm_pcm_route_event_handler;
	event.priv_data = (void *) prtd;
	msm_pcm_routing_reg_phy_stream_v2(
			soc_prtd->dai_link->be_id,
			prtd->audio_client->perf_mode,
			prtd->session_id, substream->stream,
			event);
	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_irq_pos = 0;
	/* rate and channels are sent to audio driver */
	prtd->samp_rate = runtime->rate;
	prtd->channel_mode = runtime->channels;
	/* One-time encoder config and read pre-queue below. */
	if (prtd->enabled)
		return 0;
	switch (runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		bits_per_sample = 16;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		bits_per_sample = 24;
		break;
	}
	pr_debug("Samp_rate = %d\n", prtd->samp_rate);
	pr_debug("Channel = %d\n", prtd->channel_mode);
	ret = q6asm_enc_cfg_blk_pcm_format_support(prtd->audio_client,
					prtd->samp_rate, prtd->channel_mode,
					bits_per_sample);
	if (ret < 0)
		pr_debug("%s: cmd cfg pcm was block failed", __func__);
	/* Hand every period's buffer to the DSP so capture can begin. */
	for (i = 0; i < runtime->periods; i++)
		q6asm_read(prtd->audio_client);
	prtd->periods = runtime->periods;
	prtd->enabled = 1;
	return ret;
}
/*
 * ALSA trigger callback.  Runs in atomic context, so only the _nowait
 * ASM command variants are used.  START/RESUME issues RUN (prtd->start
 * is set later from the APR RUN acknowledgement in event_handler);
 * STOP additionally queues an EOS for playback; SUSPEND/PAUSE sends
 * PAUSE and clears the start flag.
 */
static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	int ret = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		pr_debug("%s: Trigger start\n", __func__);
#ifdef DUALWAVE_ENABLE
		/* NOTE(review): `res` is not declared in this function;
		 * presumably provided by the DUALWAVE macros/globals —
		 * confirm against the DUALWAVE headers. */
		if(dw_status != DUALWAVE_INACTIVE){
			GET_CUR_TIME_ON(res);
		}
#endif
		ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
#ifdef DUALWAVE_ENABLE
		/* Report the trigger timestamp to the DualWave service via
		 * uevent, for the direction it is tracking. */
		switch(dw_status) {
		case DUALWAVE_PLAYBACK:
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK){
				send_uevent_wh_timeinfo("PLAY_TIME",&res);
			}
			break;
		case DUALWAVE_CAPTURE:
			if (substream->stream == SNDRV_PCM_STREAM_CAPTURE){
				send_uevent_wh_timeinfo("CAPTURE_TIME",&res);
			}
			break;
		case DUALWAVE_INACTIVE:
		default:
			break;
		}
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
		atomic_set(&prtd->start, 0);
		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
			break;
		/* pending CMD_EOS isn't expected */
		WARN_ON_ONCE(test_bit(CMD_EOS, &prtd->cmd_pending));
		/* Queue EOS; the bit is cleared by event_handler when the
		 * DSP confirms rendering, unblocking playback close. */
		set_bit(CMD_EOS, &prtd->cmd_pending);
		ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
		if (ret)
			clear_bit(CMD_EOS, &prtd->cmd_pending);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
		ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
		atomic_set(&prtd->start, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
/*
 * ALSA open callback.  Allocates the per-stream msm_audio state and an
 * ASM session, installs the direction-specific hardware capabilities and
 * applies the rate/period/buffer constraints.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * unknown stream direction.
 */
static int msm_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd;
	int ret = 0;

	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
	if (prtd == NULL) {
		pr_err("Failed to allocate memory for msm_audio\n");
		return -ENOMEM;
	}
	prtd->substream = substream;
	prtd->audio_client = q6asm_audio_client_alloc(
				(app_cb)event_handler, prtd);
	if (!prtd->audio_client) {
		pr_info("%s: Could not allocate memory\n", __func__);
		kfree(prtd);
		return -ENOMEM;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		runtime->hw = msm_pcm_hardware_playback;
	/* Capture path */
	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		runtime->hw = msm_pcm_hardware_capture;
	else {
		pr_err("Invalid Stream type %d\n", substream->stream);
		/* Fix: release the ASM session and prtd instead of leaking
		 * them on this error path. */
		q6asm_audio_client_free(prtd->audio_client);
		kfree(prtd);
		return -EINVAL;
	}

	ret = snd_pcm_hw_constraint_list(runtime, 0,
				SNDRV_PCM_HW_PARAM_RATE,
				&constraints_sample_rates);
	if (ret < 0)
		pr_info("snd_pcm_hw_constraint_list failed\n");
	/* Ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
				SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		pr_info("snd_pcm_hw_constraint_integer failed\n");

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = snd_pcm_hw_constraint_minmax(runtime,
			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
			PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE,
			PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE);
		if (ret < 0) {
			pr_err("constraint for buffer bytes min max ret = %d\n",
				ret);
		}
	}
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		ret = snd_pcm_hw_constraint_minmax(runtime,
			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
			CAPTURE_MIN_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
			CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
		if (ret < 0) {
			pr_err("constraint for buffer bytes min max ret = %d\n",
				ret);
		}
	}

	/* Constrain period and buffer sizes to 32-byte steps. */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
		SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
	if (ret < 0) {
		pr_err("constraint for period bytes step ret = %d\n",
			ret);
	}
	ret = snd_pcm_hw_constraint_step(runtime, 0,
		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
	if (ret < 0) {
		pr_err("constraint for buffer bytes step ret = %d\n",
			ret);
	}

	prtd->dsp_cnt = 0;
	prtd->set_channel_map = false;
	runtime->private_data = prtd;
	return 0;
}
/*
 * ALSA copy callback, playback: wait for the DSP to free a buffer, copy
 * user data into it and (if the stream is running) queue it to the DSP.
 * When the stream is not running yet, the write is deferred by bumping
 * out_needed; event_handler drains it once RUN is acknowledged.
 *
 * Returns 0 on success, -ETIMEDOUT if no buffer became free within 5s,
 * -EFAULT on copy or write failure.
 */
static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
{
	int ret = 0;
	int fbytes = 0;
	int xfer = 0;
	char *bufptr = NULL;
	void *data = NULL;
	uint32_t idx = 0;
	uint32_t size = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	fbytes = frames_to_bytes(runtime, frames);
	pr_debug("%s: prtd->out_count = %d\n",
			__func__, atomic_read(&prtd->out_count));

	ret = wait_event_timeout(the_locks.write_wait,
			(atomic_read(&prtd->out_count)), 5 * HZ);
	if (!ret) {
		pr_err("%s: wait_event_timeout failed\n", __func__);
		/* Fix: report the timeout; the old code returned ret == 0
		 * (success) here. */
		ret = -ETIMEDOUT;
		goto fail;
	}
	/* Fix: don't leak the positive leftover-jiffies value from
	 * wait_event_timeout() as the return code. */
	ret = 0;

	if (!atomic_read(&prtd->out_count)) {
		pr_err("%s: pcm stopped out_count 0\n", __func__);
		return 0;
	}

	data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx);
	/* Never copy more than the available DSP buffer can hold. */
	if (size < fbytes)
		fbytes = size;

	bufptr = data;
	if (bufptr) {
		pr_debug("%s:fbytes =%d: xfer=%d size=%d\n",
				__func__, fbytes, xfer, size);
		xfer = fbytes;
		if (copy_from_user(bufptr, buf, xfer)) {
			ret = -EFAULT;
			goto fail;
		}
		buf += xfer;
		fbytes -= xfer;
		pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer);
		if (atomic_read(&prtd->start)) {
			pr_debug("%s:writing %d bytes of buffer to dsp\n",
					__func__, xfer);
			ret = q6asm_write(prtd->audio_client, xfer,
					0, 0, NO_TIMESTAMP);
			if (ret < 0) {
				ret = -EFAULT;
				goto fail;
			}
		} else
			atomic_inc(&prtd->out_needed);
		atomic_dec(&prtd->out_count);
	}
fail:
	return ret;
}
/*
 * ALSA close callback, playback.  Waits (bounded) for a pending EOS to
 * render, shuts down the ASM session, releases its buffers and
 * deregisters the stream from audio routing.
 */
static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
	struct msm_audio *prtd = substream->runtime->private_data;
	struct audio_client *ac = prtd->audio_client;
	int rc;

	pr_debug("%s: cmd_pending 0x%lx\n", __func__, prtd->cmd_pending);

	if (ac) {
		/* Give an in-flight EOS up to 5s to complete. */
		rc = wait_event_timeout(the_locks.eos_wait,
					!test_bit(CMD_EOS, &prtd->cmd_pending),
					5 * HZ);
		if (rc == 0)
			pr_err("%s: CMD_EOS failed, cmd_pending 0x%lx\n",
			       __func__, prtd->cmd_pending);
		q6asm_cmd(ac, CMD_CLOSE);
		q6asm_audio_client_buf_free_contiguous(IN, ac);
		q6asm_audio_client_free(ac);
	}

	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
					 SNDRV_PCM_STREAM_PLAYBACK);
	kfree(prtd);
	return 0;
}
/*
 * ALSA copy callback, capture: wait for the DSP to fill a buffer, copy
 * its data to userspace at the recorded offset and re-queue the buffer.
 *
 * NOTE(review): idx/size are intentionally static — `size` carries the
 * remaining bytes of a partially consumed buffer across calls (together
 * with in_frame_info[idx][1]).  This makes the function single-stream
 * only; confirm no concurrent capture substreams use this path.
 *
 * Returns 0 on success, -ETIMEDOUT if no data arrived within 5s,
 * -EFAULT on copy or re-queue failure.
 */
static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
		 int channel, snd_pcm_uframes_t hwoff, void __user *buf,
						 snd_pcm_uframes_t frames)
{
	int ret = 0;
	int fbytes = 0;
	int xfer;
	char *bufptr;
	void *data = NULL;
	static uint32_t idx;
	static uint32_t size;
	uint32_t offset = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = substream->runtime->private_data;

	pr_debug("%s\n", __func__);
	fbytes = frames_to_bytes(runtime, frames);

	pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr);
	pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr);
	pr_debug("avail_min %d\n", (int)runtime->control->avail_min);

	ret = wait_event_timeout(the_locks.read_wait,
			(atomic_read(&prtd->in_count)), 5 * HZ);
	if (!ret) {
		pr_debug("%s: wait_event_timeout failed\n", __func__);
		/* Fix: report the timeout; the old code returned ret == 0
		 * (success) here. */
		ret = -ETIMEDOUT;
		goto fail;
	}
	/* Fix: don't leak the positive leftover-jiffies value from
	 * wait_event_timeout() as the return code. */
	ret = 0;

	if (!atomic_read(&prtd->in_count)) {
		pr_debug("%s: pcm stopped in_count 0\n", __func__);
		return 0;
	}
	pr_debug("Checking if valid buffer is available...%08x\n",
						(unsigned int) data);
	data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
	bufptr = data;
	pr_debug("Size = %d\n", size);
	pr_debug("fbytes = %d\n", fbytes);
	pr_debug("idx = %d\n", idx);
	if (bufptr) {
		xfer = fbytes;
		if (xfer > size)
			xfer = size;
		/* Resume at the offset recorded from the READ_DONE event
		 * (and advanced by earlier partial copies). */
		offset = in_frame_info[idx][1];
		pr_debug("Offset value = %d\n", offset);
		if (copy_to_user(buf, bufptr+offset, xfer)) {
			pr_err("Failed to copy buf to user\n");
			ret = -EFAULT;
			goto fail;
		}
		fbytes -= xfer;
		size -= xfer;
		in_frame_info[idx][1] += xfer;
		pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n",
					__func__, fbytes, size, xfer);
		pr_debug(" Sending next buffer to dsp\n");
		memset(&in_frame_info[idx], 0,
			sizeof(uint32_t) * 2);
		atomic_dec(&prtd->in_count);
		/* Hand the consumed buffer back to the DSP. */
		ret = q6asm_read(prtd->audio_client);
		if (ret < 0) {
			pr_err("q6asm read failed\n");
			ret = -EFAULT;
			goto fail;
		}
	} else
		pr_err("No valid buffer\n");

	pr_debug("Returning from capture_copy... %d\n", ret);
fail:
	return ret;
}
/*
 * ALSA close callback, capture.  Shuts down the ASM session, releases
 * its buffers and deregisters the stream from audio routing.
 */
static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
	struct msm_audio *prtd = substream->runtime->private_data;
	struct audio_client *ac = prtd->audio_client;

	pr_debug("%s\n", __func__);

	if (ac) {
		q6asm_cmd(ac, CMD_CLOSE);
		q6asm_audio_client_buf_free_contiguous(OUT, ac);
		q6asm_audio_client_free(ac);
	}

	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
					 SNDRV_PCM_STREAM_CAPTURE);
	kfree(prtd);
	return 0;
}
/* Dispatch the ALSA copy callback to the direction-specific handler. */
static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
{
	switch (substream->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		return msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
	case SNDRV_PCM_STREAM_CAPTURE:
		return msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
	default:
		return 0;
	}
}
/*
 * ALSA close callback.  Dispatches to the direction-specific close and,
 * when DualWave is built in, notifies the DualWave service via uevent
 * that the direction it tracks is available again.
 */
static int msm_pcm_close(struct snd_pcm_substream *substream)
{
	int ret = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ret = msm_pcm_playback_close(substream);
	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		ret = msm_pcm_capture_close(substream);
#ifdef DUALWAVE_ENABLE
	switch(dw_status) {
	case DUALWAVE_PLAYBACK:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK){
			send_uevent_snd_avail(SND_PLAYBACK_AVAILABLE);
		}
		break;
	case DUALWAVE_CAPTURE:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE){
			send_uevent_snd_avail(SND_CAPTURE_AVAILABLE);
		}
		break;
	case DUALWAVE_INACTIVE:
	default:
		break;
	}
#endif
	return ret;
}
/*
 * ALSA prepare callback.  When DualWave is built in, first refreshes the
 * DualWave state and signals (via uevent) that the tracked direction is
 * now in use; then dispatches to the direction-specific prepare.
 */
static int msm_pcm_prepare(struct snd_pcm_substream *substream)
{
	int ret = 0;

#ifdef DUALWAVE_ENABLE
	dw_status = checkDualWaveStatus();
	switch(dw_status) {
	case DUALWAVE_PLAYBACK:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK){
			printk("khhan pcm prepare # SEND_UEVENT : PLAYBACK UNAVAILABLE\n");
			send_uevent_snd_avail(SND_PLAYBACK_UNAVAILABLE);
		}
		break;
	case DUALWAVE_CAPTURE:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE){
			printk("khhan pcm prepare # SEND_UEVENT : CAPTURE UNAVAILABLE\n");
			send_uevent_snd_avail(SND_CAPTURE_UNAVAILABLE);
		}
		break;
	case DUALWAVE_INACTIVE:
	default:
		break;
	}
#endif
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ret = msm_pcm_playback_prepare(substream);
	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		ret = msm_pcm_capture_prepare(substream);
	return ret;
}
/* Report the current stream position, wrapping at the buffer end. */
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct msm_audio *audio = rt->private_data;

	/* Wrap the byte position once a full buffer has been processed. */
	if (audio->pcm_irq_pos >= audio->pcm_size)
		audio->pcm_irq_pos = 0;

	pr_debug("pcm_irq_pos = %d\n", audio->pcm_irq_pos);
	return bytes_to_frames(rt, audio->pcm_irq_pos);
}
/*
 * ALSA mmap callback: map the stream's first (contiguous) audio buffer
 * into userspace and flag the stream as mmap-driven.
 */
static int msm_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *vma)
{
	struct msm_audio *audio = substream->runtime->private_data;
	struct audio_client *client = audio->audio_client;
	int dir;
	struct audio_buffer *buf;

	audio->mmap_flag = 1;

	dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? IN : OUT;
	buf = &client->port[dir].buf[0];
	return msm_audio_ion_mmap(buf, vma);
}
/*
 * ALSA hw_params callback.  Allocates one contiguous DSP buffer per
 * period through the ASM client and points the substream's DMA buffer
 * at the first buffer's memory.  The buffers are freed in the
 * direction-specific close callbacks.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
	struct audio_buffer *buf;
	int dir, ret;

	/* IN = data flowing to the DSP (playback), OUT = from it. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dir = IN;
	else
		dir = OUT;
	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
			prtd->audio_client,
			(params_buffer_bytes(params) / params_periods(params)),
			params_periods(params));
	if (ret < 0) {
		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
							ret);
		return -ENOMEM;
	}
	buf = prtd->audio_client->port[dir].buf;
	if (buf == NULL || buf[0].data == NULL)
		return -ENOMEM;

	pr_debug("%s:buf = %p\n", __func__, buf);
	/* Expose the first DSP buffer as the substream's DMA area. */
	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
	dma_buf->dev.dev = substream->pcm->card->dev;
	dma_buf->private_data = NULL;
	dma_buf->area = buf[0].data;
	dma_buf->addr = buf[0].phys;
	dma_buf->bytes = params_buffer_bytes(params);
	if (!dma_buf->area)
		return -ENOMEM;

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	return 0;
}
/* ALSA PCM operations implemented by this platform driver. */
static struct snd_pcm_ops msm_pcm_ops = {
	.open           = msm_pcm_open,
	.copy		= msm_pcm_copy,
	.hw_params	= msm_pcm_hw_params,
	.close          = msm_pcm_close,
	.ioctl          = snd_pcm_lib_ioctl,
	.prepare        = msm_pcm_prepare,
	.trigger        = msm_pcm_trigger,
	.pointer        = msm_pcm_pointer,
	.mmap		= msm_pcm_mmap,
};
/*
 * Push a volume value to the stream's ASM session, if one is active.
 * Silently succeeds when there is no stream/session to talk to.
 */
static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
{
	int ret;

	if (!prtd || !prtd->audio_client)
		return 0;

	pr_debug("%s: channels %d volume 0x%x\n", __func__,
		 prtd->channel_mode, volume);
	ret = q6asm_set_volume(prtd->audio_client, volume);
	if (ret < 0) {
		pr_err("%s: Send Volume command failed rc=%d\n",
		       __func__, ret);
	}
	return ret;
}
/* Read back the cached volume of the playback substream, if it exists. */
static int msm_pcm_volume_ctl_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
	struct snd_pcm_substream *substream =
		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	struct msm_audio *audio;

	pr_debug("%s\n", __func__);

	if (!substream) {
		pr_err("%s substream not found\n", __func__);
		return -ENODEV;
	}
	if (!substream->runtime) {
		pr_err("%s substream runtime not found\n", __func__);
		return 0;
	}

	audio = substream->runtime->private_data;
	if (audio)
		ucontrol->value.integer.value[0] = audio->volume;
	return 0;
}
/* Apply a new volume to the playback substream and cache it. */
static int msm_pcm_volume_ctl_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
	struct snd_pcm_substream *substream =
		vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	struct msm_audio *audio;
	int new_volume = ucontrol->value.integer.value[0];
	int rc = 0;

	pr_debug("%s: volume : 0x%x\n", __func__, new_volume);

	if (!substream) {
		pr_err("%s substream not found\n", __func__);
		return -ENODEV;
	}
	if (!substream->runtime) {
		pr_err("%s substream runtime not found\n", __func__);
		return 0;
	}

	audio = substream->runtime->private_data;
	if (audio) {
		rc = msm_pcm_set_volume(audio, new_volume);
		audio->volume = new_volume;
	}
	return rc;
}
/*
 * Create the per-PCM playback volume mixer control and wire its get/put
 * callbacks to this driver.  The TLV gain table (msm_pcm_vol_gain) is
 * defined elsewhere in the driver — presumably a dB range mapping;
 * confirm against its definition.
 *
 * Returns 0 on success or the error from snd_pcm_add_volume_ctls().
 */
static int msm_pcm_add_volume_control(struct snd_soc_pcm_runtime *rtd)
{
	int ret = 0;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_volume *volume_info;
	struct snd_kcontrol *kctl;

	dev_dbg(rtd->dev, "%s, Volume control add\n", __func__);
	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			NULL, 1, rtd->dai_link->be_id,
			&volume_info);
	if (ret < 0) {
		pr_err("%s volume control failed ret %d\n", __func__, ret);
		return ret;
	}
	kctl = volume_info->kctl;
	kctl->put = msm_pcm_volume_ctl_put;
	kctl->get = msm_pcm_volume_ctl_get;
	kctl->tlv.p = msm_pcm_vol_gain;
	return 0;
}
/*
 * Store a user-supplied channel map on the substream's private data;
 * it is consumed by the playback media-format block at prepare time.
 */
static int msm_pcm_chmap_ctl_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	struct msm_audio *audio;
	int ch;

	pr_debug("%s", __func__);

	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	if (!substream->runtime)
		return 0;

	audio = substream->runtime->private_data;
	if (!audio)
		return 0;

	audio->set_channel_map = true;
	for (ch = 0; ch < PCM_FORMAT_MAX_NUM_CHANNEL; ch++)
		audio->channel_map[ch] =
			(char)(ucontrol->value.integer.value[ch]);
	return 0;
}
/*
 * Return the channel map previously set through the chmap control.
 * The value array is zeroed up front, so when no map has been set the
 * control reads back all zeros.
 */
static int msm_pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	int i;
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	struct msm_audio *prtd;

	pr_debug("%s", __func__);
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
		sizeof(ucontrol->value.integer.value));
	if (!substream->runtime)
		return 0; /* no channels set */
	prtd = substream->runtime->private_data;
	/* The memset above already handles the "no map set" case, so the
	 * old explicit zero-fill else-branch was redundant. */
	if (prtd && prtd->set_channel_map == true) {
		for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
			ucontrol->value.integer.value[i] =
				(int)prtd->channel_map[i];
	}
	return 0;
}
/*
 * ASoC pcm_new callback.  Ensures the card device has a DMA mask, adds
 * a writable channel-map control (renamed to include the PCM device
 * number so controls stay unique per device) and the volume control.
 */
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_chmap *chmap_info;
	struct snd_kcontrol *kctl;
	char device_num[12];
	int i, ret = 0;

	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
	pr_debug("%s, Channel map cntrl add\n", __func__);
	ret = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				     snd_pcm_std_chmaps,
				     PCM_FORMAT_MAX_NUM_CHANNEL, 0,
				     &chmap_info);
	if (ret < 0)
		return ret;
	kctl = chmap_info->kctl;
	/* Make every element of the chmap control writable so userspace
	 * can push a custom map. */
	for (i = 0; i < kctl->count; i++)
		kctl->vd[i].access |= SNDRV_CTL_ELEM_ACCESS_WRITE;
	snprintf(device_num, sizeof(device_num), "%d", pcm->device);
	strlcat(kctl->id.name, device_num, sizeof(kctl->id.name));
	pr_debug("%s, Overwriting channel map control name to: %s",
		__func__, kctl->id.name);
	kctl->put = msm_pcm_chmap_ctl_put;
	kctl->get = msm_pcm_chmap_ctl_get;
	ret = msm_pcm_add_volume_control(rtd);
	if (ret)
		pr_err("%s: Could not add pcm Volume Control %d\n",
			__func__, ret);
	return ret;
}
/* ASoC platform driver descriptor: PCM ops plus per-PCM control setup. */
static struct snd_soc_platform_driver msm_soc_platform = {
	.ops		= &msm_pcm_ops,
	.pcm_new	= msm_asoc_pcm_new,
};
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
int rc;
int id;
struct msm_plat_data *pdata;
const char *latency_level;
rc = of_property_read_u32(pdev->dev.of_node,
"qti,msm-pcm-dsp-id", &id);
if (rc) {
dev_err(&pdev->dev, "%s: qti,msm-pcm-dsp-id missing in DT node\n",
__func__);
return rc;
}
pdata = kzalloc(sizeof(struct msm_plat_data), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
return -ENOMEM;
}
if (of_property_read_bool(pdev->dev.of_node,
"qti,msm-pcm-low-latency")) {
pdata->perf_mode = LOW_LATENCY_PCM_MODE;
rc = of_property_read_string(pdev->dev.of_node,
"qti,latency-level", &latency_level);
if (!rc) {
if (!strcmp(latency_level, "ultra"))
pdata->perf_mode = ULTRA_LOW_LATENCY_PCM_MODE;
}
} else
pdata->perf_mode = LEGACY_PCM_MODE;
dev_set_drvdata(&pdev->dev, pdata);
dev_set_name(&pdev->dev, "%s.%d", "msm-pcm-dsp", id);
dev_dbg(&pdev->dev, "%s: dev name %s\n",
__func__, dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
&msm_soc_platform);
}
/* Platform remove: free the per-platform data, unregister the platform. */
static int msm_pcm_remove(struct platform_device *pdev)
{
	kfree(dev_get_drvdata(&pdev->dev));
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
/* Device tree match table for this platform driver. */
static const struct of_device_id msm_pcm_dt_match[] = {
	{.compatible = "qti,msm-pcm-dsp"},
	{}
};
MODULE_DEVICE_TABLE(of, msm_pcm_dt_match);

/* Platform driver hooking probe/remove to the DT match table above. */
static struct platform_driver msm_pcm_driver = {
	.driver = {
		.name = "msm-pcm-dsp",
		.owner = THIS_MODULE,
		.of_match_table = msm_pcm_dt_match,
	},
	.probe = msm_pcm_probe,
	.remove = __devexit_p(msm_pcm_remove),
};
/* Module init: set up the shared wait queues, then register the driver. */
static int __init msm_soc_platform_init(void)
{
	init_waitqueue_head(&the_locks.enable_wait);
	init_waitqueue_head(&the_locks.eos_wait);
	init_waitqueue_head(&the_locks.write_wait);
	init_waitqueue_head(&the_locks.read_wait);

	return platform_driver_register(&msm_pcm_driver);
}
module_init(msm_soc_platform_init);
/* Module exit: unregister the platform driver. */
static void __exit msm_soc_platform_exit(void)
{
	platform_driver_unregister(&msm_pcm_driver);
}
module_exit(msm_soc_platform_exit);
MODULE_DESCRIPTION("PCM module platform driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
greguu/linux-4.9-rc3-c3x00 | drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 264 | 24339 | /*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
Module: rt2x00mac
Abstract: rt2x00 generic mac80211 routines.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "rt2x00.h"
#include "rt2x00lib.h"
/*
 * Build and queue a software-generated RTS or CTS-to-self frame that
 * protects @frag_skb.  Which of the two is built depends on the
 * USE_CTS_PROTECT rate flag of the data frame.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, or the
 * error from rt2x00queue_write_tx_frame().
 */
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
				struct data_queue *queue,
				struct sk_buff *frag_skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
	struct ieee80211_tx_info *rts_info;
	struct sk_buff *skb;
	unsigned int data_length;
	int retval = 0;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		data_length = sizeof(struct ieee80211_cts);
	else
		data_length = sizeof(struct ieee80211_rts);

	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
	if (unlikely(!skb)) {
		rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
		return -ENOMEM;
	}

	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
	skb_put(skb, data_length);

	/*
	 * Copy TX information over from original frame to
	 * RTS/CTS frame. Note that we set the no encryption flag
	 * since we don't want this frame to be encrypted.
	 * RTS frames should be acked, while CTS-to-self frames
	 * should not. The ready for TX flag is cleared to prevent
	 * it being automatically send when the descriptor is
	 * written to the hardware.
	 */
	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
	rts_info = IEEE80211_SKB_CB(skb);
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else
		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;

	/* Disable hardware encryption */
	rts_info->control.hw_key = NULL;

	/*
	 * RTS/CTS frame should use the length of the frame plus any
	 * encryption overhead that will be added by the hardware.
	 */
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/* Let mac80211 fill in the actual RTS/CTS frame contents. */
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
					frag_skb->data, data_length, tx_info,
					(struct ieee80211_cts *)(skb->data));
	else
		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
				  frag_skb->data, data_length, tx_info,
				  (struct ieee80211_rts *)(skb->data));

	retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
	if (retval) {
		dev_kfree_skb_any(skb);
		rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
	}

	return retval;
}
/*
 * mac80211 TX entry point.  Picks the target data queue (redirecting
 * DTIM-gated frames to the ATIM queue when the hardware has one),
 * software-generates RTS/CTS protection when the driver lacks a
 * hardware RTS threshold, writes the frame, and pauses the queue once
 * it fills beyond its threshold.  Frames that cannot be sent are freed.
 */
void rt2x00mac_tx(struct ieee80211_hw *hw,
		  struct ieee80211_tx_control *control,
		  struct sk_buff *skb)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	enum data_queue_qid qid = skb_get_queue_mapping(skb);
	struct data_queue *queue = NULL;

	/*
	 * Mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 * Note that we can only stop the TX queues inside the TX path
	 * due to possible race conditions in mac80211.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		goto exit_free_skb;

	/*
	 * Use the ATIM queue if appropriate and present.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE))
		qid = QID_ATIM;

	queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
	if (unlikely(!queue)) {
		rt2x00_err(rt2x00dev,
			   "Attempt to send packet over invalid queue %d\n"
			   "Please file bug report to %s\n", qid, DRV_PROJECT);
		goto exit_free_skb;
	}

	/*
	 * If CTS/RTS is required. create and queue that frame first.
	 * Make sure we have at least enough entries available to send
	 * this CTS/RTS frame as well as the data frame.
	 * Note that when the driver has set the set_rts_threshold()
	 * callback function it doesn't need software generation of
	 * either RTS or CTS-to-self frame and handles everything
	 * inside the hardware.
	 */
	if (!rt2x00dev->ops->hw->set_rts_threshold &&
	    (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
						IEEE80211_TX_RC_USE_CTS_PROTECT))) {
		if (rt2x00queue_available(queue) <= 1)
			goto exit_fail;

		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
			goto exit_fail;
	}

	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
		goto exit_fail;

	/*
	 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
	 * we should not use spin_lock_bh variant as bottom halve was already
	 * disabled before ieee80211_xmit() call.
	 */
	spin_lock(&queue->tx_lock);
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);
	spin_unlock(&queue->tx_lock);

	return;

 exit_fail:
	/* Queue is full or the protection frame failed: back-pressure. */
	spin_lock(&queue->tx_lock);
	rt2x00queue_pause_queue(queue);
	spin_unlock(&queue->tx_lock);
 exit_free_skb:
	ieee80211_free_txskb(hw, skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
int rt2x00mac_start(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Refuse to bring up hardware that has been unplugged, but report
	 * success so mac80211 does not keep retrying.
	 */
	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return rt2x00lib_start(rt2x00dev);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
void rt2x00mac_stop(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/* Only shut the device down while it is actually still there. */
	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		rt2x00lib_stop(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
/*
 * rt2x00mac_add_interface - mac80211 add_interface callback.
 * Reserves a beacon queue entry for the new virtual interface, bumps the
 * AP/STA interface counter and programs the MAC address into the device.
 * Returns 0 on success, -ENODEV when the device is gone or not started,
 * -ENOBUFS when no free beacon entry is available.
 */
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct data_queue *queue = rt2x00dev->bcn;
	struct queue_entry *entry = NULL;
	unsigned int i;

	/*
	 * Don't allow interfaces to be added while
	 * the device has disappeared or was never started.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
		return -ENODEV;

	/*
	 * Loop through all beacon queues to find a free
	 * entry. Since there are as much beacon entries
	 * as the maximum interfaces, this search shouldn't
	 * fail.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry = &queue->entries[i];
		/* test_and_set_bit atomically claims the entry */
		if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
			break;
	}

	if (unlikely(i == queue->limit))
		return -ENOBUFS;

	/*
	 * We are now absolutely sure the interface can be created,
	 * increase interface count and start initialization.
	 */
	if (vif->type == NL80211_IFTYPE_AP)
		rt2x00dev->intf_ap_count++;
	else
		rt2x00dev->intf_sta_count++;

	mutex_init(&intf->beacon_skb_mutex);
	intf->beacon = entry;

	/*
	 * The MAC address must be configured after the device
	 * has been initialized. Otherwise the device can reset
	 * the MAC registers.
	 * The BSSID address must only be configured in AP mode,
	 * however we should not send an empty BSSID address for
	 * STA interfaces at this time, since this can cause
	 * invalid behavior in the device.
	 */
	rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
			      vif->addr, NULL);

	/*
	 * Some filters depend on the current working mode. We can force
	 * an update during the next configure_filter() run by mac80211 by
	 * resetting the current packet_filter state.
	 */
	rt2x00dev->packet_filter = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
/*
 * rt2x00mac_remove_interface - mac80211 remove_interface callback.
 * Drops the interface counter, releases the beacon queue entry claimed in
 * rt2x00mac_add_interface() and clears the MAC/BSSID registers.
 */
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(vif);

	/*
	 * Don't allow interfaces to be removed while
	 * either the device has disappeared or when
	 * no interface is present.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
	    (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		rt2x00dev->intf_ap_count--;
	else
		rt2x00dev->intf_sta_count--;

	/*
	 * Release beacon entry so it is available for
	 * new interfaces again.
	 */
	clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);

	/*
	 * Make sure the bssid and mac address registers
	 * are cleared to prevent false ACKing of frames.
	 */
	rt2x00lib_config_intf(rt2x00dev, intf,
			      NL80211_IFTYPE_UNSPECIFIED, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
/*
 * rt2x00mac_config - mac80211 config callback.
 * Applies the hw->conf settings (channel, power, ...) with the RX queue
 * temporarily stopped, then restores default antenna configuration.
 * Always returns 0.
 */
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	/*
	 * mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	/*
	 * Some configuration parameters (e.g. channel and antenna values) can
	 * only be set when the radio is enabled, but do require the RX to
	 * be off. During this period we should keep link tuning enabled,
	 * if for any reason the link tuner must be reset, this will be
	 * handled by rt2x00lib_config().
	 */
	rt2x00queue_stop_queue(rt2x00dev->rx);

	/*
	 * When we've just turned on the radio, we want to reprogram
	 * everything to ensure a consistent state
	 */
	rt2x00lib_config(rt2x00dev, conf, changed);

	/*
	 * After the radio has been enabled we need to configure
	 * the antenna to the default settings. rt2x00lib_config_antenna()
	 * should determine if any action should be taken based on
	 * checking if diversity has been enabled or no antenna changes
	 * have been made since the last configuration change.
	 */
	rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);

	/* Turn RX back on */
	rt2x00queue_start_queue(rt2x00dev->rx);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_config);
/*
 * rt2x00mac_configure_filter - mac80211 configure_filter callback.
 * Normalizes the requested RX filter flags to what the hardware supports
 * and hands the result to the driver's config_filter implementation.
 * The accepted flag set is written back through @total_flags.
 */
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
				unsigned int changed_flags,
				unsigned int *total_flags,
				u64 multicast)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Mask off any flags we are going to ignore
	 * from the total_flags field.
	 */
	*total_flags &=
	    FIF_ALLMULTI |
	    FIF_FCSFAIL |
	    FIF_PLCPFAIL |
	    FIF_CONTROL |
	    FIF_PSPOLL |
	    FIF_OTHER_BSS;

	/*
	 * Apply some rules to the filters:
	 * - Some filters imply different filters to be set.
	 * - Some things we can't filter out at all.
	 * - Multicast filter seems to kill broadcast traffic so never use it.
	 */
	*total_flags |= FIF_ALLMULTI;

	/*
	 * If the device has a single filter for all control frames,
	 * FIF_CONTROL and FIF_PSPOLL flags imply each other.
	 * And if the device has more than one filter for control frames
	 * of different types, but has no separate filter for PS Poll frames,
	 * FIF_CONTROL flag implies FIF_PSPOLL.
	 */
	if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
		if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
			*total_flags |= FIF_CONTROL | FIF_PSPOLL;
	}
	if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
		if (*total_flags & FIF_CONTROL)
			*total_flags |= FIF_PSPOLL;
	}

	/* remember the accepted filter state for later mode changes */
	rt2x00dev->packet_filter = *total_flags;

	rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
}
EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
/*
 * rt2x00mac_set_tim_iter - per-interface helper for rt2x00mac_set_tim.
 * Flags a delayed beacon update on interface types that carry beacons;
 * all other interface types are ignored.
 */
static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_WDS:
		set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
		break;
	default:
		break;
	}
}
/*
 * rt2x00mac_set_tim - mac80211 set_tim callback.
 * Marks all beaconing interfaces for a beacon rebuild and schedules the
 * interface work item to carry it out. Always returns 0.
 */
int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		      bool set)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/* no beacons are sent while the radio is off */
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return 0;

	ieee80211_iterate_active_interfaces_atomic(
		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		rt2x00mac_set_tim_iter, rt2x00dev);

	/* queue work to update the beacon template */
	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_tim);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
/*
 * memcpy_tkip - split mac80211 TKIP key material into its components.
 * @crypto: crypto data to fill in
 * @key: key blob as laid out by mac80211 (encryption key, TX MIC, RX MIC)
 * @key_len: total length of @key in bytes
 *
 * NOTE(review): each memcpy below copies a fixed sizeof() amount after
 * only checking that key_len exceeds the start offset; this assumes
 * mac80211 always provides a full-length TKIP key blob — confirm.
 * rt2x00mac_set_key() rejects key_len > 32 before calling this.
 */
static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
{
	if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
		memcpy(crypto->key,
		       &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
		       sizeof(crypto->key));

	if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
		memcpy(crypto->tx_mic,
		       &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
		       sizeof(crypto->tx_mic));

	if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
		memcpy(crypto->rx_mic,
		       &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       sizeof(crypto->rx_mic));
}
/*
 * rt2x00mac_set_key - mac80211 set_key callback.
 * Translates the mac80211 key configuration into the driver's crypto
 * descriptor and dispatches it to the pairwise or shared key handler.
 * Returns 0 or a negative errno; -EOPNOTSUPP falls back to S/W crypto.
 */
int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		      struct ieee80211_key_conf *key)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int (*set_key) (struct rt2x00_dev *rt2x00dev,
			struct rt2x00lib_crypto *crypto,
			struct ieee80211_key_conf *key);
	struct rt2x00lib_crypto crypto;
	static const u8 bcast_addr[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, };
	struct rt2x00_sta *sta_priv = NULL;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
		return -EOPNOTSUPP;

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* reject key material longer than the 32 bytes copied below */
	if (key->keylen > 32)
		return -ENOSPC;

	memset(&crypto, 0, sizeof(crypto));

	crypto.bssidx = rt2x00lib_get_bssidx(rt2x00dev, vif);
	crypto.cipher = rt2x00crypto_key_to_cipher(key);
	if (crypto.cipher == CIPHER_NONE)
		return -EOPNOTSUPP;
	if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
		return -EOPNOTSUPP;

	crypto.cmd = cmd;

	if (sta) {
		crypto.address = sta->addr;
		sta_priv = sta_to_rt2x00_sta(sta);
		crypto.wcid = sta_priv->wcid;
	} else
		crypto.address = bcast_addr;

	if (crypto.cipher == CIPHER_TKIP)
		memcpy_tkip(&crypto, &key->key[0], key->keylen);
	else
		memcpy(crypto.key, &key->key[0], key->keylen);
	/*
	 * Each BSS has a maximum of 4 shared keys.
	 * Shared key index values:
	 *	0) BSS0 key0
	 *	1) BSS0 key1
	 *	...
	 *	4) BSS1 key0
	 *	...
	 *	8) BSS2 key0
	 *	...
	 * Both pairwise as shared key indices are determined by
	 * driver. This is required because the hardware requires
	 * keys to be assigned in correct order (When key 1 is
	 * provided but key 0 is not, then the key is not found
	 * by the hardware during RX).
	 */
	if (cmd == SET_KEY)
		key->hw_key_idx = 0;

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
		set_key = rt2x00dev->ops->lib->config_pairwise_key;
	else
		set_key = rt2x00dev->ops->lib->config_shared_key;

	if (!set_key)
		return -EOPNOTSUPP;

	return set_key(rt2x00dev, &crypto, key);
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
/* mac80211 sta_add callback: station creation is fully driver-specific. */
int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
/*
 * mac80211 sw_scan_start callback: flag the scan so other code paths can
 * see it, then silence the link tuner for the duration of the scan.
 */
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     const u8 *mac_addr)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
	rt2x00link_stop_tuner(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
/*
 * mac80211 sw_scan_complete callback: clear the scanning flag and resume
 * the link tuner that was stopped in rt2x00mac_sw_scan_start().
 */
void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
	rt2x00link_start_tuner(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete);
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
/*
* The dot11ACKFailureCount, dot11RTSFailureCount and
* dot11RTSSuccessCount are updated in interrupt time.
* dot11FCSErrorCount is updated in the link tuner.
*/
memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats));
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
/*
 * rt2x00mac_bss_info_changed - mac80211 bss_info_changed callback.
 * Applies BSSID, beaconing, association and ERP changes. Beacon queue
 * start/stop is refcounted via intf_beaconing so multiple beaconing
 * interfaces can coexist.
 */
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *bss_conf,
				u32 changes)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(vif);

	/*
	 * mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return;

	/*
	 * Update the BSSID.
	 */
	if (changes & BSS_CHANGED_BSSID)
		rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
				      bss_conf->bssid);

	/*
	 * Start/stop beaconing.
	 */
	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		mutex_lock(&intf->beacon_skb_mutex);
		if (!bss_conf->enable_beacon && intf->enable_beacon) {
			rt2x00dev->intf_beaconing--;
			intf->enable_beacon = false;

			if (rt2x00dev->intf_beaconing == 0) {
				/*
				 * Last beaconing interface disabled
				 * -> stop beacon queue.
				 */
				rt2x00queue_stop_queue(rt2x00dev->bcn);
			}

			/*
			 * Clear beacon in the H/W for this vif. This is needed
			 * to disable beaconing on this particular interface
			 * and keep it running on other interfaces.
			 */
			rt2x00queue_clear_beacon(rt2x00dev, vif);
		} else if (bss_conf->enable_beacon && !intf->enable_beacon) {
			rt2x00dev->intf_beaconing++;
			intf->enable_beacon = true;

			/*
			 * Upload beacon to the H/W. This is only required on
			 * USB devices. PCI devices fetch beacons periodically.
			 */
			if (rt2x00_is_usb(rt2x00dev))
				rt2x00queue_update_beacon(rt2x00dev, vif);

			if (rt2x00dev->intf_beaconing == 1) {
				/*
				 * First beaconing interface enabled
				 * -> start beacon queue.
				 */
				rt2x00queue_start_queue(rt2x00dev->bcn);
			}
		}
		mutex_unlock(&intf->beacon_skb_mutex);
	}

	/*
	 * When the association status has changed we must reset the link
	 * tuner counter. This is because some drivers determine if they
	 * should perform link tuning based on the number of seconds
	 * while associated or not associated.
	 */
	if (changes & BSS_CHANGED_ASSOC) {
		rt2x00dev->link.count = 0;

		if (bss_conf->assoc)
			rt2x00dev->intf_associated++;
		else
			rt2x00dev->intf_associated--;

		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);

		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
	}

	/*
	 * Check for access point which do not support 802.11e . We have to
	 * generate data frames sequence number in S/W for such AP, because
	 * of H/W bug.
	 */
	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);

	/*
	 * When the erp information has changed, we should perform
	 * additional configuration steps. For all other changes we are done.
	 */
	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE |
		       BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BASIC_RATES |
		       BSS_CHANGED_BEACON_INT | BSS_CHANGED_HT))
		rt2x00lib_config_erp(rt2x00dev, intf, bss_conf, changes);
}
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
/*
 * rt2x00mac_conf_tx - mac80211 conf_tx callback.
 * Stores the EDCA parameters (CW min/max as bit numbers, AIFS, TXOP)
 * on the matching data queue. Returns -EINVAL for an unknown queue.
 */
int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
		      struct ieee80211_vif *vif, u16 queue_idx,
		      const struct ieee80211_tx_queue_params *params)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
	if (unlikely(!queue))
		return -EINVAL;

	/*
	 * The passed variables are stored as real value ((2^n)-1).
	 * Ralink registers require to know the bit number 'n'.
	 */
	if (params->cw_min > 0)
		queue->cw_min = fls(params->cw_min);
	else
		queue->cw_min = 5; /* cw_min: 2^5 = 32. */

	if (params->cw_max > 0)
		queue->cw_max = fls(params->cw_max);
	else
		queue->cw_max = 10; /* cw_max: 2^10 = 1024. */

	queue->aifs = params->aifs;
	queue->txop = params->txop;

	rt2x00_dbg(rt2x00dev,
		   "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d\n",
		   queue_idx, queue->cw_min, queue->cw_max, queue->aifs,
		   queue->txop);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx);
/*
 * mac80211 rfkill_poll callback: the driver reports non-zero while the
 * radio may operate; rfkill wants the inverse (blocked) notion.
 */
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	bool blocked;

	blocked = !rt2x00dev->ops->lib->rfkill_poll(rt2x00dev);
	wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
/*
 * rt2x00mac_flush - mac80211 flush callback.
 * Drains all TX data queues; when @drop is true pending frames may be
 * discarded instead of transmitted. The @queues bitmap and @vif are
 * ignored: every TX queue is flushed unconditionally.
 */
void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     u32 queues, bool drop)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);
/*
 * rt2x00mac_set_antenna - mac80211 set_antenna callback.
 * Validates the requested TX/RX antenna bitmasks (1, 2 or 3 = both),
 * translates "both" into hardware or software diversity and applies the
 * resulting configuration. Returns -EINVAL for invalid masks.
 */
int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct link_ant *ant = &rt2x00dev->link.ant;
	struct antenna_setup *def = &rt2x00dev->default_ant;
	struct antenna_setup setup;

	/*
	 * The antenna value is not supposed to be 0,
	 * or exceed the maximum number of antenna's.
	 */
	if (!tx_ant || (tx_ant & ~3) || !rx_ant || (rx_ant & ~3))
		return -EINVAL;

	/*
	 * When the client tried to configure the antenna to or from
	 * diversity mode, we must reset the default antenna as well
	 * as that controls the diversity switch.
	 */
	if (ant->flags & ANTENNA_TX_DIVERSITY && tx_ant != 3)
		ant->flags &= ~ANTENNA_TX_DIVERSITY;
	if (ant->flags & ANTENNA_RX_DIVERSITY && rx_ant != 3)
		ant->flags &= ~ANTENNA_RX_DIVERSITY;

	/*
	 * If diversity is being enabled, check if we need hardware
	 * or software diversity. In the latter case, reset the value,
	 * and make sure we update the antenna flags to have the
	 * link tuner pick up the diversity tuning.
	 */
	if (tx_ant == 3 && def->tx == ANTENNA_SW_DIVERSITY) {
		tx_ant = ANTENNA_SW_DIVERSITY;
		ant->flags |= ANTENNA_TX_DIVERSITY;
	}

	if (rx_ant == 3 && def->rx == ANTENNA_SW_DIVERSITY) {
		rx_ant = ANTENNA_SW_DIVERSITY;
		ant->flags |= ANTENNA_RX_DIVERSITY;
	}

	setup.tx = tx_ant;
	setup.rx = rx_ant;
	setup.rx_chain_num = 0;
	setup.tx_chain_num = 0;

	rt2x00lib_config_antenna(rt2x00dev, setup);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_antenna);
/*
 * rt2x00mac_get_antenna - mac80211 get_antenna callback.
 * Reports the configured TX/RX antennas; while software diversity is in
 * use, diversity is reported rather than the momentary active antenna.
 * Always returns 0.
 */
int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct link_ant *ant = &rt2x00dev->link.ant;
	struct antenna_setup *active = &rt2x00dev->link.ant.active;

	/*
	 * When software diversity is active, we must report this to the
	 * client and not the current active antenna state.
	 */
	if (ant->flags & ANTENNA_TX_DIVERSITY)
		*tx_ant = ANTENNA_HW_DIVERSITY;
	else
		*tx_ant = active->tx;

	if (ant->flags & ANTENNA_RX_DIVERSITY)
		*rx_ant = ANTENNA_HW_DIVERSITY;
	else
		*rx_ant = active->rx;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_antenna);
/*
 * rt2x00mac_get_ringparam - mac80211 get_ringparam callback.
 * @tx/@tx_max: sum of pending frames / total capacity over all TX queues
 * @rx/@rx_max: pending frames / capacity of the RX queue
 *
 * The TX values are accumulated with '+=' across all data queues, so
 * initialize them explicitly instead of relying on the caller having
 * pre-zeroed the out-parameters.
 */
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
			     u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	*tx = 0;
	*tx_max = 0;

	tx_queue_for_each(rt2x00dev, queue) {
		*tx += queue->length;
		*tx_max += queue->limit;
	}

	*rx = rt2x00dev->rx->length;
	*rx_max = rt2x00dev->rx->limit;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam);
bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
tx_queue_for_each(rt2x00dev, queue) {
if (!rt2x00queue_empty(queue))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx_frames_pending);
| gpl-2.0 |
denghl/linux3.x | net/batman-adv/gateway_client.c | 264 | 24549 | /* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "main.h"
#include "sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include "translation-table.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
/* These are the offsets of the "hw type" and "hw address length" in the dhcp
* packet starting at the beginning of the dhcp header
*/
#define BATADV_DHCP_HTYPE_OFFSET 1
#define BATADV_DHCP_HLEN_OFFSET 2
/* Value of htype representing Ethernet */
#define BATADV_DHCP_HTYPE_ETHERNET 0x01
/* This is the offset of the "chaddr" field in the dhcp packet starting at the
* beginning of the dhcp header
*/
#define BATADV_DHCP_CHADDR_OFFSET 28
/**
 * batadv_gw_node_free_ref - release a gateway node reference
 * @gw_node: gateway node whose refcount is decremented
 *
 * On the last reference, the originator reference held by the node is
 * dropped and the node itself is freed after an RCU grace period.
 */
static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount)) {
		batadv_orig_node_free_ref(gw_node->orig_node);
		kfree_rcu(gw_node, rcu);
	}
}
/**
 * batadv_gw_get_selected_gw_node - get the currently selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns the selected gateway node with its refcount incremented, or
 * NULL when no gateway is selected or the node is already being freed.
 */
static struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node;

	rcu_read_lock();
	gw_node = rcu_dereference(bat_priv->gw.curr_gw);
	if (gw_node && !atomic_inc_not_zero(&gw_node->refcount))
		gw_node = NULL;
	rcu_read_unlock();

	return gw_node;
}
/**
 * batadv_gw_get_selected_orig - get originator of the selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns the originator of the currently selected gateway with its
 * refcount incremented, or NULL. The caller must release the reference
 * with batadv_orig_node_free_ref().
 */
struct batadv_orig_node *
batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node;
	struct batadv_orig_node *orig_node = NULL;

	gw_node = batadv_gw_get_selected_gw_node(bat_priv);
	if (!gw_node)
		goto out;

	rcu_read_lock();
	orig_node = gw_node->orig_node;
	if (!orig_node)
		goto unlock;

	/* an originator whose refcount already hit zero is being freed */
	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

unlock:
	rcu_read_unlock();
out:
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
	return orig_node;
}
/**
 * batadv_gw_select - replace the currently selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @new_gw_node: gateway to select (NULL deselects)
 *
 * Takes a reference on @new_gw_node, publishes it via RCU and drops the
 * reference held on the previous selection, all under gw.list_lock.
 */
static void batadv_gw_select(struct batadv_priv *bat_priv,
			     struct batadv_gw_node *new_gw_node)
{
	struct batadv_gw_node *curr_gw_node;

	spin_lock_bh(&bat_priv->gw.list_lock);

	/* a node whose refcount dropped to zero must not be selected */
	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
	rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);

	if (curr_gw_node)
		batadv_gw_node_free_ref(curr_gw_node);

	spin_unlock_bh(&bat_priv->gw.list_lock);
}
/**
 * batadv_gw_reselect - force a gateway reselection
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Set a flag to remind the GW component to perform a new gateway reselection.
 * However this function does not ensure that the current gateway is going to
 * be deselected. The reselection mechanism may elect the same gateway once
 * again.
 *
 * This means that invoking batadv_gw_reselect() does not guarantee a gateway
 * change and therefore a uevent is not necessarily expected.
 */
void batadv_gw_reselect(struct batadv_priv *bat_priv)
{
	atomic_set(&bat_priv->gw.reselect, 1);
}
/**
 * batadv_gw_get_best_gw_node - elect the best gateway from the gw list
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks the gateway list under RCU and picks the winner according to
 * the configured gw_sel_class. Returns the elected gateway with its
 * refcount incremented, or NULL when no usable gateway was found.
 */
static struct batadv_gw_node *
batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
	struct batadv_neigh_node *router;
	struct batadv_neigh_ifinfo *router_ifinfo;
	struct batadv_gw_node *gw_node, *curr_gw = NULL;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	uint32_t gw_divisor;
	uint8_t max_tq = 0;
	uint8_t tq_avg;
	struct batadv_orig_node *orig_node;

	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
	gw_divisor *= 64;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
		if (gw_node->deleted)
			continue;

		orig_node = gw_node->orig_node;
		router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
		if (!router)
			continue;

		router_ifinfo = batadv_neigh_ifinfo_get(router,
							BATADV_IF_DEFAULT);
		if (!router_ifinfo)
			goto next;

		/* skip nodes that are already being freed */
		if (!atomic_inc_not_zero(&gw_node->refcount))
			goto next;

		tq_avg = router_ifinfo->bat_iv.tq_avg;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			/* combine squared link quality with the announced
			 * download bandwidth into a single election metric
			 */
			tmp_gw_factor = tq_avg * tq_avg;
			tmp_gw_factor *= gw_node->bandwidth_down;
			tmp_gw_factor *= 100 * 100;
			tmp_gw_factor /= gw_divisor;

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (tq_avg > max_tq))) {
				if (curr_gw)
					batadv_gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;

		default: /* 2: stable connection (use best statistic)
			  * 3: fast-switch (use best statistic but change as
			  *    soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *    soon as a better gateway appears which has
			  *    $routing_class more tq points)
			  */
			if (tq_avg > max_tq) {
				if (curr_gw)
					batadv_gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;
		}

		if (tq_avg > max_tq)
			max_tq = tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;

		/* drop the reference taken for this loop iteration */
		batadv_gw_node_free_ref(gw_node);

next:
		batadv_neigh_node_free_ref(router);
		if (router_ifinfo)
			batadv_neigh_ifinfo_free_ref(router_ifinfo);
	}
	rcu_read_unlock();

	return curr_gw;
}
/**
 * batadv_gw_check_client_stop - check if client mode has been switched off
 * @bat_priv: the bat priv with all the soft interface information
 *
 * This function assumes the caller has checked that the gw state *is actually
 * changing*. This function is not supposed to be called when there is no state
 * change.
 */
void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *curr_gw;

	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
		return;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
	if (!curr_gw)
		return;

	/* deselect the current gateway so that next time that client mode is
	 * enabled a proper GW_ADD event can be sent
	 */
	batadv_gw_select(bat_priv, NULL);

	/* if batman-adv is switching the gw client mode off and a gateway was
	 * already selected, send a DEL uevent
	 */
	batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);

	/* drop the reference taken by batadv_gw_get_selected_gw_node() */
	batadv_gw_node_free_ref(curr_gw);
}
/**
 * batadv_gw_election - run the gateway election if a reselection is due
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Compares the currently selected gateway with the best candidate and,
 * when they differ, switches over and emits the matching ADD/DEL/CHANGE
 * uevent. Only runs in GW client mode.
 */
void batadv_gw_election(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
	struct batadv_neigh_node *router = NULL;
	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
	char gw_addr[18] = { '\0' };

	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
		goto out;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

	/* nothing to do unless a reselection was requested or no gateway
	 * is selected at all
	 */
	if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
		goto out;

	next_gw = batadv_gw_get_best_gw_node(bat_priv);

	if (curr_gw == next_gw)
		goto out;

	if (next_gw) {
		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);

		/* a gateway without a usable router cannot be selected;
		 * retry the election on the next run
		 */
		router = batadv_orig_router_get(next_gw->orig_node,
						BATADV_IF_DEFAULT);
		if (!router) {
			batadv_gw_reselect(bat_priv);
			goto out;
		}

		router_ifinfo = batadv_neigh_ifinfo_get(router,
							BATADV_IF_DEFAULT);
		if (!router_ifinfo) {
			batadv_gw_reselect(bat_priv);
			goto out;
		}
	}

	if ((curr_gw) && (!next_gw)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Removing selected gateway - no gateway in range\n");
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
				    NULL);
	} else if ((!curr_gw) && (next_gw)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
			   next_gw->orig_node->orig,
			   next_gw->bandwidth_down / 10,
			   next_gw->bandwidth_down % 10,
			   next_gw->bandwidth_up / 10,
			   next_gw->bandwidth_up % 10,
			   router_ifinfo->bat_iv.tq_avg);
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
				    gw_addr);
	} else {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
			   next_gw->orig_node->orig,
			   next_gw->bandwidth_down / 10,
			   next_gw->bandwidth_down % 10,
			   next_gw->bandwidth_up / 10,
			   next_gw->bandwidth_up % 10,
			   router_ifinfo->bat_iv.tq_avg);
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
				    gw_addr);
	}

	batadv_gw_select(bat_priv, next_gw);

out:
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (next_gw)
		batadv_gw_node_free_ref(next_gw);
	if (router)
		batadv_neigh_node_free_ref(router);
	if (router_ifinfo)
		batadv_neigh_ifinfo_free_ref(router_ifinfo);
}
/**
 * batadv_gw_check_election - check if @orig_node beats the current gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator that may be a better gateway
 *
 * Requests a gateway reselection when no gateway is currently usable or
 * when @orig_node's TQ beats the current gateway's by the margin the
 * routing class demands.
 */
void batadv_gw_check_election(struct batadv_priv *bat_priv,
			      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_ifinfo *router_orig_tq = NULL;
	struct batadv_neigh_ifinfo *router_gw_tq = NULL;
	struct batadv_orig_node *curr_gw_orig;
	struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
	uint8_t gw_tq_avg, orig_tq_avg;

	curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
	if (!curr_gw_orig)
		goto reselect;

	router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
	if (!router_gw)
		goto reselect;

	router_gw_tq = batadv_neigh_ifinfo_get(router_gw,
					       BATADV_IF_DEFAULT);
	if (!router_gw_tq)
		goto reselect;

	/* this node already is the gateway */
	if (curr_gw_orig == orig_node)
		goto out;

	router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
	if (!router_orig)
		goto out;

	router_orig_tq = batadv_neigh_ifinfo_get(router_orig,
						 BATADV_IF_DEFAULT);
	if (!router_orig_tq)
		goto out;

	gw_tq_avg = router_gw_tq->bat_iv.tq_avg;
	orig_tq_avg = router_orig_tq->bat_iv.tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/* if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 */
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
		   gw_tq_avg, orig_tq_avg);

reselect:
	batadv_gw_reselect(bat_priv);
out:
	if (curr_gw_orig)
		batadv_orig_node_free_ref(curr_gw_orig);
	if (router_gw)
		batadv_neigh_node_free_ref(router_gw);
	if (router_orig)
		batadv_neigh_node_free_ref(router_orig);
	if (router_gw_tq)
		batadv_neigh_ifinfo_free_ref(router_gw_tq);
	if (router_orig_tq)
		batadv_neigh_ifinfo_free_ref(router_orig_tq);
}
/**
 * batadv_gw_node_add - add gateway node to list of available gateways
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 * @gateway: announced bandwidth information (network byte order)
 */
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
			       struct batadv_orig_node *orig_node,
			       struct batadv_tvlv_gateway_data *gateway)
{
	struct batadv_gw_node *gw_node;

	if (gateway->bandwidth_down == 0)
		return;

	/* the gw_node keeps a reference on the originator it announces */
	if (!atomic_inc_not_zero(&orig_node->refcount))
		return;

	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
	if (!gw_node) {
		batadv_orig_node_free_ref(orig_node);
		return;
	}

	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	/* store the announced bandwidth right away: without this the node
	 * would sit in the list with a kzalloc'ed 0/0 bandwidth until the
	 * next TVLV update and lose any bandwidth-based election
	 */
	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw.list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
	spin_unlock_bh(&bat_priv->gw.list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
		   orig_node->orig,
		   ntohl(gateway->bandwidth_down) / 10,
		   ntohl(gateway->bandwidth_down) % 10,
		   ntohl(gateway->bandwidth_up) / 10,
		   ntohl(gateway->bandwidth_up) % 10);
}
/**
 * batadv_gw_node_get - retrieve gateway node from list of available gateways
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 *
 * Returns the matching, non-deleted gateway node with its refcount
 * incremented (caller must drop it via batadv_gw_node_free_ref), or
 * NULL otherwise.
 */
static struct batadv_gw_node *
batadv_gw_node_get(struct batadv_priv *bat_priv,
		   struct batadv_orig_node *orig_node)
{
	struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
		if (gw_node_tmp->orig_node != orig_node)
			continue;

		if (gw_node_tmp->deleted)
			continue;

		/* skip nodes that are already being freed */
		if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
			continue;

		gw_node = gw_node_tmp;
		break;
	}
	rcu_read_unlock();

	return gw_node;
}
/**
 * batadv_gw_node_update - update list of available gateways with changed
 *  bandwidth information
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 * @gateway: announced bandwidth information (network byte order)
 *
 * Creates the gateway node on first announcement, refreshes its stored
 * bandwidth on change, and marks it deleted (triggering a reselection if
 * it was the current gateway) when a zero download bandwidth arrives.
 */
void batadv_gw_node_update(struct batadv_priv *bat_priv,
			   struct batadv_orig_node *orig_node,
			   struct batadv_tvlv_gateway_data *gateway)
{
	struct batadv_gw_node *gw_node, *curr_gw = NULL;

	gw_node = batadv_gw_node_get(bat_priv, orig_node);
	if (!gw_node) {
		batadv_gw_node_add(bat_priv, orig_node, gateway);
		goto out;
	}

	/* nothing changed - avoid the log noise below */
	if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
	    (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
		goto out;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
		   orig_node->orig,
		   gw_node->bandwidth_down / 10,
		   gw_node->bandwidth_down % 10,
		   gw_node->bandwidth_up / 10,
		   gw_node->bandwidth_up % 10,
		   ntohl(gateway->bandwidth_down) / 10,
		   ntohl(gateway->bandwidth_down) % 10,
		   ntohl(gateway->bandwidth_up) / 10,
		   ntohl(gateway->bandwidth_up) % 10);

	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);

	gw_node->deleted = 0;
	if (ntohl(gateway->bandwidth_down) == 0) {
		/* record the deletion time; the purge timer frees the node */
		gw_node->deleted = jiffies;
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Gateway %pM removed from gateway list\n",
			   orig_node->orig);

		/* Note: We don't need a NULL check here, since curr_gw never
		 * gets dereferenced.
		 */
		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
		if (gw_node == curr_gw)
			batadv_gw_reselect(bat_priv);
	}

out:
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
}
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_tvlv_gateway_data gateway;
gateway.bandwidth_down = 0;
gateway.bandwidth_up = 0;
batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
/**
 * batadv_gw_node_purge - remove expired gateway nodes from the gw list
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Drops every gateway node that has been marked deleted for longer than
 * twice the purge timeout (or unconditionally once the mesh is no longer
 * active) and requests a reselection if the current gateway was removed.
 */
void batadv_gw_node_purge(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node, *curr_gw;
	struct hlist_node *node_tmp;
	unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
	int do_reselect = 0;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

	spin_lock_bh(&bat_priv->gw.list_lock);

	hlist_for_each_entry_safe(gw_node, node_tmp,
				  &bat_priv->gw.list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
			continue;

		if (curr_gw == gw_node)
			do_reselect = 1;

		hlist_del_rcu(&gw_node->list);
		batadv_gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw.list_lock);

	/* gw_reselect() needs to acquire the gw_list_lock */
	if (do_reselect)
		batadv_gw_reselect(bat_priv);

	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
}
/* fails if orig_node has no router */
/* Print one gateway line into @seq ("=>" marks the selected gateway).
 * Returns the seq_printf() result, or -1 when the originator has no
 * router / ifinfo on the default interface.
 */
static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
struct seq_file *seq,
const struct batadv_gw_node *gw_node)
{
struct batadv_gw_node *curr_gw;
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
int ret = -1;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
(curr_gw == gw_node ? "=>" : "  "),
gw_node->orig_node->orig,
router_ifinfo->bat_iv.tq_avg, router->addr,
router->if_incoming->net_dev->name,
gw_node->bandwidth_down / 10,
gw_node->bandwidth_down % 10,
gw_node->bandwidth_up / 10,
gw_node->bandwidth_up % 10);
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
out:
/* Release the references taken above, in reverse order. */
if (router_ifinfo)
batadv_neigh_ifinfo_free_ref(router_ifinfo);
if (router)
batadv_neigh_node_free_ref(router);
return ret;
}
/* debugfs seq_file handler: dump the list of known (non-deleted)
 * gateways for the soft interface behind @seq->private.
 */
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hard_iface *primary_if;
struct batadv_gw_node *gw_node;
int gw_count = 0;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
goto out;
seq_printf(seq,
"      %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
"Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
/* The gateway list is RCU-protected; entries marked deleted are
 * skipped here and reclaimed later by batadv_gw_node_purge().
 */
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
continue;
/* fails if orig_node has no router */
if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
gw_count++;
}
rcu_read_unlock();
if (gw_count == 0)
seq_puts(seq, "No gateways in range ...\n");
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
return 0;
}
/**
 * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
 * @skb: the packet to check
 * @header_len: a pointer to the batman-adv header size
 * @chaddr: buffer where the client address will be stored. Valid
 *  only if the function returns BATADV_DHCP_TO_CLIENT
 *
 * Returns:
 * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error
 *   while parsing it
 * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server
 * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client
 *
 * This function may re-allocate the data buffer of the skb passed as argument.
 */
enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
uint8_t *chaddr)
{
enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
struct ethhdr *ethhdr;
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
struct vlan_ethhdr *vhdr;
int chaddr_offset;
__be16 proto;
uint8_t *p;
/* check for ethernet header */
if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
return BATADV_DHCP_NO;
ethhdr = eth_hdr(skb);
proto = ethhdr->h_proto;
*header_len += ETH_HLEN;
/* check for initial vlan header */
if (proto == htons(ETH_P_8021Q)) {
if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
return BATADV_DHCP_NO;
vhdr = vlan_eth_hdr(skb);
proto = vhdr->h_vlan_encapsulated_proto;
*header_len += VLAN_HLEN;
}
/* check for ip header */
switch (proto) {
case htons(ETH_P_IP):
if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
return BATADV_DHCP_NO;
iphdr = (struct iphdr *)(skb->data + *header_len);
/* ihl counts 32-bit words, hence the * 4 */
*header_len += iphdr->ihl * 4;
/* check for udp header */
if (iphdr->protocol != IPPROTO_UDP)
return BATADV_DHCP_NO;
break;
case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
return BATADV_DHCP_NO;
ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
*header_len += sizeof(*ipv6hdr);
/* check for udp header */
if (ipv6hdr->nexthdr != IPPROTO_UDP)
return BATADV_DHCP_NO;
break;
default:
return BATADV_DHCP_NO;
}
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return BATADV_DHCP_NO;
/* skb->data might have been reallocated by pskb_may_pull() */
ethhdr = eth_hdr(skb);
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
/* check for bootp port */
/* 67/68 are BOOTP/DHCPv4 ports, 546/547 the DHCPv6 ones; only the
 * server-side ports are compared here.
 */
switch (proto) {
case htons(ETH_P_IP):
if (udphdr->dest == htons(67))
ret = BATADV_DHCP_TO_SERVER;
else if (udphdr->source == htons(67))
ret = BATADV_DHCP_TO_CLIENT;
break;
case htons(ETH_P_IPV6):
if (udphdr->dest == htons(547))
ret = BATADV_DHCP_TO_SERVER;
else if (udphdr->source == htons(547))
ret = BATADV_DHCP_TO_CLIENT;
break;
}
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
if (ret == BATADV_DHCP_TO_CLIENT &&
pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
return BATADV_DHCP_NO;
/* check if the DHCP packet carries a valid Ethernet address */
p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET;
if (*p != ETH_ALEN)
return BATADV_DHCP_NO;
ether_addr_copy(chaddr, skb->data + chaddr_offset);
}
return ret;
}
/**
 * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the outgoing packet
 *
 * Check if the skb is a DHCP request and if it is sent to the current best GW
 * server. Due to topology changes it may be the case that the GW server
 * previously selected is not the best one anymore.
 *
 * Returns true if the packet destination is unicast and it is not the best gw,
 * false otherwise.
 *
 * This call might reallocate skb data.
 * Must be invoked only when the DHCP packet is going TO a DHCP SERVER.
 */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
			    struct sk_buff *skb)
{
	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
	struct batadv_orig_node *orig_dst_node = NULL;
	struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
	struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	bool out_of_range = false;
	uint8_t curr_tq_avg;
	unsigned short vid;

	vid = batadv_get_vid(skb, 0);

	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
						 ethhdr->h_dest, vid);
	if (!orig_dst_node)
		goto out;

	gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
	/* BUGFIX: this used to read "if (!gw_node->bandwidth_down == 0)",
	 * which (a) dereferences gw_node before checking it for NULL and
	 * (b) parses as "(!bw) == 0" because '!' binds tighter than '==',
	 * i.e. it bailed out for every gateway that DID announce bandwidth.
	 * The intent is: no gw_node, or a retracted (zero bandwidth)
	 * announcement, means the destination is not a gateway.
	 */
	if (!gw_node)
		goto out;
	if (gw_node->bandwidth_down == 0)
		goto out;

	switch (atomic_read(&bat_priv->gw_mode)) {
	case BATADV_GW_MODE_SERVER:
		/* If we are a GW then we are our best GW. We can artificially
		 * set the tq towards ourself as the maximum value
		 */
		curr_tq_avg = BATADV_TQ_MAX_VALUE;
		break;
	case BATADV_GW_MODE_CLIENT:
		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
		if (!curr_gw)
			goto out;
		/* packet is going to our gateway */
		if (curr_gw->orig_node == orig_dst_node)
			goto out;
		/* If the dhcp packet has been sent to a different gw,
		 * we have to evaluate whether the old gw is still
		 * reliable enough
		 */
		neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
						NULL);
		if (!neigh_curr)
			goto out;
		curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr,
						      BATADV_IF_DEFAULT);
		if (!curr_ifinfo)
			goto out;
		curr_tq_avg = curr_ifinfo->bat_iv.tq_avg;
		batadv_neigh_ifinfo_free_ref(curr_ifinfo);
		break;
	case BATADV_GW_MODE_OFF:
	default:
		goto out;
	}

	neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
	if (!neigh_old)
		goto out;
	old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT);
	if (!old_ifinfo)
		goto out;

	/* Only declare the old gateway out of range if the link towards
	 * our current best gateway is notably better.
	 */
	if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD)
		out_of_range = true;
	batadv_neigh_ifinfo_free_ref(old_ifinfo);

out:
	if (orig_dst_node)
		batadv_orig_node_free_ref(orig_dst_node);
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
	if (neigh_old)
		batadv_neigh_node_free_ref(neigh_old);
	if (neigh_curr)
		batadv_neigh_node_free_ref(neigh_curr);
	return out_of_range;
}
| gpl-2.0 |
leshak/i5700-leshak-kernel | arch/m32r/kernel/sys_m32r.c | 264 | 5270 | /*
* linux/arch/m32r/kernel/sys_m32r.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/M32R platform.
*
* Taken from i386 version.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
/*
 * sys_tas() - test-and-set
 */
/* Atomically reads *addr and stores 1 there, returning the previous
 * value; returns -EFAULT if the user pointer is invalid (either up
 * front via access_ok() or through the exception-table fixup at 3:).
 */
asmlinkage int sys_tas(int __user *addr)
{
int oldval;
if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
return -EFAULT;
/* atomic operation:
 * oldval = *addr; *addr = 1;
 */
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "r4", "%1")
" .fillinsn\n"
"1:\n"
" lock %0, @%1 -> unlock %2, @%1\n"
"2:\n"
/* NOTE:
 * The m32r processor can accept interrupts only
 * at the 32-bit instruction boundary.
 * So, in the above code, the "unlock" instruction
 * can be executed continuously after the "lock"
 * instruction execution without any interruptions.
 */
".section .fixup,\"ax\"\n"
" .balign 4\n"
"3: ldi %0, #%3\n"
" seth r14, #high(2b)\n"
" or3 r14, r14, #low(2b)\n"
" jmp r14\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .balign 4\n"
" .long 1b,3b\n"
".previous\n"
: "=&r" (oldval)
: "r" (addr), "r" (1), "i"(-EFAULT)
: "r14", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
);
return oldval;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
int error = -EBADF;
struct file *file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
goto out;
}
down_write(¤t->mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(¤t->mm->mmap_sem);
if (file)
fput(file);
out:
return error;
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
/* Dispatches on the low 16 bits of @call to the individual SysV IPC
 * syscalls; the high 16 bits carry a legacy interface "version".
 * Returns whatever the dispatched syscall returns, -EINVAL/-EFAULT on
 * bad user pointers, or -ENOSYS for an unknown call number.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
int third, void __user *ptr, long fifth)
{
int version, ret;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr,
second, (const struct timespec __user *)fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
/* The semctl "fourth" argument is passed indirectly through ptr. */
if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
case MSGSND:
return sys_msgsnd (first, (struct msgbuf __user *) ptr,
second, third);
case MSGRCV:
switch (version) {
case 0: {
/* Old-style msgrcv packs msgp/msgtyp into an ipc_kludge. */
struct ipc_kludge tmp;
if (!ptr)
return -EINVAL;
if (copy_from_user(&tmp,
(struct ipc_kludge __user *) ptr,
sizeof (tmp)))
return -EFAULT;
return sys_msgrcv (first, tmp.msgp, second,
tmp.msgtyp, third);
}
default:
return sys_msgrcv (first,
(struct msgbuf __user *) ptr,
second, fifth, third);
}
case MSGGET:
return sys_msgget ((key_t) first, second);
case MSGCTL:
return sys_msgctl (first, second,
(struct msqid_ds __user *) ptr);
case SHMAT: {
ulong raddr;
/* do_shmat returns the attach address through *third. */
if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
sizeof(ulong)))
return -EFAULT;
ret = do_shmat (first, (char __user *) ptr, second, &raddr);
if (ret)
return ret;
return put_user (raddr, (ulong __user *) third);
}
case SHMDT:
return sys_shmdt ((char __user *)ptr);
case SHMGET:
return sys_shmget (first, second, third);
case SHMCTL:
return sys_shmctl (first, second,
(struct shmid_ds __user *) ptr);
default:
return -ENOSYS;
}
}
/*
 * sys_uname - legacy uname(2): copy the system identification strings
 * to user space. Returns 0 on success, -EFAULT on a NULL or faulting
 * user buffer.
 */
asmlinkage int sys_uname(struct old_utsname __user * name)
{
	int uncopied;

	if (!name)
		return -EFAULT;

	/* uts_sem guards the utsname data against concurrent sethostname etc. */
	down_read(&uts_sem);
	uncopied = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);

	return uncopied ? -EFAULT : 0;
}
/* Flush caches for a user range. The arguments are currently ignored
 * and the whole cache is flushed instead.
 */
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
/* This should flush more selectively ... */
_flush_cache_all();
return 0;
}
/* Cache control syscall stub; always returns -ENOSYS. */
asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
{
/* Not implemented yet. */
return -ENOSYS;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
/* Issues the execve trap directly with the arguments pinned to the
 * registers the syscall ABI expects (r7 = syscall number, r0-r2 =
 * filename/argv/envp); the result comes back in r0.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long __scno __asm__ ("r7") = __NR_execve;
register long __arg3 __asm__ ("r2") = (long)(envp);
register long __arg2 __asm__ ("r1") = (long)(argv);
register long __res __asm__ ("r0") = (long)(filename);
__asm__ __volatile__ (
"trap #" SYSCALL_VECTOR "|| nop"
: "=r" (__res)
: "r" (__scno), "0" (__res), "r" (__arg2),
"r" (__arg3)
: "memory");
return __res;
}
| gpl-2.0 |
pranith/linux-next | drivers/video/backlight/tdo24m.c | 520 | 10928 | /*
* tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels
*
* Copyright (C) 2008 Marvell International Ltd.
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/slab.h>
/* FB_BLANK_UNBLANK/NORMAL count as "on"; deeper blank levels are "off". */
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
/* Largest SPI frame: a 2-parameter command packed into 4 bytes. */
#define TDO24M_SPI_BUFF_SIZE (4)
#define MODE_QVGA 0
#define MODE_VGA 1
/* Per-panel driver state: SPI link, LCD class device, a reusable
 * single-transfer SPI message, and the model-specific mode hook.
 */
struct tdo24m {
struct spi_device *spi_dev;
struct lcd_device *lcd_dev;
struct spi_message msg;
struct spi_transfer xfer;
uint8_t *buf;
int (*adj_mode)(struct tdo24m *lcd, int mode);
int color_invert;
int power;
int mode;
};
/* use bit 30, 31 as the indicator of command parameter number */
/* Commands are 9-bit words; each parameter gets bit 8 set (0x100 etc.)
 * to mark it as data rather than a command byte.
 */
#define CMD0(x) ((0 << 30) | (x))
#define CMD1(x, x1) ((1 << 30) | ((x) << 9) | 0x100 | (x1))
#define CMD2(x, x1, x2) ((2 << 30) | ((x) << 18) | 0x20000 |\
((x1) << 9) | 0x100 | (x2))
#define CMD_NULL (-1)
/* CMD_NULL-terminated command sequences sent to the panel controller.
 * Separate pass-through/transfer tables exist for the TDO24M and TDO35S
 * models; lcd_panel_config is shared by both.
 */
static const uint32_t lcd_panel_reset[] = {
CMD0(0x1), /* reset */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
CMD_NULL,
};
static const uint32_t lcd_panel_on[] = {
CMD0(0x29), /* Display ON */
CMD2(0xB8, 0xFF, 0xF9), /* Output Control */
CMD0(0x11), /* Sleep out */
CMD1(0xB0, 0x16), /* Wake */
CMD_NULL,
};
static const uint32_t lcd_panel_off[] = {
CMD0(0x28), /* Display OFF */
CMD2(0xB8, 0x80, 0x02), /* Output Control */
CMD0(0x10), /* Sleep in */
CMD1(0xB0, 0x00), /* Deep stand by in */
CMD_NULL,
};
static const uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
CMD1(0x36, 0x50),
CMD1(0x3B, 0x00),
CMD_NULL,
};
static const uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
CMD1(0x36, 0x50),
CMD1(0x3B, 0x22),
CMD_NULL,
};
static const uint32_t lcd_vga_transfer_tdo24m[] = {
CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x14, 0x00), /* CKV 1,2 timing control */
CMD2(0xd3, 0x1a, 0x0f), /* OEV timing control */
CMD2(0xd4, 0x1f, 0xaf), /* ASW timing control (1) */
CMD1(0xd5, 0x14), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_qvga_transfer[] = {
CMD1(0xd6, 0x02), /* Blanking period control (1) */
CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd8, 0x01), /* CKV timing control on/off */
CMD2(0xd9, 0x00, 0x08), /* CKV 1,2 timing control */
CMD2(0xde, 0x05, 0x0a), /* OEV timing control */
CMD2(0xdf, 0x0a, 0x19), /* ASW timing control (1) */
CMD1(0xe0, 0x0a), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
CMD1(0x3B, 0x00),
CMD_NULL,
};
static const uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
CMD1(0x3B, 0x22),
CMD_NULL,
};
static const uint32_t lcd_vga_transfer_tdo35s[] = {
CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
CMD2(0xd3, 0x14, 0x28), /* OEV timing control */
CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */
CMD1(0xd5, 0x28), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_panel_config[] = {
CMD2(0xb8, 0xff, 0xf9), /* Output control */
CMD0(0x11), /* sleep out */
CMD1(0xba, 0x01), /* Display mode (1) */
CMD1(0xbb, 0x00), /* Display mode (2) */
CMD1(0x3a, 0x60), /* Display mode 18-bit RGB */
CMD1(0xbf, 0x10), /* Drive system change control */
CMD1(0xb1, 0x56), /* Booster operation setup */
CMD1(0xb2, 0x33), /* Booster mode setup */
CMD1(0xb3, 0x11), /* Booster frequency setup */
CMD1(0xb4, 0x02), /* Op amp/system clock */
CMD1(0xb5, 0x35), /* VCS voltage */
CMD1(0xb6, 0x40), /* VCOM voltage */
CMD1(0xb7, 0x03), /* External display signal */
CMD1(0xbd, 0x00), /* ASW slew rate */
CMD1(0xbe, 0x00), /* Dummy data for QuadData operation */
CMD1(0xc0, 0x11), /* Sleep out FR count (A) */
CMD1(0xc1, 0x11), /* Sleep out FR count (B) */
CMD1(0xc2, 0x11), /* Sleep out FR count (C) */
CMD2(0xc3, 0x20, 0x40), /* Sleep out FR count (D) */
CMD2(0xc4, 0x60, 0xc0), /* Sleep out FR count (E) */
CMD2(0xc5, 0x10, 0x20), /* Sleep out FR count (F) */
CMD1(0xc6, 0xc0), /* Sleep out FR count (G) */
CMD2(0xc7, 0x33, 0x43), /* Gamma 1 fine tuning (1) */
CMD1(0xc8, 0x44), /* Gamma 1 fine tuning (2) */
CMD1(0xc9, 0x33), /* Gamma 1 inclination adjustment */
CMD1(0xca, 0x00), /* Gamma 1 blue offset adjustment */
CMD2(0xec, 0x01, 0xf0), /* Horizontal clock cycles */
CMD_NULL,
};
/* Send a CMD_NULL-terminated command sequence over SPI, one command per
 * synchronous transfer. Commands are 9-bit entities, so a command with
 * n parameters occupies 9*(n+1) bits; the shift below left-justifies
 * that bit string so it can be clocked out as n+2 whole bytes.
 * Returns 0 or the first spi_sync() error.
 */
static int tdo24m_writes(struct tdo24m *lcd, const uint32_t *array)
{
struct spi_transfer *x = &lcd->xfer;
const uint32_t *p = array;
uint32_t data;
int nparams, err = 0;
for (; *p != CMD_NULL; p++) {
/* Skip the "invert display" command on panels that don't need it. */
if (!lcd->color_invert && *p == CMD0(0x21))
continue;
nparams = (*p >> 30) & 0x3;
data = *p << (7 - nparams);
switch (nparams) {
case 0:
lcd->buf[0] = (data >> 8) & 0xff;
lcd->buf[1] = data & 0xff;
break;
case 1:
lcd->buf[0] = (data >> 16) & 0xff;
lcd->buf[1] = (data >> 8) & 0xff;
lcd->buf[2] = data & 0xff;
break;
case 2:
lcd->buf[0] = (data >> 24) & 0xff;
lcd->buf[1] = (data >> 16) & 0xff;
lcd->buf[2] = (data >> 8) & 0xff;
lcd->buf[3] = data & 0xff;
break;
default:
continue;
}
x->len = nparams + 2;
err = spi_sync(lcd->spi_dev, &lcd->msg);
if (err)
break;
}
return err;
}
static int tdo24m_adj_mode(struct tdo24m *lcd, int mode)
{
switch (mode) {
case MODE_VGA:
tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
break;
case MODE_QVGA:
tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_qvga_transfer);
break;
default:
return -EINVAL;
}
lcd->mode = mode;
return 0;
}
static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
{
switch (mode) {
case MODE_VGA:
tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
break;
case MODE_QVGA:
tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_qvga_transfer);
break;
default:
return -EINVAL;
}
lcd->mode = mode;
return 0;
}
/* Bring the panel up: power-on sequence, controller reset, then replay
 * the command tables for the currently selected mode. Stops at the
 * first failing step and returns its error code, 0 on success.
 */
static int tdo24m_power_on(struct tdo24m *lcd)
{
	int err;

	err = tdo24m_writes(lcd, lcd_panel_on);
	if (!err)
		err = tdo24m_writes(lcd, lcd_panel_reset);
	if (!err)
		err = lcd->adj_mode(lcd, lcd->mode);

	return err;
}
/* Put the panel into its power-down state (display off, sleep,
 * deep standby). Returns the tdo24m_writes() result.
 */
static int tdo24m_power_off(struct tdo24m *lcd)
{
return tdo24m_writes(lcd, lcd_panel_off);
}
/* Apply an fb blanking level: power the panel on or off only when the
 * requested on/off state actually differs from the cached one, and
 * record the new level on success.
 */
static int tdo24m_power(struct tdo24m *lcd, int power)
{
	int want_on = POWER_IS_ON(power);
	int is_on = POWER_IS_ON(lcd->power);
	int ret = 0;

	if (want_on && !is_on)
		ret = tdo24m_power_on(lcd);
	else if (!want_on && is_on)
		ret = tdo24m_power_off(lcd);

	if (ret == 0)
		lcd->power = power;

	return ret;
}
/* lcd_ops.set_power callback: forward the blanking level to tdo24m_power(). */
static int tdo24m_set_power(struct lcd_device *ld, int power)
{
struct tdo24m *lcd = lcd_get_data(ld);
return tdo24m_power(lcd, power);
}
/* lcd_ops.get_power callback: report the cached blanking level. */
static int tdo24m_get_power(struct lcd_device *ld)
{
struct tdo24m *lcd = lcd_get_data(ld);
return lcd->power;
}
/* lcd_ops.set_mode callback: pick VGA for 640/480-pixel-wide modes,
 * QVGA otherwise, and reprogram the panel only if that differs from
 * the current mode.
 */
static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m)
{
	struct tdo24m *lcd = lcd_get_data(ld);
	int new_mode;

	new_mode = (m->xres == 640 || m->xres == 480) ? MODE_VGA : MODE_QVGA;
	if (lcd->mode == new_mode)
		return 0;

	return lcd->adj_mode(lcd, new_mode);
}
/* Callbacks registered with the LCD class device. */
static struct lcd_ops tdo24m_ops = {
.get_power = tdo24m_get_power,
.set_power = tdo24m_set_power,
.set_mode = tdo24m_set_mode,
};
/* SPI probe: configure the SPI link (8-bit words, mode 3), allocate
 * driver state and the small command buffer with devm, pre-build the
 * reusable single-transfer message, bind the model-specific hooks,
 * register the LCD class device and unblank the panel.
 */
static int tdo24m_probe(struct spi_device *spi)
{
struct tdo24m *lcd;
struct spi_message *m;
struct spi_transfer *x;
struct tdo24m_platform_data *pdata;
enum tdo24m_model model;
int err;
pdata = dev_get_platdata(&spi->dev);
if (pdata)
model = pdata->model;
else
model = TDO24M;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_3;
err = spi_setup(spi);
if (err)
return err;
lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi_dev = spi;
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL)
return -ENOMEM;
m = &lcd->msg;
x = &lcd->xfer;
spi_message_init(m);
/* Deassert chip-select between commands. */
x->cs_change = 1;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
switch (model) {
case TDO24M:
lcd->color_invert = 1;
lcd->adj_mode = tdo24m_adj_mode;
break;
case TDO35S:
lcd->adj_mode = tdo35s_adj_mode;
lcd->color_invert = 0;
break;
default:
dev_err(&spi->dev, "Unsupported model");
return -EINVAL;
}
lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "tdo24m", &spi->dev,
lcd, &tdo24m_ops);
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
spi_set_drvdata(spi, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
if (err)
return err;
return 0;
}
/* SPI remove: power the panel down; devm handles all allocations. */
static int tdo24m_remove(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep: panel off on suspend, back on at resume. */
static int tdo24m_suspend(struct device *dev)
{
struct tdo24m *lcd = dev_get_drvdata(dev);
return tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
static int tdo24m_resume(struct device *dev)
{
struct tdo24m *lcd = dev_get_drvdata(dev);
return tdo24m_power(lcd, FB_BLANK_UNBLANK);
}
#endif
static SIMPLE_DEV_PM_OPS(tdo24m_pm_ops, tdo24m_suspend, tdo24m_resume);
/* Power down all displays on reboot, poweroff or halt */
static void tdo24m_shutdown(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
/* SPI driver registration and module metadata. */
static struct spi_driver tdo24m_driver = {
.driver = {
.name = "tdo24m",
.owner = THIS_MODULE,
.pm = &tdo24m_pm_ops,
},
.probe = tdo24m_probe,
.remove = tdo24m_remove,
.shutdown = tdo24m_shutdown,
};
module_spi_driver(tdo24m_driver);
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:tdo24m");
| gpl-2.0 |
JoeyJiao/android-huawei-kernel-common | arch/powerpc/platforms/powermac/pfunc_base.c | 776 | 10373 | #include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/* GPIO interrupt handler: dispatch to the platform-function core. */
static irqreturn_t macio_gpio_irq(int irq, void *data)
{
pmf_do_irq(data);
return IRQ_HANDLED;
}
/* Hook the GPIO's device-tree interrupt up to macio_gpio_irq(). */
static int macio_do_gpio_irq_enable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
if (irq == NO_IRQ)
return -EINVAL;
return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
}
/* Undo macio_do_gpio_irq_enable(): release the mapped interrupt. */
static int macio_do_gpio_irq_disable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
if (irq == NO_IRQ)
return -EINVAL;
free_irq(irq, func);
return 0;
}
/* Read-modify-write the masked bits of a GPIO register (address stored
 * in driver_data by macio_gpio_init_one), under the feature lock. The
 * first platform-function argument, if present, selects the polarity.
 */
static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
{
u8 __iomem *addr = (u8 __iomem *)func->driver_data;
unsigned long flags;
u8 tmp;
/* Check polarity */
if (args && args->count && !args->u[0].v)
value = ~value;
/* Toggle the GPIO */
spin_lock_irqsave(&feature_lock, flags);
tmp = readb(addr);
tmp = (tmp & ~mask) | (value & mask);
DBG("Do write 0x%02x to GPIO %s (%p)\n",
tmp, func->node->full_name, addr);
writeb(tmp, addr);
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
/* Read a GPIO register, mask/shift/xor the value as requested and store
 * the result through the caller-provided reply pointer args->u[0].p.
 */
static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
{
u8 __iomem *addr = (u8 __iomem *)func->driver_data;
u32 value;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
value = readb(addr);
*args->u[0].p = ((value & mask) >> rshift) ^ xor;
return 0;
}
/* Sleep for the requested duration — presumably given in microseconds,
 * rounded up to whole milliseconds (NOTE(review): unit inferred from
 * the /1000 conversion, confirm against the pmf spec). We are in
 * process context here, so sleeping is fine.
 */
static int macio_do_delay(PMF_STD_ARGS, u32 duration)
{
	msleep(DIV_ROUND_UP(duration, 1000));
	return 0;
}
/* Platform-function handlers implemented for macio GPIO nodes. */
static struct pmf_handlers macio_gpio_handlers = {
.irq_enable = macio_do_gpio_irq_enable,
.irq_disable = macio_do_gpio_irq_disable,
.write_gpio = macio_do_gpio_write,
.read_gpio = macio_do_gpio_read,
.delay = macio_do_delay,
};
/* Register GPIO platform-function drivers for every child of the macio
 * chip's "gpio" node (the resolved MMIO address of each GPIO register
 * is passed as driver_data), then run the on-init functions.
 */
static void macio_gpio_init_one(struct macio_chip *macio)
{
struct device_node *gparent, *gp;
/*
 * Find the "gpio" parent node
 */
for (gparent = NULL;
(gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
if (strcmp(gparent->name, "gpio") == 0)
break;
if (gparent == NULL)
return;
DBG("Installing GPIO functions for macio %s\n",
macio->of_node->full_name);
/*
 * Ok, got one, we dont need anything special to track them down, so
 * we just create them all
 */
for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
const u32 *reg = of_get_property(gp, "reg", NULL);
unsigned long offset;
if (reg == NULL)
continue;
offset = *reg;
/* Deal with old style device-tree. We can safely hard code the
 * offset for now too even if it's a bit gross ...
 */
if (offset < 0x50)
offset += 0x50;
offset += (unsigned long)macio->base;
pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
}
DBG("Calling initial GPIO functions for macio %s\n",
macio->of_node->full_name);
/* And now we run all the init ones */
for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
/* Note: We do not at this point implement the "at sleep" or "at wake"
 * functions. I yet to find any for GPIOs anyway
 */
}
/* MMIO register accessors for the macio chip (func->driver_data points
 * at the struct macio_chip used by the MACIO_IN/OUT macros). Writes
 * take the feature lock for the read-modify-write; reads return their
 * result through the caller-provided reply pointer args->u[0].p.
 */
static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*args->u[0].p = MACIO_IN32(offset);
return 0;
}
static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
return 0;
}
/* "msrx" variants: mask, shift right, then xor the value read. */
static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
u32 shift, u32 xor)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
return 0;
}
static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
u32 shift, u32 xor)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
return 0;
}
/* "slm" variants: take the value from args->u[0].v, shift it left and
 * merge it into the register under the given mask.
 */
static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
u32 tmp, val;
/* Check args */
if (args == NULL || args->count == 0)
return -EINVAL;
spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN32(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT32(offset, tmp);
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
u32 tmp, val;
/* Check args */
if (args == NULL || args->count == 0)
return -EINVAL;
spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN8(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT8(offset, tmp);
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
/* Platform-function handlers implemented for macio MMIO nodes. */
static struct pmf_handlers macio_mmio_handlers = {
.write_reg32 = macio_do_write_reg32,
.read_reg32 = macio_do_read_reg32,
.write_reg8 = macio_do_write_reg8,
.read_reg8 = macio_do_read_reg8,
.read_reg32_msrx = macio_do_read_reg32_msrx,
.read_reg8_msrx = macio_do_read_reg8_msrx,
.write_reg32_slm = macio_do_write_reg32_slm,
.write_reg8_slm = macio_do_write_reg8_slm,
.delay = macio_do_delay,
};
/* Register the MMIO platform-function handlers for one macio chip. */
static void macio_mmio_init_one(struct macio_chip *macio)
{
DBG("Installing MMIO functions for macio %s\n",
macio->of_node->full_name);
pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
}
/* Device-tree node of the UniNorth "hw-clock" child, if present. */
static struct device_node *unin_hwclock;
/* Masked write to a UniNorth register via the UN_IN/UN_OUT accessors. */
static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&feature_lock, flags);
/* This is fairly bogus in darwin, but it should work for our needs
 * implemeted that way:
 */
UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
/* Only masked 32-bit writes and delays are needed for UniNorth. */
static struct pmf_handlers unin_mmio_handlers = {
.write_reg32 = unin_do_write_reg32,
.delay = macio_do_delay,
};
/* Register platform-function handlers for the UniNorth bridge and its
 * optional "hw-clock" child, then run their on-init functions.
 */
static void uninorth_install_pfunc(void)
{
struct device_node *np;
DBG("Installing functions for UniN %s\n",
uninorth_node->full_name);
/*
 * Install handlers for the bridge itself
 */
pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
/*
 * Install handlers for the hwclock child if any
 */
for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
if (strcmp(np->name, "hw-clock") == 0) {
unin_hwclock = np;
break;
}
if (unin_hwclock) {
DBG("Installing functions for UniN clock %s\n",
unin_hwclock->full_name);
pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
NULL);
}
}
/* We export this as the SMP code might init us early */
/* One-shot installer for the base platform-function handlers (macio
 * MMIO + GPIO and UniNorth); safe to call more than once. Returns 0.
 */
int __init pmac_pfunc_base_install(void)
{
static int pfbase_inited;
int i;
if (pfbase_inited)
return 0;
pfbase_inited = 1;
if (!machine_is(powermac))
return 0;
DBG("Installing base platform functions...\n");
/*
 * Locate mac-io chips and install handlers
 */
for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
if (macio_chips[i].of_node) {
macio_mmio_init_one(&macio_chips[i]);
macio_gpio_init_one(&macio_chips[i]);
}
}
/*
 * Install handlers for northbridge and direct mapped hwclock
 * if any. We do not implement the config space access callback
 * which is only ever used for functions that we do not call in
 * the current driver (enabling/disabling cells in U2, mostly used
 * to restore the PCI settings, we do that differently)
 */
if (uninorth_node && uninorth_base)
uninorth_install_pfunc();
DBG("All base functions installed\n");
return 0;
}
machine_arch_initcall(powermac, pmac_pfunc_base_install);
#ifdef CONFIG_PM
/* Those can be called by pmac_feature. Ultimately, I should use a sysdev
* or a device, but for now, that's good enough until I sort out some
* ordering issues. Also, we do not bother with GPIOs, as so far I yet have
* to see a case where a GPIO function has the on-suspend or on-resume bit
*/
/* Run all platform functions flagged ON_SLEEP: mac-io chips first,
 * then the UniNorth bridge, then its hw-clock child.
 */
void pmac_pfunc_base_suspend(void)
{
	int i;

	for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
		if (macio_chips[i].of_node)
			pmf_do_functions(macio_chips[i].of_node, NULL, 0,
					 PMF_FLAGS_ON_SLEEP, NULL);
	}
	if (uninorth_node)
		pmf_do_functions(uninorth_node, NULL, 0,
				 PMF_FLAGS_ON_SLEEP, NULL);
	if (unin_hwclock)
		pmf_do_functions(unin_hwclock, NULL, 0,
				 PMF_FLAGS_ON_SLEEP, NULL);
}
/* Run all platform functions flagged ON_WAKE, in the reverse order of
 * pmac_pfunc_base_suspend(): hw-clock first, then UniNorth, then the
 * mac-io chips.
 */
void pmac_pfunc_base_resume(void)
{
	int i;

	if (unin_hwclock)
		pmf_do_functions(unin_hwclock, NULL, 0,
				 PMF_FLAGS_ON_WAKE, NULL);
	if (uninorth_node)
		pmf_do_functions(uninorth_node, NULL, 0,
				 PMF_FLAGS_ON_WAKE, NULL);
	for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
		if (macio_chips[i].of_node)
			pmf_do_functions(macio_chips[i].of_node, NULL, 0,
					 PMF_FLAGS_ON_WAKE, NULL);
	}
}
#endif /* CONFIG_PM */
| gpl-2.0 |
vitaliyy/msm7x30 | arch/arm/mach-shmobile/intc-sh7367.c | 776 | 10828 | /*
* sh7367 processor support - INTC hardware block
*
* Copyright (C) 2010 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sh_intc.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
/* Symbolic IDs for every INTCA interrupt source and group on sh7367.
 * ENABLED/DISABLED are sentinel IDs used as force_enable/force_disable
 * in intca_desc below.
 */
enum {
	UNUSED_INTCA = 0,
	ENABLED,
	DISABLED,

	/* interrupt sources INTCA */
	IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
	IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A,
	DIRC,
	CRYPT1_ERR, CRYPT2_STD,
	IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1,
	ARM11_IRQPMU, ARM11_COMMTX, ARM11_COMMRX,
	ETM11_ACQCMP, ETM11_FULL,
	MFI_MFIM, MFI_MFIS,
	BBIF1, BBIF2,
	USBDMAC_USHDMI,
	USBHS_USHI0, USBHS_USHI1,
	CMT1_CMT10, CMT1_CMT11, CMT1_CMT12, CMT1_CMT13, CMT2, CMT3,
	KEYSC_KEY,
	SCIFA0, SCIFA1, SCIFA2, SCIFA3,
	MSIOF2, MSIOF1,
	SCIFA4, SCIFA5, SCIFB,
	FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
	SDHI0,
	SDHI1,
	MSU_MSU, MSU_MSU2,
	IREM,
	SIU,
	SPU,
	IRDA,
	TPU0, TPU1, TPU2, TPU3, TPU4,
	LCRC,
	PINT1, PINT2,
	TTI20,
	MISTY,
	DDM,
	SDHI2,
	RWDT0, RWDT1,
	DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3,
	DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR,
	DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3,
	DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR,
	DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3,
	DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR,

	/* interrupt groups INTCA */
	DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2,
	ETM11, ARM11, USBHS, FLCTL, IIC1
};
/* Map of each INTCA source ID to its exception vector offset.
 * SDHI0/1/2 each occupy four consecutive vectors that are folded onto
 * one source ID (one Linux IRQ per controller).
 * NOTE(review): unlike intca_groups and the sense/ack tables, this is
 * not marked __initdata — confirm whether that is intentional.
 */
static struct intc_vect intca_vectors[] = {
	INTC_VECT(IRQ0A, 0x0200), INTC_VECT(IRQ1A, 0x0220),
	INTC_VECT(IRQ2A, 0x0240), INTC_VECT(IRQ3A, 0x0260),
	INTC_VECT(IRQ4A, 0x0280), INTC_VECT(IRQ5A, 0x02a0),
	INTC_VECT(IRQ6A, 0x02c0), INTC_VECT(IRQ7A, 0x02e0),
	INTC_VECT(IRQ8A, 0x0300), INTC_VECT(IRQ9A, 0x0320),
	INTC_VECT(IRQ10A, 0x0340), INTC_VECT(IRQ11A, 0x0360),
	INTC_VECT(IRQ12A, 0x0380), INTC_VECT(IRQ13A, 0x03a0),
	INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0),
	INTC_VECT(DIRC, 0x0560),
	INTC_VECT(CRYPT1_ERR, 0x05e0),
	INTC_VECT(CRYPT2_STD, 0x0700),
	INTC_VECT(IIC1_ALI1, 0x0780), INTC_VECT(IIC1_TACKI1, 0x07a0),
	INTC_VECT(IIC1_WAITI1, 0x07c0), INTC_VECT(IIC1_DTEI1, 0x07e0),
	INTC_VECT(ARM11_IRQPMU, 0x0800), INTC_VECT(ARM11_COMMTX, 0x0840),
	INTC_VECT(ARM11_COMMRX, 0x0860),
	INTC_VECT(ETM11_ACQCMP, 0x0880), INTC_VECT(ETM11_FULL, 0x08a0),
	INTC_VECT(MFI_MFIM, 0x0900), INTC_VECT(MFI_MFIS, 0x0920),
	INTC_VECT(BBIF1, 0x0940), INTC_VECT(BBIF2, 0x0960),
	INTC_VECT(USBDMAC_USHDMI, 0x0a00),
	INTC_VECT(USBHS_USHI0, 0x0a20), INTC_VECT(USBHS_USHI1, 0x0a40),
	INTC_VECT(CMT1_CMT10, 0x0b00), INTC_VECT(CMT1_CMT11, 0x0b20),
	INTC_VECT(CMT1_CMT12, 0x0b40), INTC_VECT(CMT1_CMT13, 0x0b60),
	INTC_VECT(CMT2, 0x0b80), INTC_VECT(CMT3, 0x0ba0),
	INTC_VECT(KEYSC_KEY, 0x0be0),
	INTC_VECT(SCIFA0, 0x0c00), INTC_VECT(SCIFA1, 0x0c20),
	INTC_VECT(SCIFA2, 0x0c40), INTC_VECT(SCIFA3, 0x0c60),
	INTC_VECT(MSIOF2, 0x0c80), INTC_VECT(MSIOF1, 0x0d00),
	INTC_VECT(SCIFA4, 0x0d20), INTC_VECT(SCIFA5, 0x0d40),
	INTC_VECT(SCIFB, 0x0d60),
	INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
	INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
	/* four SDHI0 vectors share one source ID */
	INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
	INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
	INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
	INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0),
	INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40),
	INTC_VECT(IREM, 0x0f60),
	INTC_VECT(SIU, 0x0fa0),
	INTC_VECT(SPU, 0x0fc0),
	INTC_VECT(IRDA, 0x0480),
	INTC_VECT(TPU0, 0x04a0), INTC_VECT(TPU1, 0x04c0),
	INTC_VECT(TPU2, 0x04e0), INTC_VECT(TPU3, 0x0500),
	INTC_VECT(TPU4, 0x0520),
	INTC_VECT(LCRC, 0x0540),
	INTC_VECT(PINT1, 0x1000), INTC_VECT(PINT2, 0x1020),
	INTC_VECT(TTI20, 0x1100),
	INTC_VECT(MISTY, 0x1120),
	INTC_VECT(DDM, 0x1140),
	INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220),
	INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260),
	INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0),
	INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020),
	INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060),
	INTC_VECT(DMAC_2_DEI4, 0x2080), INTC_VECT(DMAC_2_DEI5, 0x20a0),
	INTC_VECT(DMAC_2_DADERR, 0x20c0),
	INTC_VECT(DMAC2_1_DEI0, 0x2100), INTC_VECT(DMAC2_1_DEI1, 0x2120),
	INTC_VECT(DMAC2_1_DEI2, 0x2140), INTC_VECT(DMAC2_1_DEI3, 0x2160),
	INTC_VECT(DMAC2_2_DEI4, 0x2180), INTC_VECT(DMAC2_2_DEI5, 0x21a0),
	INTC_VECT(DMAC2_2_DADERR, 0x21c0),
	INTC_VECT(DMAC3_1_DEI0, 0x2200), INTC_VECT(DMAC3_1_DEI1, 0x2220),
	INTC_VECT(DMAC3_1_DEI2, 0x2240), INTC_VECT(DMAC3_1_DEI3, 0x2260),
	INTC_VECT(DMAC3_2_DEI4, 0x2280), INTC_VECT(DMAC3_2_DEI5, 0x22a0),
	INTC_VECT(DMAC3_2_DADERR, 0x22c0),
};
/* Group definitions: each INTC_GROUP maps one group ID (used in the
 * priority registers below) onto its member interrupt sources.
 */
static struct intc_group intca_groups[] __initdata = {
	INTC_GROUP(DMAC_1, DMAC_1_DEI0,
		   DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3),
	INTC_GROUP(DMAC_2, DMAC_2_DEI4,
		   DMAC_2_DEI5, DMAC_2_DADERR),
	INTC_GROUP(DMAC2_1, DMAC2_1_DEI0,
		   DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3),
	INTC_GROUP(DMAC2_2, DMAC2_2_DEI4,
		   DMAC2_2_DEI5, DMAC2_2_DADERR),
	INTC_GROUP(DMAC3_1, DMAC3_1_DEI0,
		   DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3),
	INTC_GROUP(DMAC3_2, DMAC3_2_DEI4,
		   DMAC3_2_DEI5, DMAC3_2_DADERR),
	INTC_GROUP(ETM11, ETM11_ACQCMP, ETM11_FULL),
	/* Fix: the last member was a duplicated ARM11_COMMTX, which left
	 * ARM11_COMMRX (vector 0x0860, bit 7 of IMR0A) outside the ARM11
	 * group and thus without the group priority from IPRCA.
	 */
	INTC_GROUP(ARM11, ARM11_IRQPMU, ARM11_COMMTX, ARM11_COMMRX),
	INTC_GROUP(USBHS, USBHS_USHI0, USBHS_USHI1),
	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
	INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
};
/* Mask/mask-clear register pairs (set address, clear address, width 8)
 * with the source controlled by each bit, MSB first; 0 = unused bit.
 * The ENABLED/DISABLED sentinels mark bits the core must force on/off
 * (see .force_enable/.force_disable in intca_desc).
 */
static struct intc_mask_reg intca_mask_registers[] = {
	{ 0xe6900040, 0xe6900060, 8, /* INTMSK00A / INTMSKCLR00A */
	  { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
	{ 0xe6900044, 0xe6900064, 8, /* INTMSK10A / INTMSKCLR10A */
	  { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
	{ 0xe6940080, 0xe69400c0, 8, /* IMR0A / IMCR0A */
	  { DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0,
	    ARM11_IRQPMU, 0, ARM11_COMMTX, ARM11_COMMRX } },
	{ 0xe6940084, 0xe69400c4, 8, /* IMR1A / IMCR1A */
	  { CRYPT1_ERR, CRYPT2_STD, DIRC, 0,
	    DMAC_1_DEI3, DMAC_1_DEI2, DMAC_1_DEI1, DMAC_1_DEI0 } },
	{ 0xe6940088, 0xe69400c8, 8, /* IMR2A / IMCR2A */
	  { PINT1, PINT2, 0, 0,
	    BBIF1, BBIF2, MFI_MFIS, MFI_MFIM } },
	{ 0xe694008c, 0xe69400cc, 8, /* IMR3A / IMCR3A */
	  { DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0,
	    DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } },
	{ 0xe6940090, 0xe69400d0, 8, /* IMR4A / IMCR4A */
	  { DDM, 0, 0, 0,
	    0, 0, ETM11_FULL, ETM11_ACQCMP } },
	{ 0xe6940094, 0xe69400d4, 8, /* IMR5A / IMCR5A */
	  { KEYSC_KEY, DMAC_2_DADERR, DMAC_2_DEI5, DMAC_2_DEI4,
	    SCIFA3, SCIFA2, SCIFA1, SCIFA0 } },
	{ 0xe6940098, 0xe69400d8, 8, /* IMR6A / IMCR6A */
	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
	    0, 0, MSIOF2, 0 } },
	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
	  { DISABLED, DISABLED, ENABLED, ENABLED,
	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
	  { DISABLED, DISABLED, ENABLED, ENABLED,
	    TTI20, USBDMAC_USHDMI, SPU, SIU } },
	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
	    CMT2, USBHS_USHI1, USBHS_USHI0, 0 } },
	{ 0xe69400a8, 0xe69400e8, 8, /* IMR10A / IMCR10A */
	  { 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4,
	    0, 0, 0, 0 } },
	{ 0xe69400ac, 0xe69400ec, 8, /* IMR11A / IMCR11A */
	  { IIC1_DTEI1, IIC1_WAITI1, IIC1_TACKI1, IIC1_ALI1,
	    LCRC, MSU_MSU2, IREM, MSU_MSU } },
	{ 0xe69400b0, 0xe69400f0, 8, /* IMR12A / IMCR12A */
	  { 0, 0, TPU0, TPU1,
	    TPU2, TPU3, TPU4, 0 } },
	{ 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
	  { DISABLED, DISABLED, ENABLED, ENABLED,
	    MISTY, CMT3, RWDT1, RWDT0 } },
};
/* Priority registers: {address, set-address (0 = none), register width,
 * field width in bits, fields MSB first}.  Group IDs may appear here so
 * that one priority covers all member sources.
 */
static struct intc_prio_reg intca_prio_registers[] = {
	{ 0xe6900010, 0, 32, 4, /* INTPRI00A */
	  { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
	{ 0xe6900014, 0, 32, 4, /* INTPRI10A */
	  { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
	{ 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, LCRC } },
	{ 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, ETM11, BBIF1, BBIF2 } },
	{ 0xe6940008, 0, 16, 4, /* IPRCA */ { CRYPT1_ERR, CRYPT2_STD,
					      CMT1_CMT11, ARM11 } },
	{ 0xe694000c, 0, 16, 4, /* IPRDA */ { PINT1, PINT2,
					      CMT1_CMT12, TPU4 } },
	{ 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC_1, MFI_MFIS,
					      MFI_MFIM, USBHS } },
	{ 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC_KEY, DMAC_2,
					      0, CMT1_CMT10 } },
	{ 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1,
					      SCIFA2, SCIFA3 } },
	/* NOTE(review): comment read "IPRGH"; by address sequence this
	 * register is IPRHA — verify against the sh7367 data sheet.
	 */
	{ 0xe694001c, 0, 16, 4, /* IPRHA */ { MSIOF2, USBDMAC_USHDMI,
					      FLCTL, SDHI0 } },
	{ 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4, MSU_MSU, IIC1 } },
	{ 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2, SIU, TTI20 } },
	{ 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_CMT13, IREM, SDHI1 } },
	{ 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, TPU1, TPU2, TPU3 } },
	{ 0xe6940030, 0, 16, 4, /* IPRMA */ { MISTY, CMT3, RWDT1, RWDT0 } },
	{ 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, SPU, DDM } },
	{ 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, SDHI2 } },
};
/* Sense configuration registers for the 16 external IRQ pins:
 * {address, register width, field width (2 bits per IRQ)}.
 */
static struct intc_sense_reg intca_sense_registers[] __initdata = {
	{ 0xe6900000, 16, 2, /* ICR1A */
	  { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
	{ 0xe6900004, 16, 2, /* ICR2A */
	  { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
};
/* Acknowledge registers for the external IRQ pins (write to clear the
 * pending request).
 */
static struct intc_mask_reg intca_ack_registers[] __initdata = {
	{ 0xe6900020, 0, 8, /* INTREQ00A */
	  { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
	{ 0xe6900024, 0, 8, /* INTREQ10A */
	  { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
};
/* Top-level INTCA controller descriptor tying all the tables together;
 * ENABLED/DISABLED drive the force_enable/force_disable bit handling.
 */
static struct intc_desc intca_desc __initdata = {
	.name = "sh7367-intca",
	.force_enable = ENABLED,
	.force_disable = DISABLED,
	.hw = INTC_HW_DESC(intca_vectors, intca_groups,
			   intca_mask_registers, intca_prio_registers,
			   intca_sense_registers, intca_ack_registers),
};
/* Board init entry point: register the INTCA controller with the
 * shared SH interrupt-controller core.
 */
void __init sh7367_init_irq(void)
{
	/* INTCA */
	register_intc_controller(&intca_desc);
}
| gpl-2.0 |
rdesfo/kernel | drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c | 1032 | 3475 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
#include <engine/falcon.h>
#include <engine/bsp.h>
/* Per-engine private state; currently just the embedded falcon base
 * object (nv_object/nv_engine casts rely on it being first).
 */
struct nv98_bsp_priv {
	struct nouveau_falcon base;
};
/*******************************************************************************
* BSP object classes
******************************************************************************/
/* Object classes exposed to clients; the hex values are the hardware
 * BSP class IDs handled by this engine.
 */
static struct nouveau_oclass
nv98_bsp_sclass[] = {
	{ 0x88b1, &nouveau_object_ofuncs },
	{ 0x85b1, &nouveau_object_ofuncs },
	{ 0x86b1, &nouveau_object_ofuncs },
	{},	/* sentinel */
};
/*******************************************************************************
* PBSP context
******************************************************************************/
/* PBSP engine context class: all operations delegate to the generic
 * falcon context implementation.
 */
static struct nouveau_oclass
nv98_bsp_cclass = {
	.handle = NV_ENGCTX(BSP, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = _nouveau_falcon_context_ctor,
		.dtor = _nouveau_falcon_context_dtor,
		.init = _nouveau_falcon_context_init,
		.fini = _nouveau_falcon_context_fini,
		.rd32 = _nouveau_falcon_context_rd32,
		.wr32 = _nouveau_falcon_context_wr32,
	},
};
/*******************************************************************************
* PBSP engine/subdev functions
******************************************************************************/
/* Engine init: run the generic falcon init, then program two
 * engine-specific registers.  Returns 0 or a negative errno from
 * nouveau_falcon_init().
 */
static int
nv98_bsp_init(struct nouveau_object *object)
{
	struct nv98_bsp_priv *priv = (void *)object;
	int ret;

	ret = nouveau_falcon_init(&priv->base);
	if (ret)
		return ret;

	/* magic values; presumably falcon setup for this engine —
	 * no public documentation to cite
	 */
	nv_wr32(priv, 0x084010, 0x0000ffd2);
	nv_wr32(priv, 0x08401c, 0x0000fff2);
	return 0;
}
/* Engine constructor: create the falcon at MMIO base 0x084000, then
 * hook up the interrupt unit mask and the context/object classes.
 * *pobject is set even on failure so the core can clean up.
 */
static int
nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv98_bsp_priv *priv;
	int ret;

	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
				    "PBSP", "bsp", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x04008000;	/* PMC interrupt bit */
	nv_engine(priv)->cclass = &nv98_bsp_cclass;
	nv_engine(priv)->sclass = nv98_bsp_sclass;
	return 0;
}
/* Public engine class: custom ctor/init, generic falcon implementations
 * for everything else.
 */
struct nouveau_oclass
nv98_bsp_oclass = {
	.handle = NV_ENGINE(BSP, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv98_bsp_ctor,
		.dtor = _nouveau_falcon_dtor,
		.init = nv98_bsp_init,
		.fini = _nouveau_falcon_fini,
		.rd32 = _nouveau_falcon_rd32,
		.wr32 = _nouveau_falcon_wr32,
	},
};
| gpl-2.0 |
shminer/android_kernel_flounder | net/ipv6/mcast.c | 1288 | 64359 | /*
* Multicast support for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Changes:
*
* yoshfuji : fix format of router-alert option
* YOSHIFUJI Hideaki @USAGI:
* Fixed source address for MLD message based on
* <draft-ietf-magma-mld-source-05.txt>.
* YOSHIFUJI Hideaki @USAGI:
* - Ignore Queries for invalid addresses.
* - MLD for link-local addresses.
* David L Stevens <dlstevens@us.ibm.com>:
* - MLDv2 support
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <net/mld.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/ip6_checksum.h>
/* Set to 3 to get tracing... */
#define MCAST_DEBUG 2

/* MDBG() compiles to nothing unless MCAST_DEBUG >= 3 */
#if MCAST_DEBUG >= 3
#define MDBG(x) printk x
#else
#define MDBG(x)
#endif

/* Ensure that we have struct in6_addr aligned on 32bit word. */
/* Compile-time-only checks; the array itself is never referenced. */
static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};

/* ff02::16, destination of MLDv2 reports */
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

/* Big mc list lock for all the sockets */
static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
/* Forward declarations for the MLD state machine helpers defined below. */
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(unsigned long data);

static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_clear_delrec(struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);

#define IGMP6_UNSOLICITED_IVAL	(10*HZ)
#define MLD_QRV_DEFAULT		2

/* True when the interface must behave as an MLDv1 node: forced via
 * sysctl (global or per-device), or an MLDv1 query was seen recently.
 */
#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
		(idev)->cnf.force_mld_version == 1 || \
		((idev)->mc_v1_seen && \
		 time_before(jiffies, (idev)->mc_v1_seen)))

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;

/*
 *	socket join on multicast group
 */

/* Iterate a socket's multicast list under rcu_read_lock() */
#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))
/* Join socket @sk to multicast group @addr on interface @ifindex
 * (0 = pick the device via a route lookup on the group address).
 * Returns 0, or -EINVAL / -EADDRINUSE / -ENOMEM / -ENODEV, or the
 * error from ipv6_dev_mc_inc().
 */
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	/* reject a duplicate join on the same (ifindex, group) */
	rcu_read_lock();
	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			rcu_read_unlock();
			return -EADDRINUSE;
		}
	}
	rcu_read_unlock();

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (mc_lst == NULL)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	rcu_read_lock();
	if (ifindex == 0) {
		/* no interface given: resolve via routing table */
		struct rt6_info *rt;
		rt = rt6_lookup(net, addr, NULL, 0, 0);
		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (dev == NULL) {
		rcu_read_unlock();
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = MCAST_EXCLUDE;	/* default: exclude-nothing filter */
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = ipv6_dev_mc_inc(dev, addr);

	if (err) {
		rcu_read_unlock();
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	/* link the new entry at the head of the socket's list */
	spin_lock(&ipv6_sk_mc_lock);
	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
	spin_unlock(&ipv6_sk_mc_lock);

	rcu_read_unlock();

	return 0;
}
/*
* socket leave on multicast group
*/
/* Leave multicast group @addr on @ifindex (0 matches any interface).
 * Unlinks the matching socket entry, drops the device-level membership
 * and frees the entry via RCU.  Returns 0 or -EINVAL / -EADDRNOTAVAIL.
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	spin_lock(&ipv6_sk_mc_lock);
	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rcu_dereference_protected(*lnk,
			lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
	      lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			/* unlink first, then drop the list lock */
			*lnk = mc_lst->next;
			spin_unlock(&ipv6_sk_mc_lock);

			rcu_read_lock();
			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
			if (dev != NULL) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else
				/* device gone: still release source filters */
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
			rcu_read_unlock();
			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}
	spin_unlock(&ipv6_sk_mc_lock);

	return -EADDRNOTAVAIL;
}
/* called with rcu_read_lock() */
/* Resolve @ifindex (or route-lookup on @group when 0) to its inet6_dev.
 * On success, returns with idev->lock read-held (the caller must do
 * read_unlock_bh(&idev->lock)); returns NULL on failure or if the
 * device is being torn down.
 */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
			     const struct in6_addr *group,
			     int ifindex)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

	if (ifindex == 0) {
		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);

		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev)
		return NULL;
	idev = __in6_dev_get(dev);
	if (!idev)
		return NULL;
	read_lock_bh(&idev->lock);
	if (idev->dead) {
		read_unlock_bh(&idev->lock);
		return NULL;
	}
	return idev;
}
/* Drop every multicast membership held by @sk (socket teardown path).
 * Same per-entry logic as ipv6_sock_mc_drop(), looped until the list
 * is empty; the list lock is released around the per-device work.
 */
void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	/* lock-free fast path for sockets that never joined a group */
	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;

	spin_lock(&ipv6_sk_mc_lock);
	while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;
		spin_unlock(&ipv6_sk_mc_lock);

		rcu_read_lock();
		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
		if (dev) {
			struct inet6_dev *idev = __in6_dev_get(dev);

			(void) ip6_mc_leave_src(sk, mc_lst, idev);
			if (idev)
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
		} else
			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
		rcu_read_unlock();

		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);

		spin_lock(&ipv6_sk_mc_lock);
	}
	spin_unlock(&ipv6_sk_mc_lock);
}
/* MCAST_{UN,}BLOCK_SOURCE / MCAST_{JOIN,LEAVE}_SOURCE_GROUP backend:
 * add (@add != 0) or delete one source address in the socket's source
 * filter for the group in @pgsr, with filter mode @omode.  Updates the
 * interface-level filter to match.  Returns 0 or a negative errno;
 * deleting the last INCLUDE source turns into a full group leave.
 */
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int pmclocked = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	/* on success idev->lock is read-held until "done" */
	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	write_lock(&pmc->sflock);
	pmclocked = 1;

	psl = pmc->sflist;
	if (!add) {
		/* delete path: find and remove @source from the filter */
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i=0; i<psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */
		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		/* compact the list over the removed slot */
		for (j=i+1; j<psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */
	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* grow the filter list by IP6_SFBLOCK entries */
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i=0; i<psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		}
		pmc->sflist = psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i=0; i<psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* There is an error in the address. */
			goto done;
	}
	/* shift entries up to keep the list's existing order */
	for (j=psl->sl_count-1; j>=i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	if (pmclocked)
		write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);	/* taken by ip6_mc_find_dev_rcu() */
	rcu_read_unlock();
	if (leavegroup)
		return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}
/* MCAST_MSFILTER setsockopt backend: replace the socket's whole source
 * filter (mode + source list) for the group in @gsf.  An empty INCLUDE
 * filter means leave the group.  Returns 0 or a negative errno.
 */
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rcu_read_lock();
	/* on success idev->lock is read-held until "done" */
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		/* (INCLUDE, empty) == LEAVE_GROUP */
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		/* build the replacement source list */
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
							  GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i=0; i<newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
			newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	/* swap in the new list and retire the old one */
	write_lock(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);
	err = 0;
done:
	read_unlock_bh(&idev->lock);	/* taken by ip6_mc_find_dev_rcu() */
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}
/* MCAST_MSFILTER getsockopt backend: copy the socket's source filter
 * for the group in @gsf out to userspace (@optval/@optlen).  At most
 * the caller-supplied gf_numsrc entries are copied; gf_numsrc is set
 * to the full count.  Returns 0, -EINVAL, -ENODEV, -EADDRNOTAVAIL or
 * -EFAULT.
 */
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	/* on success idev->lock is read-held until released below */
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;
	/*
	 * changes to the ipv6_mc_list require the socket lock and
	 * a read lock on ip6_sk_mc_lock. We have the socket lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* changes to psl require the socket lock, a read lock on
	 * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We
	 * have the socket lock, so reading here is safe.
	 */
	for (i=0; i<copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	return err;
}
/* Decide whether a packet from @src_addr to multicast group @mc_addr
 * should be delivered to @sk, honouring the socket's source filter.
 * Returns true to deliver.  A socket with no membership for the group
 * returns true (no filtering applies).
 */
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return true;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		/* empty filter: EXCLUDE-nothing accepts, INCLUDE-nothing drops */
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i=0; i<psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;	/* source not in INCLUDE list */
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;	/* source in EXCLUDE list */
	}
	read_unlock(&mc->sflock);
	rcu_read_unlock();

	return rv;
}
/* Drop a reference on an ifmcaddr6; on the last put, release the
 * inet6_dev reference it holds and free the structure.
 */
static void ma_put(struct ifmcaddr6 *mc)
{
	if (atomic_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}
/* A group was added on the interface: program the hardware multicast
 * filter (once, tracked by MAF_LOADED) and, if reportable, start the
 * MLDv1 join or the MLDv2 interface-change report cycle.
 */
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	/* node/interface-local scope groups are never reported */
	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (MLD_V1_SEEN(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	mc->mca_crcount = mc->idev->mc_qrv;	/* retransmission count */
	mld_ifc_event(mc->idev);
}
/* A group was removed from the interface: undo igmp6_group_added() —
 * drop the hardware filter entry, send the MLD leave (unless the
 * device is going away), stop the report timer and clear the source
 * filter state.
 */
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		goto done;
	spin_unlock_bh(&mc->mca_lock);

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		atomic_dec(&mc->mca_refcnt);	/* timer held a reference */
done:
	ip6_mc_clear_src(mc);
	spin_unlock_bh(&mc->mca_lock);
}
/*
* deleted ifmcaddr6 manipulation
*/
/* Park a copy of departing group @im on the interface's tomb list so
 * that its remaining MLDv2 state-change reports can still be sent
 * (mca_crcount times). For INCLUDE-mode groups the source lists are
 * stolen from @im rather than copied.
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;	/* retransmissions left */
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		/* Steal the source lists; @im no longer needs them. */
		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	/* Push onto the interface's tomb list. */
	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}
/* Remove and free the tomb-list record for group @pmca on @idev, if
 * one exists, including its pending per-source delete records.
 */
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
{
	struct ifmcaddr6 **link, *victim = NULL;
	struct ip6_sf_list *src, *next_src;

	/* Unlink the matching entry (if any) under the tomb-list lock. */
	spin_lock_bh(&idev->mc_lock);
	for (link = &idev->mc_tomb; *link; link = &(*link)->next) {
		if (ipv6_addr_equal(&(*link)->mca_addr, pmca)) {
			victim = *link;
			*link = victim->next;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_lock);

	/* Free the record and its source list outside the lock. */
	if (victim) {
		for (src = victim->mca_tomb; src; src = next_src) {
			next_src = src->sf_next;
			kfree(src);
		}
		in6_dev_put(victim->idev);
		kfree(victim);
	}
}
/* Drop every pending MLDv2 state-change record on @idev: the whole
 * tomb list, plus any per-source "deleted" records still attached to
 * live groups.
 */
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	/* Detach the tomb list under the lock, free it outside. */
	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		/* Same detach-then-free pattern per group. */
		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}
/*
* device multicast group inc (add if not found)
*/
/* Interface-level join of @addr on @dev: bump the user count on an
 * existing ifmcaddr6, or create a new one in (EXCLUDE, empty) mode and
 * announce it. Returns 0 or a negative errno.
 */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (idev == NULL)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			/* Already joined: just count another user. */
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
				NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	/*
	 * not found: create a new one.
	 */

	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
	if (mc == NULL) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);

	mc->mca_addr = *addr;
	mc->idev = idev; /* (reference taken) */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	/* Two refs: one for mc_list, one dropped by ma_put() below. */
	atomic_set(&mc->mca_refcnt, 2);
	spin_lock_init(&mc->mca_lock);

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	mc->next = idev->mc_list;
	idev->mc_list = mc;
	write_unlock_bh(&idev->lock);

	/* Drop any stale tomb record, then announce the new group. */
	mld_del_delrec(idev, &mc->mca_addr);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}
/*
* device multicast group del
*/
/* Drop one interface-level user of group @addr on @idev. On the last
 * user the entry is unlinked, announced as dropped, and released.
 * Returns 0 on success or -ENOENT if the group is not joined.
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				/* Last user: unlink before dropping the lock. */
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}
/* Device-based wrapper for __ipv6_dev_mc_dec(): look up the inet6
 * device under RCU and drop one user of group @addr on it.
 * Returns 0, -ENOENT, or -ENODEV if @dev has no IPv6 state.
 */
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *in6dev;
	int ret = -ENODEV;

	rcu_read_lock();
	in6dev = __in6_dev_get(dev);
	if (in6dev)
		ret = __ipv6_dev_mc_dec(in6dev, addr);
	rcu_read_unlock();

	return ret;
}
/*
* check if the interface/address pair is valid
*/
/* Check whether @dev has joined multicast group @group and, when a
 * specific @src_addr is given, whether that source passes the group's
 * interface-level source filters. Returns true if traffic should be
 * accepted.
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
	const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc=mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				if (psf)
					/* Accept if any filter includes it, or
					 * not everyone excludes it. */
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					/* Unknown source: accept while in
					 * EXCLUDE mode. */
					rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = true; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return rv;
}
/* Arm the general-query response timer with a random delay bounded by
 * the interface's maximum response delay.
 */
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	int delay = net_random() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	/* mod_timer() == 0 means the timer was not already pending, so
	 * it now needs its own reference on the device. */
	if (!mod_timer(&idev->mc_gq_timer, jiffies + delay + 2))
		in6_dev_hold(idev);
}
/* Arm the interface-change (state-change report) timer with a random
 * delay in [0, @delay).
 */
static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
{
	int when = net_random() % delay;

	/* A freshly pending timer takes a reference on the device. */
	if (!mod_timer(&idev->mc_ifc_timer, jiffies + when + 2))
		in6_dev_hold(idev);
}
/*
* IGMP handling (alias multicast ICMPv6 messages)
*/
/* A query covering @ma was received: (re)arm the group's report timer
 * with a random delay of at most @resptime jiffies, keeping a shorter
 * already-pending delay. Called with ma->mca_lock held.
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);	/* cancelled timer's ref */
		delay = ma->mca_timer.expires - jiffies;	/* time left */
	}

	/* Only shorten; never extend a pending response. */
	if (delay >= resptime) {
		if (resptime)
			delay = net_random() % resptime;
		else
			delay = 1;
	}
	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);	/* timer holds a reference */
	ma->mca_flags |= MAF_TIMER_RUNNING;
}
/* mark EXCLUDE-mode sources */
/* mark EXCLUDE-mode sources */
/* For an EXCLUDE-mode group answering a group-and-source-specific
 * query: count how many of the @nsrcs queried sources are actively
 * excluded. Returns false when all queried sources are excluded (no
 * report needed), true otherwise. Always clears MAF_GSQUERY.
 */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
	const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}
/* Mark which of the @nsrcs queried sources we must answer for. EXCLUDE
 * mode defers to mld_xmarksources(); for INCLUDE mode, each queried
 * source we hold gets sf_gsresp set and MAF_GSQUERY is set iff at
 * least one matched. Returns true when a report must be scheduled.
 * Called with pmc->mca_lock held.
 */
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
	const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		/* none of the queried sources are ours: nothing to send */
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
/* called with rcu_read_lock() */
/* Process an incoming MLD query, both MLDv1 (24-byte payload) and
 * MLDv2 (>= 28 bytes). A v1 query switches the interface into v1
 * compatibility mode for a while; report timers are started for the
 * queried group(s). Called with rcu_read_lock() held.
 * Returns 0, or a negative errno for malformed packets.
 */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* Drop queries with not link local source */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);

	if (idev == NULL)
		return 0;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	/* A group-specific query must carry a multicast group address. */
	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		return -EINVAL;

	if (len == 24) {
		int switchback;
		/* MLDv1 router present */

		/* Translate milliseconds to jiffies */
		max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;

		/* Stay in v1 compatibility for (qrv + 1) * max_delay. */
		switchback = (idev->mc_qrv + 1) * max_delay;
		idev->mc_v1_seen = jiffies + switchback;

		/* cancel the interface change timer */
		idev->mc_ifc_count = 0;
		if (del_timer(&idev->mc_ifc_timer))
			__in6_dev_put(idev);
		/* clear deleted report items */
		mld_clear_delrec(idev);
	} else if (len >= 28) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);
		if (!pskb_may_pull(skb, srcs_offset))
			return -EINVAL;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);
		max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
		if (!max_delay)
			max_delay = 1;
		idev->mc_maxdelay = max_delay;
		if (mlh2->mld2q_qrv)
			idev->mc_qrv = mlh2->mld2q_qrv;
		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				return -EINVAL;

			/* Re-fetch: pskb_may_pull() may move the header. */
			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else
		return -EINVAL;

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		/* General query: schedule a response for every group. */
		for (ma = idev->mc_list; ma; ma=ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma=ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);

	return 0;
}
/* called with rcu_read_lock() */
/* Process an MLD report heard on the link: if another node reported a
 * group we belong to, cancel our own pending report timer and clear
 * MAF_LAST_REPORTER (MLDv1 report suppression).
 * Called with rcu_read_lock() held.
 */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		return 0;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		return -EINVAL;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (idev == NULL)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma=ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	return 0;
}
/* Decide whether source @psf must appear in a group record of @type
 * for group @pmc. @gdeleted / @sdeleted indicate that the group resp.
 * the source record comes from a "deleted" (tomb) list.
 */
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		/* Current-state records never cover deleted entries. */
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		/* Only sources with pending retransmissions are reported. */
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}
/* Count how many sources of @pmc would appear in a group record of
 * @type (see is_in() for the per-source decision).
 */
static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	int count = 0;
	struct ip6_sf_list *cur;

	for (cur = pmc->mca_sources; cur; cur = cur->sf_next)
		if (is_in(pmc, cur, type, gdeleted, sdeleted))
			count++;

	return count;
}
/* Fill in the IPv6 header of a locally generated MLD packet: payload
 * length @len, next header @proto, hop limit taken from @sk, and the
 * given source/destination addresses.
 */
static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
	struct net_device *dev,
	const struct in6_addr *saddr,
	const struct in6_addr *daddr,
	int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	/* The header must be reserved before it can be written. */
	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = inet6_sk(sk)->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}
/* Allocate and prime an skb for an MLDv2 report: IPv6 header carrying
 * a hop-by-hop router-alert option, destination mld2_all_mcr, followed
 * by an empty mld2_report header. Returns NULL on allocation failure.
 */
static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
{
	struct net_device *dev = idev->dev;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };

	/* we assume size > sizeof(ra) here */
	size += hlen + tlen;
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);

	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);

	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;	/* filled in by mld_sendpack() */
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;	/* bumped by add_grhead() */
	return skb;
}
/* Finish an MLDv2 report skb built by mld_newpack()/add_grec()
 * (payload length, ICMPv6 checksum), route it, and push it out via
 * the netfilter LOCAL_OUT hook, updating SNMP counters either way.
 * Consumes @skb.
 */
static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	/* Now that all records are in, the lengths are known. */
	payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
	mldlen = skb->tail - skb->transport_header;
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	payload_len = skb->len;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
/* Size in bytes a group record of @type would occupy: the fixed record
 * header plus one 16-byte IPv6 address per reported source.
 */
static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
	int nsrcs = mld_scount(pmc, type, gdel, sdel);

	return sizeof(struct mld2_grec) + 16 * nsrcs;
}
/* Append an empty group record header of @type for @pmc to @skb
 * (allocating a new MTU-sized packet when @skb is NULL), increment the
 * report's record count, and return the skb. *ppgr is set to the new
 * record so the caller can patch grec_nsrcs later. Returns NULL when
 * packet allocation fails.
 */
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr)
{
	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb)
		skb = mld_newpack(pmc->idev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;	/* caller fills in the final count */
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}
/* Bytes still usable in @skb for more record data: room left up to the
 * device MTU once the skb is bound to a device, otherwise the raw
 * tailroom; 0 for a NULL skb.
 */
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
	skb_tailroom(skb)) : 0)
/* Append a group record of @type for @pmc, together with its matching
 * sources, to @skb; full packets are sent and replaced on the fly.
 * Works against the live source list or the tomb list (@sdeleted).
 * Returns the (possibly new) skb, which the caller must eventually
 * pass to mld_sendpack(). Called with pmc->mca_lock held.
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!*psf_list)
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf=*psf_list; psf; psf=psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		/* Not enough room for this source (plus a record header
		 * if this would be the record's first source)? */
		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			/* Deleted-list entries are freed once the last
			 * retransmission has gone out. */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		/* ALLOW/BLOCK with nothing to say need no record at all. */
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}
/* Send an MLDv2 current-state report: for the single group @pmc, or
 * for every reportable group on @idev when @pmc is NULL (the answer to
 * a general query).
 */
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	read_lock_bh(&idev->lock);
	if (!pmc) {
		for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
			if (pmc->mca_flags & MAF_NOREPORT)
				continue;
			spin_lock_bh(&pmc->mca_lock);
			/* IS_EX when any socket excludes, else IS_IN. */
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);
		}
	} else {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}
/*
* remove zero-count source records from a source filter list
*/
/* Remove and free every source record in *@ppsf whose retransmit
 * count (sf_crcount) has reached zero, relinking the list in place.
 */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
{
	struct ip6_sf_list **link = ppsf;
	struct ip6_sf_list *entry;

	while ((entry = *link) != NULL) {
		if (entry->sf_crcount == 0) {
			*link = entry->sf_next;
			kfree(entry);
		} else {
			link = &entry->sf_next;
		}
	}
}
/* Send MLDv2 state-change reports for @idev: BLOCK/TO_IN records for
 * deleted groups on the tomb list (pruning entries whose retransmit
 * budget is spent), then ALLOW/BLOCK and filter-mode-change records
 * for live groups.
 */
static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				/* Budget spent: drop fully-sent sources. */
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);
			}
		}
		/* Nothing left to retransmit: unlink and free the record. */
		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock(&idev->mc_lock);

	/* change recs */
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->mca_crcount--;
		}
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (!skb)
		return;
	(void) mld_sendpack(skb);
}
/* Build and send an MLDv1 message of @type (report or reduction/done)
 * for group @addr on @dev. Reduction messages are addressed to the
 * link-local all-routers group, reports to the group itself.
 */
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct flowi6 fl6;
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	rcu_read_lock();
	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
		      IPSTATS_MIB_OUT, full_len);
	rcu_read_unlock();

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

	if (skb == NULL) {
		rcu_read_lock();
		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}

	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	/* hop-by-hop router-alert option */
	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
	memset(hdr, 0, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
/* Drop one @sfmode reference on source @psfsrc of group @pmc. When the
 * last reference goes, the record either moves to the tomb list (so a
 * BLOCK change report can still be retransmitted) or is freed.
 * Returns 1 if a change record was queued, 0 otherwise, or -ESRCH if
 * the source/count was not found (caller bug).
 */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->mca_sources = psf->sf_next;
		/* Previously-active sources of reportable MLDv2 groups are
		 * parked on the tomb list for change-report retransmits. */
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !MLD_V1_SEEN(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
			rv = 1;
		} else
			kfree(psf);
	}
	return rv;
}
/* Remove @sfcount sources in @psfsrc[] (filter mode @sfmode) from the
 * interface record for group @pmca. When @delta is zero one reference
 * on mca_sfcount[sfmode] is dropped as well. May switch the group
 * back to INCLUDE mode and schedules change reports as needed.
 */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
	int sfmode, int sfcount, const struct in6_addr *psfsrc,
	int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);
	/* Snapshot current per-source state for sf_setstate() below. */
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode]) {
			spin_unlock_bh(&pmc->mca_lock);
			read_unlock_bh(&idev->lock);
			return -EINVAL;
		}
		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i=0; i<sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}
/*
* Add multicast single-source filter to the interface list
*/
/* Add one @sfmode reference to source @psfsrc on group @pmc, creating
 * the source record at the end of the list if it does not exist yet.
 * Returns 0 or -ENOBUFS on allocation failure.
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *cur, *tail = NULL;

	/* Look for an existing record for this source address. */
	for (cur = pmc->mca_sources; cur; cur = cur->sf_next) {
		if (ipv6_addr_equal(&cur->sf_addr, psfsrc))
			break;
		tail = cur;
	}

	if (cur == NULL) {
		/* Not present: append a fresh, zeroed record. */
		cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
		if (cur == NULL)
			return -ENOBUFS;

		cur->sf_addr = *psfsrc;
		if (tail)
			tail->sf_next = cur;
		else
			pmc->mca_sources = cur;
	}

	cur->sf_count[sfmode]++;
	return 0;
}
/* Snapshot, per source, whether it is currently active (sf_oldin) so
 * that sf_setstate() can later detect transitions.
 */
static void sf_markstate(struct ifmcaddr6 *pmc)
{
	int excl_refs = pmc->mca_sfcount[MCAST_EXCLUDE];
	struct ip6_sf_list *src;

	for (src = pmc->mca_sources; src; src = src->sf_next) {
		if (excl_refs) {
			/* EXCLUDE mode: active iff excluded by everyone
			 * and included by no one. */
			src->sf_oldin = !src->sf_count[MCAST_INCLUDE] &&
				src->sf_count[MCAST_EXCLUDE] == excl_refs;
		} else {
			/* INCLUDE mode: active iff anyone includes it. */
			src->sf_oldin = src->sf_count[MCAST_INCLUDE] != 0;
		}
	}
}
/* After a filter change, compare each source's new active state with
 * the sf_oldin snapshot from sf_markstate() and create or remove
 * per-source "delete" (tomb) records accordingly. Returns the number
 * of sources whose state changed. Called with pmc->mca_lock held.
 */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	rv = 0;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				/* inactive -> active: a matching delete
				 * record is now obsolete */
				struct ip6_sf_list *prev = NULL;

				for (dpsf=pmc->mca_tomb; dpsf;
				     dpsf=dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
					    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->mca_tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,
				    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
/*
* Add multicast source filter list to the interface list
*/
/* Add @sfcount sources in @psfsrc[] (filter mode @sfmode) to the
 * interface record for group @pmca. When @delta is zero one reference
 * is also added to mca_sfcount[sfmode]. Rolls back on partial failure
 * and schedules change reports when the filter mode flips.
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
	int sfmode, int sfcount, const struct in6_addr *psfsrc,
	int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);

	/* Snapshot current per-source state for sf_setstate() below. */
	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i=0; i<sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* Roll back the sources already added. */
		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j=0; j<i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}
/* Free all source-filter state of @pmc (both the tomb and the live
 * source lists) and reset the group to its initial (EXCLUDE, empty)
 * filter mode.
 */
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *cur, *rest;

	cur = pmc->mca_tomb;
	pmc->mca_tomb = NULL;
	while (cur) {
		rest = cur->sf_next;
		kfree(cur);
		cur = rest;
	}

	cur = pmc->mca_sources;
	pmc->mca_sources = NULL;
	while (cur) {
		rest = cur->sf_next;
		kfree(cur);
		cur = rest;
	}

	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}
/* MLDv1 join: send an immediate report for @ma and arm the timer for
 * one unsolicited retransmission after a random delay (keeping a
 * shorter already-pending delay).
 */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = net_random() % IGMP6_UNSOLICITED_IVAL;

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);	/* cancelled timer's ref */
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);	/* timer holds a reference */
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}
/* Drop this socket's source-filter state for iml->addr on @idev and free
 * the socket's source list.
 */
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
	struct inet6_dev *idev)
{
	int err;

	/* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
	 * so no other readers or writers of iml or its sflist
	 */
	if (!iml->sflist) {
		/* any-source empty exclude case */
		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	}
	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
		iml->sflist->sl_count, iml->sflist->sl_addr, 0);
	/* The sflist was charged to the socket's option memory; free it the
	 * same way it was allocated. */
	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
	iml->sflist = NULL;
	return err;
}
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (!MLD_V1_SEEN(ma->idev)) {
		/* MLDv2: queue a delete record and trigger the
		 * interface-change report machinery. */
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
		return;
	}

	/* MLDv1: only the host that reported last sends a Done message. */
	if (ma->mca_flags & MAF_LAST_REPORTER)
		igmp6_send(&ma->mca_addr, ma->idev->dev,
			   ICMPV6_MGM_REDUCTION);
}
/* General-query response timer: clear the running flag, report all
 * groups, then drop the idev reference taken when the timer was armed.
 */
static void mld_gq_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	/* Cleared before sending so a new general query can re-arm us. */
	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);
	in6_dev_put(idev);
}
/* Interface-change report timer: send one state-change report and
 * re-arm while retransmissions (mc_ifc_count) remain outstanding.
 */
static void mld_ifc_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	mld_send_cr(idev);
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			/* more retransmissions pending; fire again later */
			mld_ifc_start_timer(idev, idev->mc_maxdelay);
	}
	/* Drop the reference held for this timer invocation. */
	in6_dev_put(idev);
}
/* Kick off MLDv2 interface-change reporting: mc_qrv retransmissions,
 * the first almost immediately. No-op when an MLDv1 querier is present
 * (v1 has no state-change reports).
 */
static void mld_ifc_event(struct inet6_dev *idev)
{
	if (MLD_V1_SEEN(idev))
		return;
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}
/* Per-group report timer: emit the delayed membership report in v1 or
 * v2 format, then drop the reference the armed timer held on ma.
 */
static void igmp6_timer_handler(unsigned long data)
{
	struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;

	if (MLD_V1_SEEN(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	/* We answered this query, so we may have to send Done later. */
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	/* Release the reference taken when the timer was started. */
	ma_put(ma);
}
/* Device changing type */
/* Withdraw every group membership from the hardware/protocol layer;
 * called while the device changes type. (Counterpart of ipv6_mc_remap.)
 */
void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}
/* Re-install the memberships dropped by ipv6_mc_unmap() once the
 * device type change is complete. */
void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}
/* Device going down */
void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	/* Stop pending MLD timers; each pending timer held a reference
	 * on idev which must be dropped here with __in6_dev_put(). */
	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);
	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);

	for (i = idev->mc_list; i; i=i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);

	/* Discard queued MLDv2 delete records for this device. */
	mld_clear_delrec(idev);
}
/* Device going up */
void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i=i->next)
		igmp6_group_added(i);
	read_unlock_bh(&idev->lock);
}
/* IPv6 device initialization. */
/* Initialize the per-device MLD state: locks, the general-query and
 * interface-change timers, and the protocol defaults. */
void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
			(unsigned long)idev);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
			(unsigned long)idev);
	/* Default querier robustness variable and max response delay. */
	idev->mc_qrv = MLD_QRV_DEFAULT;
	idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
	idev->mc_v1_seen = 0;
	write_unlock_bh(&idev->lock);
}
/*
* Device is about to be destroyed: clean up.
*/
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;
		/* Drop the lock around the callbacks: igmp6_group_dropped()
		 * and ma_put() may sleep/send; the entry is already unlinked
		 * so nobody else can reach it. */
		write_unlock_bh(&idev->lock);

		igmp6_group_dropped(i);
		ma_put(i);

		write_lock_bh(&idev->lock);
	}
	write_unlock_bh(&idev->lock);
}
#ifdef CONFIG_PROC_FS
/* Cursor state for the /proc/net/igmp6 seq_file walk: the current
 * device and its inet6_dev (whose read lock is held between ->start
 * and ->stop while idev != NULL). */
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
/* Find the first multicast address in the namespace. On success the
 * matching idev's read lock is left HELD; it is released by
 * igmp6_mc_get_next() when the device's list is exhausted, or by
 * igmp6_mc_seq_stop(). Caller holds rcu_read_lock.
 */
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (im) {
			/* Keep idev->lock held across the iteration. */
			state->idev = idev;
			break;
		}
		read_unlock_bh(&idev->lock);
	}
	return im;
}
/* Advance to the next multicast address, hopping to the next device
 * (dropping and re-taking the per-idev read lock) when the current
 * device's list runs out. Returns NULL at the end of the namespace.
 */
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = im->next;
	while (!im) {
		/* Done with this device: release its lock before moving on. */
		if (likely(state->idev != NULL))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;
	}
	return im;
}
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im;

	/* Walk forward from the first entry until pos reaches zero. */
	for (im = igmp6_mc_get_first(seq); im && pos;
	     im = igmp6_mc_get_next(seq, im))
		--pos;
	return pos ? NULL : im;
}
/* seq_file ->start: take rcu_read_lock (released in ->stop) and seek
 * to the *pos-th entry. */
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}
/* seq_file ->next: bump the position and advance the cursor. */
static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return igmp6_mc_get_next(seq, v);
}
/* seq_file ->stop: drop the per-idev lock still held by the iterator
 * (if any) and the RCU read lock taken in ->start. */
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}
/* Emit one /proc/net/igmp6 line: ifindex, ifname, group address,
 * user count, flags, and remaining timer jiffies (0 if not running). */
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags&MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
	return 0;
}
/* seq_file iterator for /proc/net/igmp6. */
static const struct seq_operations igmp6_mc_seq_ops = {
	.start	=	igmp6_mc_seq_start,
	.next	=	igmp6_mc_seq_next,
	.stop	=	igmp6_mc_seq_stop,
	.show	=	igmp6_mc_seq_show,
};
/* open() for /proc/net/igmp6: per-netns seq_file with our iterator state. */
static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mc_seq_ops,
			    sizeof(struct igmp6_mc_iter_state));
}
/* file_operations for /proc/net/igmp6. */
static const struct file_operations igmp6_mc_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp6_mc_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};
/* Cursor state for /proc/net/mcfilter6: device, its inet6_dev (read
 * lock held while != NULL) and current group (mca_lock held while
 * != NULL). */
struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
/* Find the first source-filter entry in the namespace. On success BOTH
 * the matching idev's read lock and the group's mca_lock are left held;
 * they are released as the iterator advances or in ->stop.
 */
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (likely(im != NULL)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf != NULL)) {
				/* Keep both locks held across iteration. */
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	}
	return psf;
	/* NOTE(review): only the FIRST group of each device is examined
	 * here; groups after an empty-source head are picked up by
	 * igmp6_mcf_get_next() — confirm against the walker below. */
}
/* Advance to the next source entry, moving through groups (mca_lock
 * hand-off) and devices (idev->lock hand-off) as lists run out.
 * Returns NULL at the end of the namespace.
 */
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		/* Current group exhausted: release its lock, move on. */
		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			/* Current device exhausted: release its lock and
			 * find the next device with an inet6_dev. */
			if (likely(state->idev != NULL))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;
	}
out:
	return psf;
}
static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf;

	/* Walk forward from the first entry until pos reaches zero. */
	for (psf = igmp6_mcf_get_first(seq); psf && pos;
	     psf = igmp6_mcf_get_next(seq, psf))
		--pos;
	return pos ? NULL : psf;
}
/* seq_file ->start: position 0 yields SEQ_START_TOKEN so ->show can
 * print the header line; otherwise seek to entry *pos - 1. */
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
/* seq_file ->next: after the header token, start the real walk;
 * otherwise advance the source-filter cursor. */
static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (v == SEQ_START_TOKEN)
		return igmp6_mcf_get_first(seq);
	return igmp6_mcf_get_next(seq, v);
}
/* seq_file ->stop: release the group and device locks still held by
 * the iterator, then the RCU read lock from ->start. */
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = NULL;
	}
	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}
/* Emit the /proc/net/mcfilter6 header (for SEQ_START_TOKEN) or one
 * source-filter row with its INCLUDE/EXCLUDE reference counts. */
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%32s %32s %6s %6s\n", "Idx",
			   "Device", "Multicast Address",
			   "Source Address", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}
/* seq_file iterator for /proc/net/mcfilter6. */
static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	=	igmp6_mcf_seq_start,
	.next	=	igmp6_mcf_seq_next,
	.stop	=	igmp6_mcf_seq_stop,
	.show	=	igmp6_mcf_seq_show,
};
/* open() for /proc/net/mcfilter6: per-netns seq_file with our state. */
static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
			    sizeof(struct igmp6_mcf_iter_state));
}
/* file_operations for /proc/net/mcfilter6. */
static const struct file_operations igmp6_mcf_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp6_mcf_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};
/* Create the per-namespace /proc/net/igmp6 and /proc/net/mcfilter6
 * entries; on partial failure the first entry is removed again. */
static int __net_init igmp6_proc_init(struct net *net)
{
	if (!proc_create("igmp6", S_IRUGO, net->proc_net,
			 &igmp6_mc_seq_fops))
		return -ENOMEM;

	if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
			 &igmp6_mcf_seq_fops)) {
		remove_proc_entry("igmp6", net->proc_net);
		return -ENOMEM;
	}

	return 0;
}
/* Tear down the per-namespace proc entries created in igmp6_proc_init(). */
static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
/* !CONFIG_PROC_FS stubs. */
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif
/* Per-namespace setup: create the kernel ICMPv6 control socket used to
 * transmit MLD packets, and the proc entries. */
static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	/* MLD messages are always sent with hop limit 1. */
	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create;
out:
	return err;

out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	goto out;
}
/* Per-namespace teardown: destroy the control socket and proc entries. */
static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	igmp6_proc_exit(net);
}
/* Per-network-namespace lifecycle hooks. */
static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};
/* Module init: register the per-namespace MLD machinery. */
int __init igmp6_init(void)
{
	return register_pernet_subsys(&igmp6_net_ops);
}
/* Module exit: unregister the per-namespace MLD machinery. */
void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
}
| gpl-2.0 |
nunogia/Z7Max_NX505J_H129_kernel | drivers/power/max17042_battery.c | 1800 | 21222 | /*
* Fuel gauge driver for Maxim 17042 / 8966 / 8997
* Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This driver is based on max17040_battery.c
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
#include <linux/of.h>
/* Status register bits */
#define STATUS_POR_BIT (1 << 1)
#define STATUS_BST_BIT (1 << 3)
#define STATUS_VMN_BIT (1 << 8)
#define STATUS_TMN_BIT (1 << 9)
#define STATUS_SMN_BIT (1 << 10)
#define STATUS_BI_BIT (1 << 11)
#define STATUS_VMX_BIT (1 << 12)
#define STATUS_TMX_BIT (1 << 13)
#define STATUS_SMX_BIT (1 << 14)
#define STATUS_BR_BIT (1 << 15)
/* Interrupt mask bits */
#define CONFIG_ALRT_BIT_ENBL (1 << 2)
#define STATUS_INTR_SOCMIN_BIT (1 << 10)
#define STATUS_INTR_SOCMAX_BIT (1 << 14)
#define VFSOC0_LOCK 0x0000
#define VFSOC0_UNLOCK 0x0080
#define MODEL_UNLOCK1 0X0059
#define MODEL_UNLOCK2 0X00C4
#define MODEL_LOCK1 0X0000
#define MODEL_LOCK2 0X0000
#define dQ_ACC_DIV 0x4
#define dP_ACC_100 0x1900
#define dP_ACC_200 0x3200
/* Driver-private state, stored as the I2C clientdata. */
struct max17042_chip {
	struct i2c_client *client;	/* underlying I2C device */
	struct power_supply battery;	/* registered power-supply class device */
	struct max17042_platform_data *pdata;	/* board config (platform data or DT) */
	struct work_struct work;	/* deferred power-on-reset initialization */
	int    init_complete;	/* nonzero once init finished; gates get_property */
};
/* Write one 16-bit register over SMBus, logging any failure.
 * Returns 0 on success or a negative errno. */
static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
{
	int err = i2c_smbus_write_word_data(client, reg, value);

	if (err < 0)
		dev_err(&client->dev, "%s: err %d\n", __func__, err);

	return err;
}
static int max17042_read_reg(struct i2c_client *client, u8 reg)
{
int ret = i2c_smbus_read_word_data(client, reg);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
return ret;
}
/* Push each addr/value pair of a platform-data init table to the chip. */
static void max17042_set_reg(struct i2c_client *client,
			struct max17042_reg_data *data, int size)
{
	int idx;

	for (idx = 0; idx < size; idx++)
		max17042_write_reg(client, data[idx].addr, data[idx].data);
}
/* Properties exported through the power-supply class. The two CURRENT_*
 * entries MUST stay at the tail: probe() trims num_properties by 2 when
 * current sensing is disabled. */
static enum power_supply_property max17042_battery_props[] = {
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_AVG,
	POWER_SUPPLY_PROP_VOLTAGE_OCV,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CURRENT_AVG,
};
/* power_supply ->get_property: translate fuel-gauge register reads into
 * property values. Returns -EAGAIN until the init worker has completed,
 * a negative I2C error on a failed read, or -EINVAL for unsupported
 * properties (including CURRENT_NOW/AVG without current sensing).
 */
static int max17042_get_property(struct power_supply *psy,
			    enum power_supply_property psp,
			    union power_supply_propval *val)
{
	struct max17042_chip *chip = container_of(psy,
				struct max17042_chip, battery);
	int ret;

	if (!chip->init_complete)
		return -EAGAIN;

	switch (psp) {
	case POWER_SUPPLY_PROP_PRESENT:
		ret = max17042_read_reg(chip->client, MAX17042_STATUS);
		if (ret < 0)
			return ret;

		/* Battery-absent bit set => not present. */
		if (ret & MAX17042_STATUS_BattAbsent)
			val->intval = 0;
		else
			val->intval = 1;
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		ret = max17042_read_reg(chip->client, MAX17042_Cycles);
		if (ret < 0)
			return ret;

		val->intval = ret;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
		if (ret < 0)
			return ret;

		/* Max voltage is in the high byte of MinMaxVolt. */
		val->intval = ret >> 8;
		val->intval *= 20000; /* Units of LSB = 20mV */
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		ret = max17042_read_reg(chip->client, MAX17042_V_empty);
		if (ret < 0)
			return ret;

		val->intval = ret >> 7;
		val->intval *= 10000; /* Units of LSB = 10mV */
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = max17042_read_reg(chip->client, MAX17042_VCELL);
		if (ret < 0)
			return ret;

		/* LSB = 625/8 uV */
		val->intval = ret * 625 / 8;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
		ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
		if (ret < 0)
			return ret;

		val->intval = ret * 625 / 8;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
		if (ret < 0)
			return ret;

		val->intval = ret * 625 / 8;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
		if (ret < 0)
			return ret;

		/* Integer percentage lives in the high byte. */
		val->intval = ret >> 8;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
		if (ret < 0)
			return ret;

		val->intval = ret * 1000 / 2;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		ret = max17042_read_reg(chip->client, MAX17042_TEMP);
		if (ret < 0)
			return ret;

		val->intval = ret;
		/* The value is signed. */
		if (val->intval & 0x8000) {
			val->intval = (0x7fff & ~val->intval) + 1;
			val->intval *= -1;
		}
		/* The value is converted into deci-centigrade scale */
		/* Units of LSB = 1 / 256 degree Celsius */
		val->intval = val->intval * 10 / 256;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (chip->pdata->enable_current_sense) {
			ret = max17042_read_reg(chip->client, MAX17042_Current);
			if (ret < 0)
				return ret;

			val->intval = ret;
			if (val->intval & 0x8000) {
				/* Negative */
				val->intval = ~val->intval & 0x7fff;
				val->intval++;
				val->intval *= -1;
			}
			/* Scale by the board's sense resistor (uOhm). */
			val->intval *= 1562500 / chip->pdata->r_sns;
		} else {
			return -EINVAL;
		}
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		if (chip->pdata->enable_current_sense) {
			ret = max17042_read_reg(chip->client,
						MAX17042_AvgCurrent);
			if (ret < 0)
				return ret;

			val->intval = ret;
			if (val->intval & 0x8000) {
				/* Negative */
				val->intval = ~val->intval & 0x7fff;
				val->intval++;
				val->intval *= -1;
			}
			val->intval *= 1562500 / chip->pdata->r_sns;
		} else {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Write a register and read it back, retrying up to 8 times until the
 * readback matches. Returns the last write status, or -EIO if the value
 * never sticks.
 * NOTE(review): a failed readback returns a negative errno that is
 * truncated into the u16 read_value — presumably it then simply fails
 * the comparison and retries, but worth confirming.
 */
static int max17042_write_verify_reg(struct i2c_client *client,
				u8 reg, u16 value)
{
	int retries = 8;
	int ret;
	u16 read_value;

	do {
		ret = i2c_smbus_write_word_data(client, reg, value);
		read_value =  max17042_read_reg(client, reg);
		if (read_value != value) {
			ret = -EIO;
			retries--;
		}
	} while (retries && read_value != value);

	if (ret < 0)
		dev_err(&client->dev, "%s: err %d\n", __func__, ret);

	return ret;
}
/* Write a power-on-reset override only when platform data provides a
 * non-zero value; zero means "keep the chip default". */
static inline void max17042_override_por(
	struct i2c_client *client, u8 reg, u16 value)
{
	if (value)
		max17042_write_reg(client, reg, value);
}
/* Unlock write access to the cell characterization table.
 * ("max10742" is a long-standing typo for max17042, kept as-is since
 * callers in this file use the name.) */
static inline void max10742_unlock_model(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
}
/* Re-lock the cell characterization table after programming it. */
static inline void max10742_lock_model(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
}
/* Stream 'size' entries of the cell characterization table into
 * consecutive registers starting at 'addr'. */
static inline void max17042_write_model_data(struct max17042_chip *chip,
					u8 addr, int size)
{
	struct i2c_client *client = chip->client;
	u16 *tbl = chip->pdata->config_data->cell_char_tbl;
	int off;

	for (off = 0; off < size; off++)
		max17042_write_reg(client, addr + off, tbl[off]);
}
/* Read 'size' consecutive registers starting at 'addr' into data[]. */
static inline void max17042_read_model_data(struct max17042_chip *chip,
					u8 addr, u16 *data, int size)
{
	int off;

	for (off = 0; off < size; off++)
		data[off] = max17042_read_reg(chip->client, addr + off);
}
/* Compare two characterization tables of 'size' u16 entries, dumping
 * both on mismatch. Returns 0 on match, -EINVAL otherwise.
 *
 * Fix: 'size' is an element count (see max17042_init_model), so memcmp
 * must cover size * sizeof(u16) bytes; the old code compared only
 * 'size' bytes, i.e. half of the table, and could miss a corrupted
 * upper half.
 */
static inline int max17042_model_data_compare(struct max17042_chip *chip,
				u16 *data1, u16 *data2, int size)
{
	int i;

	if (memcmp(data1, data2, size * sizeof(u16))) {
		dev_err(&chip->client->dev, "%s compare failed\n", __func__);
		for (i = 0; i < size; i++)
			dev_info(&chip->client->dev, "0x%x, 0x%x",
				data1[i], data2[i]);
		dev_info(&chip->client->dev, "\n");
		return -EINVAL;
	}
	return 0;
}
static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
int table_size =
sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
u16 *temp_data;
temp_data = kzalloc(table_size, GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
max10742_unlock_model(chip);
max17042_write_model_data(chip, MAX17042_MODELChrTbl,
table_size);
max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
table_size);
ret = max17042_model_data_compare(
chip,
chip->pdata->config_data->cell_char_tbl,
temp_data,
table_size);
max10742_lock_model(chip);
kfree(temp_data);
return ret;
}
/* Verify the characterization table is locked: a locked model reads
 * back as all zeroes. Returns 0 if locked, -EINVAL if any word is
 * non-zero, -ENOMEM on allocation failure.
 *
 * Fix: the old code set table_size = sizeof(cell_char_tbl) — a BYTE
 * count — and then used it as a u16 element count for both the device
 * read and the scan loop, overrunning the temp buffer by 2x. Compute
 * the size in elements and allocate accordingly.
 */
static int max17042_verify_model_lock(struct max17042_chip *chip)
{
	int i;
	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
	u16 *temp_data;
	int ret = 0;

	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
	if (!temp_data)
		return -ENOMEM;

	max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
				table_size);
	for (i = 0; i < table_size; i++)
		if (temp_data[i])
			ret = -EINVAL;
	kfree(temp_data);
	return ret;
}
/* Push the CONFIG, LearnCFG, FilterCFG and RelaxCFG values supplied in
 * platform data to the chip. */
static void max17042_write_config_regs(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	struct max17042_config_data *cfg = chip->pdata->config_data;

	max17042_write_reg(client, MAX17042_CONFIG, cfg->config);
	max17042_write_reg(client, MAX17042_LearnCFG, cfg->learn_cfg);
	max17042_write_reg(client, MAX17042_FilterCFG, cfg->filter_cfg);
	max17042_write_reg(client, MAX17042_RelaxCFG, cfg->relax_cfg);
}
/* Write the battery-model tuning parameters from platform data; all but
 * EmptyTempCo use verified (read-back) writes. */
static void max17042_write_custom_regs(struct max17042_chip *chip)
{
	struct max17042_config_data *config = chip->pdata->config_data;

	max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
				config->rcomp0);
	max17042_write_verify_reg(chip->client, MAX17042_TempCo,
				config->tcompc0);
	max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
			config->empty_tempco);
	max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
				config->kempty0);
	max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
				config->ichgt_term);
}
/* Program the capacity registers from platform data; FullCAP and
 * FullCAPNom use verified writes, DesignCap a plain write. */
static void max17042_update_capacity_regs(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	struct max17042_config_data *cfg = chip->pdata->config_data;

	max17042_write_verify_reg(client, MAX17042_FullCAP, cfg->fullcap);
	max17042_write_reg(client, MAX17042_DesignCap, cfg->design_cap);
	max17042_write_verify_reg(client, MAX17042_FullCAPNom,
				  cfg->fullcapnom);
}
/* Copy the current VFSOC into VFSOC0 through its unlock/lock sequence
 * so SOC tracking restarts from the freshly computed value. */
static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
{
	u16 vfSoc;

	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
	max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
}
/* Recompute RemCap/RepCap from the freshly calculated VFSOC and reload
 * the capacity and coulomb-counter accumulator registers. */
static void max17042_load_new_capacity_params(struct max17042_chip *chip)
{
	u16 full_cap0, rep_cap, dq_acc, vfSoc;
	u32 rem_cap;

	struct max17042_config_data *config = chip->pdata->config_data;

	full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);

	/* vfSoc needs to be shifted by 8 bits to get the
	 * percentage in 1% accuracy; to get the right rem_cap multiply
	 * full_cap0 by that percentage and divide by 100
	 */
	rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
	max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);

	rep_cap = (u16)rem_cap;
	max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);

	/* Write dQ_acc to 200% of Capacity and dP_acc to 200%
	 * (NOTE(review): dQ_ACC_DIV is 4 and dP_ACC_200 is 0x3200 — the
	 * exact scaling convention comes from the vendor init procedure;
	 * confirm against the Maxim application note if changing.) */
	dq_acc = config->fullcap / dQ_ACC_DIV;
	max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
	max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);

	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
			config->fullcap);
	max17042_write_reg(chip->client, MAX17042_DesignCap,
			config->design_cap);
	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
			config->fullcapnom);
}
/*
 * Block write all the override values coming from platform data.
 * This function MUST be called before the POR initialization procedure
 * specified by Maxim. Each value is written only if non-zero (see
 * max17042_override_por); zeroes keep the chip's POR defaults.
 */
static inline void max17042_override_por_values(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	struct max17042_config_data *config = chip->pdata->config_data;

	/* Thermistor and current-sense gain/offset calibration. */
	max17042_override_por(client, MAX17042_TGAIN, config->tgain);
	max17042_override_por(client, MAx17042_TOFF, config->toff);
	max17042_override_por(client, MAX17042_CGAIN, config->cgain);
	max17042_override_por(client, MAX17042_COFF, config->coff);

	/* Alert thresholds (voltage, temperature, SOC). */
	max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
	max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
	max17042_override_por(client, MAX17042_SALRT_Th,
			config->soc_alrt_thresh);
	max17042_override_por(client, MAX17042_CONFIG, config->config);
	max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);

	max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
	max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);

	max17042_override_por(client, MAX17042_AtRate, config->at_rate);
	max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
	max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
	max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
	max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
	max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);

	/* Capacity model and empty-detection parameters. */
	max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
	max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
	max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
	max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
	max17042_override_por(client, MAX17042_dQacc, config->dqacc);
	max17042_override_por(client, MAX17042_dPacc, config->dpacc);

	max17042_override_por(client, MAX17042_V_empty, config->vempty);
	max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
	max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
	max17042_override_por(client, MAX17042_FCTC, config->fctc);
	max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
	max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
	max17042_override_por(client, MAX17042_EmptyTempCo,
			config->empty_tempco);
	max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
}
/* Full power-on-reset initialization sequence: apply platform
 * overrides, program configuration and the cell model, verify the model
 * lock, load capacity parameters and finally clear the POR status bit.
 * Returns 0 on success, -EIO on model programming/verification failure.
 *
 * Fix: the return value of max17042_verify_model_lock() was previously
 * discarded — 'ret' was never assigned, so the following check tested
 * the stale (zero) result of max17042_init_model() and a failed lock
 * verification was silently ignored.
 */
static int max17042_init_chip(struct max17042_chip *chip)
{
	int ret;
	int val;

	max17042_override_por_values(chip);
	/* After Power up, the MAX17042 requires 500mS in order
	 * to perform signal debouncing and initial SOC reporting
	 */
	msleep(500);

	/* Initialize configuration */
	max17042_write_config_regs(chip);

	/* write cell characterization data */
	ret = max17042_init_model(chip);
	if (ret) {
		dev_err(&chip->client->dev, "%s init failed\n",
			__func__);
		return -EIO;
	}

	ret = max17042_verify_model_lock(chip);
	if (ret) {
		dev_err(&chip->client->dev, "%s lock verify failed\n",
			__func__);
		return -EIO;
	}
	/* write custom parameters */
	max17042_write_custom_regs(chip);

	/* update capacity params */
	max17042_update_capacity_regs(chip);

	/* delay must be at least 350mS to allow VFSOC
	 * to be calculated from the new configuration
	 */
	msleep(350);

	/* reset vfsoc0 reg */
	max17042_reset_vfsoc0_reg(chip);

	/* load new capacity params */
	max17042_load_new_capacity_params(chip);

	/* Init complete, Clear the POR bit */
	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	max17042_write_reg(chip->client, MAX17042_STATUS,
			val & (~STATUS_POR_BIT));
	return 0;
}
/* Program the SOC alert window to current SOC +/- 'off' percent so an
 * interrupt fires for every 'off'% change in state of charge.
 * NOTE(review): no clamping — if soc < off the low threshold wraps;
 * the only caller passes off = 1, presumably acceptable. Confirm
 * behaviour near 0%/100% before widening the window. */
static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
{
	u16 soc, soc_tr;

	/* program interrupt thesholds such that we should
	 * get interrupt for every 'off' perc change in the soc
	 */
	soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
	soc_tr = (soc + off) << 8;
	soc_tr |= (soc - off);
	max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
}
/* Threaded IRQ handler for the ALRT pin: when a SOC min/max alert
 * fired, re-centre the alert window around the current SOC, then
 * notify the power-supply core. */
static irqreturn_t max17042_thread_handler(int id, void *dev)
{
	struct max17042_chip *chip = dev;
	u16 status;

	status = max17042_read_reg(chip->client, MAX17042_STATUS);
	if (status & (STATUS_INTR_SOCMIN_BIT | STATUS_INTR_SOCMAX_BIT)) {
		dev_info(&chip->client->dev, "SOC threshold INTR\n");
		max17042_set_soc_threshold(chip, 1);
	}

	power_supply_changed(&chip->battery);
	return IRQ_HANDLED;
}
/* Deferred POR initialization (the 500ms/350ms sleeps in
 * max17042_init_chip() must not run in probe context). Marks
 * init_complete only on success; on failure get_property keeps
 * returning -EAGAIN. */
static void max17042_init_worker(struct work_struct *work)
{
	struct max17042_chip *chip = container_of(work,
				struct max17042_chip, work);
	int ret;

	/* Initialize registers according to values from the platform data */
	if (chip->pdata->enable_por_init && chip->pdata->config_data) {
		ret = max17042_init_chip(chip);
		if (ret)
			return;
	}

	chip->init_complete = 1;
}
#ifdef CONFIG_OF
/* Build platform data from the device tree; falls back to board
 * platform data when the device has no OF node. Returns NULL on
 * allocation failure. */
static struct max17042_platform_data *
max17042_get_pdata(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 prop;
	struct max17042_platform_data *pdata;

	if (!np)
		return dev->platform_data;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	/*
	 * Require current sense resistor value to be specified for
	 * current-sense functionality to be enabled at all.
	 */
	if (of_property_read_u32(np, "maxim,rsns-microohm", &prop) == 0) {
		pdata->r_sns = prop;
		pdata->enable_current_sense = true;
	}

	return pdata;
}
#else
/* !CONFIG_OF variant: platform data comes from the board file only. */
static struct max17042_platform_data *
max17042_get_pdata(struct device *dev)
{
	return dev->platform_data;
}
#endif
/* I2C probe: validate the adapter, fetch platform/DT data, register the
 * power-supply device and, if the chip reports a power-on reset,
 * schedule the slow initialization in a worker.
 *
 * Fix: on power_supply_register() failure the function previously
 * returned without releasing the threaded IRQ or cancelling the
 * scheduled init work, leaking the IRQ and leaving the work item free
 * to run against the devm-freed chip structure.
 */
static int __devinit max17042_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct max17042_chip *chip;
	int ret;
	int reg;
	bool irq_requested = false;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
		return -EIO;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->client = client;
	chip->pdata = max17042_get_pdata(&client->dev);
	if (!chip->pdata) {
		dev_err(&client->dev, "no platform data provided\n");
		return -EINVAL;
	}

	i2c_set_clientdata(client, chip);

	chip->battery.name		= "max17042_battery";
	chip->battery.type		= POWER_SUPPLY_TYPE_BATTERY;
	chip->battery.get_property	= max17042_get_property;
	chip->battery.properties	= max17042_battery_props;
	chip->battery.num_properties	= ARRAY_SIZE(max17042_battery_props);

	/* When current is not measured,
	 * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
	if (!chip->pdata->enable_current_sense)
		chip->battery.num_properties -= 2;

	if (chip->pdata->r_sns == 0)
		chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;

	if (chip->pdata->init_data)
		max17042_set_reg(client, chip->pdata->init_data,
				chip->pdata->num_init_data);

	if (!chip->pdata->enable_current_sense) {
		max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
		max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
	}

	if (client->irq) {
		ret = request_threaded_irq(client->irq, NULL,
						max17042_thread_handler,
						IRQF_TRIGGER_FALLING,
						chip->battery.name, chip);
		if (!ret) {
			irq_requested = true;
			/* Enable the ALRT output and arm the SOC window. */
			reg = max17042_read_reg(client, MAX17042_CONFIG);
			reg |= CONFIG_ALRT_BIT_ENBL;
			max17042_write_reg(client, MAX17042_CONFIG, reg);
			max17042_set_soc_threshold(chip, 1);
		} else
			dev_err(&client->dev, "%s(): cannot get IRQ\n",
				__func__);
	}

	reg = max17042_read_reg(chip->client, MAX17042_STATUS);
	/* Work is always initialized so the error path may cancel it. */
	INIT_WORK(&chip->work, max17042_init_worker);
	if (reg & STATUS_POR_BIT) {
		schedule_work(&chip->work);
	} else {
		chip->init_complete = 1;
	}

	ret = power_supply_register(&client->dev, &chip->battery);
	if (ret) {
		dev_err(&client->dev, "failed: power supply register\n");
		cancel_work_sync(&chip->work);
		if (irq_requested)
			free_irq(client->irq, chip);
	}
	return ret;
}
/* I2C remove: unregister the power supply and release resources.
 *
 * Fix: the IRQ requested in probe() was never freed, so unbinding the
 * driver left a stale handler referencing the freed chip structure.
 */
static int __devexit max17042_remove(struct i2c_client *client)
{
	struct max17042_chip *chip = i2c_get_clientdata(client);

	if (client->irq)
		free_irq(client->irq, chip);
	power_supply_unregister(&chip->battery);
	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
#endif
/* Legacy (non-DT) I2C device id table. */
static const struct i2c_device_id max17042_id[] = {
	{ "max17042", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
/* i2c driver glue; of_match_ptr() compiles to NULL when CONFIG_OF is off. */
static struct i2c_driver max17042_i2c_driver = {
	.driver	= {
		.name	= "max17042",
		.of_match_table = of_match_ptr(max17042_dt_match),
	},
	.probe		= max17042_probe,
	.remove		= __devexit_p(max17042_remove),
	.id_table	= max17042_id,
};
module_i2c_driver(max17042_i2c_driver);

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ashyx/kernel_gts28ve-gts210ve | drivers/i2c/busses/i2c-nuc900.c | 2312 | 14737 | /*
* linux/drivers/i2c/busses/i2c-nuc900.c
*
* Copyright (c) 2010 Nuvoton technology corporation.
*
* This driver based on S3C2410 I2C driver of Ben Dooks <ben-Y5A6D6n0/KfQXOPxS62xeg@public.gmane.org>.
* Written by Wan ZongShun <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/mfp.h>
#include <linux/platform_data/i2c-nuc900.h>
/* nuc900 i2c registers offset */
#define CSR 0x00
#define DIVIDER 0x04
#define CMDR 0x08
#define SWR 0x0C
#define RXR 0x10
#define TXR 0x14
/* nuc900 i2c CSR register bits */
#define IRQEN 0x003
#define I2CBUSY 0x400
#define I2CSTART 0x018
#define IRQFLAG 0x004
#define ARBIT_LOST 0x200
#define SLAVE_ACK 0x800
/* nuc900 i2c CMDR register bits */
#define I2C_CMD_START 0x10
#define I2C_CMD_STOP 0x08
#define I2C_CMD_READ 0x04
#define I2C_CMD_WRITE 0x02
#define I2C_CMD_NACK 0x01
/* i2c controller state, driven by the IRQ handler state machine */
enum nuc900_i2c_state {
	STATE_IDLE,	/* no transfer in progress */
	STATE_START,	/* START + address byte has been issued */
	STATE_READ,	/* receiving data bytes from the slave */
	STATE_WRITE,	/* transmitting data bytes to the slave */
	STATE_STOP	/* STOP issued; transfer is winding down */
};
/* i2c controller private data */
struct nuc900_i2c {
	spinlock_t		lock;		/* guards the msg_* fields */
	wait_queue_head_t	wait;		/* woken when msg_num drops to 0 */

	struct i2c_msg		*msg;		/* message currently on the wire */
	unsigned int		msg_num;	/* number of messages in transfer */
	unsigned int		msg_idx;	/* index of current message */
	unsigned int		msg_ptr;	/* byte offset inside current msg */

	unsigned int		irq;		/* interrupt line number */

	enum nuc900_i2c_state	state;		/* current state machine state */

	void __iomem		*regs;		/* mapped controller registers */
	struct clk		*clk;
	struct device		*dev;
	struct resource		*ioarea;	/* claimed MMIO region */
	struct i2c_adapter	adap;
};
/* nuc900_i2c_master_complete
 *
 * complete the message and wake up the caller, using the given return code,
 * or zero to mean ok.
 */
static inline void nuc900_i2c_master_complete(struct nuc900_i2c *i2c, int ret)
{
	dev_dbg(i2c->dev, "master_complete %d\n", ret);

	i2c->msg_ptr = 0;
	i2c->msg = NULL;
	/* on success msg_idx ends up equal to the message count, which is
	 * what nuc900_i2c_doxfer() reports back to the i2c core */
	i2c->msg_idx++;
	i2c->msg_num = 0;
	if (ret)
		i2c->msg_idx = ret;	/* errors travel back via msg_idx */

	/* msg_num == 0 is the wake condition used in nuc900_i2c_doxfer() */
	wake_up(&i2c->wait);
}
/* irq enable/disable helpers: flip the IRQEN bits in the CSR register */
static inline void nuc900_i2c_disable_irq(struct nuc900_i2c *i2c)
{
	unsigned long csr = readl(i2c->regs + CSR);

	writel(csr & ~IRQEN, i2c->regs + CSR);
}
/* Unmask the controller interrupt by setting the IRQEN bits in CSR. */
static inline void nuc900_i2c_enable_irq(struct nuc900_i2c *i2c)
{
	unsigned long csr = readl(i2c->regs + CSR);

	writel(csr | IRQEN, i2c->regs + CSR);
}
/* nuc900_i2c_message_start
 *
 * put the start of a message onto the bus: load the 7-bit slave address
 * (shifted up, R/W flag in bit 0) into TXR and issue START + WRITE.
 */
static void nuc900_i2c_message_start(struct nuc900_i2c *i2c,
				     struct i2c_msg *msg)
{
	unsigned int addr = (msg->addr & 0x7f) << 1;

	if (msg->flags & I2C_M_RD)
		addr |= 0x1;	/* bit 0 of the address byte selects read */
	writel(addr & 0xff, i2c->regs + TXR);
	writel(I2C_CMD_START | I2C_CMD_WRITE, i2c->regs + CMDR);
}
/* Issue a STOP condition, complete the current transfer with @ret
 * (0 = success, negative errno otherwise) and mask the interrupt. */
static inline void nuc900_i2c_stop(struct nuc900_i2c *i2c, int ret)
{
	dev_dbg(i2c->dev, "STOP\n");

	/* stop the transfer */
	i2c->state = STATE_STOP;
	writel(I2C_CMD_STOP, i2c->regs + CMDR);

	nuc900_i2c_master_complete(i2c, ret);
	nuc900_i2c_disable_irq(i2c);
}
/* helper functions to determine the current state in the set of
 * messages we are sending
 */

/* is_lastmsg()
 *
 * returns TRUE if the current message is the last in the set
 */
static inline int is_lastmsg(struct nuc900_i2c *i2c)
{
	unsigned int last = i2c->msg_num - 1;

	return i2c->msg_idx >= last;
}
/* is_msglast
*
* returns TRUE if we this is the last byte in the current message
*/
static inline int is_msglast(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr == i2c->msg->len-1;
}
/* is_msgend
*
* returns TRUE if we reached the end of the current message
*/
static inline int is_msgend(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr >= i2c->msg->len;
}
/* i2c_nuc900_irq_nextbyte
 *
 * process an interrupt and work out what to do: advance the state
 * machine by one byte (or one state transition) per controller event.
 */
static void i2c_nuc900_irq_nextbyte(struct nuc900_i2c *i2c,
				    unsigned long iicstat)
{
	unsigned char byte;

	switch (i2c->state) {

	case STATE_IDLE:
		/* spurious event: should never fire while idle */
		dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
		break;

	case STATE_STOP:
		/* stray event after STOP was issued; mask the irq again */
		dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
		nuc900_i2c_disable_irq(i2c);
		break;

	case STATE_START:
		/* last thing we did was send a start condition on the
		 * bus, or started a new i2c message
		 */

		/* NOTE(review): SLAVE_ACK being set is treated here as
		 * "no ack received" -- confirm the bit polarity against
		 * the NUC900 CSR register documentation. */
		if (iicstat & SLAVE_ACK &&
		    !(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
			/* ack was not received... */

			dev_dbg(i2c->dev, "ack was not received\n");
			nuc900_i2c_stop(i2c, -ENXIO);
			break;
		}

		if (i2c->msg->flags & I2C_M_RD)
			i2c->state = STATE_READ;
		else
			i2c->state = STATE_WRITE;

		/* terminate the transfer if there is nothing to do
		 * as this is used by the i2c probe to find devices.
		 */

		if (is_lastmsg(i2c) && i2c->msg->len == 0) {
			nuc900_i2c_stop(i2c, 0);
			break;
		}

		if (i2c->state == STATE_READ)
			goto prepare_read;

		/* fall through to the write state, as we will need to
		 * send a byte as well
		 */

	case STATE_WRITE:
		/* we are writing data to the device... check for the
		 * end of the message, and if so, work out what to do
		 */

		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
			if (iicstat & SLAVE_ACK) {
				dev_dbg(i2c->dev, "WRITE: No Ack\n");

				nuc900_i2c_stop(i2c, -ECONNREFUSED);
				break;
			}
		}

retry_write:

		if (!is_msgend(i2c)) {
			/* more bytes to send: queue the next one */
			byte = i2c->msg->buf[i2c->msg_ptr++];
			writeb(byte, i2c->regs + TXR);
			writel(I2C_CMD_WRITE, i2c->regs + CMDR);

		} else if (!is_lastmsg(i2c)) {
			/* we need to go to the next i2c message */

			dev_dbg(i2c->dev, "WRITE: Next Message\n");

			i2c->msg_ptr = 0;
			i2c->msg_idx++;
			i2c->msg++;

			/* check to see if we need to do another message */
			if (i2c->msg->flags & I2C_M_NOSTART) {

				if (i2c->msg->flags & I2C_M_RD) {
					/* cannot do this, the controller
					 * forces us to send a new START
					 * when we change direction
					 */

					nuc900_i2c_stop(i2c, -EINVAL);
				}

				goto retry_write;
			} else {
				/* send the new start */
				nuc900_i2c_message_start(i2c, i2c->msg);
				i2c->state = STATE_START;
			}

		} else {
			/* send stop */

			nuc900_i2c_stop(i2c, 0);
		}
		break;

	case STATE_READ:
		/* we have a byte of data in the data register, do
		 * something with it, and then work out whether we are
		 * going to do any more read/write
		 */

		byte = readb(i2c->regs + RXR);
		i2c->msg->buf[i2c->msg_ptr++] = byte;

prepare_read:
		if (is_msglast(i2c)) {
			/* last byte of buffer: NACK the final read of the
			 * final message so the slave releases the bus */

			if (is_lastmsg(i2c))
				writel(I2C_CMD_READ | I2C_CMD_NACK,
				       i2c->regs + CMDR);
		} else if (is_msgend(i2c)) {
			/* ok, we've read the entire buffer, see if there
			 * is anything else we need to do
			 */

			if (is_lastmsg(i2c)) {
				/* last message, send stop and complete */
				dev_dbg(i2c->dev, "READ: Send Stop\n");

				nuc900_i2c_stop(i2c, 0);
			} else {
				/* go to the next transfer */
				dev_dbg(i2c->dev, "READ: Next Transfer\n");

				i2c->msg_ptr = 0;
				i2c->msg_idx++;
				i2c->msg++;

				writel(I2C_CMD_READ, i2c->regs + CMDR);
			}
		} else {
			/* middle of the buffer: ACK and keep reading */
			writel(I2C_CMD_READ, i2c->regs + CMDR);
		}
		break;
	}
}
/* nuc900_i2c_irq
 *
 * top level IRQ servicing routine: acknowledge the event, bail out on
 * arbitration loss or a spurious interrupt, otherwise feed the byte
 * state machine.
 */
static irqreturn_t nuc900_i2c_irq(int irqno, void *dev_id)
{
	struct nuc900_i2c *i2c = dev_id;
	unsigned long status;

	status = readl(i2c->regs + CSR);
	/* writing IRQFLAG back acknowledges the interrupt */
	writel(status | IRQFLAG, i2c->regs + CSR);

	if (status & ARBIT_LOST) {
		/* deal with arbitration loss */
		dev_err(i2c->dev, "deal with arbitration loss\n");
		goto out;
	}

	if (i2c->state == STATE_IDLE) {
		dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");
		goto out;
	}

	/* pretty much this leaves us with the fact that we've
	 * transmitted or received whatever byte we last sent
	 */

	i2c_nuc900_irq_nextbyte(i2c, status);

out:
	return IRQ_HANDLED;
}
/* nuc900_i2c_set_master
 *
 * get the i2c bus for a master transaction: poll (up to ~400 ms, in
 * 1 ms naps) until the controller is ready to START and the bus is
 * not busy.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int nuc900_i2c_set_master(struct nuc900_i2c *i2c)
{
	int tries;

	for (tries = 400; tries > 0; tries--) {
		if (((readl(i2c->regs + SWR) & I2CSTART) == I2CSTART) &&
		    ((readl(i2c->regs + CSR) & I2CBUSY) == 0))
			return 0;

		msleep(1);
	}

	return -ETIMEDOUT;
}
/* nuc900_i2c_doxfer
 *
 * this starts an i2c transfer: claims the bus, kicks off the first
 * message and sleeps until the IRQ state machine completes (or a 5s
 * timeout elapses).  Returns the number of messages transferred, or a
 * negative errno (propagated via msg_idx by the completion path).
 */
static int nuc900_i2c_doxfer(struct nuc900_i2c *i2c,
			     struct i2c_msg *msgs, int num)
{
	unsigned long iicstat, timeout;
	int spins = 20;
	int ret;

	ret = nuc900_i2c_set_master(i2c);
	if (ret != 0) {
		dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
		/* -EAGAIN tells nuc900_i2c_xfer() to retry the transfer */
		ret = -EAGAIN;
		goto out;
	}

	/* the lock keeps the IRQ handler from seeing half-initialised
	 * message state */
	spin_lock_irq(&i2c->lock);

	i2c->msg     = msgs;
	i2c->msg_num = num;
	i2c->msg_ptr = 0;
	i2c->msg_idx = 0;
	i2c->state   = STATE_START;

	nuc900_i2c_message_start(i2c, msgs);
	spin_unlock_irq(&i2c->lock);

	/* msg_num is zeroed by nuc900_i2c_master_complete() */
	timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);

	ret = i2c->msg_idx;

	/* having these next two as dev_err() makes life very
	 * noisy when doing an i2cdetect
	 */
	if (timeout == 0)
		dev_dbg(i2c->dev, "timeout\n");
	else if (ret != num)
		dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);

	/* ensure the stop has been through the bus */

	dev_dbg(i2c->dev, "waiting for bus idle\n");

	/* first, try busy waiting briefly */
	do {
		iicstat = readl(i2c->regs + CSR);
	} while ((iicstat & I2CBUSY) && --spins);

	/* if that timed out sleep */
	if (!spins) {
		msleep(1);
		iicstat = readl(i2c->regs + CSR);
	}

	if (iicstat & I2CBUSY)
		dev_warn(i2c->dev, "timeout waiting for bus idle\n");

out:
	return ret;
}
/* nuc900_i2c_xfer
 *
 * first port of call from the i2c bus code when an message needs
 * transferring across the i2c bus.  Retries only on -EAGAIN (bus
 * could not be claimed); any other result is returned directly.
 */
static int nuc900_i2c_xfer(struct i2c_adapter *adap,
			   struct i2c_msg *msgs, int num)
{
	struct nuc900_i2c *i2c = (struct nuc900_i2c *)adap->algo_data;
	int retry;
	int ret;

	nuc900_i2c_enable_irq(i2c);

	for (retry = 0; retry < adap->retries; retry++) {

		ret = nuc900_i2c_doxfer(i2c, msgs, num);

		if (ret != -EAGAIN)
			return ret;

		dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);

		udelay(100);
	}

	return -EREMOTEIO;
}
/* declare our i2c functionality (note: no I2C_FUNC_10BIT_ADDR --
 * nuc900_i2c_message_start() masks the address to 7 bits) */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
		I2C_FUNC_PROTOCOL_MANGLING;
}

/* i2c bus registration info */
static const struct i2c_algorithm nuc900_i2c_algorithm = {
	.master_xfer		= nuc900_i2c_xfer,
	.functionality		= nuc900_i2c_func,
};
/* nuc900_i2c_probe
*
* called by the bus driver when a suitable device is found
*/
static int nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
struct resource *res;
int ret;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
i2c = kzalloc(sizeof(struct nuc900_i2c), GFP_KERNEL);
if (!i2c) {
dev_err(&pdev->dev, "no memory for state\n");
return -ENOMEM;
}
strlcpy(i2c->adap.name, "nuc900-i2c0", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &nuc900_i2c_algorithm;
i2c->adap.retries = 2;
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
i2c->dev = &pdev->dev;
i2c->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = -ENOENT;
goto err_noclk;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
ret = -ENOENT;
goto err_clk;
}
i2c->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (i2c->ioarea == NULL) {
dev_err(&pdev->dev, "cannot request IO\n");
ret = -ENXIO;
goto err_clk;
}
i2c->regs = ioremap(res->start, resource_size(res));
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
goto err_ioarea;
}
dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
i2c->regs, i2c->ioarea, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
ret = (i2c->clk.apbfreq)/(pdata->bus_freq * 5) - 1;
writel(ret & 0xffff, i2c->regs + DIVIDER);
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
*/
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
goto err_iomap;
}
ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_SHARED,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
goto err_iomap;
}
/* Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
* the platform data, so if unset it will now default to always
* being bus 0.
*/
i2c->adap.nr = pdata->bus_num;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
goto err_irq;
}
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: NUC900 I2C adapter\n",
dev_name(&i2c->adap.dev));
return 0;
err_irq:
free_irq(i2c->irq, i2c);
err_iomap:
iounmap(i2c->regs);
err_ioarea:
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
err_clk:
clk_disable(i2c->clk);
clk_put(i2c->clk);
err_noclk:
kfree(i2c);
return ret;
}
/* nuc900_i2c_remove
 *
 * called when device is removed from the bus; releases resources in
 * the reverse order that probe acquired them.
 */
static int nuc900_i2c_remove(struct platform_device *pdev)
{
	struct nuc900_i2c *i2c = platform_get_drvdata(pdev);

	i2c_del_adapter(&i2c->adap);
	free_irq(i2c->irq, i2c);

	clk_disable(i2c->clk);
	clk_put(i2c->clk);

	iounmap(i2c->regs);

	release_resource(i2c->ioarea);
	kfree(i2c->ioarea);
	kfree(i2c);

	return 0;
}
/* platform driver glue */
static struct platform_driver nuc900_i2c_driver = {
	.probe		= nuc900_i2c_probe,
	.remove		= nuc900_i2c_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "nuc900-i2c0",
	},
};

static int __init i2c_adap_nuc900_init(void)
{
	return platform_driver_register(&nuc900_i2c_driver);
}

static void __exit i2c_adap_nuc900_exit(void)
{
	platform_driver_unregister(&nuc900_i2c_driver);
}
/* registered at subsys_initcall so the bus exists before most device
 * driver initcalls run */
subsys_initcall(i2c_adap_nuc900_init);
module_exit(i2c_adap_nuc900_exit);

MODULE_DESCRIPTION("NUC900 I2C Bus driver");
MODULE_AUTHOR("Wan ZongShun, <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-i2c0");
| gpl-2.0 |
jameskdev/lge-kernel-d1l_kr | arch/unicore32/mm/init.c | 2568 | 12908 | /*
* linux/arch/unicore32/mm/init.c
*
* Copyright (C) 2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <mach/map.h>
#include "mm.h"
/* Default initrd location and size; overridable via the
 * "initrd=start,size" kernel command-line parameter below. */
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;

/* Parse "initrd=start,size".  Malformed values (missing comma) are
 * silently ignored and the defaults kept; always reports success. */
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map. It is populated by uc32_add_memory().
 */
struct meminfo meminfo;

/* Walk every registered memory bank and print per-category page counts
 * (reserved / swap-cached / slab / free / shared). */
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk(KERN_DEFAULT "Mem-info:\n");
	show_free_areas(filter);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				/* mapped more than once: count extra users */
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk(KERN_DEFAULT "%d pages of RAM\n", total);
	printk(KERN_DEFAULT "%d free pages\n", free);
	printk(KERN_DEFAULT "%d reserved pages\n", reserved);
	printk(KERN_DEFAULT "%d slab pages\n", slab);
	printk(KERN_DEFAULT "%d pages shared\n", shared);
	printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}
/* Scan all memory banks and report the lowest pfn, the highest lowmem
 * pfn (banks not marked highmem) and the overall highest pfn. */
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;		/* start at the largest possible pfn */
	*max_low = *max_high = 0;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;	/* highmem banks don't move max_low */
		if (*max_low < end)
			*max_low = end;
	}
}
/* Set up the bootmem allocator for node 0 covering [start_pfn, end_pfn):
 * allocate its bitmap from memblock, hand memblock's free memory to
 * bootmem, then re-reserve memblock's reserved regions in bootmem. */
static void __init uc32_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;	/* clamp to the node's range */
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			       (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
/* Compute zone sizes/holes for the single lowmem zone and hand them to
 * free_area_init_node().  Holes are the parts of [min, max_low) not
 * covered by any memblock memory region. */
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}
/* A pfn is valid iff its physical address lies inside a memblock
 * memory region. */
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
/* Hook for sparsemem section registration; intentionally empty here
 * (no sparsemem-specific work needed on this architecture). */
static void uc32_memory_present(void)
{
}
/* sort() comparator: order memory banks by ascending start pfn. */
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);

	if (cmp < 0)
		return -1;
	return cmp > 0 ? 1 : 0;
}
/* Populate memblock from the (sorted) meminfo banks and reserve the
 * kernel image, the initrd and MMU bootstrap memory. */
void __init uc32_memblock_init(struct meminfo *mi)
{
	int i;

	/* sort banks so later code may assume ascending address order */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
		meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(_text), _end - _text);

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	uc32_mm_memblock_reserve();

	memblock_analyze();
	memblock_dump_all();
}
/* Top-level boot-time memory initialisation: find pfn limits, bring up
 * bootmem, sparsemem and the zone allocator, then record pfn limits. */
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	uc32_bootmem_init(min, max_low);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	uc32_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	uc32_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
/* Give the pages in [pfn, end) back to the page allocator, logging the
 * amount freed under label @s (if non-NULL).  Returns the page count. */
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);	/* reset refcount before freeing */
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}
/* Release the struct-page (mem_map) backing store covering the pfn gap
 * [start_pfn, end_pfn) back to bootmem. */
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the
 * memory map: for each gap between consecutive banks, release the
 * struct-page storage that covers the hole.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;

	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

	/* tally reserved vs free pages across all banks for reporting */
	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

	/* dump the virtual memory layout for debugging */
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
		VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
		DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
		VMALLOC_START, VMALLOC_END,
		DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
		PAGE_OFFSET, (unsigned long)high_memory,
		DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
		MODULES_VADDR, MODULES_END,
		DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),

		__init_begin, __init_end,
		DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
		_stext, _etext,
		DIV_ROUND_UP((_etext - _stext), SZ_1K),
		_sdata, _edata,
		DIV_ROUND_UP((_edata - _sdata), SZ_1K));

	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
/* Release the __init text/data sections back to the page allocator
 * once boot-time initialisation is complete. */
void free_initmem(void)
{
	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
				    __phys_to_pfn(__pa(__init_end)),
				    "init");
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;		/* set by "keepinitrd" command line option */

/* Return the initrd pages to the page allocator, unless the user asked
 * to keep the initrd resident via "keepinitrd". */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
| gpl-2.0 |
Elite-Kernels/Elite_M8 | drivers/pci/hotplug/pciehp_hpc.c | 3336 | 25680 | /*
* PCI Express PCI Hot Plug Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
* Copyright (C) 2003-2004 Intel Corporation
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/slab.h>
#include "../pci.h"
#include "pciehp.h"
/* Config-space accessors for this port's PCI Express capability block;
 * @reg is an offset relative to the start of the PCIe capability. */
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value);
}

static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}

static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value);
}

static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}
/* Power Control Command */
#define POWER_ON 0
#define POWER_OFF PCI_EXP_SLTCTL_PCC
static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
/* This is the interrupt polling timeout function: invoke the ISR by
 * hand and re-arm the timer for the next polling interval. */
static void int_poll_timeout(unsigned long data)
{
	struct controller *ctrl = (struct controller *)data;

	/* Poll for interrupt events.  regs == NULL => polling */
	pcie_isr(0, ctrl);

	init_timer(&ctrl->poll_timer);
	if (!pciehp_poll_time)
		pciehp_poll_time = 2; /* default polling interval is 2 sec */

	start_int_poll_timer(ctrl, pciehp_poll_time);
}
/* This function starts the interrupt polling timer, firing in @sec
 * seconds (clamped to 1..60). */
static void start_int_poll_timer(struct controller *ctrl, int sec)
{
	/* Clamp to sane value */
	if ((sec <= 0) || (sec > 60))
		sec = 2;

	ctrl->poll_timer.function = &int_poll_timeout;
	ctrl->poll_timer.data = (unsigned long)ctrl;
	ctrl->poll_timer.expires = jiffies + sec * HZ;
	add_timer(&ctrl->poll_timer);
}
/* Set up event delivery: either start the polling timer (poll mode) or
 * install the shared interrupt handler.  Returns 0 or request_irq error. */
static inline int pciehp_request_irq(struct controller *ctrl)
{
	int retval, irq = ctrl->pcie->irq;

	/* Install interrupt polling timer. Start with 10 sec delay */
	if (pciehp_poll_mode) {
		init_timer(&ctrl->poll_timer);
		start_int_poll_timer(ctrl, 10);
		return 0;
	}

	/* Installs the interrupt handler */
	retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
	if (retval)
		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
			 irq);
	return retval;
}
/* Undo pciehp_request_irq(): stop the poll timer or release the irq. */
static inline void pciehp_free_irq(struct controller *ctrl)
{
	if (pciehp_poll_mode)
		del_timer_sync(&ctrl->poll_timer);
	else
		free_irq(ctrl->pcie->irq, ctrl);
}
/* Poll the slot status register for the Command Completed bit, for up
 * to ~1 second in 10ms steps.  The bit is cleared (write-one-to-clear)
 * when seen.  Returns 1 on completion, 0 on timeout. */
static int pcie_poll_cmd(struct controller *ctrl)
{
	u16 slot_status;
	int err, timeout = 1000;

	err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
	if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
		pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
		return 1;
	}
	while (timeout > 0) {
		msleep(10);
		timeout -= 10;
		err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
		if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
			pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
			return 1;
		}
	}
	return 0;	/* timeout */
}
/* Wait for the last issued slot command to complete, either by polling
 * (@poll) or by sleeping on the controller's wait queue until the ISR
 * clears cmd_busy.  The budget is 2500 ms in global poll mode, 1000 ms
 * otherwise; on expiry only a debug message is emitted.  (Note that
 * pcie_poll_cmd() uses its own internal ~1000 ms budget regardless.) */
static void pcie_wait_cmd(struct controller *ctrl, int poll)
{
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long timeout = msecs_to_jiffies(msecs);
	int rc;

	if (poll)
		rc = pcie_poll_cmd(ctrl);
	else
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	if (!rc)
		/* report the timeout actually used, not a hardcoded 1000 */
		ctrl_dbg(ctrl, "Command not completed in %u msec\n", msecs);
}
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Performs a read-modify-write of the Slot Control register under
 * ctrl_lock, then waits for the Command Completed event unless the
 * controller is known not to generate one.
 */
static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	int retval = 0;
	u16 slot_status;
	u16 slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
	if (retval) {
		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
			 __func__);
		goto out;
	}

	if (slot_status & PCI_EXP_SLTSTA_CC) {
		/* a previous command left its completion bit set */
		if (!ctrl->no_cmd_complete) {
			/*
			 * After 1 sec and CMD_COMPLETED still not set, just
			 * proceed forward to issue the next command according
			 * to spec. Just print out the error message.
			 */
			ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
		} else if (!NO_CMD_CMPL(ctrl)) {
			/*
			 * This controller semms to notify of command completed
			 * event even though it supports none of power
			 * controller, attention led, power led and EMI.
			 */
			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to "
				 "wait for command completed event.\n");
			ctrl->no_cmd_complete = 0;
		} else {
			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe "
				 "the controller is broken.\n");
		}
	}

	retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
	if (retval) {
		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
		goto out;
	}

	/* merge the requested bits into the current register value */
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	smp_mb();	/* cmd_busy must be visible before the write triggers */
	retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl);
	if (retval)
		ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");

	/*
	 * Wait for command completion.
	 */
	if (!retval && !ctrl->no_cmd_complete) {
		int poll = 0;
		/*
		 * if hotplug interrupt is not enabled or command
		 * completed interrupt is not enabled, we need to poll
		 * command completed event.
		 */
		if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
		    !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
			poll = 1;
		pcie_wait_cmd(ctrl, poll);
	}
 out:
	mutex_unlock(&ctrl->ctrl_lock);
	return retval;
}
/*
 * Return true when the Data Link Layer Link Active bit is set in the
 * Link Status register. A failed register read is reported as "not
 * active".
 */
static bool check_link_active(struct controller *ctrl)
{
	u16 lnk_status;
	bool active;

	if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status))
		return false;

	active = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
	if (active)
		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	return active;
}
/*
 * Poll every 10 ms, for up to 1 second, until the link-active state
 * matches @active. Logs a debug message if the state never matched.
 */
static void __pcie_wait_link_active(struct controller *ctrl, bool active)
{
	int budget;

	for (budget = 1000; ; budget -= 10) {
		if (check_link_active(ctrl) == active)
			return;
		if (budget <= 0)
			break;
		msleep(10);
	}
	ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
		 active ? "set" : "cleared");
}
/* Wait (up to 1 s) for the Data Link Layer Link Active bit to be set. */
static void pcie_wait_link_active(struct controller *ctrl)
{
	__pcie_wait_link_active(ctrl, true);
}
/* Wait (up to 1 s) for the Data Link Layer Link Active bit to clear. */
static void pcie_wait_link_not_active(struct controller *ctrl)
{
	__pcie_wait_link_active(ctrl, false);
}
/*
 * Probe for a device at @bus/@devfn by retrying the vendor-ID config
 * read every 20 ms for up to 1 second. Returns true once the device
 * answers; logs how many attempts were needed when pciehp_debug is set.
 */
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
	u32 l;
	int attempts = 0;
	int budget = 1000;	/* total wait, ms */
	int step = 20;		/* per-retry sleep, ms */
	bool present = false;

	for (;;) {
		present = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
		attempts++;
		if (present)
			break;
		msleep(step);
		budget -= step;
		if (budget <= 0)
			break;
	}

	if (attempts > 1 && pciehp_debug)
		printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
		       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
		       PCI_FUNC(devfn), attempts, step, l);
	return present;
}
/*
 * Verify after a hot-add that the link came up cleanly and that a
 * device at devfn 0 answers config reads.
 *
 * Returns 0 on success, the register read error code, or -1 when link
 * training failed or no device responded.
 *
 * Fix: the link-training error message carried a stray space before the
 * newline ("occurs \n"); removed.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	u16 lnk_status;
	int retval = 0;
	bool found = false;

	/*
	 * Data Link Layer Link Active Reporting must be capable for
	 * hot-plug capable downstream port. But old controller might
	 * not implement it. In this case, we wait for 1000 ms.
	 */
	if (ctrl->link_active_reporting)
		pcie_wait_link_active(ctrl);
	else
		msleep(1000);

	/* wait 100ms before read pci conf, and try in 1s */
	msleep(100);
	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
	if (retval) {
		ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
		return retval;
	}

	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_err(ctrl, "Link Training Error occurs\n");
		retval = -1;
		return retval;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	/* Link is up but nothing answered config reads: report failure. */
	if (!found && !retval)
		retval = -1;

	return retval;
}
/*
 * Set or clear the Link Disable bit in the Link Control register
 * (clear = link enabled). Returns 0 or a register-access error code.
 */
static int __pciehp_link_set(struct controller *ctrl, bool enable)
{
	u16 lnk_ctrl;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_LNKCTL, &lnk_ctrl);
	if (rc) {
		ctrl_err(ctrl, "Cannot read LNKCTRL register\n");
		return rc;
	}

	lnk_ctrl = enable ? (lnk_ctrl & ~PCI_EXP_LNKCTL_LD)
			  : (lnk_ctrl | PCI_EXP_LNKCTL_LD);

	rc = pciehp_writew(ctrl, PCI_EXP_LNKCTL, lnk_ctrl);
	if (rc) {
		ctrl_err(ctrl, "Cannot write LNKCTRL register\n");
		return rc;
	}
	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
	return rc;
}
/* Clear Link Disable, bringing the link up. */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
/* Set Link Disable, taking the link down. */
static int pciehp_link_disable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, false);
}
/*
 * Read the Attention Indicator Control field (Slot Control bits 7:6)
 * and translate it to the hotplug core convention:
 * 1 = on, 2 = blink, 0 = off, 0xFF = reserved/unknown.
 * Returns 0, or the register read error code.
 */
int pciehp_get_attention_status(struct slot *slot, u8 *status)
{
	/* Index = raw 2-bit AIC field value. */
	static const u8 aic_to_status[4] = { 0xFF, 1, 2, 0 };
	struct controller *ctrl = slot->ctrl;
	u16 slot_ctrl;
	u8 field;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
	if (rc) {
		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
		return rc;
	}
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	field = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
	*status = (field < 4) ? aic_to_status[field] : 0xFF;
	return 0;
}
/*
 * Read the Power Controller Control bit (Slot Control bit 10) and
 * report slot power: *status = 1 when powered (PCC == 0), 0 when off.
 * Returns 0, or the register read error code.
 */
int pciehp_get_power_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	u16 slot_ctrl;
	u8 pcc;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
	if (rc) {
		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
		return rc;
	}
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	pcc = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
	if (pcc == 0)
		*status = 1;
	else if (pcc == 1)
		*status = 0;
	else
		*status = 0xFF;	/* cannot happen for a 1-bit field; kept for safety */
	return rc;
}
/*
 * Report the MRL Sensor State bit from Slot Status as a 0/1 value.
 * Returns 0, or the register read error code.
 */
int pciehp_get_latch_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	u16 sltsta;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &sltsta);
	if (rc) {
		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
			 __func__);
		return rc;
	}

	*status = !!(sltsta & PCI_EXP_SLTSTA_MRLSS);
	return 0;
}
/*
 * Report the Presence Detect State bit from Slot Status as a 0/1 value.
 * Returns 0, or the register read error code.
 */
int pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	u16 sltsta;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &sltsta);
	if (rc) {
		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
			 __func__);
		return rc;
	}

	*status = !!(sltsta & PCI_EXP_SLTSTA_PDS);
	return 0;
}
/*
 * Return 1 if the Power Fault Detected bit is set in Slot Status,
 * 0 if not, or the negative/positive error code from the register read.
 */
int pciehp_query_power_fault(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 sltsta;
	int rc;

	rc = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &sltsta);
	if (rc) {
		ctrl_err(ctrl, "Cannot check for power fault\n");
		return rc;
	}
	return !!(sltsta & PCI_EXP_SLTSTA_PFD);
}
/*
 * Program the Attention Indicator Control field (Slot Control bits 7:6).
 * Encoding of the values written below: 0x00C0 = 3<<6 (off),
 * 0x0040 = 1<<6 (on), 0x0080 = 2<<6 (blink). Any other requested
 * @value yields -EINVAL; otherwise returns pcie_write_cmd()'s result.
 */
int pciehp_set_attention_status(struct slot *slot, u8 value)
{
	struct controller *ctrl = slot->ctrl;
	u16 slot_cmd;
	u16 cmd_mask;

	cmd_mask = PCI_EXP_SLTCTL_AIC;
	switch (value) {
	case 0 :	/* turn off */
		slot_cmd = 0x00C0;
		break;
	case 1:		/* turn on */
		slot_cmd = 0x0040;
		break;
	case 2:		/* turn blink */
		slot_cmd = 0x0080;
		break;
	default:
		return -EINVAL;
	}
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
	return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
}
/* Set the Power Indicator Control field to "on" (0x0100). */
void pciehp_green_led_on(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 cmd = 0x0100;

	pcie_write_cmd(ctrl, cmd, PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
/* Set the Power Indicator Control field to "off" (0x0300). */
void pciehp_green_led_off(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 cmd = 0x0300;

	pcie_write_cmd(ctrl, cmd, PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
/* Set the Power Indicator Control field to "blink" (0x0200). */
void pciehp_green_led_blink(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 cmd = 0x0200;

	pcie_write_cmd(ctrl, cmd, PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
/*
 * Power the slot on: clear any stale sticky power-fault status, issue
 * the POWER_ON command through the Power Controller Control bit, then
 * re-enable the link. Returns 0 or the first error encountered.
 */
int pciehp_power_on_slot(struct slot * slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 slot_cmd;
	u16 cmd_mask;
	u16 slot_status;
	int retval = 0;

	/* Clear sticky power-fault bit from previous power failures */
	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
	if (retval) {
		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
			 __func__);
		return retval;
	}
	slot_status &= PCI_EXP_SLTSTA_PFD;
	if (slot_status) {
		retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
		if (retval) {
			ctrl_err(ctrl,
				 "%s: Cannot write to SLOTSTATUS register\n",
				 __func__);
			return retval;
		}
	}
	/* Reset the software-side fault latch used by pcie_isr(). */
	ctrl->power_fault_detected = 0;

	slot_cmd = POWER_ON;
	cmd_mask = PCI_EXP_SLTCTL_PCC;
	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
	if (retval) {
		ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
		return retval;
	}
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);

	retval = pciehp_link_enable(ctrl);
	if (retval)
		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
	return retval;
}
/*
 * Power the slot off: first take the link down and wait for it to
 * actually drop (or 1 s when the controller cannot report link state),
 * then issue the POWER_OFF command. Returns 0 or the command error.
 */
int pciehp_power_off_slot(struct slot * slot)
{
	struct controller *ctrl = slot->ctrl;
	u16 slot_cmd;
	u16 cmd_mask;
	int retval;

	/* Disable the link at first */
	pciehp_link_disable(ctrl);
	/* wait the link is down */
	if (ctrl->link_active_reporting)
		pcie_wait_link_not_active(ctrl);
	else
		msleep(1000);

	slot_cmd = POWER_OFF;
	cmd_mask = PCI_EXP_SLTCTL_PCC;
	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
	if (retval) {
		ctrl_err(ctrl, "Write command failed!\n");
		return retval;
	}
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
	return 0;
}
/*
 * Hotplug interrupt handler. Collects all pending slot events (loop
 * below), acknowledges them in Slot Status, then dispatches each event
 * to the corresponding pciehp_handle_* routine.
 */
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct slot *slot = ctrl->slot;
	u16 detected, intr_loc;

	/*
	 * In order to guarantee that all interrupt events are
	 * serviced, we need to re-inspect Slot Status register after
	 * clearing what is presumed to be the last pending interrupt.
	 */
	intr_loc = 0;
	do {
		if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) {
			ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
				 __func__);
			return IRQ_NONE;
		}
		/* Keep only the event bits of interest that were not
		 * already collected in a previous pass. */
		detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
			     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
			     PCI_EXP_SLTSTA_CC);
		detected &= ~intr_loc;
		intr_loc |= detected;
		if (!intr_loc)
			return IRQ_NONE;
		if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
			ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
				 __func__);
			return IRQ_NONE;
		}
	} while (detected);

	ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);

	/* Check Command Complete Interrupt Pending */
	if (intr_loc & PCI_EXP_SLTSTA_CC) {
		/* Barrier pairs with the smp_mb() in pcie_write_cmd(). */
		ctrl->cmd_busy = 0;
		smp_mb();
		wake_up(&ctrl->queue);
	}

	/* Done if command-completed was the only pending event. */
	if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
		return IRQ_HANDLED;

	/* Check MRL Sensor Changed */
	if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
		pciehp_handle_switch_change(slot);
	/* Check Attention Button Pressed */
	if (intr_loc & PCI_EXP_SLTSTA_ABP)
		pciehp_handle_attention_button(slot);
	/* Check Presence Detect Changed */
	if (intr_loc & PCI_EXP_SLTSTA_PDC)
		pciehp_handle_presence_change(slot);
	/* Check Power Fault Detected */
	if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		/* Latch it so repeated faults are reported only once
		 * until pciehp_power_on_slot() resets the flag. */
		ctrl->power_fault_detected = 1;
		pciehp_handle_power_fault(slot);
	}
	return IRQ_HANDLED;
}
/*
 * Decode the Maximum Link Width field from the Link Capabilities
 * register into an enum pcie_link_width. Returns 0, or the register
 * read error code (in which case *value is untouched).
 */
int pciehp_get_max_lnk_width(struct slot *slot,
			     enum pcie_link_width *value)
{
	struct controller *ctrl = slot->ctrl;
	enum pcie_link_width lnk_wdth;
	u32 lnk_cap;
	int retval = 0;

	retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
	if (retval) {
		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
		return retval;
	}

	/* The NLW field occupies the same bits in LNKCAP as in LNKSTA,
	 * so the LNKSTA mask is reused here on the capability value. */
	switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){
	case 0:
		lnk_wdth = PCIE_LNK_WIDTH_RESRV;
		break;
	case 1:
		lnk_wdth = PCIE_LNK_X1;
		break;
	case 2:
		lnk_wdth = PCIE_LNK_X2;
		break;
	case 4:
		lnk_wdth = PCIE_LNK_X4;
		break;
	case 8:
		lnk_wdth = PCIE_LNK_X8;
		break;
	case 12:
		lnk_wdth = PCIE_LNK_X12;
		break;
	case 16:
		lnk_wdth = PCIE_LNK_X16;
		break;
	case 32:
		lnk_wdth = PCIE_LNK_X32;
		break;
	default:
		lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
		break;
	}

	*value = lnk_wdth;
	ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
	return retval;
}
/*
 * Decode the Negotiated Link Width field from the Link Status register
 * into an enum pcie_link_width. Returns 0, or the register read error
 * code (in which case *value is untouched).
 */
int pciehp_get_cur_lnk_width(struct slot *slot,
			     enum pcie_link_width *value)
{
	struct controller *ctrl = slot->ctrl;
	enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
	int retval = 0;
	u16 lnk_status;

	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
	if (retval) {
		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
			 __func__);
		return retval;
	}

	switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){
	case 0:
		lnk_wdth = PCIE_LNK_WIDTH_RESRV;
		break;
	case 1:
		lnk_wdth = PCIE_LNK_X1;
		break;
	case 2:
		lnk_wdth = PCIE_LNK_X2;
		break;
	case 4:
		lnk_wdth = PCIE_LNK_X4;
		break;
	case 8:
		lnk_wdth = PCIE_LNK_X8;
		break;
	case 12:
		lnk_wdth = PCIE_LNK_X12;
		break;
	case 16:
		lnk_wdth = PCIE_LNK_X16;
		break;
	case 32:
		lnk_wdth = PCIE_LNK_X32;
		break;
	default:
		lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
		break;
	}

	*value = lnk_wdth;
	ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
	return retval;
}
/*
 * Enable the slot event notifications this driver handles (presence
 * detect, plus attention button / MRL sensor when present, plus the
 * hotplug and command-completed interrupts in interrupt mode).
 * Returns 0 on success, -1 on failure.
 */
int pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */
	cmd = PCI_EXP_SLTCTL_PDCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	if (MRL_SENS(ctrl))
		cmd |= PCI_EXP_SLTCTL_MRLSCE;
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	/* PFDE is in the mask (so it is forced off) but never in cmd. */
	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);

	if (pcie_write_cmd(ctrl, cmd, mask)) {
		ctrl_err(ctrl, "Cannot enable software notification\n");
		return -1;
	}
	return 0;
}
/* Clear every event-enable bit in Slot Control in a single command. */
static void pcie_disable_notification(struct controller *ctrl)
{
	u16 mask = PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		   PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
		   PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		   PCI_EXP_SLTCTL_DLLSCE;

	if (pcie_write_cmd(ctrl, 0, mask))
		ctrl_warn(ctrl, "Cannot disable software notification\n");
}
/*
 * Bring up event delivery: install the IRQ (or poll timer) and enable
 * slot event notifications. Returns 0 on success, -1 on failure with
 * the IRQ released again.
 */
int pcie_init_notification(struct controller *ctrl)
{
	int rc = pciehp_request_irq(ctrl);

	if (rc)
		return -1;

	rc = pcie_enable_notification(ctrl);
	if (rc) {
		pciehp_free_irq(ctrl);
		return -1;
	}

	ctrl->notification_enabled = 1;
	return 0;
}
/* Tear down event delivery; safe to call when it was never brought up. */
static void pcie_shutdown_notification(struct controller *ctrl)
{
	if (!ctrl->notification_enabled)
		return;

	pcie_disable_notification(ctrl);
	pciehp_free_irq(ctrl);
	ctrl->notification_enabled = 0;
}
static int pcie_init_slot(struct controller *ctrl)
{
struct slot *slot;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
slot->ctrl = ctrl;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
}
/*
 * Free the slot structure, first cancelling its pushbutton work and
 * draining the pciehp workqueue so nothing references it afterwards.
 */
static void pcie_cleanup_slot(struct controller *ctrl)
{
	struct slot *slot = ctrl->slot;
	cancel_delayed_work(&slot->work);
	flush_workqueue(pciehp_wq);
	kfree(slot);
}
/*
 * Dump controller identification, resources and slot capabilities when
 * pciehp_debug is enabled. No effect otherwise.
 *
 * Fix: the address-of operator in the two pciehp_readw() calls had been
 * corrupted into the "registered trademark" character ("®16"); restored
 * to "&reg16" so the code compiles.
 */
static inline void dbg_ctrl(struct controller *ctrl)
{
	int i;
	u16 reg16;
	struct pci_dev *pdev = ctrl->pcie->port;

	if (!pciehp_debug)
		return;

	ctrl_info(ctrl, "Hotplug Controller:\n");
	ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
		  pci_name(pdev), pdev->irq);
	ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
	ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
	ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
		  pdev->subsystem_device);
	ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
		  pdev->subsystem_vendor);
	ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
		  pci_pcie_cap(pdev));
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!pci_resource_len(pdev, i))
			continue;
		ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
			  i, &pdev->resource[i]);
	}
	ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
	ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
	ctrl_info(ctrl, " Attention Button : %3s\n",
		  ATTN_BUTTN(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Power Controller : %3s\n",
		  POWER_CTRL(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " MRL Sensor : %3s\n",
		  MRL_SENS(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Attention Indicator : %3s\n",
		  ATTN_LED(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Power Indicator : %3s\n",
		  PWR_LED(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
		  HP_SUPR_RM(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " EMI Present : %3s\n",
		  EMI(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Command Completed : %3s\n",
		  NO_CMD_CMPL(ctrl) ? "no" : "yes");
	pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16);
	ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
	pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16);
	ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}
/*
 * Allocate and initialize the hotplug controller for a PCIe port:
 * read the slot capabilities, detect controller quirks, clear stale
 * events, disable notifications and create the slot structure.
 * Returns the controller or NULL on any failure.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		dev_err(&dev->device, "%s: Out of memory\n", __func__);
		goto abort;
	}
	ctrl->pcie = dev;
	if (!pci_pcie_cap(pdev)) {
		ctrl_err(ctrl, "Cannot find PCI Express capability\n");
		goto abort_ctrl;
	}
	if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
		ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
		goto abort_ctrl;
	}

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);
	/*
	 * Controller doesn't notify of command completion if the "No
	 * Command Completed Support" bit is set in Slot Capability
	 * register or the controller supports none of power
	 * controller, attention led, power led and EMI.
	 */
	if (NO_CMD_CMPL(ctrl) ||
	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
		ctrl->no_cmd_complete = 1;

	/* Check if Data Link Layer Link Active Reporting is implemented */
	if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
		goto abort_ctrl;
	}
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
		ctrl_dbg(ctrl, "Link Active Reporting supported\n");
		ctrl->link_active_reporting = 1;
	}

	/* Clear all remaining event bits in Slot Status register */
	if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
		goto abort_ctrl;

	/* Disable software notification */
	pcie_disable_notification(ctrl);

	ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}
/* Reverse of pcie_init(): stop notifications, free the slot, free ctrl. */
void pciehp_release_ctrl(struct controller *ctrl)
{
	pcie_shutdown_notification(ctrl);
	pcie_cleanup_slot(ctrl);
	kfree(ctrl);
}
| gpl-2.0 |
somcom3x/kernel_u8800pro | drivers/staging/sbe-2t3e3/cpld.c | 8200 | 8806 | /*
* SBE 2T3E3 synchronous serial card driver for Linux
*
* Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This code is based on a driver written by SBE Inc.
*/
#include <linux/delay.h>
#include "2t3e3.h"
#include "ctrl.h"
/*
 * Read-modify-write helpers for bootrom-mapped registers: OR in or mask
 * out @bit. They do no locking themselves; the cpld_set_bit() /
 * cpld_clear_bit() wrappers below take the card's bootrom_lock.
 */
#define bootrom_set_bit(sc, reg, bit) \
	bootrom_write((sc), (reg), \
		      bootrom_read((sc), (reg)) | (bit))

#define bootrom_clear_bit(sc, reg, bit) \
	bootrom_write((sc), (reg), \
		      bootrom_read((sc), (reg)) & ~(bit))
/* Set @bit in the channel's CPLD register @reg under bootrom_lock. */
static inline void cpld_set_bit(struct channel *channel, unsigned reg, u32 bit)
{
	unsigned long flags;
	spin_lock_irqsave(&channel->card->bootrom_lock, flags);
	bootrom_set_bit(channel, CPLD_MAP_REG(reg, channel), bit);
	spin_unlock_irqrestore(&channel->card->bootrom_lock, flags);
}
/* Clear @bit in the channel's CPLD register @reg under bootrom_lock. */
static inline void cpld_clear_bit(struct channel *channel, unsigned reg, u32 bit)
{
	unsigned long flags;
	spin_lock_irqsave(&channel->card->bootrom_lock, flags);
	bootrom_clear_bit(channel, CPLD_MAP_REG(reg, channel), bit);
	spin_unlock_irqrestore(&channel->card->bootrom_lock, flags);
}
/*
 * Program the CPLD registers of one channel to their power-on defaults
 * (CRC-32, loop-timing clock source, counters and status cleared) and
 * enable the interrupt sources the driver handles.
 */
void cpld_init(struct channel *sc)
{
	u32 val;

#if 0
	/* reset LIU and Framer */
	val = cpld_val_map[SBE_2T3E3_CPLD_VAL_LIU_FRAMER_RESET][sc->h.slot];
	cpld_write(sc, SBE_2T3E3_CPLD_REG_STATIC_RESET, val);
	udelay(10000); /* TODO - how long? */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_STATIC_RESET, val);
#endif

	/* PCRA */
	val = SBE_2T3E3_CPLD_VAL_CRC32 |
		cpld_val_map[SBE_2T3E3_CPLD_VAL_LOOP_TIMING_SOURCE][sc->h.slot];
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRA, val);

	/* PCRB */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRB, val);

	/* PCRC */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, val);

	/* PBWF */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, val);

	/* PBWL */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, val);

	/* PLTR */
	val = SBE_2T3E3_CPLD_VAL_LCV_COUNTER;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PLTR, val);
	udelay(1000);

	/* PLCR */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PLCR, val);
	udelay(1000);

	/* PPFR */
	val = 0x55;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PPFR, val);
	/* TODO: this doesn't work!!! */

	/* SERIAL_CHIP_SELECT */
	val = 0;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_SERIAL_CHIP_SELECT, val);

	/* PICSR: write the sticky status bits back to clear them. */
	val = SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED |
		SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED |
		SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR, val);

	cpld_start_intr(sc);

	udelay(1000);
}
/* Unmask only the Ethernet and Framer interrupt sources in PIER. */
void cpld_start_intr(struct channel *sc)
{
	u32 val;

	/* PIER */
	val = SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_ETHERNET_ENABLE |
		SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_FRAMER_ENABLE;
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, val);

#if 0
	/*
	  do you want to hang up your computer?
	  ENABLE REST OF INTERRUPTS !!!
	  you have been warned :).
	*/
#endif
}
/* Mask all CPLD interrupt sources by clearing PIER. */
void cpld_stop_intr(struct channel *sc)
{
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, 0);
}
/*
 * Switch the channel between HDLC, transparent and raw framing.
 * Updates the PCRA mode bits and toggles unipolar coding in both the
 * EXAR framer and LIU accordingly. Caches the mode in sc->p.frame_mode;
 * repeated or unknown modes are ignored.
 */
void cpld_set_frame_mode(struct channel *sc, u32 mode)
{
	if (sc->p.frame_mode == mode)
		return;

	switch (mode) {
	case SBE_2T3E3_FRAME_MODE_HDLC:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE |
			       SBE_2T3E3_CPLD_VAL_RAW_MODE);
		exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF);
		exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF);
		break;
	case SBE_2T3E3_FRAME_MODE_TRANSPARENT:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_RAW_MODE);
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE);
		exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF);
		exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF);
		break;
	case SBE_2T3E3_FRAME_MODE_RAW:
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_RAW_MODE);
		exar7250_unipolar_onoff(sc, SBE_2T3E3_ON);
		exar7300_unipolar_onoff(sc, SBE_2T3E3_ON);
		break;
	default:
		return;
	}

	sc->p.frame_mode = mode;
}
/* set rate of the local clock */
/*
 * Select the local-clock rate for the frame type: set the E3 clock bit
 * in PCRA for E3 framings, clear it for T3. Unknown types are ignored.
 * Unlike the other setters, the value is not cached.
 */
void cpld_set_frame_type(struct channel *sc, u32 type)
{
	switch (type) {
	case SBE_2T3E3_FRAME_TYPE_E3_G751:
	case SBE_2T3E3_FRAME_TYPE_E3_G832:
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3);
		break;
	case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
	case SBE_2T3E3_FRAME_TYPE_T3_M13:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3);
		break;
	default:
		return;
	}
}
/*
 * Configure the payload scrambler in PCRB: off, Larscom-compatible, or
 * ADC Kentrox Digital-compatible. Caches the mode in sc->p.scrambler;
 * repeated or unknown modes are ignored.
 */
void cpld_set_scrambler(struct channel *sc, u32 mode)
{
	if (sc->p.scrambler == mode)
		return;

	switch (mode) {
	case SBE_2T3E3_SCRAMBLER_OFF:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
			       SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE);
		break;
	case SBE_2T3E3_SCRAMBLER_LARSCOM:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
			       SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE);
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
			     SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE);
		break;
	case SBE_2T3E3_SCRAMBLER_ADC_KENTROX_DIGITAL:
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
			     SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE);
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
			     SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE);
		break;
	default:
		return;
	}

	sc->p.scrambler = mode;
}
/*
 * Select CRC-16 or CRC-32 via the CRC32 bit in PCRA. The value is
 * cached in sc->p.crc; repeated or unknown values leave hardware and
 * state untouched.
 */
void cpld_set_crc(struct channel *sc, u32 crc)
{
	if (sc->p.crc == crc)
		return;

	if (crc == SBE_2T3E3_CRC_16)
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_CRC32);
	else if (crc == SBE_2T3E3_CRC_32)
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_CRC32);
	else
		return;

	sc->p.crc = crc;
}
/*
 * Route the channel to the front or rear panel connector via the
 * REAR_PANEL bit in PCRA, then let the switch settle for 100 us.
 * Caches the selection in sc->p.panel; repeated or unknown values are
 * ignored.
 */
void cpld_select_panel(struct channel *sc, u32 panel)
{
	if (sc->p.panel == panel)
		return;
	switch (panel) {
	case SBE_2T3E3_PANEL_FRONT:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_REAR_PANEL);
		break;
	case SBE_2T3E3_PANEL_REAR:
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_REAR_PANEL);
		break;
	default:
		return;
	}

	udelay(100);

	sc->p.panel = panel;
}
/*
 * Select the transmit clock source (local oscillator vs. loop timing)
 * via the ALT bit in PCRA. Caches the mode in sc->p.clock_source;
 * repeated or unknown modes are ignored.
 *
 * Fix: the definition was spuriously marked "extern", which is
 * meaningless on a function definition (and checkpatch-flagged);
 * dropped. External linkage is unchanged.
 */
void cpld_set_clock(struct channel *sc, u32 mode)
{
	if (sc->p.clock_source == mode)
		return;

	switch (mode) {
	case SBE_2T3E3_TIMING_LOCAL:
		cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			     SBE_2T3E3_CPLD_VAL_ALT);
		break;
	case SBE_2T3E3_TIMING_LOOP:
		cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA,
			       SBE_2T3E3_CPLD_VAL_ALT);
		break;
	default:
		return;
	}

	sc->p.clock_source = mode;
}
/*
 * Program the pad-byte count (1-4) into PCRB: first clear the whole
 * PAD_COUNT field, then set the bits for the requested count. Caches
 * the value in sc->p.pad_count; repeated or unknown counts are ignored.
 */
void cpld_set_pad_count(struct channel *sc, u32 count)
{
	u32 val;

	if (sc->p.pad_count == count)
		return;

	switch (count) {
	case SBE_2T3E3_PAD_COUNT_1:
		val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_1;
		break;
	case SBE_2T3E3_PAD_COUNT_2:
		val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_2;
		break;
	case SBE_2T3E3_PAD_COUNT_3:
		val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_3;
		break;
	case SBE_2T3E3_PAD_COUNT_4:
		val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_4;
		break;
	default:
		return;
	}

	cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB,
		       SBE_2T3E3_CPLD_VAL_PAD_COUNT);
	cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, val);
	sc->p.pad_count = count;
}
void cpld_LOS_update(struct channel *sc)
{
u_int8_t los;
cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR,
SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED |
SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED |
SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED);
los = cpld_read(sc, SBE_2T3E3_CPLD_REG_PICSR) &
SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED;
if (los != sc->s.LOS)
dev_info(&sc->pdev->dev, "SBE 2T3E3: LOS status: %s\n",
los ? "Loss of signal" : "Signal OK");
sc->s.LOS = los;
}
/*
 * Configure fractional (partial-bandwidth) operation: write the mode
 * into PCRC and the bandwidth window into PBWF (first) / PBWL (last).
 * Mode NONE forces the window to 0/0. All three values are cached in
 * sc->p; a repeat of the current configuration is a no-op and an
 * unknown mode only logs an error.
 */
void cpld_set_fractional_mode(struct channel *sc, u32 mode,
			      u32 start, u32 stop)
{
	if (mode == SBE_2T3E3_FRACTIONAL_MODE_NONE) {
		start = 0;
		stop = 0;
	}

	if (sc->p.fractional_mode == mode && sc->p.bandwidth_start == start &&
	    sc->p.bandwidth_stop == stop)
		return;

	switch (mode) {
	case SBE_2T3E3_FRACTIONAL_MODE_NONE:
		cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC,
			   SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_NONE);
		break;
	case SBE_2T3E3_FRACTIONAL_MODE_0:
		cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC,
			   SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_0);
		break;
	case SBE_2T3E3_FRACTIONAL_MODE_1:
		cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC,
			   SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_1);
		break;
	case SBE_2T3E3_FRACTIONAL_MODE_2:
		cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC,
			   SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_2);
		break;
	default:
		printk(KERN_ERR "wrong mode in set_fractional_mode\n");
		return;
	}

	cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, start);
	cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, stop);

	sc->p.fractional_mode = mode;
	sc->p.bandwidth_start = start;
	sc->p.bandwidth_stop = stop;
}
| gpl-2.0 |
nicon8/Sched_deadline_rt | arch/sh/boards/mach-hp6xx/setup.c | 8456 | 4003 | /*
* linux/arch/sh/boards/hp6xx/setup.c
*
* Copyright (C) 2002 Andriy Skulysh
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer_e1@hotmail.com>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Setup code for HP620/HP660/HP680/HP690 (internal peripherials only)
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <sound/sh_dac_audio.h>
#include <asm/hd64461.h>
#include <asm/io.h>
#include <mach/hp6xx.h>
#include <cpu/dac.h>
#define SCPCR 0xa4000116
#define SCPDR 0xa4000136
/* CF Slot */
/* CF Slot: memory-mapped IDE task-file and control registers plus IRQ. */
static struct resource cf_ide_resources[] = {
	[0] = {
		/* command block registers (0x1f0..0x1f7 in slot window) */
		.start = 0x15000000 + 0x1f0,
		.end   = 0x15000000 + 0x1f0 + 0x08 - 0x01,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		/* control/altstatus register */
		.start = 0x15000000 + 0x1fe,
		.end   = 0x15000000 + 0x1fe + 0x01,
		.flags = IORESOURCE_MEM,
	},
	[2] = {
		.start = 77,	/* CF interrupt line */
		.flags = IORESOURCE_IRQ,
	},
};
/* Generic platform PATA device backed by the CF slot resources above. */
static struct platform_device cf_ide_device = {
	.name		= "pata_platform",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(cf_ide_resources),
	.resource	= cf_ide_resources,
};
/* Jornada 680 keyboard controller (driver matched by name). */
static struct platform_device jornadakbd_device = {
	.name		= "jornada680_kbd",
	.id		= -1,
};
/*
 * dac_audio "start" callback: switch on the speaker on both supported
 * hardware variants, then enable the DAC channel.
 */
static void dac_audio_start(struct dac_audio_pdata *pdata)
{
	u16 v;
	u8 v8;

	/* HP Jornada 680/690 speaker on (active-low GPIO in HD64461) */
	v = inw(HD64461_GPADR);
	v &= ~HD64461_GPADR_SPEAKER;
	outw(v, HD64461_GPADR);

	/* HP Palmtop 620lx/660lx speaker on (active-low port K bit) */
	v8 = inb(PKDR);
	v8 &= ~PKDR_SPEAKER;
	outb(v8, PKDR);

	sh_dac_enable(pdata->channel);
}
/*
 * dac_audio "stop" callback: switch off the speaker on both supported
 * hardware variants, zero the DAC output and disable the channel.
 */
static void dac_audio_stop(struct dac_audio_pdata *pdata)
{
	u16 v;
	u8 v8;

	/* HP Jornada 680/690 speaker off */
	v = inw(HD64461_GPADR);
	v |= HD64461_GPADR_SPEAKER;
	outw(v, HD64461_GPADR);

	/* HP Palmtop 620lx/660lx speaker off */
	v8 = inb(PKDR);
	v8 |= PKDR_SPEAKER;
	outb(v8, PKDR);

	sh_dac_output(0, pdata->channel);
	sh_dac_disable(pdata->channel);
}
/* Platform data for the DAC audio driver: 64 kB buffer on channel 1. */
static struct dac_audio_pdata dac_audio_platform_data = {
	.buffer_size	= 64000,
	.channel	= 1,
	.start		= dac_audio_start,
	.stop		= dac_audio_stop,
};
/* DAC audio platform device carrying the pdata above. */
static struct platform_device dac_audio_device = {
	.name	= "dac_audio",
	.id	= -1,
	.dev	= {
		.platform_data = &dac_audio_platform_data,
	}
};
/* All on-board platform devices registered at init time. */
static struct platform_device *hp6xx_devices[] __initdata = {
	&cf_ide_device,
	&jornadakbd_device,
	&dac_audio_device,
};
/* Board IRQ setup hook for the machine vector below. */
static void __init hp6xx_init_irq(void)
{
	/* Gets touchscreen and powerbutton IRQ working */
	plat_irq_setup_pins(IRQ_MODE_IRQ);
}
/* Register the board's platform devices (run via device_initcall). */
static int __init hp6xx_devices_setup(void)
{
	return platform_add_devices(hp6xx_devices, ARRAY_SIZE(hp6xx_devices));
}
/*
 * Early board setup: put unused HD64461 units into standby, configure
 * speaker/PCMCIA GPIOs and card power, silence the DAC and prepare the
 * touchscreen scan pins.
 */
static void __init hp6xx_setup(char **cmdline_p)
{
	u8 v8;
	u16 v;

	/* Gate clocks of unused HD64461 sub-blocks via the standby register. */
	v = inw(HD64461_STBCR);
	v |= HD64461_STBCR_SURTST | HD64461_STBCR_SIRST |
	     HD64461_STBCR_STM1ST | HD64461_STBCR_STM0ST |
	     HD64461_STBCR_SAFEST | HD64461_STBCR_SPC0ST |
	     HD64461_STBCR_SMIAST | HD64461_STBCR_SAFECKE_OST|
	     HD64461_STBCR_SAFECKE_IST;
#ifndef CONFIG_HD64461_ENABLER
	v |= HD64461_STBCR_SPC1ST;
#endif
	outw(v, HD64461_STBCR);

	/* Speaker off, PCMCIA slot 0 GPIO high. */
	v = inw(HD64461_GPADR);
	v |= HD64461_GPADR_SPEAKER | HD64461_GPADR_PCMCIA0;
	outw(v, HD64461_GPADR);

	/* Apply card power to the PCMCIA slot(s). */
	outw(HD64461_PCCGCR_VCC0 | HD64461_PCCSCR_VCC1, HD64461_PCC0GCR);
#ifndef CONFIG_HD64461_ENABLER
	outw(HD64461_PCCGCR_VCC0 | HD64461_PCCSCR_VCC1, HD64461_PCC1GCR);
#endif

	/* Mute and disable the speaker DAC channel. */
	sh_dac_output(0, DAC_SPEAKER_VOLUME);
	sh_dac_disable(DAC_SPEAKER_VOLUME);
	v8 = __raw_readb(DACR);
	v8 &= ~DACR_DAE;
	__raw_writeb(v8,DACR);

	/* Park the touchscreen scan lines, scanning disabled for now. */
	v8 = __raw_readb(SCPDR);
	v8 |= SCPDR_TS_SCAN_X | SCPDR_TS_SCAN_Y;
	v8 &= ~SCPDR_TS_SCAN_ENABLE;
	__raw_writeb(v8, SCPDR);

	/* Configure the port control register for touchscreen use. */
	v = __raw_readw(SCPCR);
	v &= ~SCPCR_TS_MASK;
	v |= SCPCR_TS_ENABLE;
	__raw_writew(v, SCPCR);
}
device_initcall(hp6xx_devices_setup);
/* Machine vector describing the HP 6xx family to the SH boot code. */
static struct sh_machine_vector mv_hp6xx __initmv = {
	.mv_name = "hp6xx",
	.mv_setup = hp6xx_setup,
	/* IRQ's : CPU(64) + CCHIP(16) + FREE_TO_USE(6) */
	.mv_nr_irqs = HD64461_IRQBASE + HD64461_IRQ_NUM + 6,
	/* Enable IRQ0 -> IRQ3 in IRQ_MODE */
	.mv_init_irq = hp6xx_init_irq,
};
| gpl-2.0 |
tuxx42/linux-3.1PNX8550 | drivers/video/sis/sis_accel.c | 14856 | 12136 | /*
* SiS 300/540/630[S]/730[S],
* SiS 315[E|PRO]/550/[M]650/651/[M]661[F|M]X/740/[M]741[GX]/330/[M]760[GX],
* XGI V3XT/V5/V8, Z7
* frame buffer driver for Linux kernels >= 2.4.14 and >=2.6.3
*
* 2D acceleration part
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the named License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*
* Based on the XFree86/X.org driver which is
* Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria
*
* Author: Thomas Winischhofer <thomas@winischhofer.net>
* (see http://www.winischhofer.net/
* for more information and updates)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <asm/io.h>
#include "sis.h"
#include "sis_accel.h"
/* Conversion table from X11-style raster operation codes (GXclear..GXset,
 * indices 0x0-0xF) to the SiS engine's ROP byte when the blit source is
 * screen/video memory.
 */
static const u8 sisALUConv[] =
{
0x00, /* dest = 0; 0, GXclear, 0 */
0x88, /* dest &= src; DSa, GXand, 0x1 */
0x44, /* dest = src & ~dest; SDna, GXandReverse, 0x2 */
0xCC, /* dest = src; S, GXcopy, 0x3 */
0x22, /* dest &= ~src; DSna, GXandInverted, 0x4 */
0xAA, /* dest = dest; D, GXnoop, 0x5 */
0x66, /* dest = ^src; DSx, GXxor, 0x6 */
0xEE, /* dest |= src; DSo, GXor, 0x7 */
0x11, /* dest = ~src & ~dest; DSon, GXnor, 0x8 */
0x99, /* dest ^= ~src ; DSxn, GXequiv, 0x9 */
0x55, /* dest = ~dest; Dn, GXInvert, 0xA */
0xDD, /* dest = src|~dest ; SDno, GXorReverse, 0xB */
0x33, /* dest = ~src; Sn, GXcopyInverted, 0xC */
0xBB, /* dest |= ~src; DSno, GXorInverted, 0xD */
0x77, /* dest = ~src|~dest; DSan, GXnand, 0xE */
0xFF, /* dest = 0xFF; 1, GXset, 0xF */
};
/* same ROP but with Pattern as Source */
/* Used by the solid-fill path, where the "source" is the engine's
 * pattern/foreground color register rather than video memory.
 */
static const u8 sisPatALUConv[] =
{
0x00, /* dest = 0; 0, GXclear, 0 */
0xA0, /* dest &= src; DPa, GXand, 0x1 */
0x50, /* dest = src & ~dest; PDna, GXandReverse, 0x2 */
0xF0, /* dest = src; P, GXcopy, 0x3 */
0x0A, /* dest &= ~src; DPna, GXandInverted, 0x4 */
0xAA, /* dest = dest; D, GXnoop, 0x5 */
0x5A, /* dest = ^src; DPx, GXxor, 0x6 */
0xFA, /* dest |= src; DPo, GXor, 0x7 */
0x05, /* dest = ~src & ~dest; DPon, GXnor, 0x8 */
0xA5, /* dest ^= ~src ; DPxn, GXequiv, 0x9 */
0x55, /* dest = ~dest; Dn, GXInvert, 0xA */
0xF5, /* dest = src|~dest ; PDno, GXorReverse, 0xB */
0x0F, /* dest = ~src; Pn, GXcopyInverted, 0xC */
0xAF, /* dest |= ~src; DPno, GXorInverted, 0xD */
0x5F, /* dest = ~src|~dest; DPan, GXnand, 0xE */
0xFF, /* dest = 0xFF; 1, GXset, 0xF */
};
/* Map fbcon raster-op codes (index: ROP_COPY = 0, ROP_XOR = 1, rest
 * unused) to indices into the sis*ALUConv tables above. ROP_COPY -> 3
 * (GXcopy); ROP_XOR -> 10, which is GXInvert (dest = ~dest) in those
 * tables — presumably intentional for fbcon's XOR usage, but worth
 * confirming against the fbcon ROP semantics.
 */
static const int myrops[] = {
3, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
};
/* 300 series ----------------------------------------------------- */
#ifdef CONFIG_FB_SIS_300
/* Wait for the 300-series 2D engine to become idle. SiS300Idle is a
 * macro from sis_accel.h that expands to a complete statement (hence no
 * trailing semicolon here).
 */
static void
SiS300Sync(struct sis_video_info *ivideo)
{
SiS300Idle
}
/* Program the 300-series engine for a screen-to-screen blit.
 * @xdir/@ydir: >0 means blit in increasing x/y direction.
 * @rop:        X11-style raster op (index into sisALUConv).
 * @trans_color: transparency key, or -1 for an opaque blit.
 * The SiS300Setup* names are register-write macros that expand to full
 * statements; their order mirrors the hardware programming sequence.
 */
static void
SiS300SetupForScreenToScreenCopy(struct sis_video_info *ivideo, int xdir, int ydir,
int rop, int trans_color)
{
SiS300SetupDSTColorDepth(ivideo->DstColor);
SiS300SetupSRCPitch(ivideo->video_linelength)
SiS300SetupDSTRect(ivideo->video_linelength, 0xffff)
if(trans_color != -1) {
/* Transparent blit uses a fixed ROP plus the color key */
SiS300SetupROP(0x0A)
SiS300SetupSRCTrans(trans_color)
SiS300SetupCMDFlag(TRANSPARENT_BITBLT)
} else {
SiS300SetupROP(sisALUConv[rop])
}
if(xdir > 0) {
SiS300SetupCMDFlag(X_INC)
}
if(ydir > 0) {
SiS300SetupCMDFlag(Y_INC)
}
}
/* Issue one screen-to-screen blit previously configured by
 * SiS300SetupForScreenToScreenCopy. The engine's y coordinate registers
 * only hold values < 2048, so larger offsets are folded into the
 * src/dst base addresses instead. For decreasing-direction blits the
 * engine expects the coordinates of the last pixel, hence the
 * width-1/height-1 adjustments.
 */
static void
SiS300SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x,
int src_y, int dst_x, int dst_y, int width, int height)
{
u32 srcbase = 0, dstbase = 0;
if(src_y >= 2048) {
srcbase = ivideo->video_linelength * src_y;
src_y = 0;
}
if(dst_y >= 2048) {
dstbase = ivideo->video_linelength * dst_y;
dst_y = 0;
}
SiS300SetupSRCBase(srcbase);
SiS300SetupDSTBase(dstbase);
if(!(ivideo->CommandReg & X_INC)) {
src_x += width-1;
dst_x += width-1;
}
if(!(ivideo->CommandReg & Y_INC)) {
src_y += height-1;
dst_y += height-1;
}
SiS300SetupRect(width, height)
SiS300SetupSRCXY(src_x, src_y)
SiS300SetupDSTXY(dst_x, dst_y)
SiS300DoCMD
}
/* Program the 300-series engine for a solid color fill: foreground
 * color, destination pitch/depth, and a pattern-source ROP
 * (sisPatALUConv) with the PATFG flag set.
 */
static void
SiS300SetupForSolidFill(struct sis_video_info *ivideo, u32 color, int rop)
{
SiS300SetupPATFG(color)
SiS300SetupDSTRect(ivideo->video_linelength, 0xffff)
SiS300SetupDSTColorDepth(ivideo->DstColor);
SiS300SetupROP(sisPatALUConv[rop])
SiS300SetupCMDFlag(PATFG)
}
/* Fill one rectangle with the color configured by
 * SiS300SetupForSolidFill. As with blits, y >= 2048 is folded into the
 * destination base address because the coordinate register is limited.
 */
static void
SiS300SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h)
{
u32 dstbase = 0;
if(y >= 2048) {
dstbase = ivideo->video_linelength * y;
y = 0;
}
SiS300SetupDSTBase(dstbase)
SiS300SetupDSTXY(x,y)
SiS300SetupRect(w,h)
SiS300SetupCMDFlag(X_INC | Y_INC | BITBLT)
SiS300DoCMD
}
#endif
/* 315/330/340 series ---------------------------------------------- */
#ifdef CONFIG_FB_SIS_315
/* Wait for the 315/330/340-series 2D engine to become idle (SiS310Idle
 * is a statement-expanding macro from sis_accel.h).
 */
static void
SiS310Sync(struct sis_video_info *ivideo)
{
SiS310Idle
}
/* Program the 315/330/340-series engine for a screen-to-screen blit.
 * Unlike the 300 series, this hardware determines the blit direction
 * itself, so no X_INC/Y_INC flags are needed here.
 * @rop: X11-style raster op; @trans_color: color key or -1 for opaque.
 */
static void
SiS310SetupForScreenToScreenCopy(struct sis_video_info *ivideo, int rop, int trans_color)
{
SiS310SetupDSTColorDepth(ivideo->DstColor);
SiS310SetupSRCPitch(ivideo->video_linelength)
SiS310SetupDSTRect(ivideo->video_linelength, 0x0fff)
if(trans_color != -1) {
SiS310SetupROP(0x0A)
SiS310SetupSRCTrans(trans_color)
SiS310SetupCMDFlag(TRANSPARENT_BITBLT)
} else {
SiS310SetupROP(sisALUConv[rop])
/* Set command - not needed, both 0 */
/* SiSSetupCMDFlag(BITBLT | SRCVIDEO) */
}
SiS310SetupCMDFlag(ivideo->SiS310_AccelDepth)
/* The chip is smart enough to know the direction */
}
/* Issue one screen-to-screen blit on the 315/330/340-series engine.
 * Coordinates >= 2048 must be folded into the base addresses, but that
 * must be done identically for source and destination when the areas
 * overlap, or the chip's automatic direction detection breaks.
 */
static void
SiS310SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x, int src_y,
int dst_x, int dst_y, int width, int height)
{
u32 srcbase = 0, dstbase = 0;
int mymin = min(src_y, dst_y);
int mymax = max(src_y, dst_y);
/* Although the chip knows the direction to use
* if the source and destination areas overlap,
* that logic fails if we fiddle with the bitmap
* addresses. Therefore, we check if the source
* and destination blitting areas overlap and
* adapt the bitmap addresses synchronously
* if the coordinates exceed the valid range.
* If the areas do not overlap, we do our
* normal check.
*/
if((mymax - mymin) < height) {
if((src_y >= 2048) || (dst_y >= 2048)) {
srcbase = ivideo->video_linelength * mymin;
dstbase = ivideo->video_linelength * mymin;
src_y -= mymin;
dst_y -= mymin;
}
} else {
if(src_y >= 2048) {
srcbase = ivideo->video_linelength * src_y;
src_y = 0;
}
if(dst_y >= 2048) {
dstbase = ivideo->video_linelength * dst_y;
dst_y = 0;
}
}
/* 315 series adds the framebuffer offset to the engine base addresses */
srcbase += ivideo->video_offset;
dstbase += ivideo->video_offset;
SiS310SetupSRCBase(srcbase);
SiS310SetupDSTBase(dstbase);
SiS310SetupRect(width, height)
SiS310SetupSRCXY(src_x, src_y)
SiS310SetupDSTXY(dst_x, dst_y)
SiS310DoCMD
}
/* Program the 315/330/340-series engine for a solid color fill, using
 * the pattern-source ROP table and the depth flag from SiS310_AccelDepth.
 */
static void
SiS310SetupForSolidFill(struct sis_video_info *ivideo, u32 color, int rop)
{
SiS310SetupPATFG(color)
SiS310SetupDSTRect(ivideo->video_linelength, 0x0fff)
SiS310SetupDSTColorDepth(ivideo->DstColor);
SiS310SetupROP(sisPatALUConv[rop])
SiS310SetupCMDFlag(PATFG | ivideo->SiS310_AccelDepth)
}
/* Fill one rectangle with the color configured by
 * SiS310SetupForSolidFill; y >= 2048 is folded into the destination
 * base address, and the framebuffer offset is applied as on all 315
 * engine commands.
 */
static void
SiS310SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h)
{
u32 dstbase = 0;
if(y >= 2048) {
dstbase = ivideo->video_linelength * y;
y = 0;
}
dstbase += ivideo->video_offset;
SiS310SetupDSTBase(dstbase)
SiS310SetupDSTXY(x,y)
SiS310SetupRect(w,h)
SiS310SetupCMDFlag(BITBLT)
SiS310DoCMD
}
#endif
/* --------------------------------------------------------------------- */
/* The exported routines */
/* Initialize the acceleration state for one adapter. Currently only
 * sets up the engine spinlock (when compiled in); always returns 0.
 */
int sisfb_initaccel(struct sis_video_info *ivideo)
{
#ifdef SISFB_USE_SPINLOCKS
spin_lock_init(&ivideo->lockaccel);
#endif
return 0;
}
/* Wait for the 2D engine to become idle, dispatching to the routine for
 * the adapter's engine generation (300 vs. 315/330/340 series).
 */
void sisfb_syncaccel(struct sis_video_info *ivideo)
{
if(ivideo->sisvga_engine == SIS_300_VGA) {
#ifdef CONFIG_FB_SIS_300
SiS300Sync(ivideo);
#endif
} else {
#ifdef CONFIG_FB_SIS_315
SiS310Sync(ivideo);
#endif
}
}
/* fb_ops.fb_sync entry point: wait for pending accelerated operations.
 * CRITFLAGS/CRITBEGIN/CRITEND are locking macros from sis.h (CRITFLAGS
 * declares the flags variable the other two use). Always returns 0.
 */
int fbcon_sis_sync(struct fb_info *info)
{
struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
CRITFLAGS
if((!ivideo->accel) || (!ivideo->engineok))
return 0;
CRITBEGIN
sisfb_syncaccel(ivideo);
CRITEND
return 0;
}
/* fb_ops.fb_fillrect entry point: accelerated solid fill. Falls back to
 * the generic cfb_fillrect() when acceleration is disabled or the
 * engine is not ready, clips the rectangle to the virtual screen, and
 * resolves the fill color (palette index at 8bpp, pseudo-palette lookup
 * at 16/32bpp; other depths leave col == 0).
 */
void fbcon_sis_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
u32 col = 0;
u32 vxres = info->var.xres_virtual;
u32 vyres = info->var.yres_virtual;
int width, height;
CRITFLAGS
if(info->state != FBINFO_STATE_RUNNING)
return;
if((!ivideo->accel) || (!ivideo->engineok)) {
cfb_fillrect(info, rect);
return;
}
/* Nothing to do for empty or fully off-screen rectangles */
if(!rect->width || !rect->height || rect->dx >= vxres || rect->dy >= vyres)
return;
/* Clipping */
width = ((rect->dx + rect->width) > vxres) ? (vxres - rect->dx) : rect->width;
height = ((rect->dy + rect->height) > vyres) ? (vyres - rect->dy) : rect->height;
switch(info->var.bits_per_pixel) {
case 8: col = rect->color;
break;
case 16:
case 32: col = ((u32 *)(info->pseudo_palette))[rect->color];
break;
}
if(ivideo->sisvga_engine == SIS_300_VGA) {
#ifdef CONFIG_FB_SIS_300
CRITBEGIN
SiS300SetupForSolidFill(ivideo, col, myrops[rect->rop]);
SiS300SubsequentSolidFillRect(ivideo, rect->dx, rect->dy, width, height);
CRITEND
#endif
} else {
#ifdef CONFIG_FB_SIS_315
CRITBEGIN
SiS310SetupForSolidFill(ivideo, col, myrops[rect->rop]);
SiS310SubsequentSolidFillRect(ivideo, rect->dx, rect->dy, width, height);
CRITEND
#endif
}
/* Wait for completion so the CPU never races the engine on the fb */
sisfb_syncaccel(ivideo);
}
/* fb_ops.fb_copyarea entry point: accelerated screen-to-screen copy.
 * Falls back to cfb_copyarea() when acceleration is unavailable, clips
 * width/height against both the source and destination positions, and
 * (on the 300 series only) computes the blit direction from the
 * relative source/destination coordinates.
 */
void fbcon_sis_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
u32 vxres = info->var.xres_virtual;
u32 vyres = info->var.yres_virtual;
int width = area->width;
int height = area->height;
CRITFLAGS
if(info->state != FBINFO_STATE_RUNNING)
return;
if((!ivideo->accel) || (!ivideo->engineok)) {
cfb_copyarea(info, area);
return;
}
if(!width || !height ||
area->sx >= vxres || area->sy >= vyres ||
area->dx >= vxres || area->dy >= vyres)
return;
/* Clipping */
if((area->sx + width) > vxres) width = vxres - area->sx;
if((area->dx + width) > vxres) width = vxres - area->dx;
if((area->sy + height) > vyres) height = vyres - area->sy;
if((area->dy + height) > vyres) height = vyres - area->dy;
if(ivideo->sisvga_engine == SIS_300_VGA) {
#ifdef CONFIG_FB_SIS_300
int xdir, ydir;
if(area->sx < area->dx) xdir = 0;
else xdir = 1;
if(area->sy < area->dy) ydir = 0;
else ydir = 1;
CRITBEGIN
SiS300SetupForScreenToScreenCopy(ivideo, xdir, ydir, 3, -1);
SiS300SubsequentScreenToScreenCopy(ivideo, area->sx, area->sy,
area->dx, area->dy, width, height);
CRITEND
#endif
} else {
#ifdef CONFIG_FB_SIS_315
CRITBEGIN
SiS310SetupForScreenToScreenCopy(ivideo, 3, -1);
SiS310SubsequentScreenToScreenCopy(ivideo, area->sx, area->sy,
area->dx, area->dy, width, height);
CRITEND
#endif
}
/* Wait for completion before returning to the caller */
sisfb_syncaccel(ivideo);
}
| gpl-2.0 |
anwarMov/android_kernel_asus_a500cg | drivers/net/wireless/bcmdhd/wl_cfgp2p.c | 9 | 76454 | /*
* Linux cfgp2p driver
*
* Copyright (C) 1999-2013, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: wl_cfgp2p.c 419821 2013-08-22 21:43:26Z $
*
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/if_arp.h>
#include <asm/uaccess.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <proto/ethernet.h>
#include <proto/802.11.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
#include <wldev_common.h>
#include <wl_android.h>
/* Shared buffer for building p2p_scan iovar parameter blocks */
static s8 scanparambuf[WLC_IOCTL_SMLEN];
/* Scratch buffer for assembling vendor/management IEs */
static s8 g_mgmt_ie_buf[2048];
/* Forward declarations for file-local helpers defined further below */
static bool
wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
static u32
wl_cfgp2p_vndr_ie(struct wl_priv *wl, u8 *iebuf, s32 pktflag,
s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
static s32 wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
#if defined(WL_ENABLE_P2P_IF)
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
static int wl_cfgp2p_if_open(struct net_device *net);
static int wl_cfgp2p_if_stop(struct net_device *net);
/* net_device_ops for the virtual P2P network interface */
static const struct net_device_ops wl_cfgp2p_if_ops = {
.ndo_open = wl_cfgp2p_if_open,
.ndo_stop = wl_cfgp2p_if_stop,
.ndo_do_ioctl = wl_cfgp2p_do_ioctl,
.ndo_start_xmit = wl_cfgp2p_start_xmit,
};
#endif /* WL_ENABLE_P2P_IF */
/*
 * Test whether a raw frame body is a P2P public action frame: the
 * category/action/OUI-type fields must match the P2P public action
 * constants and the OUI must be the WFA P2P OUI. A NULL frame or one
 * shorter than the fixed header (minus the variable elements byte)
 * is rejected.
 */
bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
{
	wifi_p2p_pub_act_frame_t *hdr = (wifi_p2p_pub_act_frame_t *)frame;

	if (frame == NULL)
		return false;
	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) - 1)
		return false;

	return (hdr->category == P2P_PUB_AF_CATEGORY) &&
	       (hdr->action == P2P_PUB_AF_ACTION) &&
	       (hdr->oui_type == P2P_VER) &&
	       (memcmp(hdr->oui, P2P_OUI, sizeof(hdr->oui)) == 0);
}
/*
 * Test whether a raw frame body is a P2P (non-public) action frame:
 * category and OUI-type must match the P2P action constants and the
 * OUI must be the WFA P2P OUI. NULL or too-short frames are rejected.
 */
bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
{
	wifi_p2p_action_frame_t *hdr = (wifi_p2p_action_frame_t *)frame;

	if (frame == NULL)
		return false;
	if (frame_len < sizeof(wifi_p2p_action_frame_t) - 1)
		return false;

	return (hdr->category == P2P_AF_CATEGORY) &&
	       (hdr->type == P2P_VER) &&
	       (memcmp(hdr->OUI, P2P_OUI, DOT11_OUI_LEN) == 0);
}
#define GAS_RESP_LEN 2
#define DOUBLE_TLV_BODY_OFF 4
#define GAS_RESP_OFFSET 4
#define GAS_CRESP_OFFSET 5

/*
 * Check whether the ANQP payload of a GAS frame carries a WFA
 * vendor-specific element of the given @subtype.
 * @data/@len describe the buffer to scan (a GAS query/response body).
 * Returns true only when the info-ID is P2PSD_GAS_NQP_INFOID, the
 * embedded OUI is the WFA OUI, and the subtype byte matches.
 */
bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
{
	bcm_tlv_t *ie = (bcm_tlv_t *)data;
	u8 *frame = NULL;
	u32 hdr_offset;
	u16 id, flen;

	/* Skipped first ANQP Element, if frame has anqp elemnt */
	ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
	if (ie == NULL)
		return false;

	frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;

	/* Bounds check: ensure the 4-byte info-ID/length header plus the
	 * OUI and subtype bytes we read below all fit within @len, so a
	 * truncated or malformed frame cannot cause an out-of-bounds
	 * read. NOTE(review): some callers pass the overall frame length
	 * rather than the remaining-buffer length, so this check is a
	 * conservative improvement, not a complete validation.
	 */
	hdr_offset = (u32)(frame - data);
	if (len < hdr_offset + DOUBLE_TLV_BODY_OFF + WFA_OUI_LEN + 1)
		return false;

	id = ((u16) (((frame)[1] << 8) | (frame)[0]));
	flen = ((u16) (((frame)[3] << 8) | (frame)[2]));

	/* If the contents match the OUI and the type */
	if (flen >= WFA_OUI_LEN + 1 &&
	    id == P2PSD_GAS_NQP_INFOID &&
	    !bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
	    subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
		return true;
	}
	return false;
}
/*
 * Test whether a raw frame body is a GAS public action frame relevant
 * to P2P service discovery. With WL11U the GAS responses are further
 * checked for the P2P SD subtype in their ANQP payload; without it,
 * any of the four GAS action IDs is accepted.
 */
bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
{
	wifi_p2psd_gas_pub_act_frame_t *gas_frm;

	if (frame == NULL)
		return false;

	gas_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
		return false;
	if (gas_frm->category != P2PSD_ACTION_CATEGORY)
		return false;

#ifdef WL11U
	switch (gas_frm->action) {
	case P2PSD_ACTION_ID_GAS_IRESP:
		/* Initial response: verify the P2P SD subtype in the payload */
		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
			(u8 *)gas_frm->query_data + GAS_RESP_OFFSET,
			frame_len);
	case P2PSD_ACTION_ID_GAS_CRESP:
		/* Comeback response: same check, different payload offset */
		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
			(u8 *)gas_frm->query_data + GAS_CRESP_OFFSET,
			frame_len);
	case P2PSD_ACTION_ID_GAS_IREQ:
	case P2PSD_ACTION_ID_GAS_CREQ:
		return true;
	default:
		return false;
	}
#else
	switch (gas_frm->action) {
	case P2PSD_ACTION_ID_GAS_IREQ:
	case P2PSD_ACTION_ID_GAS_IRESP:
	case P2PSD_ACTION_ID_GAS_CREQ:
	case P2PSD_ACTION_ID_GAS_CRESP:
		return true;
	default:
		return false;
	}
#endif /* WL11U */
}
/* Log a human-readable description of a P2P/GAS action frame for
 * debugging. @tx selects the "TX"/"RX" prefix; frames shorter than
 * 3 bytes or of unrecognized type are silently ignored.
 */
void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
{
wifi_p2p_pub_act_frame_t *pact_frm;
wifi_p2p_action_frame_t *act_frm;
wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
if (!frame || frame_len <= 2)
return;
/* P2P public action frames: negotiation / invitation / discovery */
if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
switch (pact_frm->subtype) {
case P2P_PAF_GON_REQ:
CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_GON_RSP:
CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_GON_CONF:
CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_INVITE_REQ:
CFGP2P_ACTION(("%s P2P Invitation Request Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_INVITE_RSP:
CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_DEVDIS_REQ:
CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_DEVDIS_RSP:
CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_PROVDIS_REQ:
CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_PAF_PROVDIS_RSP:
CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
default:
CFGP2P_ACTION(("%s Unknown P2P Public Action Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
}
/* P2P (non-public) action frames: presence / absence */
} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
act_frm = (wifi_p2p_action_frame_t *)frame;
switch (act_frm->subtype) {
case P2P_AF_NOTICE_OF_ABSENCE:
CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_AF_PRESENCE_REQ:
CFGP2P_ACTION(("%s P2P Presence Request Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_AF_PRESENCE_RSP:
CFGP2P_ACTION(("%s P2P Presence Response Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
case P2P_AF_GO_DISC_REQ:
CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
default:
CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
}
/* GAS service-discovery frames */
} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
switch (sd_act_frm->action) {
case P2PSD_ACTION_ID_GAS_IREQ:
CFGP2P_ACTION(("%s P2P GAS Initial Request,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_IRESP:
CFGP2P_ACTION(("%s P2P GAS Initial Response,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_CREQ:
CFGP2P_ACTION(("%s P2P GAS Comback Request,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_CRESP:
CFGP2P_ACTION(("%s P2P GAS Comback Response,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
default:
CFGP2P_ACTION(("%s Unknown P2P GAS Frame,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
}
}
}
/*
* Initialize variables related to P2P
*
*/
/*
 * Allocate and zero the driver's p2p_info state and clear the saved
 * vendor IEs for all three P2P BSSCFGs (primary, device, connection).
 * The primary BSSCFG is bound to the primary net_device at index 0; the
 * device and connection BSSCFGs start unassigned.
 * Returns BCME_OK on success, -ENOMEM if allocation fails.
 */
s32
wl_cfgp2p_init_priv(struct wl_priv *wl)
{
	if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
		return -ENOMEM;
	}
/* Clear one saved IE buffer and its length for the given BSSCFG.
 * Note: no trailing semicolon after while (0) — the invocations below
 * supply it, preserving the do { } while (0) statement idiom.
 */
#define INIT_IE(IE_TYPE, BSS_TYPE) \
do { \
memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
} while (0)
	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
	INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY);
	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
	INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE);
	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
	INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
#undef INIT_IE
	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
	return BCME_OK;
}
/*
* Deinitialize variables related to P2P
*
*/
/*
 * Release the driver's p2p_info state and mark P2P as unsupported.
 * Safe to call when wl->p2p is already NULL (kfree(NULL) is a no-op).
 */
void
wl_cfgp2p_deinit_priv(struct wl_priv *wl)
{
	CFGP2P_DBG(("In\n"));
	kfree(wl->p2p);
	wl->p2p = NULL;
	wl->p2p_supported = 0;
}
/*
* Set P2P functions into firmware
*/
/* Prepare the firmware for P2P operation: make sure APSTA mode is
 * enabled (toggling the interface down/up if it was off) and push the
 * host MAC as the P2P device address via "p2p_da_override".
 * Returns 0 or a negative/BCME error code.
 */
s32
wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
{
struct net_device *ndev = wl_to_prmry_ndev(wl);
struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
s32 ret = BCME_OK;
s32 val = 0;
/* Do we have to check whether APSTA is enabled or not ? */
ret = wldev_iovar_getint(ndev, "apsta", &val);
if (ret < 0) {
CFGP2P_ERR(("get apsta error %d\n", ret));
return ret;
}
if (val == 0) {
/* "apsta" can only be changed while the interface is down */
val = 1;
ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
if (ret < 0) {
CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
return ret;
}
/* NOTE(review): return value of this setint is not checked;
 * a failure here would go unnoticed — verify intent.
 */
wldev_iovar_setint(ndev, "apsta", val);
ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
if (ret < 0) {
CFGP2P_ERR(("WLC_UP error %d\n", ret));
return ret;
}
}
/* In case of COB type, firmware has default mac address
 * After Initializing firmware, we have to set current mac address to
 * firmware for P2P device address
 */
ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
sizeof(null_eth_addr), wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
if (ret && ret != BCME_UNSUPPORTED) {
CFGP2P_ERR(("failed to update device address ret %d\n", ret));
}
return ret;
}
/* Create a new P2P BSS.
* Parameters:
* @mac : MAC address of the BSS to create
* @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
* @chspec : chspec to use if creating a GO BSS.
* Returns 0 if success.
*/
/* Create a new P2P virtual interface in firmware via "p2p_ifadd".
 * For a GO interface, additionally program the SCB (station) timeout.
 * Returns the firmware error code (0 on success).
 */
s32
wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
chanspec_t chspec)
{
wl_p2p_if_t ifreq;
s32 err;
u32 scb_timeout = WL_SCB_TIMEOUT;
struct net_device *ndev = wl_to_prmry_ndev(wl);
ifreq.type = if_type;
ifreq.chspec = chspec;
memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
CFGP2P_DBG(("---wl p2p_ifadd "MACDBG" %s %u\n",
MAC2STRDBG(ifreq.addr.octet),
(if_type == WL_P2P_IF_GO) ? "go" : "client",
(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
if (unlikely(err < 0))
printk("'wl p2p_ifadd' error %d\n", err);
else if (if_type == WL_P2P_IF_GO) {
/* GO only: shorten the idle-station timeout */
err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
if (unlikely(err < 0))
printk("'wl scb_timeout' error %d\n", err);
}
return err;
}
/* Disable a P2P BSS.
* Parameters:
* @mac : MAC address of the BSS to disable
* Returns 0 if success.
*/
s32
wl_cfgp2p_ifdisable(struct wl_priv *wl, struct ether_addr *mac)
{
s32 ret;
struct net_device *netdev = wl_to_prmry_ndev(wl);
CFGP2P_INFO(("------primary idx %d : wl p2p_ifdis "MACDBG"\n",
netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
if (unlikely(ret < 0)) {
printk("'wl p2p_ifdis' error %d\n", ret);
}
return ret;
}
/* Delete a P2P BSS.
* Parameters:
* @mac : MAC address of the BSS to delete
* Returns 0 if success.
*/
s32
wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
{
s32 ret;
struct net_device *netdev = wl_to_prmry_ndev(wl);
CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel "MACDBG"\n",
netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
if (unlikely(ret < 0)) {
printk("'wl p2p_ifdel' error %d\n", ret);
}
return ret;
}
/* Change a P2P Role.
* Parameters:
* @mac : MAC address of the BSS to change a role
* Returns 0 if success.
*/
/* Change the role (GO <-> client) of the existing P2P connection BSS
 * via "p2p_ifupd"; for a switch to GO, also program the SCB timeout.
 * Returns the firmware error code (0 on success).
 */
s32
wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
chanspec_t chspec)
{
wl_p2p_if_t ifreq;
s32 err;
u32 scb_timeout = WL_SCB_TIMEOUT;
/* The iovar goes to the connection BSS's net_device, not the primary */
struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
ifreq.type = if_type;
ifreq.chspec = chspec;
memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
CFGP2P_INFO(("---wl p2p_ifchange "MACDBG" %s %u"
" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
(if_type == WL_P2P_IF_GO) ? "go" : "client",
(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
ifreq.chspec));
err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
if (unlikely(err < 0)) {
printk("'wl p2p_ifupd' error %d\n", err);
} else if (if_type == WL_P2P_IF_GO) {
err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
if (unlikely(err < 0))
printk("'wl scb_timeout' error %d\n", err);
}
return err;
}
/* Get the index of a created P2P BSS.
* Parameters:
* @mac : MAC address of the created BSS
* @index : output: index of created BSS
* Returns 0 if success.
*/
/* Look up the firmware bsscfg index of the P2P BSS with address @mac
 * via the "p2p_if" iovar; on success the index is stored in *@index.
 * Returns the firmware error code (0 on success).
 */
s32
wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
{
s32 ret;
u8 getbuf[64];
struct net_device *dev = wl_to_prmry_ndev(wl);
CFGP2P_INFO(("---wl p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY), NULL);
if (ret == 0) {
/* Firmware returns the index as a raw s32 at the buffer start */
memcpy(index, getbuf, sizeof(s32));
CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index));
}
return ret;
}
/*
 * Turn the firmware's P2P discovery BSSCFG on (@on != 0) or off via the
 * "p2p_disc" iovar on the primary interface. Returns the firmware error
 * code (0 on success).
 */
static s32
wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
{
	struct net_device *primary_ndev = wl_to_prmry_ndev(wl);
	s32 err;

	CFGP2P_DBG(("enter\n"));
	err = wldev_iovar_setint(primary_ndev, "p2p_disc", on);
	if (unlikely(err < 0))
		CFGP2P_ERR(("p2p_disc %d error %d\n", on, err));
	return err;
}
/* Set the WL driver's P2P mode.
* Parameters :
* @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
* @channel : the channel to listen
* @listen_ms : the time (milli seconds) to wait
* @bssidx : bss index for BSSCFG
* Returns 0 if success
*/
/* Set the firmware P2P discovery state machine via the "p2p_state"
 * iovar on the net_device bound to @bssidx.
 * @mode: WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}; @channel and @listen_ms
 * are only meaningful for LISTEN. Returns -1 / BCME_NOTFOUND on bad or
 * unbound bssidx, otherwise the firmware error code.
 */
s32
wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
{
wl_p2p_disc_st_t discovery_mode;
s32 ret;
struct net_device *dev;
CFGP2P_DBG(("enter\n"));
if (unlikely(bssidx == WL_INVALID)) {
CFGP2P_ERR((" %d index out of range\n", bssidx));
return -1;
}
dev = wl_cfgp2p_find_ndev(wl, bssidx);
if (unlikely(dev == NULL)) {
CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
return BCME_NOTFOUND;
}
/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
discovery_mode.state = mode;
discovery_mode.chspec = wl_ch_host_to_driver(channel);
discovery_mode.dwell = listen_ms;
ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
sizeof(discovery_mode), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
bssidx, &wl->ioctl_buf_sync);
return ret;
}
/* Get the index of the P2P Discovery BSS */
/*
 * Query the firmware for the bsscfg index of the P2P discovery BSS
 * ("p2p_dev" iovar) and store it in *@index. Returns the firmware
 * error code (0 on success).
 */
static s32
wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
{
	struct net_device *primary_dev =
		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
	s32 err = wldev_iovar_getint(primary_dev, "p2p_dev", index);

	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, err));
	if (unlikely(err < 0))
		CFGP2P_ERR(("'p2p_dev' error %d\n", err));
	return err;
}
/* Bring up the firmware P2P discovery BSSCFG: enable discovery, fetch
 * its bsscfg index, record it (sharing the primary net_device), and put
 * it into the SCAN state. Idempotent: returns immediately if already
 * initialized.
 */
s32
wl_cfgp2p_init_discovery(struct wl_priv *wl)
{
s32 index = 0;
s32 ret = BCME_OK;
CFGP2P_DBG(("enter\n"));
if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
CFGP2P_ERR(("do nothing, already initialized\n"));
return ret;
}
ret = wl_cfgp2p_set_discovery(wl, 1);
if (ret < 0) {
CFGP2P_ERR(("set discover error\n"));
return ret;
}
/* Enable P2P Discovery in the WL Driver */
ret = wl_cfgp2p_get_disc_idx(wl, &index);
if (ret < 0) {
return ret;
}
/* Discovery shares the primary interface's net_device */
wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
/* Set the initial discovery state to SCAN */
ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret != 0)) {
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
wl_cfgp2p_set_discovery(wl, 0);
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
/* NOTE(review): returns 0 (success) even though the SCAN state
 * could not be set and discovery was rolled back — verify intent.
 */
return 0;
}
return ret;
}
/* Deinitialize P2P Discovery
* Parameters :
* @wl : wl_private data
* Returns 0 if succes
*/
/* Deinitialize P2P Discovery
 * Parameters :
 * @wl : wl_private data
 * Tears down the discovery BSSCFG (which also deletes its IEs in
 * firmware) and clears the cached index/net_device.
 * Returns -1 if discovery was not initialized, otherwise the result of
 * disabling discovery.
 */
static s32
wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
{
s32 ret = BCME_OK;
CFGP2P_DBG(("enter\n"));
if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
CFGP2P_ERR(("do nothing, not initialized\n"));
return -1;
}
/* Set the discovery state to SCAN */
/* NOTE(review): this ret is immediately overwritten below, so a
 * failure of the state change is silently ignored.
 */
ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
ret = wl_cfgp2p_set_discovery(wl, 0);
/* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
 * BSS.
 */
/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
 * have no discovery BSS.
 */
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
return ret;
}
/* Enable P2P Discovery
* Parameters:
* @wl : wl_private data
* @ie : probe request ie (WPS IE + P2P IE)
* @ie_len : probe request ie length
* Returns 0 if success.
*/
/* Enable P2P discovery (initializing the discovery BSSCFG if needed),
 * set a non-zero wsec on it so probe responses carry the privacy bit,
 * and install the supplied probe-request IEs (@ie/@ie_len, may be 0).
 * Returns 0 or a negative/BCME error code.
 */
s32
wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev,
const u8 *ie, u32 ie_len)
{
s32 ret = BCME_OK;
s32 bssidx;
if (wl_to_prmry_ndev(wl) == dev) {
/* Primary dev: use the discovery BSSCFG's index directly */
bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
} else if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
}
wl_set_p2p_status(wl, DISCOVERY_ON);
CFGP2P_DBG(("enter\n"));
ret = wl_cfgp2p_init_discovery(wl);
if (unlikely(ret < 0)) {
CFGP2P_ERR((" init discovery error %d\n", ret));
goto exit;
}
/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
 * Some peer devices may not initiate WPS with us if this bit is not set.
 */
ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret < 0)) {
CFGP2P_ERR((" wsec error %d\n", ret));
}
set_ie:
if (ie_len) {
ret = wl_cfgp2p_set_management_ie(wl, dev,
bssidx,
VNDR_IE_PRBREQ_FLAG, ie, ie_len);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
goto exit;
}
}
exit:
return ret;
}
/* Disable P2P Discovery
* Parameters:
* @wl : wl_private_data
* Returns 0 if success.
*/
/* Disable P2P discovery: clear the DISCOVERY_ON status, return the
 * firmware discovery state machine to SCAN, then tear down the
 * discovery BSSCFG. Returns 0 or a negative/BCME error code.
 */
s32
wl_cfgp2p_disable_discovery(struct wl_priv *wl)
{
s32 ret = BCME_OK;
CFGP2P_DBG((" enter\n"));
wl_clr_p2p_status(wl, DISCOVERY_ON);
if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
CFGP2P_ERR((" do nothing, not initialized\n"));
goto exit;
}
ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret < 0)) {
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
}
/* Do a scan abort to stop the driver's scan engine in case it is still
 * waiting out an action frame tx dwell time.
 */
/* NOTE(review): DISCOVERY_ON was already cleared at the top of this
 * function; this second clear appears redundant.
 */
wl_clr_p2p_status(wl, DISCOVERY_ON);
ret = wl_cfgp2p_deinit_discovery(wl);
exit:
return ret;
}
/* Do a P2P escan (event-driven scan) over the given channel list.
 * Parameters:
 * @wl : wl_private data
 * @dev : net device the request is associated with
 * @active : nonzero => active scan, zero => passive scan
 * @num_chans : number of entries in @channels
 * @channels : channels to scan
 * @search_state : WL_P2P_DISC_ST_SEARCH or WL_P2P_DISC_ST_SCAN
 * @action : escan action code (e.g. WL_SCAN_ACTION_START)
 * @bssidx : bsscfg index the p2p state change is applied to
 * @tx_dst_addr : if non-NULL, scan for this BSSID; otherwise broadcast
 * @p2p_scan_purpose : selects the dwell-time / probe-count tuning below
 * Returns BCME_OK on success; -1 on bad arguments or too-small buffer.
 */
s32
wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
	u32 num_chans, u16 *channels,
	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
	p2p_scan_purpose_t p2p_scan_purpose)
{
	s32 ret = BCME_OK;
	s32 memsize;
	s32 eparams_size;
	u32 i;
	s8 *memblk;
	wl_p2p_scan_t *p2p_params;
	wl_escan_params_t *eparams;
	wlc_ssid_t ssid;
	/* Scan parameters */
#define P2PAPI_SCAN_NPROBES 1
#define P2PAPI_SCAN_DWELL_TIME_MS 80
#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
#define P2PAPI_SCAN_HOME_TIME_MS 60
#define P2PAPI_SCAN_NPROBS_TIME_MS 30
#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
	struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
	/* Allocate scan params which need space for 3 channels and 0 ssids */
	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
		OFFSETOF(wl_escan_params_t, params)) +
		num_chans * sizeof(eparams->params.channel_list[0]);
	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
	memblk = scanparambuf;
	if (memsize > sizeof(scanparambuf)) {
		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
			memsize, sizeof(scanparambuf)));
		return -1;
	}
	memset(memblk, 0, memsize);
	memset(wl->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
	if (search_state == WL_P2P_DISC_ST_SEARCH) {
		/*
		 * If we in SEARCH STATE, we don't need to set SSID explictly
		 * because dongle use P2P WILDCARD internally by default
		 */
		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
		/* use null ssid */
		ssid.SSID_len = 0;
		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
		/* SCAN STATE 802.11 SCAN
		 * WFD Supplicant has p2p_find command with (type=progressive, type= full)
		 * So if P2P_find command with type=progressive,
		 * we have to set ssid to P2P WILDCARD because
		 * we just do broadcast scan unless setting SSID
		 */
		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
		/* use wild card ssid */
		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
	} else {
		CFGP2P_ERR((" invalid search state %d\n", search_state));
		return -1;
	}
	/* Fill in the P2P scan structure at the start of the iovar param block */
	p2p_params = (wl_p2p_scan_t*) memblk;
	p2p_params->type = 'E';
	/* Fill in the Scan structure that follows the P2P scan structure */
	eparams = (wl_escan_params_t*) (p2p_params + 1);
	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
	if (active)
		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
	else
		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
	/* FIX: restore the '&ether_bcast' token that had been corrupted into
	 * the HTML-entity garble "ðer_bcast". Scan for the broadcast BSSID
	 * unless the caller supplied a specific destination address.
	 */
	if (tx_dst_addr == NULL)
		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
	else
		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
	if (ssid.SSID_len)
		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
	/* Pick the active dwell time according to why we are scanning. */
	switch (p2p_scan_purpose) {
	case P2P_SCAN_SOCIAL_CHANNEL:
		eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
		break;
	case P2P_SCAN_AFX_PEER_NORMAL:
	case P2P_SCAN_AFX_PEER_REDUCED:
		eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
		break;
	case P2P_SCAN_CONNECT_TRY:
		eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
		break;
	default:
		if (wl_get_drv_status_all(wl, CONNECTED))
			eparams->params.active_time = -1;
		else
			eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
		break;
	}
	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
		eparams->params.nprobes = htod32(eparams->params.active_time /
			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
	else
		eparams->params.nprobes = htod32((eparams->params.active_time /
			P2PAPI_SCAN_NPROBS_TIME_MS));
	if (eparams->params.nprobes <= 0)
		eparams->params.nprobes = 1;
	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
		eparams->params.nprobes, eparams->params.active_time));
	eparams->params.passive_time = htod32(-1);
	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
	for (i = 0; i < num_chans; i++) {
		eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
	}
	eparams->version = htod32(ESCAN_REQ_VERSION);
	eparams->action = htod16(action);
	wl_escan_set_sync_id(eparams->sync_id, wl);
	CFGP2P_INFO(("SCAN CHANNELS : "));
	for (i = 0; i < num_chans; i++) {
		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
		else CFGP2P_INFO((",%d", channels[i]));
	}
	CFGP2P_INFO(("\n"));
	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
		memblk, memsize, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
	if (ret == BCME_OK)
		wl_set_p2p_status(wl, SCANNING);
	return ret;
}
/* search function to reach at common channel to send action frame
 * Parameters:
 * @wl : wl_private data
 * @ndev : net device for bssidx
 * @bssidx : bssidx for BSS
 * @channel : if nonzero, search only this channel; otherwise the social channels
 * @tx_dst_addr : destination address passed through to the escan
 * Returns 0 if success.
 */
s32
wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
{
	s32 err = 0;
	u32 nchan;
	u16 *chan_list = NULL;
	p2p_scan_purpose_t purpose = P2P_SCAN_AFX_PEER_NORMAL;

	if (!p2p_is_on(wl) || ndev == NULL || bssidx == WL_INVALID)
		return -BCME_ERROR;
	CFGP2P_DBG((" Enter\n"));
	/* The primary bsscfg cannot run a discovery search; redirect to the
	 * dedicated P2P device bsscfg.
	 */
	if (bssidx == wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY))
		bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
	/* A specific channel gets repeated AF_PEER_SEARCH_CNT times; with no
	 * channel we fall back to the three social channels.
	 */
	nchan = channel ? AF_PEER_SEARCH_CNT : SOCIAL_CHAN_CNT;
	chan_list = kzalloc(nchan * sizeof(*chan_list), GFP_KERNEL);
	if (chan_list == NULL) {
		CFGP2P_ERR(("channel list allocation failed \n"));
		err = -ENOMEM;
		goto exit;
	}
	if (channel) {
		u32 idx;
		/* insert same channel to the chan_list */
		for (idx = 0; idx < nchan; idx++)
			chan_list[idx] = channel;
	} else {
		chan_list[0] = SOCIAL_CHAN_1;
		chan_list[1] = SOCIAL_CHAN_2;
		chan_list[2] = SOCIAL_CHAN_3;
	}
	err = wl_cfgp2p_escan(wl, ndev, true, nchan,
		chan_list, WL_P2P_DISC_ST_SEARCH,
		WL_SCAN_ACTION_START, bssidx, tx_dst_addr, purpose);
	kfree(chan_list);
exit:
	return err;
}
/* Check whether pointed-to IE looks like WPA.
 * NOTE(review): this pairs WPS_OUI with WPA_OUI_TYPE; presumably WPS_OUI and
 * the WPA OUI share the same 00:50:F2 bytes so the check still matches WPA
 * IEs -- confirm against the OUI definitions.
 */
#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
/* Check whether pointed-to IE looks like WPS. */
#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
/* Check whether the given IE looks like WFA P2P IE. */
#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
/* Check whether the given IE looks like WFA WFDisplay IE. */
#ifndef WFA_OUI_TYPE_WFD
#define WFA_OUI_TYPE_WFD 0x0a	/* WiFi Display OUI TYPE */
#endif
#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
/* Walk a buffer of 802.11 TLVs and collect up to MAX_VNDR_IE_NUMBER
 * vendor-specific (DOT11_MNG_VS_ID) elements into @vndr_ies.
 * WPA/WME vendor IEs are deliberately skipped; each kept entry records a
 * pointer into @parse, the full on-air length (header + body), and a copy
 * of the fixed vndr_ie_t header.
 * Always returns BCME_OK; malformed entries are skipped, not reported.
 */
static s32
wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len,
	struct parsed_vndr_ies *vndr_ies)
{
	s32 err = BCME_OK;
	vndr_ie_t *vndrie;
	bcm_tlv_t *ie;
	struct parsed_vndr_ie_info *parsed_info;
	u32 count = 0;
	s32 remained_len;
	remained_len = (s32)len;
	memset(vndr_ies, 0, sizeof(*vndr_ies));
	WL_INFO(("---> len %d\n", len));
	ie = (bcm_tlv_t *) parse;
	/* Reject a buffer whose first TLV does not fit; start with NULL so the
	 * loop is skipped entirely.
	 */
	if (!bcm_valid_tlv(ie, remained_len))
		ie = NULL;
	while (ie) {
		if (count >= MAX_VNDR_IE_NUMBER)
			break;
		if (ie->id == DOT11_MNG_VS_ID) {
			vndrie = (vndr_ie_t *) ie;
			/* len should be bigger than OUI length + one data length at least */
			if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
				CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n",
					__FUNCTION__, vndrie->len));
				goto end;
			}
			/* if wpa or wme ie, do not add ie */
			if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
				((vndrie->data[0] == WPA_OUI_TYPE) ||
				(vndrie->data[0] == WME_OUI_TYPE))) {
				CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
				goto end;
			}
			parsed_info = &vndr_ies->ie_info[count++];
			/* save vndr ie information */
			parsed_info->ie_ptr = (char *)vndrie;
			parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
			memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
			vndr_ies->count = count;
			CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n",
				parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
				parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0]));
		}
end:
		/* bcm_next_tlv advances past the current element and updates
		 * remained_len; it returns NULL at the end of the buffer.
		 */
		ie = bcm_next_tlv(ie, &remained_len);
	}
	return err;
}
/* Delete and Set a management vndr ie to firmware
 * Parameters:
 * @wl : wl_private data
 * @ndev : net device for bssidx
 * @bssidx : bssidx for BSS
 * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
 * VNDR_IE_ASSOCREQ_FLAG)
 * @vndr_ie : VNDR IE (such as P2P IE , WPS IE)
 * @vndr_ie_len : VNDR IE Length
 * Returns 0 if success.
 *
 * Flow: select the locally cached IE buffer for (interface mode, pktflag),
 * compare the incoming IEs against the cache, then build a single "vndr_ie"
 * iovar payload in g_mgmt_ie_buf containing "del" commands for the old IEs
 * followed by "add" commands for the new ones, push it to firmware, and
 * refresh the cache.
 */
s32
wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
	s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
{
	s32 ret = BCME_OK;
	u8 *curr_ie_buf = NULL;
	u8 *mgmt_ie_buf = NULL;
	u32 mgmt_ie_buf_len = 0;
	/* NOTE(review): pointer initialized with 0 — NULL would be clearer. */
	u32 *mgmt_ie_len = 0;
	u32 del_add_ie_buf_len = 0;
	u32 total_ie_buf_len = 0;
	u32 parsed_ie_buf_len = 0;
	struct parsed_vndr_ies old_vndr_ies;
	struct parsed_vndr_ies new_vndr_ies;
	s32 i;
	u8 *ptr;
	s32 type = -1;
	s32 remained_buf_len;
	/* Shorthand accessors for the per-BSS-type saved IE buffers/lengths. */
#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
	memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
	curr_ie_buf = g_mgmt_ie_buf;
	CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
	/* Select the cache for the P2P case, AP case, or STA case in turn. */
	if (wl->p2p != NULL) {
		if (wl_cfgp2p_find_type(wl, bssidx, &type)) {
			CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
			return BCME_ERROR;
		}
		switch (pktflag) {
		case VNDR_IE_PRBREQ_FLAG :
			mgmt_ie_buf = IE_TYPE(probe_req, type);
			mgmt_ie_len = &IE_TYPE_LEN(probe_req, type);
			mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type));
			break;
		case VNDR_IE_PRBRSP_FLAG :
			mgmt_ie_buf = IE_TYPE(probe_res, type);
			mgmt_ie_len = &IE_TYPE_LEN(probe_res, type);
			mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type));
			break;
		case VNDR_IE_ASSOCREQ_FLAG :
			mgmt_ie_buf = IE_TYPE(assoc_req, type);
			mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type);
			mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type));
			break;
		case VNDR_IE_ASSOCRSP_FLAG :
			mgmt_ie_buf = IE_TYPE(assoc_res, type);
			mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type);
			mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type));
			break;
		case VNDR_IE_BEACON_FLAG :
			mgmt_ie_buf = IE_TYPE(beacon, type);
			mgmt_ie_len = &IE_TYPE_LEN(beacon, type);
			mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type));
			break;
		default:
			mgmt_ie_buf = NULL;
			mgmt_ie_len = NULL;
			CFGP2P_ERR(("not suitable type\n"));
			return BCME_ERROR;
		}
	} else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
		switch (pktflag) {
		case VNDR_IE_PRBRSP_FLAG :
			mgmt_ie_buf = wl->ap_info->probe_res_ie;
			mgmt_ie_len = &wl->ap_info->probe_res_ie_len;
			mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie);
			break;
		case VNDR_IE_BEACON_FLAG :
			mgmt_ie_buf = wl->ap_info->beacon_ie;
			mgmt_ie_len = &wl->ap_info->beacon_ie_len;
			mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie);
			break;
		default:
			mgmt_ie_buf = NULL;
			mgmt_ie_len = NULL;
			CFGP2P_ERR(("not suitable type\n"));
			return BCME_ERROR;
		}
		/* AP/STA modes always use the primary (index 0) bsscfg. */
		bssidx = 0;
	} else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
		switch (pktflag) {
		case VNDR_IE_PRBREQ_FLAG :
			mgmt_ie_buf = wl->sta_info->probe_req_ie;
			mgmt_ie_len = &wl->sta_info->probe_req_ie_len;
			mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie);
			break;
		case VNDR_IE_ASSOCREQ_FLAG :
			mgmt_ie_buf = wl->sta_info->assoc_req_ie;
			mgmt_ie_len = &wl->sta_info->assoc_req_ie_len;
			mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie);
			break;
		default:
			mgmt_ie_buf = NULL;
			mgmt_ie_len = NULL;
			CFGP2P_ERR(("not suitable type\n"));
			return BCME_ERROR;
		}
		bssidx = 0;
	} else {
		CFGP2P_ERR(("not suitable type\n"));
		return BCME_ERROR;
	}
	if (vndr_ie_len > mgmt_ie_buf_len) {
		CFGP2P_ERR(("extra IE size too big\n"));
		ret = -ENOMEM;
	} else {
		/* parse and save new vndr_ie in curr_ie_buff before comparing it */
		if (vndr_ie && vndr_ie_len && curr_ie_buf) {
			ptr = curr_ie_buf;
			wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie,
				vndr_ie_len, &new_vndr_ies);
			/* Pack the accepted new IEs contiguously into
			 * curr_ie_buf so they can be memcmp'd with the cache.
			 */
			for (i = 0; i < new_vndr_ies.count; i++) {
				struct parsed_vndr_ie_info *vndrie_info =
					&new_vndr_ies.ie_info[i];
				memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
					vndrie_info->ie_len);
				parsed_ie_buf_len += vndrie_info->ie_len;
			}
		}
		if (mgmt_ie_buf != NULL) {
			/* Skip the firmware round-trip entirely when the new
			 * IEs are byte-identical to what is already set.
			 */
			if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
				(memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
				CFGP2P_INFO(("Previous mgmt IE is equals to current IE"));
				goto exit;
			}
			/* parse old vndr_ie */
			wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
				&old_vndr_ies);
			/* make a command to delete old ie */
			for (i = 0; i < old_vndr_ies.count; i++) {
				struct parsed_vndr_ie_info *vndrie_info =
					&old_vndr_ies.ie_info[i];
				CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
					vndrie_info->vndrie.oui[2]));
				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(wl, curr_ie_buf,
					pktflag, vndrie_info->vndrie.oui,
					vndrie_info->vndrie.id,
					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
					"del");
				curr_ie_buf += del_add_ie_buf_len;
				total_ie_buf_len += del_add_ie_buf_len;
			}
		}
		/* The cache is rebuilt from scratch below. */
		*mgmt_ie_len = 0;
		/* Add if there is any extra IE */
		if (mgmt_ie_buf && parsed_ie_buf_len) {
			ptr = mgmt_ie_buf;
			remained_buf_len = mgmt_ie_buf_len;
			/* make a command to add new ie */
			for (i = 0; i < new_vndr_ies.count; i++) {
				struct parsed_vndr_ie_info *vndrie_info =
					&new_vndr_ies.ie_info[i];
				CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
					vndrie_info->ie_len - 2,
					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
					vndrie_info->vndrie.oui[2]));
				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(wl, curr_ie_buf,
					pktflag, vndrie_info->vndrie.oui,
					vndrie_info->vndrie.id,
					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
					"add");
				/* verify remained buf size before copy data */
				if (remained_buf_len >= vndrie_info->ie_len) {
					remained_buf_len -= vndrie_info->ie_len;
				} else {
					CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
						"found vndr ies # = %d(cur %d), remained len %d, "
						"cur mgmt_ie_len %d, new ie len = %d\n",
						pktflag, new_vndr_ies.count, i, remained_buf_len,
						*mgmt_ie_len, vndrie_info->ie_len));
					break;
				}
				/* save the parsed IE in wl struct */
				memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
					vndrie_info->ie_len);
				*mgmt_ie_len += vndrie_info->ie_len;
				curr_ie_buf += del_add_ie_buf_len;
				total_ie_buf_len += del_add_ie_buf_len;
			}
		}
		/* Push the combined del+add command list in one iovar call. */
		if (total_ie_buf_len) {
			ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
				total_ie_buf_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
				bssidx, &wl->ioctl_buf_sync);
			if (ret)
				CFGP2P_ERR(("vndr ie set error : %d\n", ret));
		}
	}
#undef IE_TYPE
#undef IE_TYPE_LEN
exit:
	return ret;
}
/* Clear the management IE buffer of BSSCFG
 * Parameters:
 * @wl : wl_private data
 * @bssidx : bssidx for BSS
 *
 * Returns 0 if success.
 */
s32
wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
{
	s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
		VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
	s32 index = -1;
	s32 type = -1;
	struct net_device *ndev = wl_cfgp2p_find_ndev(wl, bssidx);
/* Zero a saved IE buffer and its stored length for the given BSS type.
 * FIX: the do-while(0) no longer carries a trailing ';' inside the macro;
 * the old form expanded an invocation "INIT_IE(x, y);" into two statements,
 * which silently breaks in unbraced if/else bodies.
 */
#define INIT_IE(IE_TYPE, BSS_TYPE) \
	do { \
		memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
			sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
		wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
	} while (0)
	if (bssidx < 0 || ndev == NULL) {
		CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev"));
		return BCME_BADARG;
	}
	if (wl_cfgp2p_find_type(wl, bssidx, &type)) {
		CFGP2P_ERR(("invalid argument\n"));
		return BCME_BADARG;
	}
	/* First ask the firmware to drop each class of vendor IEs... */
	for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
		/* clean up vndr ies in dongle */
		wl_cfgp2p_set_management_ie(wl, ndev, bssidx, vndrie_flag[index], NULL, 0);
	}
	/* ...then wipe the locally cached copies. */
	INIT_IE(probe_req, type);
	INIT_IE(probe_res, type);
	INIT_IE(assoc_req, type);
	INIT_IE(assoc_res, type);
	INIT_IE(beacon, type);
	return BCME_OK;
}
/* Is any of the tlvs the expected entry? If
 * not update the tlvs buffer pointer/length.
 *
 * @ie : points at the current TLV (id, len, body)
 * @tlvs / @tlvs_len : caller's cursor into the IE buffer; on a non-match
 *   they are advanced past the current element so the caller can continue
 *   parsing. Pass tlvs == NULL to do a pure match test with no advance.
 * @oui / @oui_len / @type : vendor OUI and OUI-type to match.
 * Returns TRUE when the element body starts with @oui followed by @type.
 *
 * NOTE(review): the advance trusts ie[TLV_LEN_OFF]; a corrupt length field
 * could move the cursor past the end of the buffer -- callers appear to rely
 * on bcm_parse_tlvs for bounds checking. Confirm.
 */
static bool
wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
{
	/* If the contents match the OUI and the type */
	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
		type == ie[TLV_BODY_OFF + oui_len]) {
		return TRUE;
	}
	if (tlvs == NULL)
		return FALSE;
	/* point to the next ie */
	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
	/* calculate the length of the rest of the buffer */
	*tlvs_len -= (int)(ie - *tlvs);
	/* update the pointer to the start of the buffer */
	*tlvs = ie;
	return FALSE;
}
/* Return the first WPA vendor IE in the buffer, or NULL if none. */
wpa_ie_fixed_t *
wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
{
	bcm_tlv_t *elt;

	/* Each non-matching vendor IE advances parse/len via the matcher. */
	for (elt = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID);
		elt != NULL;
		elt = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID)) {
		if (wl_cfgp2p_is_wpa_ie((u8*)elt, &parse, &len))
			return (wpa_ie_fixed_t *)elt;
	}
	return NULL;
}
/* Return the first WPS vendor IE in the buffer, or NULL if none. */
wpa_ie_fixed_t *
wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
{
	bcm_tlv_t *elt;

	/* Each non-matching vendor IE advances parse/len via the matcher. */
	for (elt = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID);
		elt != NULL;
		elt = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID)) {
		if (wl_cfgp2p_is_wps_ie((u8*)elt, &parse, &len))
			return (wpa_ie_fixed_t *)elt;
	}
	return NULL;
}
/* Return the first WFA P2P vendor IE in the buffer, or NULL if none. */
wifi_p2p_ie_t *
wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
{
	bcm_tlv_t *elt;

	/* Each non-matching vendor IE advances parse/len via the matcher. */
	for (elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID);
		elt != NULL;
		elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID)) {
		if (wl_cfgp2p_is_p2p_ie((uint8*)elt, &parse, &len))
			return (wifi_p2p_ie_t *)elt;
	}
	return NULL;
}
/* Return the first WFA WiFi-Display vendor IE in the buffer, or NULL. */
wifi_wfd_ie_t *
wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
{
	bcm_tlv_t *elt;

	/* Each non-matching vendor IE advances parse/len via the matcher. */
	for (elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID);
		elt != NULL;
		elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID)) {
		if (wl_cfgp2p_is_wfd_ie((uint8*)elt, &parse, &len))
			return (wifi_wfd_ie_t *)elt;
	}
	return NULL;
}
/* Serialize one "add"/"del" vendor-IE command (vndr_ie_setbuf_t layout)
 * into @iebuf.
 * @wl : driver private data (not referenced in this function body)
 * @iebuf : output buffer large enough for header + @datalen payload
 * @pktflag : which management frames carry the IE (VNDR_IE_*_FLAG mask)
 * @oui : 3-byte vendor OUI
 * @ie_id : 802.11 element ID
 * @data / @datalen : IE payload following the OUI
 * @add_del_cmd : command string, "add" or "del"
 * Returns the number of bytes written to @iebuf; on an invalid pktflag
 * returns -1 (note the u32 return type makes that a huge positive value —
 * callers treat it as a length).
 */
static u32
wl_cfgp2p_vndr_ie(struct wl_priv *wl, u8 *iebuf, s32 pktflag,
	s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
{
	vndr_ie_setbuf_t hdr;	/* aligned temporary vndr_ie buffer header */
	s32 iecount;
	u32 data_offset;
	/* Validate the pktflag parameter */
	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
		VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
		VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
		return -1;
	}
	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
	strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
	hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
	/* Set the IE count - the buffer contains only 1 IE */
	iecount = htod32(1);
	memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
	/* Copy packet flags that indicate which packets will contain this IE */
	pktflag = htod32(pktflag);
	memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
		sizeof(u32));
	/* Add the IE ID to the buffer */
	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
	/* Add the IE length to the buffer */
	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
		(uint8) VNDR_IE_MIN_LEN + datalen;
	/* Add the IE OUI to the buffer */
	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
	/* Copy the aligned temporary vndr_ie buffer header to the IE buffer.
	 * The "- 1" presumably drops a one-byte data[] placeholder at the tail
	 * of vndr_ie_setbuf_t; the real payload is appended just below —
	 * confirm against the vndr_ie_setbuf_t definition.
	 */
	memcpy(iebuf, &hdr, sizeof(hdr) - 1);
	/* Copy the IE data to the IE buffer */
	data_offset =
		(u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
		(u8*)&hdr;
	memcpy(iebuf + data_offset, data, datalen);
	return data_offset + datalen;
}
/*
 * Search the bssidx based on dev argument
 * Parameters:
 * @wl : wl_private data
 * @ndev : net device to search bssidx
 * @bssidx : output arg to store bssidx of the bsscfg of firmware.
 * Returns error
 */
s32
wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev, s32 *bssidx)
{
	u32 slot;

	if (ndev == NULL || bssidx == NULL) {
		CFGP2P_ERR((" argument is invalid\n"));
		return BCME_BADARG;
	}
	/* Without p2p support, every ndev maps to the primary bsscfg. */
	if (!wl->p2p_supported) {
		*bssidx = P2PAPI_BSSCFG_PRIMARY;
		return BCME_OK;
	}
	/* we cannot find the bssidx of DISCOVERY BSS
	 * because the ndev is same with ndev of PRIMARY BSS.
	 */
	for (slot = 0; slot < P2PAPI_BSSCFG_MAX; slot++) {
		if (wl_to_p2p_bss_ndev(wl, slot) == ndev) {
			*bssidx = wl_to_p2p_bss_bssidx(wl, slot);
			return BCME_OK;
		}
	}
	return BCME_BADARG;
}
/* Map a firmware bsscfg index back to its net_device; NULL if unknown. */
struct net_device *
wl_cfgp2p_find_ndev(struct wl_priv *wl, s32 bssidx)
{
	struct net_device *found = NULL;
	u32 slot;

	if (bssidx < 0) {
		CFGP2P_ERR((" bsscfg idx is invalid\n"));
		return NULL;
	}
	/* Stop at the first slot whose recorded bssidx matches. */
	for (slot = 0; slot < P2PAPI_BSSCFG_MAX && found == NULL; slot++) {
		if (wl_to_p2p_bss_bssidx(wl, slot) == bssidx)
			found = wl_to_p2p_bss_ndev(wl, slot);
	}
	return found;
}
/*
 * Search the driver array idx based on bssidx argument
 * Parameters:
 * @wl : wl_private data
 * @bssidx : bssidx which indicate bsscfg->idx of firmware.
 * @type : output arg to store array idx of p2p->bss.
 * Returns error
 */
s32
wl_cfgp2p_find_type(struct wl_priv *wl, s32 bssidx, s32 *type)
{
	u32 slot;

	if (bssidx >= 0 && type != NULL) {
		/* Match the firmware bsscfg index to the p2p->bss array slot. */
		for (slot = 0; slot < P2PAPI_BSSCFG_MAX; slot++) {
			if (wl_to_p2p_bss_bssidx(wl, slot) == bssidx) {
				*type = slot;
				return BCME_OK;
			}
		}
	} else {
		CFGP2P_ERR((" argument is invalid\n"));
	}
	return BCME_BADARG;
}
/*
 * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
 *
 * Runs once per listen expiry (guarded by the LISTEN_EXPIRED status bit):
 * stops the listen timer, wakes whichever waiter started the listen
 * (common-channel action-frame search, or the sync-GON wait), reports
 * remain-on-channel expiry to cfg80211, and unsubscribes from P2P probe
 * request events. A second invocation only clears LISTEN_EXPIRED.
 */
s32
wl_cfgp2p_listen_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
	const wl_event_msg_t *e, void *data)
{
	s32 ret = BCME_OK;
	struct net_device *ndev = NULL;
	if (!wl || !wl->p2p)
		return BCME_ERROR;
	CFGP2P_DBG((" Enter\n"));
	ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
	if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
		wl_set_p2p_status(wl, LISTEN_EXPIRED);
		if (timer_pending(&wl->p2p->listen_timer)) {
			del_timer_sync(&wl->p2p->listen_timer);
		}
		/* Wake the action-frame common-channel search if it was the
		 * one listening.
		 */
		if (wl->afx_hdl->is_listen == TRUE &&
			wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
			WL_DBG(("Listen DONE for action frame\n"));
			complete(&wl->act_frm_scan);
		}
#ifdef WL_CFG80211_SYNC_GON
		else if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM_LISTEN)) {
			wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
				jiffies_to_msecs(jiffies - wl->af_tx_sent_jiffies)));
			if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM))
				wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, ndev);
			complete(&wl->wait_next_af);
		}
#endif /* WL_CFG80211_SYNC_GON */
		/* The two preprocessor branches below each open the same `if`
		 * block; exactly one is compiled in, closed by the brace after
		 * the cfg80211 notification.
		 */
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
		if (wl_get_drv_status_all(wl, REMAINING_ON_CHANNEL)) {
#else
		if (wl_get_drv_status_all(wl, REMAINING_ON_CHANNEL) ||
			wl_get_drv_status_all(wl, FAKE_REMAINING_ON_CHANNEL)) {
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
			WL_DBG(("Listen DONE for ramain on channel expired\n"));
			wl_clr_drv_status(wl, REMAINING_ON_CHANNEL, ndev);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
			wl_clr_drv_status(wl, FAKE_REMAINING_ON_CHANNEL, ndev);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
			if (ndev && (ndev->ieee80211_ptr != NULL)) {
#if defined(WL_CFG80211_P2P_DEV_IF)
				cfg80211_remain_on_channel_expired(cfgdev, wl->last_roc_id,
					&wl->remain_on_chan, GFP_KERNEL);
#else
				cfg80211_remain_on_channel_expired(cfgdev, wl->last_roc_id,
					&wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL);
#endif /* WL_CFG80211_P2P_DEV_IF */
			}
		}
		/* Listen is over; stop receiving P2P probe request events. */
		if (wl_add_remove_eventmsg(wl_to_prmry_ndev(wl),
			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
		}
	} else
		wl_clr_p2p_status(wl, LISTEN_EXPIRED);
	return ret;
}
/*
 * Timer expire callback function for LISTEN
 * We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
 * so lets do it from thread context.
 */
void
wl_cfgp2p_listen_expired(unsigned long data)
{
	wl_event_msg_t msg;
	/* Timer callbacks receive the wl_priv pointer packed into a ulong. */
	struct wl_priv *wl = (struct wl_priv *) data;
	CFGP2P_DBG((" Enter\n"));
	bzero(&msg, sizeof(wl_event_msg_t));
	/* Inject a synthetic DISC_LISTEN_COMPLETE event so completion is
	 * handled by the event thread (wl_cfgp2p_listen_complete), not here.
	 */
	msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
#if defined(WL_ENABLE_P2P_IF)
	wl_cfg80211_event(wl->p2p_net ? wl->p2p_net :
		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
#else
	wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg,
		NULL);
#endif /* WL_ENABLE_P2P_IF */
}
/*
 * Routine for cancelling the P2P LISTEN
 * @notify : when true (and the ndev has a wireless_dev), report the
 *           remain-on-channel expiry to cfg80211.
 * Always returns 0.
 */
static s32
wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev,
	struct wireless_dev *wdev, bool notify)
{
	WL_DBG(("Enter \n"));
	/* Irrespective of whether timer is running or not, reset
	 * the LISTEN state.
	 */
	if (timer_pending(&wl->p2p->listen_timer)) {
		del_timer_sync(&wl->p2p->listen_timer);
		/* Single combined condition replaces the original unbraced
		 * nested "if (notify) if (...)" — same behavior, no
		 * dangling-else hazard.
		 */
		if (notify && ndev && ndev->ieee80211_ptr) {
#if defined(WL_CFG80211_P2P_DEV_IF)
			cfg80211_remain_on_channel_expired(wdev, wl->last_roc_id,
				&wl->remain_on_chan, GFP_KERNEL);
#else
			cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id,
				&wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL);
#endif /* WL_CFG80211_P2P_DEV_IF */
		}
	}
	return 0;
}
/*
 * Do a P2P Listen on the given channel for the given duration.
 * A listen consists of sitting idle and responding to P2P probe requests
 * with a P2P probe response.
 *
 * This fn assumes dongle p2p device discovery is already enabled.
 * Parameters :
 * @wl : wl_private data
 * @channel : channel to listen
 * @duration_ms : the time (milli seconds) to wait
 * Returns BCME_OK on success, BCME_NOTREADY if discovery is off.
 */
s32
wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
{
#define EXTRA_DELAY_TIME	100
	s32 ret = BCME_OK;
	struct timer_list *_timer;
	s32 extra_delay;
	struct net_device *netdev = wl_to_prmry_ndev(wl);
	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
	if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
		CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
		ret = BCME_NOTREADY;
		goto exit;
	}
	/* Only one listen may run at a time; bail out if the previous one
	 * has not expired yet.
	 */
	if (timer_pending(&wl->p2p->listen_timer)) {
		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
		goto exit;
	}
	/* Note: this #ifndef splices an `else` onto the braced if above when
	 * VSDB scan prioritization is disabled; with it enabled the status
	 * bit is cleared at the end of the function instead.
	 */
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
	else
		wl_clr_p2p_status(wl, LISTEN_EXPIRED);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
	/* Subscribe to probe request events so we can answer during LISTEN. */
	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
		CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n"));
	}
	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
	_timer = &wl->p2p->listen_timer;
	/* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle ,
	 * otherwise we will wait up to duration_ms + 100ms + duration / 10
	 */
	if (ret == BCME_OK) {
		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
	} else {
		/* if failed to set listen, it doesn't need to wait whole duration. */
		duration_ms = 100 + duration_ms / 20;
		extra_delay = 0;
	}
	INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
	wl_clr_p2p_status(wl, LISTEN_EXPIRED);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#undef EXTRA_DELAY_TIME
exit:
	return ret;
}
/* Toggle the firmware's P2P SEARCH capability on the discovery bsscfg.
 * @enable : desired state; a no-op when already in effect or when
 *           discovery itself is off.
 * Returns BCME_OK, or the error from wl_cfgp2p_set_p2p_mode.
 */
s32
wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
{
	s32 err = BCME_OK;

	CFGP2P_DBG((" Enter\n"));
	/* SEARCH is meaningless while discovery is disabled. */
	if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
		CFGP2P_DBG((" do nothing, discovery is off\n"));
		return err;
	}
	/* Nothing to do if the requested state already holds. */
	if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
		CFGP2P_DBG(("already : %d\n", enable));
		return err;
	}
	wl_chg_p2p_status(wl, SEARCH_ENABLED);
	/* When disabling Search, reset the WL driver's p2p discovery state to
	 * WL_P2P_DISC_ST_SCAN.
	 */
	if (!enable) {
		wl_clr_p2p_status(wl, SCANNING);
		err = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
			wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
	}
	return err;
}
/*
 * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
 *
 * On ACTION_FRAME_COMPLETE with success status, records the ack via the
 * ACTION_TX_COMPLETED bit; on failure (and no prior ack) records
 * ACTION_TX_NOACK and aborts waiting for the next frame. On the
 * off-channel-complete event it wakes the sender blocked in
 * wl_cfgp2p_tx_action_frame. Only acts while SENDING_ACT_FRM is set.
 */
s32
wl_cfgp2p_action_tx_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
	const wl_event_msg_t *e, void *data)
{
	s32 ret = BCME_OK;
	u32 event_type = ntoh32(e->event_type);
	u32 status = ntoh32(e->status);
	CFGP2P_DBG((" Enter\n"));
	if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
		if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
			CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
			if (status == WLC_E_STATUS_SUCCESS) {
				wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
				CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
			}
			else {
				/* Only report NOACK if no ack was ever seen. */
				if (!wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
					wl_set_p2p_status(wl, ACTION_TX_NOACK);
					CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
					wl_stop_wait_next_action_frame(wl);
				}
			}
		} else {
			CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
				"status : %d\n", status));
			/* Off-channel dwell finished: unblock the tx path. */
			if (wl_get_drv_status_all(wl, SENDING_ACT_FRM))
				complete(&wl->send_af_done);
		}
	}
	return ret;
}
/* Send an action frame immediately without doing channel synchronization.
 *
 * This function does not wait for a completion event before returning.
 * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
 * frame is transmitted.
 * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
 * 802.11 ack has been received for the sent action frame.
 *
 * Flow: subscribe to the two completion events, fire the "actframe" iovar,
 * block on send_af_done (signalled by wl_cfgp2p_action_tx_complete) up to
 * dwell_time + WL_AF_TX_EXTRA_TIME_MAX, then judge success by the
 * ACTION_TX_COMPLETED status bit and unsubscribe the events again.
 */
s32
wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
	wl_af_params_t *af_params, s32 bssidx)
{
	s32 ret = BCME_OK;
	s32 evt_ret = BCME_OK;
	s32 timeout = 0;
	wl_eventmsg_buf_t buf;
	CFGP2P_INFO(("\n"));
	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
		af_params->channel, af_params->dwell_time));
	wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
	wl_clr_p2p_status(wl, ACTION_TX_NOACK);
	/* Enable both tx-completion events for the duration of the send. */
	bzero(&buf, sizeof(wl_eventmsg_buf_t));
	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
	if ((evt_ret = wl_cfg80211_apply_eventbuffer(wl_to_prmry_ndev(wl), wl, &buf)) < 0)
		return evt_ret;
	/* Action frames from the primary bsscfg go out via the P2P device. */
	if (bssidx == P2PAPI_BSSCFG_PRIMARY)
		bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
	wl->af_sent_channel = af_params->channel;
#ifdef WL_CFG80211_SYNC_GON
	wl->af_tx_sent_jiffies = jiffies;
#endif /* WL_CFG80211_SYNC_GON */
	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
		wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
	if (ret < 0) {
		CFGP2P_ERR((" sending action frame is failed\n"));
		goto exit;
	}
	timeout = wait_for_completion_timeout(&wl->send_af_done,
		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
	/* NOTE(review): wait_for_completion_timeout returns 0 on timeout and a
	 * positive remainder otherwise, so `timeout >= 0` is always true and
	 * success hinges solely on the ACTION_TX_COMPLETED bit — confirm
	 * whether `timeout > 0` was intended here.
	 */
	if (timeout >= 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
		CFGP2P_INFO(("tx action frame operation is completed\n"));
		ret = BCME_OK;
	} else {
		ret = BCME_ERROR;
		CFGP2P_INFO(("tx action frame operation is failed\n"));
	}
	/* clear status bit for action tx */
	wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
	wl_clr_p2p_status(wl, ACTION_TX_NOACK);
exit:
	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
	/* Revert the event subscriptions made above. */
	bzero(&buf, sizeof(wl_eventmsg_buf_t));
	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
	if ((evt_ret = wl_cfg80211_apply_eventbuffer(wl_to_prmry_ndev(wl), wl, &buf)) < 0) {
		WL_ERR(("TX frame events revert back failed \n"));
		return evt_ret;
	}
	return ret;
}
/* Generate our P2P Device Address and P2P Interface Address from our primary
 * MAC address.
 * @primary_addr : the device's primary MAC address (input)
 * @out_dev_addr : receives the derived P2P Device Address
 * @out_int_addr : receives the derived P2P Interface Address
 */
void
wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
	struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
{
	/* Generate the P2P Device Address. This consists of the device's
	 * primary MAC address with the locally administered bit set.
	 * (The memsets that used to zero both outputs first were dead stores:
	 * the full-size memcpys below overwrite every byte.)
	 */
	memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
	out_dev_addr->octet[0] |= 0x02;
	/* Generate the P2P Interface Address. If the discovery and connection
	 * BSSCFGs need to simultaneously co-exist, then this address must be
	 * different from the P2P Device Address, so flip bit 7 of octet 4.
	 */
	memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
	out_int_addr->octet[4] ^= 0x80;
}
/* P2P IF Address change to Virtual Interface MAC Address
 *
 * Walks the subelements of a P2P IE in @buf looking for @element_id and,
 * when found, overwrites the first ETHER_ADDR_LEN bytes of its body with
 * @p2p_int_addr. Returns after patching the first match.
 * NOTE(review): subelement lengths are parsed as 2-byte little endian and
 * trusted without bounds checking against the IE length -- a malformed
 * length could walk past the buffer. Confirm callers sanitize the IE.
 */
void
wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
{
	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
	u16 len = ie->len;
	u8 *subel;
	u8 subelt_id;
	u16 subelt_len;
	CFGP2P_DBG((" Enter\n"));
	/* Point subel to the P2P IE's subelt field.
	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
	 */
	subel = ie->subelts;
	len -= 4;	/* exclude OUI + OUI_TYPE */
	while (len >= 3) {
		/* attribute id */
		subelt_id = *subel;
		subel += 1;
		len -= 1;
		/* 2-byte little endian */
		subelt_len = *subel++;
		subelt_len |= *subel++ << 8;
		len -= 2;
		len -= subelt_len;	/* for the remaining subelt fields */
		if (subelt_id == element_id) {
			if (subelt_id == P2P_SEID_INTINTADDR) {
				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
			} else if (subelt_id == P2P_SEID_DEV_ID) {
				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
			} else if (subelt_id == P2P_SEID_DEV_INFO) {
				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
			} else if (subelt_id == P2P_SEID_GROUP_ID) {
				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
			}
			/* Only the first matching attribute is patched. */
			return;
		} else {
			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
		}
		subel += subelt_len;
	}
}
/*
 * Check if a BSS is up.
 * This is a common implementation called by most OSL implementations of
 * p2posl_bss_isup(). DO NOT call this function directly from the
 * common code -- call p2posl_bss_isup() instead to allow the OSL to
 * override the common implementation if necessary.
 */
bool
wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
{
	s8 iobuf[64];
	s32 status;
	bool up = false;

	/* Seed the reply buffer with a sentinel, then ask the firmware for
	 * the up/down state of the given bsscfg via the "bss" iovar.
	 */
	*(int*)iobuf = -1;
	status = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
		sizeof(bsscfg_idx), iobuf, sizeof(iobuf), 0, NULL);
	if (status != 0) {
		CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, status));
		CFGP2P_ERR(("NOTE: this ioctl error is normal "
			"when the BSS has not been created yet.\n"));
	} else {
		s32 state = dtoh32(*(int*)iobuf);
		CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, state));
		up = state ? TRUE : FALSE;
	}
	return up;
}
/* Bring up or down a BSS */
s32
wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up)
{
	s32 err;
	/* "bss" iovar payload: target bsscfg index plus the up/down value,
	 * both in dongle byte order.
	 */
	struct {
		s32 cfg;
		s32 val;
	} setbuf;

	setbuf.cfg = htod32(bsscfg_idx);
	setbuf.val = htod32(up ? 1 : 0);

	CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
	err = wldev_iovar_setbuf(ndev, "bss", &setbuf, sizeof(setbuf),
		wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
	if (err != 0)
		CFGP2P_ERR(("'bss %d' failed with %d\n", up, err));

	return err;
}
/* Check if 'p2p' is supported in the driver */
s32
wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
{
s32 ret = BCME_OK;
s32 p2p_supported = 0;
ret = wldev_iovar_getint(ndev, "p2p",
&p2p_supported);
if (ret < 0) {
CFGP2P_ERR(("wl p2p error %d\n", ret));
return 0;
}
if (p2p_supported == 1) {
CFGP2P_INFO(("p2p is supported\n"));
} else {
CFGP2P_INFO(("p2p is unsupported\n"));
p2p_supported = 0;
}
return p2p_supported;
}
/* Cleanup P2P resources.
 *
 * Cancels any pending listen, removes the management IEs from every valid
 * P2P bsscfg, tears down the discovery interface (when built with
 * WL_CFG80211_P2P_DEV_IF) and releases the driver's P2P private data.
 * Always returns 0.
 */
s32
wl_cfgp2p_down(struct wl_priv *wl)
{
	struct net_device *ndev = NULL;
	struct wireless_dev *wdev = NULL;
	s32 i = 0, index = -1;

	/* The netdev/wdev pair the listen state is tied to depends on how
	 * the P2P interface is exposed to cfg80211.
	 */
#if defined(WL_CFG80211_P2P_DEV_IF)
	ndev = wl_to_prmry_ndev(wl);
	wdev = wl_to_p2p_wdev(wl);
#elif defined(WL_ENABLE_P2P_IF)
	ndev = wl->p2p_net ? wl->p2p_net : wl_to_prmry_ndev(wl);
	wdev = ndev_to_wdev(ndev);
#endif /* WL_CFG80211_P2P_DEV_IF */
	/* Abort any remain-on-channel/listen before tearing state down */
	wl_cfgp2p_cancel_listen(wl, ndev, wdev, TRUE);
	/* Drop management IEs installed on each valid P2P bsscfg */
	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
		index = wl_to_p2p_bss_bssidx(wl, i);
		if (index != WL_INVALID)
			wl_cfgp2p_clear_management_ie(wl, index);
	}
#if defined(WL_CFG80211_P2P_DEV_IF)
	wl_cfgp2p_del_p2p_disc_if(wdev);
#endif /* WL_CFG80211_P2P_DEV_IF */
	wl_cfgp2p_deinit_priv(wl);
	return 0;
}
s32
wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
{
s32 ret = -1;
int count, start, duration;
wl_p2p_sched_t dongle_noa;
CFGP2P_DBG((" Enter\n"));
memset(&dongle_noa, 0, sizeof(dongle_noa));
if (wl->p2p && wl->p2p->vif_created) {
wl->p2p->noa.desc[0].start = 0;
sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
count, start, duration));
if (count != -1)
wl->p2p->noa.desc[0].count = count;
/* supplicant gives interval as start */
if (start != -1)
wl->p2p->noa.desc[0].interval = start;
if (duration != -1)
wl->p2p->noa.desc[0].duration = duration;
if (wl->p2p->noa.desc[0].count != 255) {
wl->p2p->noa.desc[0].start = 200;
dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
}
else {
/* Continuous NoA interval. */
dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
if ((wl->p2p->noa.desc[0].interval == 102) ||
(wl->p2p->noa.desc[0].interval == 100)) {
wl->p2p->noa.desc[0].start = 100 -
wl->p2p->noa.desc[0].duration;
dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
}
else {
dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
}
}
/* Put the noa descriptor in dongle format for dongle */
dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count);
if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start);
dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration);
}
else {
dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000);
dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000);
}
dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000);
ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
"p2p_noa", &dongle_noa, sizeof(dongle_noa), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
&wl->ioctl_buf_sync);
if (ret < 0) {
CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
}
}
else {
CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
}
return ret;
}
/*
 * Report the current NoA/OppPS settings as a hex-ASCII string in @buf
 * (format: 1-byte noa index, 1-byte ops|ctw, optional NoA descriptor).
 * Returns the string length in characters, or -1 when @buf is too small
 * or the driver is not in P2P mode.
 */
s32
wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len)
{
	wifi_p2p_noa_desc_t *noa_desc;
	int len = 0, i;
	char _buf[200];

	CFGP2P_DBG((" Enter\n"));
	buf[0] = '\0';
	if (wl->p2p && wl->p2p->vif_created) {
		if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) {
			_buf[0] = 1; /* noa index */
			_buf[1] = (wl->p2p->ops.ops ? 0x80: 0) |
				(wl->p2p->ops.ctw & 0x7f); /* ops + ctw */
			len += 2;
			if (wl->p2p->noa.desc[0].count) {
				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
				noa_desc->cnt_type = wl->p2p->noa.desc[0].count;
				noa_desc->duration = wl->p2p->noa.desc[0].duration;
				noa_desc->interval = wl->p2p->noa.desc[0].interval;
				noa_desc->start = wl->p2p->noa.desc[0].start;
				len += sizeof(wifi_p2p_noa_desc_t);
			}
			if (buf_len <= len * 2) {
				CFGP2P_ERR(("ERROR: buf_len %d in not enough for"
					"returning noa in string format\n", buf_len));
				return -1;
			}
			/* We have to convert the buffer data into ASCII strings */
			for (i = 0; i < len; i++) {
				/* FIX: cast to unsigned char -- a byte >= 0x80
				 * promoted from a signed char would print as
				 * "ffffff.." and corrupt the hex output.
				 */
				snprintf(buf, 3, "%02x", (unsigned char)_buf[i]);
				buf += 2;
			}
			/* FIX: terminate at the current write position.  The
			 * original wrote buf[i*2] -- but buf had already been
			 * advanced by 2*len, so that store landed 2*len bytes
			 * past the end of the string (out-of-bounds write).
			 */
			*buf = '\0';
		}
	}
	else {
		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
		return -1;
	}
	return len * 2;
}
/*
 * Parse "legacy_ps ps ctw" from @buf and apply P2P power-save settings:
 * CTWindow and opportunistic PS via the "p2p_ops" iovar, and legacy PM
 * (PM_MAX/PM_OFF only) via WLC_SET_PM.  A field value of -1 means "not
 * supplied".  Returns the last operation's result, or -1 on error.
 */
s32
wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
{
	/* FIX: pre-initialize to -1 ("field not supplied").  The original
	 * code left these uninitialized, so a partial sscanf() match made
	 * the -1 comparisons below read indeterminate values (UB).
	 */
	int ps = -1, ctw = -1;
	int ret = -1;
	s32 legacy_ps = -1;

	CFGP2P_DBG((" Enter\n"));
	if (wl->p2p && wl->p2p->vif_created) {
		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
		if (ctw != -1) {
			wl->p2p->ops.ctw = ctw;
			ret = 0;
		}
		if (ps != -1) {
			wl->p2p->ops.ops = ps;
			ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
				"p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops),
				wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
			if (ret < 0) {
				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
			}
		}
		/* Only PM_MAX and PM_OFF are accepted as legacy PM values */
		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
#if defined(SUPPORT_PM2_ONLY)
			if (legacy_ps == PM_MAX)
				legacy_ps = PM_FAST;
#endif /* SUPPORT_PM2_ONLY */
			ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
			if (unlikely(ret)) {
				CFGP2P_ERR(("error (%d)\n", ret));
			} else {
				wl_cfg80211_update_power_mode(ndev);
			}
		}
		else
			CFGP2P_ERR(("ilegal setting\n"));
	}
	else {
		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
		ret = -1;
	}
	return ret;
}
/*
 * Find attribute @element_id inside the P2P IE at @buf.
 * Returns a pointer to the attribute body (just past the 3-byte
 * id/length header), or NULL when @buf is NULL, the attribute is not
 * present, or the IE is malformed/truncated.
 */
u8 *
wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
{
	wifi_p2p_ie_t *ie = NULL;
	u16 len = 0;
	u8 *subel;
	u8 subelt_id;
	u16 subelt_len;

	if (!buf) {
		WL_ERR(("P2P IE not present"));
		return NULL;	/* FIX: was 'return 0' -- use NULL for pointers */
	}
	ie = (wifi_p2p_ie_t*) buf;
	len = ie->len;
	/* Point subel to the P2P IE's subelt field.
	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
	 */
	subel = ie->subelts;
	len -= 4;	/* exclude OUI + OUI_TYPE */
	while (len >= 3) {
		/* attribute id */
		subelt_id = *subel;
		subel += 1;
		len -= 1;
		/* 2-byte little endian */
		subelt_len = *subel++;
		subelt_len |= *subel++ << 8;
		len -= 2;
		/* FIX: reject truncated attributes coming from over-the-air
		 * frames.  Without this check the u16 subtraction below wraps
		 * when subelt_len > len, keeping the loop alive and walking
		 * the pointer out of bounds.
		 */
		if (subelt_len > len)
			return NULL;
		len -= subelt_len; /* for the remaining subelt fields */
		if (subelt_id == element_id) {
			/* This will point to start of subelement attrib after
			 * attribute id & len
			 */
			return subel;
		}
		/* Go to next subelement */
		subel += subelt_len;
	}
	/* Not Found */
	return NULL;
}
#define P2P_GROUP_CAPAB_GO_BIT 0x01

/* Scan every vendor-specific IE in [parse, parse+len) for a P2P IE that
 * carries attribute @attrib; returns a pointer to the attribute body, or
 * NULL if no P2P IE contains it.
 * NOTE(review): wl_cfgp2p_is_p2p_ie() appears to advance parse/len itself
 * for non-P2P IEs (per the in-loop comment); for P2P IEs that lack the
 * attribute this loop skips the TLV manually -- confirm against the
 * helper's definition.
 */
u8*
wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
{
	bcm_tlv_t *ie;
	u8* pAttrib;
	CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
			/* Have the P2p ie. Now check for attribute */
			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
					attrib, parse));
				return pAttrib;
			}
			else {
				/* P2P IE without the attribute: step over the
				 * whole TLV (header + body) and keep looking.
				 */
				parse += (ie->len + TLV_HDR_LEN);
				len -= (ie->len + TLV_HDR_LEN);
				CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
					" to %p len to %d", attrib, parse, len));
			}
		}
		else {
			/* It was not p2p IE. parse will get updated automatically to next TLV */
			CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len));
		}
	}
	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
	return NULL;
}
/*
 * Extract the P2P Device Address from scan result @bi.
 * For a non-GO peer the BSSID is the device address; for a GO the address
 * is looked up in the DEVICE_INFO (probe response) or DEVICE_ID (beacon)
 * attribute.  Returns NULL when the required attributes are absent.
 */
u8 *
wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
{
	u8 *ie_start = ((u8 *) bi) + bi->ie_offset;
	u8 *capability;
	u8 *dev_addr;

	capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(ie_start,
		bi->ie_length, P2P_SEID_P2P_INFO);
	if (capability == NULL) {
		WL_ERR(("P2P Capability attribute not found"));
		return NULL;
	}

	/* Check Group capability for Group Owner bit */
	if (!(capability[1] & P2P_GROUP_CAPAB_GO_BIT))
		return bi->BSSID.octet;

	/* In probe responses, DEVICE INFO attribute will be present */
	dev_addr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(ie_start,
		bi->ie_length, P2P_SEID_DEV_INFO);
	if (!dev_addr) {
		/* If DEVICE_INFO is not found, this might be a beacon frame.
		 * check for DEVICE_ID in the beacon frame.
		 */
		dev_addr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(ie_start,
			bi->ie_length, P2P_SEID_DEV_ID);
	}
	if (!dev_addr)
		WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE "));
	return dev_addr;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/* ethtool 'driver info' callback for the virtual p2p interface: reports
 * a fixed driver name ("p2p") and a dummy version string of "0".
 */
static void
wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	snprintf(info->driver, sizeof(info->driver), "p2p");
	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
}

/* ethtool ops installed on the p2p0 netdev by wl_cfgp2p_register_ndev() */
struct ethtool_ops cfgp2p_ethtool_ops = {
	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if defined(WL_ENABLE_P2P_IF)
/* Create and register the virtual "p2p%d" net_device (no matching
 * firmware interface exists; the netdev only accepts Android private
 * ioctls and drops all data traffic).  On success stores the new
 * net_device/wireless_dev in wl->p2p_net / wl->p2p_wdev.
 * Returns 0 on success or a negative errno.
 */
s32
wl_cfgp2p_register_ndev(struct wl_priv *wl)
{
	int ret = 0;
	struct net_device* net = NULL;
	struct wireless_dev *wdev = NULL;
	/* dummy MAC used for registration; not a real firmware address */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };

	if (wl->p2p_net) {
		CFGP2P_ERR(("p2p_net defined already.\n"));
		return -EINVAL;
	}

	/* Allocate etherdev, including space for private structure */
	if (!(net = alloc_etherdev(sizeof(struct wl_priv *)))) {
		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
		return -ENODEV;
	}

	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
	if (unlikely(!wdev)) {
		WL_ERR(("Could not allocate wireless device\n"));
		free_netdev(net);
		return -ENOMEM;
	}

	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
	net->name[IFNAMSIZ - 1] = '\0';

	/* Copy the reference to wl_priv */
	memcpy((void *)netdev_priv(net), &wl, sizeof(struct wl_priv *));

	/* Hook the p2p0 ops; field names changed in 2.6.31 (netdev_ops) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	ASSERT(!net->open);
	net->do_ioctl = wl_cfgp2p_do_ioctl;
	net->hard_start_xmit = wl_cfgp2p_start_xmit;
	net->open = wl_cfgp2p_if_open;
	net->stop = wl_cfgp2p_if_stop;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &wl_cfgp2p_if_ops;
#endif

	/* Register with a dummy MAC addr */
	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	wdev->wiphy = wl->wdev->wiphy;
	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
	net->ieee80211_ptr = wdev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &cfgp2p_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));

	/* Associate p2p0 network interface with new wdev */
	wdev->netdev = net;
	ret = register_netdev(net);
	if (ret) {
		CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
		free_netdev(net);
		kfree(wdev);
		return -ENODEV;
	}

	/* store p2p net ptr for further reference. Note that iflist won't have this
	 * entry as there corresponding firmware interface is a "Hidden" interface.
	 */
	wl->p2p_wdev = wdev;
	wl->p2p_net = net;

	printk("%s: P2P Interface Registered\n", net->name);
	return ret;
}
s32
wl_cfgp2p_unregister_ndev(struct wl_priv *wl)
{
if (!wl || !wl->p2p_net) {
CFGP2P_ERR(("Invalid Ptr\n"));
return -EINVAL;
}
unregister_netdev(wl->p2p_net);
free_netdev(wl->p2p_net);
return 0;
}
/* p2p0 carries no data traffic: any skb handed to us is logged and
 * dropped; always reports success to the stack.
 */
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (!skb)
		return 0;

	CFGP2P_DBG(("(%s) is not used for data operations.Droping the packet.\n",
		ndev->name));
	dev_kfree_skb_any(skb);
	return 0;
}
/* ioctl handler for p2p0.  Only Android private commands are honored
 * (redirected to the primary interface); everything else is rejected.
 */
static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
{
	struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
	struct net_device *ndev = wl_to_prmry_ndev(wl);

	/* There is no ifidx corresponding to p2p0 in our firmware. So we should
	 * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
	 * For Android PRIV CMD handling map it to primary I/F
	 */
	if (cmd != SIOCDEVPRIVATE+1) {
		CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
			__FUNCTION__, cmd));
		return -1;
	}

	return wl_android_priv_cmd(ndev, ifr, cmd);
}
/* ndo_open for the virtual p2p0 interface: optionally advertises the P2P
 * client/GO interface modes on the wiphy and triggers driver/firmware
 * initialization for this netdev.
 */
static int wl_cfgp2p_if_open(struct net_device *net)
{
	extern struct wl_priv *wlcfg_drv_priv;
	struct wireless_dev *wdev = net->ieee80211_ptr;
	struct wl_priv *wl = NULL;
	wl = wlcfg_drv_priv;
	if (!wdev || !wl || !wl->p2p)
		return -EINVAL;
	WL_TRACE(("Enter\n"));
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
	/* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now,
	 * do it here. This will make sure that in concurrent mode, supplicant
	 * is not dependent on a particular order of interface initialization.
	 * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
	 * -iwlan0.
	 */
	wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
		| BIT(NL80211_IFTYPE_P2P_GO));
#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
	wl_cfg80211_do_driver_init(net);
	return 0;
}
/* ndo_stop for p2p0: completes (as aborted) any scan owned by this
 * netdev, clears the SCANNING driver status, and withdraws the P2P
 * client/GO interface modes from the wiphy where applicable.
 */
static int wl_cfgp2p_if_stop(struct net_device *net)
{
	extern struct wl_priv *wlcfg_drv_priv;
	struct wl_priv *wl = NULL;
	unsigned long flags;
	struct wireless_dev *wdev = net->ieee80211_ptr;
	int clear_flag = 0;
	if (!wdev)
		return -EINVAL;
	WL_TRACE(("Enter\n"));
	wl = wlcfg_drv_priv;
	if (!wl)
		return -EINVAL;
	/* Abort an in-flight scan under cfgdrv_lock; the status flag is
	 * cleared only after the lock is dropped (presumably to keep the
	 * critical section minimal -- TODO confirm).
	 */
	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
	if (wl->scan_request && wl->scan_request->dev == net) {
		cfg80211_scan_done(wl->scan_request, true);
		wl->scan_request = NULL;
		clear_flag = 1;
	}
	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
	if (clear_flag)
		wl_clr_drv_status(wl, SCANNING, net);
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
	wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
		& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
		BIT(NL80211_IFTYPE_P2P_GO)));
#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
	return 0;
}
/* True when @if_ops is the p2p0 netdev ops table owned by this file. */
bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
{
	return if_ops == &wl_cfgp2p_if_ops;
}
#endif /* WL_ENABLE_P2P_IF */
#if defined(WL_CFG80211_P2P_DEV_IF)
/*
 * Allocate and initialize the cfg80211 P2P-discovery wireless_dev,
 * deriving the P2P device/interface addresses from the primary MAC.
 * Returns the new wdev (also stored in wl->p2p_wdev) or an ERR_PTR().
 */
struct wireless_dev *
wl_cfgp2p_add_p2p_disc_if(void)
{
	extern struct wl_priv *wlcfg_drv_priv;
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wireless_dev *wdev = NULL;
	struct ether_addr primary_mac;

	if (!wl)
		return ERR_PTR(-EINVAL);

	WL_TRACE(("Enter\n"));

	if (wl->p2p_wdev) {
		CFGP2P_ERR(("p2p_wdev defined already.\n"));
		return ERR_PTR(-EEXIST);
	}

	/* FIX: validate driver state before allocating -- the original
	 * checked wl->p2p after kzalloc() and leaked wdev on that path.
	 */
	if (!wl->p2p) {
		CFGP2P_ERR(("wl->p2p is null\n"));
		return ERR_PTR(-ENODEV);
	}

	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
	if (unlikely(!wdev)) {
		WL_ERR(("Could not allocate wireless device\n"));
		return ERR_PTR(-ENOMEM);
	}

	memset(&primary_mac, 0, sizeof(primary_mac));
	get_primary_mac(wl, &primary_mac);
	/* NOTE: the original tested '(&primary_mac)->octet' for NULL; that is
	 * the address of an embedded array and can never be NULL, so the
	 * check was dead code and has been dropped.
	 */
	wl_cfgp2p_generate_bss_mac(&primary_mac,
		&wl->p2p->dev_addr, &wl->p2p->int_addr);

	wdev->wiphy = wl->wdev->wiphy;
	wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
	memcpy(wdev->address, &wl->p2p->dev_addr, ETHER_ADDR_LEN);

	/* store p2p wdev ptr for further reference. */
	wl->p2p_wdev = wdev;

	CFGP2P_ERR(("P2P interface registered\n"));
	return wdev;
}
/* cfg80211 start_p2p_device callback: enables P2P in the firmware, turns
 * on discovery and records the p2p-on state.  Returns 0 or a negative
 * firmware error.
 */
int
wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	extern struct wl_priv *wlcfg_drv_priv;
	struct wl_priv *wl = wlcfg_drv_priv;
	int err;

	if (!wl)
		return -EINVAL;

	WL_TRACE(("Enter\n"));

	err = wl_cfgp2p_set_firm_p2p(wl);
	if (unlikely(err < 0)) {
		CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", err));
		return err;
	}

	err = wl_cfgp2p_enable_discovery(wl, wl_to_prmry_ndev(wl), NULL, 0);
	if (unlikely(err < 0)) {
		CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", err));
		return err;
	}

	p2p_on(wl) = true;
	CFGP2P_DBG(("P2P interface started\n"));
	return err;
}
/* cfg80211 stop_p2p_device callback: completes (as aborted) any scan
 * owned by this wdev, clears SCANNING, disables firmware discovery and
 * records the p2p-off state.
 */
void
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	int ret = 0;
	int clear_flag = 0;
	unsigned long flags = 0;
	struct net_device *ndev = NULL;
	extern struct wl_priv *wlcfg_drv_priv;
	struct wl_priv *wl = wlcfg_drv_priv;
	if (!wl || !wdev)
		return;
	WL_TRACE(("Enter\n"));
	ndev = wdev_to_wlc_ndev(wdev, wl);
	/* Abort an in-flight scan under cfgdrv_lock; SCANNING is cleared
	 * after the lock is dropped.
	 */
	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
	if (wl->scan_request && wl->scan_request->wdev == wdev) {
		cfg80211_scan_done(wl->scan_request, true);
		wl->scan_request = NULL;
		clear_flag = 1;
	}
	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
	if (clear_flag)
		wl_clr_drv_status(wl, SCANNING, ndev);
	ret = wl_cfgp2p_disable_discovery(wl);
	if (unlikely(ret < 0)) {
		CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
		goto exit;
	}
	p2p_on(wl) = false;
	CFGP2P_DBG(("P2P interface stopped\n"));
exit:
	return;
}
/*
 * Unregister and free the P2P discovery wireless_dev and clear the
 * driver's reference.  Returns 0, or -EINVAL on a bad pointer.
 */
int
wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev)
{
	extern struct wl_priv *wlcfg_drv_priv;
	struct wl_priv *wl = wlcfg_drv_priv;

	/* FIX: also guard 'wl' -- the original only checked 'wdev' and
	 * dereferenced a potentially-NULL wl below.
	 */
	if (!wdev || !wl)
		return -EINVAL;

	WL_TRACE(("Enter\n"));

	cfg80211_unregister_wdev(wdev);
	kfree(wdev);
	wl->p2p_wdev = NULL;

	CFGP2P_ERR(("P2P interface unregistered\n"));
	return 0;
}
#endif /* WL_CFG80211_P2P_DEV_IF */
| gpl-2.0 |
samnazarko/vero2-uboot | board/amlogic/g9tv_n300_v1/firmware/storage.c | 9 | 1545 | /***********************************************
*****Storage config of board, for ACS use.*****
Header file: arch/arm/include/asm/arch-xx/storage.h
***********************************************/
#include <asm/arch/storage.h>
#ifdef CONFIG_ACS
//partition tables
/* Flash partition layout consumed by ACS.  Every partition has a fixed
 * size except the final "data" partition, which takes the remaining
 * space (NAND_PART_SIZE_FULL).  mask_flags selects the storage class
 * (STORE_CODE / STORE_CACHE / STORE_DATA) for each partition.
 */
struct partitions partition_table[MAX_PART_NUM]={
	{
		.name = "logo",
		.size = 32*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "recovery",
		.size = 32*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "misc",
		.size = 32*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "boot",
		.size = 32*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "system",
		.size = 1024*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "cache",
		.size = 256*SZ_1M,
		.mask_flags = STORE_CACHE,
	},
	{
		.name = "param",
		.size = 128*SZ_1M,
		.mask_flags = STORE_CODE,
	},
	{
		.name = "cri_data",
		.size = 16*SZ_1M,
		.mask_flags = STORE_DATA,
	},
	{
		.name = "data",
		.size = NAND_PART_SIZE_FULL,
		.mask_flags = STORE_DATA,
	},
	/*{
		.name = "media",
		.size = NAND_PART_SIZE_FULL,
	},*/
};
/* Board storage configuration: boots from NAND; the mmc 'type' field
 * packs the three card-port types into one word (4 bits per port).
 */
struct store_config store_configs ={
	.store_device_flag = NAND_BOOT_FLAG,
	.nand_configs = {
		.enable_slc = 0,
		.order_ce = 0,
		.reserved[0] = 0,
		.reserved[1] = 0,
	},
	.mmc_configs = {
		.type = (PORT_A_CARD_TYPE | (PORT_B_CARD_TYPE << 4) | (PORT_C_CARD_TYPE << 8)),
		.port = 0,
		.reserved[0] = 0,
		.reserved[1] = 0,
	},
};
#endif
| gpl-2.0 |
mrimp/N910TUVU1ANIH_kernel | drivers/video/msm/mdss/mdss_samsung_dsi_panel_common.c | 9 | 153998 | /* Copyright (c) 2012, Samsung Electronics. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/qpnp/pin.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/pwm.h>
#include <linux/err.h>
#include <linux/lcd.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include "mdss_dsi.h"
#include "mdss_samsung_dsi_panel_common.h"
#include "mdss_fb.h"
#if defined(CONFIG_MDNIE_LITE_TUNING)
#include "mdnie_lite_tuning.h"
#endif
#define DDI_VIDEO_ENHANCE_TUNING
#if defined(DDI_VIDEO_ENHANCE_TUNING)
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#endif
#include <asm/system_info.h>
#define SMART_ACL
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQHD_PT_PANEL)
#define SMART_VINT
#endif
#define HBM_RE
#define TEMPERATURE_ELVSS
#define PARTIAL_UPDATE
#define TEST_RESOLUTION //for sysfs of panel resolution
static struct dsi_buf dsi_panel_tx_buf;
static struct dsi_buf dsi_panel_rx_buf;
static struct dsi_cmd hsync_on_seq;
static struct dsi_cmd display_on_cmd;
static struct dsi_cmd display_off_cmd;
static struct dsi_cmd test_key_enable_cmds;
static struct dsi_cmd test_key_disable_cmds;
static struct dsi_cmd nv_mtp_read_cmds;
static struct dsi_cmd nv_enable_cmds;
static struct dsi_cmd nv_disable_cmds;
static struct dsi_cmd manufacture_id_cmds;
static struct dsi_cmd manufacture_date_cmds;
static struct dsi_cmd ddi_id_cmds;
static struct dsi_cmd rddpm_cmds;
static struct dsi_cmd rddsm_cmds;
static struct dsi_cmd mtp_read_sysfs_cmds;
static struct dsi_cmd acl_off_cmd;
static struct cmd_map acl_map_table;
static struct candella_lux_map candela_map_table;
static struct dsi_cmd acl_cmds_list;
static struct dsi_cmd opr_avg_cal_cmd;
static struct dsi_cmd aclcont_cmds_list;
static struct dsi_cmd gamma_cmds_list;
static struct dsi_cmd elvss_cmds_list;
static struct dsi_cmd elvss_cmds_revI_list;
static struct cmd_map aid_map_table;
static struct dsi_cmd aid_cmds_list;
#if defined(HBM_RE)
static struct dsi_cmd nv_mtp_hbm_read_cmds;
static struct dsi_cmd nv_mtp_hbm2_read_cmds;
static struct dsi_cmd hbm_gamma_cmds_list;
static struct dsi_cmd hbm_off_cmd;
static struct dsi_cmd hbm_etc_cmds_list;
#endif
static struct dsi_cmd nv_mtp_elvss_read_cmds;
#if defined(CONFIG_MDNIE_LITE_TUNING)
static struct dsi_cmd nv_mdnie_read_cmds;
#endif
#ifdef DEBUG_LDI_STATUS
static struct dsi_cmd ldi_debug_cmds;
#endif
#if defined(TEMPERATURE_ELVSS)
static struct dsi_cmd elvss_lowtemp_cmds_list;
static struct dsi_cmd elvss_lowtemp2_cmds_list;
#endif
#if defined(SMART_ACL)
static struct dsi_cmd smart_acl_elvss_cmds_list;
static struct cmd_map smart_acl_elvss_map_table;
#endif
#if defined(SMART_VINT)
static struct dsi_cmd smart_vint_cmds_list;
static struct cmd_map smart_vint_map_table;
#endif
#if defined(PARTIAL_UPDATE)
static struct dsi_cmd partialdisp_on_cmd;
static struct dsi_cmd partialdisp_off_cmd;
static int partial_disp_range[2];
#endif
#ifdef CONFIG_FB_MSM_SAMSUNG_AMOLED_LOW_POWER_MODE
static struct dsi_cmd alpm_on_seq;
static struct dsi_cmd alpm_off_seq;
#endif
#if defined(DYNAMIC_FPS_USE_TE_CTRL)
int dynamic_fps_use_te_ctrl;
#endif
#if defined(CONFIG_LCD_HMT)
static struct dsi_cmd hmt_bright_cmds_list;
static struct dsi_cmd hmt_single_scan_enable;
static struct dsi_cmd hmt_disable;
static struct dsi_cmd hmt_reverse_enable;
static struct dsi_cmd hmt_reverse_disable;
static struct dsi_cmd hmt_aid_cmds_list;
static struct cmd_map aid_map_table_reverse_hmt;
static struct candella_lux_map candela_map_table_reverse_hmt;
static int is_first = 1;
static struct dsi_cmd hmt_150cd_read_cmds;
#endif
#if defined(CONFIG_WACOM_LCD_FREQ_COMPENSATE)
static struct dsi_cmd write_ldi_fps_cmds;
#endif
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
#define LDI_ADJ_VDDM_OFFSET
#endif
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQHD_PT_PANEL) || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQXGA_PT_PANEL)
#define CONFIG_ESD_FG_RECOVERY
#endif
#ifdef LDI_ADJ_VDDM_OFFSET
static struct dsi_cmd read_vdd_ref_cmds;
static struct dsi_cmd write_vdd_offset_cmds;
static struct dsi_cmd read_vddm_ref_cmds;
static struct dsi_cmd write_vddm_offset_cmds;
#endif
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
static struct dsi_cmd panel_set_te_osc_b;
static struct dsi_cmd panel_set_te_restore;
static struct dsi_cmd panel_set_te;
static struct dsi_cmd panel_set_te_1;
static struct dsi_cmd panel_set_te_2;
static struct dsi_cmd panel_osc_type_read_cmds;
extern int te_set_done;
#endif
static struct mipi_samsung_driver_data msd;
/* List of supported panels, matched by the panel label string declared
 * in the panel dtsi file (one entry per panel variant):
 *   { "label string given in panel dtsi file", PANEL_* identifier }
 * The array is NULL-terminated for iteration.
 */
static struct panel_hrev panel_supp_cdp[]= {
	{"samsung amoled 1080p video mode dsi S6E8FA0 panel", PANEL_FHD_OCTA_S6E8FA0},
	{"samsung amoled 1080p video mode dsi S6E3FA0 panel", PANEL_FHD_OCTA_S6E3FA0},
	{"samsung amoled 1080p command mode dsi S6E3FA0 panel", PANEL_FHD_OCTA_S6E3FA0_CMD},
	{"samsung amoled 1080p command mode dsi S6E3FA2 panel",PANEL_FHD_OCTA_S6E3FA2_CMD },
	{"samsung amoled wqhd command mode dsi1 S6E3HA0 panel", PANEL_WQHD_OCTA_S6E3HA0_CMD },
	{"samsung amoled wqhd command mode dsi0 S6E3HA0 panel", PANEL_WQHD_OCTA_S6E3HA0_CMD },
	{"samsung amoled wqhd command mode dsi0 S6E3HA2X01 panel", PANEL_WQHD_OCTA_S6E3HA2X01_CMD },
	{"samsung amoled wqhd command mode dsi1 S6E3HA2X01 panel", PANEL_WQHD_OCTA_S6E3HA2X01_CMD },
	{"samsung amoled wqxga command mode dsi0 S6E3HA2X01 panel", PANEL_WQXGA_OCTA_S6E3HA2X01_CMD },
	{"samsung amoled wqxga command mode dsi1 S6E3HA2X01 panel", PANEL_WQXGA_OCTA_S6E3HA2X01_CMD },
	{NULL}
};
static struct dsi_cmd_desc brightness_packet[] = {
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0, 0}, NULL},
};
#ifdef LDI_ADJ_VDDM_OFFSET
unsigned int ldi_vddm_lut[128][2] = {
{0, 12}, {1, 12}, {2, 13}, {3, 14}, {4, 15}, {5, 16}, {6, 17}, {7, 18}, {8, 19}, {9, 20},
{10, 22}, {11, 23}, {12, 24}, {13, 25}, {14, 26}, {15, 27}, {16, 28}, {17, 29}, {18, 30}, {19, 31},
{20, 32}, {21, 33}, {22, 34}, {23, 35}, {24, 37}, {25, 38}, {26, 39}, {27, 40}, {28, 41}, {29, 42},
{30, 43}, {31, 44}, {32, 45}, {33, 46}, {34, 47}, {35, 48}, {36, 49}, {37, 50}, {38, 51}, {39, 53},
{40, 54}, {41, 55}, {42, 56}, {43, 57}, {44, 58}, {45, 59}, {46, 60}, {47, 61}, {48, 62}, {49, 63},
{50, 63}, {51, 63}, {52, 63}, {53, 63}, {54, 63}, {55, 63}, {56, 63}, {57, 63}, {58, 63}, {59, 63},
{60, 63}, {61, 63}, {62, 63}, {63, 63}, {64, 11}, {65, 10}, {66, 9}, {67, 8}, {68, 7}, {69, 6},
{70, 4}, {71, 3}, {72, 2}, {73, 1}, {74, 64}, {75, 65}, {76, 66}, {77, 67}, {78, 68}, {79, 69},
{80, 70}, {81, 71}, {82, 72}, {83, 73}, {84, 74}, {85, 76}, {86, 77}, {87, 78}, {88, 79}, {89, 80},
{90, 81}, {91, 82}, {92, 83}, {93, 84}, {94, 85}, {95, 86}, {96, 87}, {97, 88}, {98, 89}, {99, 91},
{100, 92}, {101, 93}, {102, 94}, {103, 95}, {104, 96}, {105, 97}, {106, 98}, {107, 99}, {108, 100}, {109, 101},
{110, 102}, {111, 103}, {112, 104}, {113, 105}, {114, 107}, {115, 108}, {116, 109}, {117, 110}, {118, 111}, {119, 112},
{120, 113}, {121, 114}, {122, 115}, {123, 116}, {124, 117}, {125, 118}, {126, 119}, {127, 120},
};
#endif
#define MAX_BR_PACKET_SIZE sizeof(brightness_packet)/sizeof(struct dsi_cmd_desc)
DEFINE_LED_TRIGGER(bl_led_trigger);
static struct mdss_dsi_phy_ctrl phy_params;
char board_rev;
static int lcd_attached = 1;
static int lcd_id = 0;
#if defined(CONFIG_ESD_FG_RECOVERY)
#define ESD_DEBUG 1
struct work_struct err_fg_work;
static int err_fg_gpio = 0; /* PM_GPIO5 */
static int esd_count = 0;
int err_fg_working = 0;
static int esd_enable = 0;
#endif
#if defined(CONFIG_LCD_CLASS_DEVICE) && defined(DDI_VIDEO_ENHANCE_TUNING)
#define MAX_FILE_NAME 128
#define TUNING_FILE_PATH "/sdcard/"
static char tuning_file[MAX_FILE_NAME];
#if !defined (CONFIG_MDNIE_LITE_TUNING)
#define MDNIE_TUNE_HEAD_SIZE 22
#define MDNIE_TUNE_BODY_SIZE 128
#endif
static char mdnie_head[MDNIE_TUNE_HEAD_SIZE];
static char mdnie_body[MDNIE_TUNE_BODY_SIZE];
static struct dsi_cmd_desc mdni_tune_cmd[] = {
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0,
sizeof(mdnie_body)}, mdnie_body},
{{DTYPE_DCS_LWRITE, 1, 0, 0, 0,
sizeof(mdnie_head)}, mdnie_head},
};
#endif
static int mipi_samsung_disp_send_cmd(
enum mipi_samsung_cmd_list cmd,
unsigned char lock);
extern void mdss_dsi_panel_touchsensing(int enable);
int get_lcd_attached(void);
int get_lcd_id(void);
/*
 * Decode the panel ID3 byte (low byte of @id) into the driver's internal
 * hardware revision (msd.id3; msd.id2 additionally for the YM4 wqxga
 * variant).  The accepted IDs depend on which panel the kernel was built
 * for.  Returns 1 on success, -EINVAL for an unknown ID.
 *
 * FIX: several log messages printed the wrong ID byte (0x09 logged as
 * "0x08", 0x19 as "0x18", wqhd 0x00 as "0x01 ... REVC"); corrected to
 * match the case labels and assigned revisions.
 */
int set_panel_rev(unsigned int id)
{
	switch (id & 0xFF) {
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQHD_PT_PANEL)
	case 0x00:
	case 0x01:
		pr_info("%s : 0x01 EVT0_T_wqhd_REVA\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVA;
		break;
	case 0x02:
		pr_info("%s : 0x02 EVT0_T_wqhd_REVB\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVB;
		break;
	case 0x03:
		pr_info("%s : 0x03 EVT0_T_wqhd_REVC\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVC;
		break;
	case 0x04:
		pr_info("%s : 0x04 EVT0_T_wqhd_REVD\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVD;
		break;
	case 0x05:
		pr_info("%s : 0x05 EVT0_T_wqhd_REVF\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVF;
		break;
	case 0x06:
		pr_info("%s : 0x06 EVT0_T_wqhd_REVG\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVG;
		break;
	case 0x07:
		pr_info("%s : 0x07 EVT0_T_wqhd_REVH\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVH;
		break;
	case 0x08:
		pr_info("%s : 0x08 EVT0_T_wqhd_REVI\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVI;
		break;
	case 0x09:
		/* was logged as "0x08" */
		pr_info("%s : 0x09 EVT0_T_wqhd_REVJ\n",__func__);
		msd.id3 = EVT0_T_wqhd_REVJ;
		break;
#elif defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQXGA_PT_PANEL)
	case 0x10:
	case 0x11:
		/* ID2 distinguishes the YM4 panel variant */
		if (((id & 0xFF00) >> 8) == YM4_PANEL) {
			pr_info("%s : 0x10/0x11 EVT0_T_wqxga YM4\n",__func__);
			msd.id2 = YM4_PANEL;
			msd.id3 = EVT0_T_wqxga_REVA;
			break;
		} else {
			pr_info("%s : 0x10/0x11 EVT0_T_wqxga_REVA\n",__func__);
			msd.id3 = EVT0_T_wqxga_REVA;
			break;
		}
	case 0x12:
	case 0x13:
		/* was logged as "0x13" only */
		pr_info("%s : 0x12/0x13 EVT0_T_wqxga_REVD\n",__func__);
		msd.id3 = EVT0_T_wqxga_REVD;
		break;
	case 0x14:
		pr_info("%s : 0x14 EVT0_T_wqxga_REVE\n",__func__);
		msd.id3 = EVT0_T_wqxga_REVE;
		break;
#elif defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	case 0x00:
		pr_info("%s : 0x00 EVT0_K_fhd_REVB \n",__func__);
		msd.id3 = EVT0_K_fhd_REVB;
		break;
	case 0x01:
		pr_info("%s : 0x01 EVT0_K_fhd_REVF \n",__func__);
		msd.id3 = EVT0_K_fhd_REVF;
		break;
	case 0x02:
		pr_info("%s : 0x02 EVT0_K_fhd_REVG \n",__func__);
		msd.id3 = EVT0_K_fhd_REVG;
		break;
	case 0x12:
		pr_info("%s : 0x12 EVT1_K_fhd_REVH \n",__func__);
		msd.id3 = EVT1_K_fhd_REVH;
		break;
	case 0x13:
		pr_info("%s : 0x13 EVT1_K_fhd_REVI \n",__func__);
		msd.id3 = EVT1_K_fhd_REVI;
		break;
#else
	case 0x00:
		/* was logged as "0x01 ... REVC" while setting REVB */
		pr_info("%s : 0x00 EVT0_L_wqhd_REVB \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVB;
		break;
	case 0x01:
		pr_info("%s : 0x01 EVT0_L_wqhd_REVC \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVC;
		break;
	case 0x02:
		pr_info("%s : 0x02 EVT0_L_wqhd_REVD \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVD;
		break;
	case 0x03:
		pr_info("%s : 0x03 EVT0_L_wqhd_REVE \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVE;
		break;
	case 0x04:
		pr_info("%s : 0x04 EVT0_L_wqhd_REVF \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVF;
		break;
	case 0x05:
		pr_info("%s : 0x05 EVT0_L_wqhd_REVG \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVG;
		break;
	case 0x17:
		pr_info("%s : 0x17 EVT0_L_wqhd_REVI \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVI;
		break;
	case 0x18:
		pr_info("%s : 0x18 EVT0_L_wqhd_REVJ \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVJ;
		break;
	case 0x19:
		/* was logged as "0x18" */
		pr_info("%s : 0x19 EVT0_L_wqhd_REVK \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVK;
		break;
	case 0x1A:
		pr_info("%s : 0x1A EVT0_L_wqhd_REVL \n",__func__);
		msd.id3 = EVT0_K_wqhd_REVL;
		break;
#endif
	default:
		pr_err("%s : can't find panel id.. \n", __func__);
		return -EINVAL;	/* unreachable 'break' after return removed */
	}
	return 1;
}
/* Acquire the PMIC GPIO and the LPG PWM channel used for backlight
 * control.  On PWM allocation failure the GPIO is released and
 * pwm_pmic_gpio is invalidated so later calls won't reuse it.
 */
void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl)
{
	int rc;

	if (!gpio_is_valid(ctrl->pwm_pmic_gpio)) {
		pr_err("%s: pwm_pmic_gpio=%d Invalid\n", __func__,
			ctrl->pwm_pmic_gpio);
		return;
	}

	rc = gpio_request(ctrl->pwm_pmic_gpio, "disp_pwm");
	if (rc) {
		pr_err("%s: pwm_pmic_gpio=%d request failed\n", __func__,
			ctrl->pwm_pmic_gpio);
		return;
	}

	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
	if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) {
		pr_err("%s: lpg_chan=%d pwm request failed", __func__,
			ctrl->pwm_lpg_chan);
		gpio_free(ctrl->pwm_pmic_gpio);
		ctrl->pwm_pmic_gpio = -1;
	}
}
/*
 * mdss_dsi_panel_bklt_pwm() - set backlight via PWM duty cycle.
 * @level: requested level, scaled against ctrl->bklt_max.
 *
 * Bug fix: guard against bklt_max == 0, which would cause a divide
 * by zero when computing the duty cycle.
 */
static void mdss_dsi_panel_bklt_pwm(struct mdss_dsi_ctrl_pdata *ctrl, int level)
{
	int ret;
	u32 duty;

	if (ctrl->pwm_bl == NULL) {
		pr_err("%s: no PWM\n", __func__);
		return;
	}

	if (ctrl->bklt_max == 0) {
		pr_err("%s: bklt_max is zero\n", __func__);
		return;
	}

	/* duty = level / bklt_max of the PWM period */
	duty = level * ctrl->pwm_period;
	duty /= ctrl->bklt_max;

	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_pmic_gpio=%d pwm_lpg_chan=%d\n",
			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
			ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan);
	pr_debug("%s: ndx=%d level=%d duty=%d\n", __func__,
			ctrl->ndx, level, duty);

	ret = pwm_config(ctrl->pwm_bl, duty, ctrl->pwm_period);
	if (ret) {
		pr_err("%s: pwm_config() failed err=%d.\n", __func__, ret);
		return;
	}

	ret = pwm_enable(ctrl->pwm_bl);
	if (ret)
		pr_err("%s: pwm_enable() failed err=%d\n", __func__, ret);
}
/* Scratch DCS read command: byte 0 = register address, byte 1 = parameter.
 * Both bytes are rewritten in place by mdss_dsi_dcs_read() before use. */
static char dcs_cmd[2] = {0x54, 0x00}; /* DTYPE_DCS_READ */
static struct dsi_cmd_desc dcs_read_cmd = {
	{DTYPE_DCS_READ, 1, 0, 1, 5, sizeof(dcs_cmd)},
	dcs_cmd
};
/* Completion callback for mdss_dsi_dcs_read(); only logs the value read. */
static void dcs_read_cb(int data)
{
	pr_info("%s: bklt_ctrl=%x\n", __func__, data);
}
/*
 * mdss_dsi_dcs_read() - issue a one-byte DCS read of register @cmd0
 * with parameter @cmd1, reporting the result through dcs_read_cb().
 *
 * Always returns 0; the value itself is only logged by the callback.
 */
u32 mdss_dsi_dcs_read(struct mdss_dsi_ctrl_pdata *ctrl,
			char cmd0, char cmd1)
{
	struct dcs_cmd_req cmdreq;

	/* Patch the shared scratch command with the requested register. */
	dcs_cmd[0] = cmd0;
	dcs_cmd[1] = cmd1;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
	cmdreq.rlen = 1;
	cmdreq.cmds_cnt = 1;
	cmdreq.cmds = &dcs_read_cmd;
	cmdreq.cb = dcs_read_cb; /* call back */

	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
	/*
	 * blocked here, untill call back called
	 */
	return 0;
}
/*
 * mdss_dsi_samsung_panel_reset() - toggle the panel reset GPIO.
 * @enable: 1 = perform the high/low/high reset pulse sequence,
 *          0 = hold reset low.
 *
 * When ALPM (always-on low power mode) is active the hardware reset is
 * skipped so the panel keeps its state across blank/unblank.
 *
 * Bug fix: the previous code cached &pdata->alpm_data in a function-local
 * static pointer on first call; if this function is ever invoked with a
 * different panel_data instance the cached pointer would be stale. The
 * pointer is now derived from @pdata on every call.
 */
void mdss_dsi_samsung_panel_reset(struct mdss_panel_data *pdata, int enable)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_panel_alpm_data *alpm_data;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	alpm_data = &pdata->alpm_data;
	if (alpm_data->alpm_status) {
		pr_info("[ALPM_DEBUG] %s: Panel is not reset, enable : %d\n",
				__func__, enable);
		if (enable && alpm_data->alpm_status(CHECK_PREVIOUS_STATUS))
			return;
		else if (!enable && alpm_data->alpm_status(CHECK_CURRENT_STATUS)) {
			alpm_data->alpm_status(STORE_CURRENT_STATUS);
			return;
		}
	} else
		pr_info("[ALPM_DEBUG] %s: Panel reset, enable : %d",
				__func__, enable);

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pr_info("%s: enable(%d) ndx(%d)\n",
			__func__,enable, ctrl_pdata->ndx );

	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
		pr_err("%s:%d, reset line not configured\n",
				__func__, __LINE__);
		return;
	}

	if (enable) {
		/* high -> low -> high pulse; timings per panel spec */
		gpio_set_value((ctrl_pdata->rst_gpio), 1);
		usleep_range(5000, 5000);
		pr_info("ctrl_pdata->rst_gpio = %d\n", gpio_get_value(ctrl_pdata->rst_gpio));
		wmb();
		gpio_set_value((ctrl_pdata->rst_gpio), 0);
		usleep_range(5000, 5000);
		pr_info("ctrl_pdata->rst_gpio = %d\n", gpio_get_value(ctrl_pdata->rst_gpio));
		wmb();
		gpio_set_value((ctrl_pdata->rst_gpio), 1);
		usleep_range(11000, 11000);
		pr_info("ctrl_pdata->rst_gpio = %d\n", gpio_get_value(ctrl_pdata->rst_gpio));
		wmb();
	} else {
		gpio_set_value((ctrl_pdata->rst_gpio), 0);
	}
}
/**
* mdss_dsi_roi_merge() - merge two roi into single roi
*
* Function used by partial update with only one dsi intf take 2A/2B
* (column/page) dcs commands.
*/
static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
			struct mdss_rect *roi)
{
	struct mdss_panel_info *left_pinfo;
	struct mdss_rect *left_roi;
	struct mdss_rect *right_roi;
	struct mdss_dsi_ctrl_pdata *peer = NULL;

	/* Resolve which ctrl owns the left half and which the right. */
	if (ctrl->ndx == DSI_CTRL_LEFT) {
		peer = mdss_dsi_get_ctrl_by_index(DSI_CTRL_RIGHT);
		if (!peer)
			return 0;
		left_pinfo = &(ctrl->panel_data.panel_info);
		left_roi = &(ctrl->panel_data.panel_info.roi);
		right_roi = &(peer->panel_data.panel_info.roi);
	} else {
		peer = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
		if (!peer)
			return 0;
		left_pinfo = &(peer->panel_data.panel_info);
		left_roi = &(peer->panel_data.panel_info.roi);
		right_roi = &(ctrl->panel_data.panel_info.roi);
	}

	if (left_roi->w == 0 && left_roi->h == 0) {
		/* Right half only: shift x by the full left-panel width. */
		*roi = *right_roi;
		roi->x += left_pinfo->xres;
		return 0;
	}

	/* Left only, or left + right merged into one span. */
	*roi = *left_roi;
	roi->w += right_roi->w;
	return 1;
}
/* 2Ah (column address) and 2Bh (page address) DCS payloads; bytes 1..4
 * are patched with the ROI start/end coordinates before each send. */
static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00}; /* DTYPE_DCS_LWRITE */
static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00}; /* DTYPE_DCS_LWRITE */
/* pack into one frame before sent */
static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
	{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset}, /* packed */
	{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
};
/*
 * mdss_dsi_set_col_page_addr() - send 2A/2B (column/page address) DCS
 * commands for partial update, but only when the ROI actually changed.
 *
 * Fixes: NULL-check the results of mdss_dsi_get_ctrl_by_index() before
 * dereferencing (previously a missing ctrl would oops), and correct the
 * "sned-by-left" log typo.
 */
static int mdss_dsi_set_col_page_addr(struct mdss_panel_data *pdata)
{
	struct mdss_panel_info *pinfo;
	struct mdss_rect roi;
	struct mdss_rect *p_roi;
	struct mdss_rect *c_roi;
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	struct mdss_dsi_ctrl_pdata *other = NULL;
	struct dcs_cmd_req cmdreq;
	int left_or_both = 0;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	pinfo = &pdata->panel_info;
	p_roi = &pinfo->roi;

	/*
	 * to avoid keep sending same col_page info to panel,
	 * if roi_merge enabled, the roi of left ctrl is used
	 * to compare against new merged roi and saved new
	 * merged roi to it after comparing.
	 * if roi_merge disabled, then the calling ctrl's roi
	 * and pinfo's roi are used to compare.
	 */
	if (pinfo->partial_update_roi_merge) {
		left_or_both = mdss_dsi_roi_merge(ctrl, &roi);
		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
		if (!other)	/* left ctrl must exist for merge mode */
			return -EINVAL;
		c_roi = &other->roi;
	} else {
		c_roi = &ctrl->roi;
		roi = *p_roi;
	}

	/* roi had changed, do col_page update */
	if (!mdss_rect_cmp(c_roi, &roi)) {
		pr_debug("%s: ndx=%d x=%d y=%d w=%d h=%d\n",
				__func__, ctrl->ndx, p_roi->x,
				p_roi->y, p_roi->w, p_roi->h);

		*c_roi = roi; /* keep to ctrl */
		if (c_roi->w == 0 || c_roi->h == 0) {
			/* no new frame update */
			pr_err("%s: ctrl=%d, no partial roi set\n",
					__func__, ctrl->ndx);
			if (!mdss_dsi_broadcast_mode_enabled())
				return 0;
		}

		pr_debug("%s: ndx=%d x=%d y=%d w=%d h=%d\n",
				__func__, ctrl->ndx, roi.x,
				roi.y, roi.w, roi.h);

		if (pinfo->partial_update_dcs_cmd_by_left) {
			if (left_or_both && ctrl->ndx == DSI_CTRL_RIGHT) {
				/* 2A/2B sent by left already */
				pr_err("%s: ctrl=%d, sent-by-left\n",
						__func__, ctrl->ndx);
				return 0;
			}
		}

		/* 2Ah: column start/end, big-endian in payload bytes 1..4 */
		caset[1] = (((roi.x) & 0xFF00) >> 8);
		caset[2] = (((roi.x) & 0xFF));
		caset[3] = (((roi.x - 1 + roi.w) & 0xFF00) >> 8);
		caset[4] = (((roi.x - 1 + roi.w) & 0xFF));
		pr_debug("%s:{0x%0x,0x%0x,0x%0x,0x%0x,0x%0x}\n",__func__,
				caset[0],caset[1],caset[2],caset[3],caset[4]);
		set_col_page_addr_cmd[0].payload = caset;

		/* 2Bh: page (row) start/end */
		paset[1] = (((roi.y) & 0xFF00) >> 8);
		paset[2] = (((roi.y) & 0xFF));
		paset[3] = (((roi.y - 1 + roi.h) & 0xFF00) >> 8);
		paset[4] = (((roi.y - 1 + roi.h) & 0xFF));
		pr_debug("%s:{0x%0x,0x%0x,0x%0x,0x%0x,0x%0x}\n",__func__,
				paset[0],paset[1],paset[2],paset[3],paset[4]);
		set_col_page_addr_cmd[1].payload = paset;

		memset(&cmdreq, 0, sizeof(cmdreq));
		cmdreq.cmds = set_col_page_addr_cmd;
		cmdreq.cmds_cnt = 2;
		cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;
		cmdreq.rlen = 0;
		cmdreq.cb = NULL;

		if (pinfo->partial_update_dcs_cmd_by_left)
			ctrl = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
		else if(mdss_dsi_broadcast_mode_enabled())
			ctrl = mdss_dsi_get_ctrl_by_index(DSI_CTRL_RIGHT);
		if (!ctrl)	/* lookup may fail; don't pass NULL down */
			return -EINVAL;
		mdss_dsi_cmdlist_put(ctrl, &cmdreq);
	}
	return 0;
}
/* Map a platform backlight level to its candela (luminance) value. */
static int get_candela_value(int bl_level)
{
	int idx = candela_map_table.bkl[bl_level];

	return candela_map_table.lux_tab[idx];
}
/* Map a platform backlight level to its brightness-command index. */
static int get_cmd_idx(int bl_level)
{
	int idx = candela_map_table.bkl[bl_level];

	return candela_map_table.cmd_idx[idx];
}
/* Return the level-2 test key enable (1) or disable (0) command set. */
static struct dsi_cmd get_testKey_set(int enable)
{
	struct dsi_cmd testKey = {0,};

	testKey.cmd_desc = enable ?
			&(test_key_enable_cmds.cmd_desc[0]) :
			&(test_key_disable_cmds.cmd_desc[0]);
	testKey.num_of_cmds = 1;

	return testKey;
}
#if defined(HBM_RE)
/* Return the single-command HBM (high brightness mode) off set. */
static struct dsi_cmd get_hbm_off_set(void)
{
	struct dsi_cmd hbm_off = {
		.cmd_desc = &(hbm_off_cmd.cmd_desc[0]),
		.num_of_cmds = 1,
	};

	return hbm_off;
}
#endif
/*
 * Pick the AID/AOR command for candela index @cd_idx.
 * Returns an empty set when the index is out of range or when the
 * command is identical to the one last sent (dedup via curr_aid_idx).
 */
static struct dsi_cmd get_aid_aor_control_set(int cd_idx)
{
	struct dsi_cmd aid_control = {0,};
	int cmd_idx;
	int prev_idx = msd.dstat.curr_aid_idx;
	char *cur_payload;

	if (!aid_map_table.size || cd_idx >= aid_map_table.size)
		return aid_control;

	/* Get index in the aid command list */
	cmd_idx = aid_map_table.cmd_idx[cd_idx];
	cur_payload = aid_cmds_list.cmd_desc[cmd_idx].payload;

	/* Skip resend when the previous command matches byte-for-byte. */
	if (prev_idx >= 0) {
		char *prev_payload = aid_cmds_list.cmd_desc[prev_idx].payload;
		int len = aid_cmds_list.cmd_desc[prev_idx].dchdr.dlen;

		if (!memcmp(prev_payload, cur_payload, len))
			return aid_control;
	}

	aid_control.cmd_desc = &(aid_cmds_list.cmd_desc[cmd_idx]);
	aid_control.num_of_cmds = 1;
	msd.dstat.curr_aid_idx = cmd_idx;

	return aid_control;
}
/*
	This function returns the ACL-on condition command set,
	sent once before the per-level ACL commands.
*/
static struct dsi_cmd get_acl_control_on_set(void)
{
	struct dsi_cmd aclcont_control = {0,};

	/* Send the ACL-on condition only once; skip if already set. */
	if (!msd.dstat.curr_acl_cond) {
		aclcont_control.cmd_desc = aclcont_cmds_list.cmd_desc;
		aclcont_control.num_of_cmds = aclcont_cmds_list.num_of_cmds;
		msd.dstat.curr_acl_cond = 1;
		pr_info("%s #(%d)\n",
			__func__, aclcont_cmds_list.num_of_cmds);
	}

	return aclcont_control;
}
/*
	This function takes acl_map_table and uses cd_idx,
	to get the index of the command in the acl command list.
*/
static struct dsi_cmd get_acl_control_set(int cd_idx)
{
	struct dsi_cmd acl_control = {0,};
	int cmd_idx;
	int prev_idx = msd.dstat.curr_acl_idx;
	char *cur_payload;

	if (!acl_map_table.size || cd_idx >= acl_map_table.size)
		return acl_control;

	/* Get index in the acl command list */
	cmd_idx = acl_map_table.cmd_idx[cd_idx];
	cur_payload = acl_cmds_list.cmd_desc[cmd_idx].payload;

	/* Skip resend when the previous command matches byte-for-byte. */
	if (prev_idx >= 0) {
		char *prev_payload = acl_cmds_list.cmd_desc[prev_idx].payload;
		int len = acl_cmds_list.cmd_desc[prev_idx].dchdr.dlen;

		if (!memcmp(prev_payload, cur_payload, len))
			return acl_control;
	}

	acl_control.cmd_desc = &(acl_cmds_list.cmd_desc[cmd_idx]);
	acl_control.num_of_cmds = 1;
	msd.dstat.curr_acl_idx = cmd_idx;

	return acl_control;
}
static struct dsi_cmd get_acl_control_off_set(void)
{
	struct dsi_cmd acl_control = {0,};

	/* Index 0 means ACL is already off; nothing to send. */
	if (msd.dstat.curr_acl_idx != 0) {
		acl_control.cmd_desc = acl_off_cmd.cmd_desc; /* idx 0 : ACL OFF */
		acl_control.num_of_cmds = acl_off_cmd.num_of_cmds;
		pr_info("%s #(%d)\n",
			__func__, acl_off_cmd.num_of_cmds);
		msd.dstat.curr_acl_idx = 0;
		msd.dstat.curr_acl_cond = 0;
	}

	return acl_control;
}
#if 0 /*not used*/
/* Dead code: OPR average calibration command selection.
 * NOTE(review): p_payload below reads cmd_desc[idx], not cmd_desc[p_idx],
 * so the memcmp compares the payload with itself and always matches —
 * looks like a bug; verify against p_idx before re-enabling. */
static struct dsi_cmd get_orp_avg_cal_set(void)
{
	struct dsi_cmd opr_set = {0,};
	char *p_payload, *c_payload;
	int idx, payload_size = 0;
	int p_idx = msd.dstat.curr_opr_idx;
	if (!opr_avg_cal_cmd.cmds_len)
		goto end;
	if (msd.dstat.acl_on)
		idx = 0;
	else
		idx = 1;
	c_payload = opr_avg_cal_cmd.cmd_desc[idx].payload;
	if (p_idx >=0) {
		p_payload = opr_avg_cal_cmd.cmd_desc[idx].payload;
		payload_size = opr_avg_cal_cmd.cmd_desc[idx].dchdr.dlen;
		if (!memcmp(p_payload, c_payload, payload_size))
			goto end;
	}
	/* Get the command desc */
	opr_set.cmd_desc = &(opr_avg_cal_cmd.cmd_desc[idx]);
	opr_set.num_of_cmds = 1;
end:
	return opr_set;
}
#endif
#if defined(TEMPERATURE_ELVSS)
// ELVSS TEMPERATURE COMPENSATION for S6E3FA0
/* Patch the B8 register parameter according to the current temperature
 * band (>0, (-20,0], <=-20) and return the low-temperature ELVSS set.
 * The 0x19/0x00/0x94 values are panel-vendor magic — do not change
 * without the panel datasheet. */
static struct dsi_cmd get_elvss_tempcompen_control_set(void)
{
	struct dsi_cmd elvss_tempcompen_control = {0,};
	pr_debug("%s for ELVSS CONTROL acl(%d), temp(%d)\n",
		__func__, msd.dstat.acl_on, msd.dstat.temperature);
	/* Get the command desc */
	if (msd.dstat.temperature > 0) {
		pr_debug("%s temp > 0 \n",__func__);
		elvss_lowtemp_cmds_list.cmd_desc[1].payload[1] = 0x19; // B8
	} else if (msd.dstat.temperature > -20) {
		pr_debug("%s 0 >= temp > -20 \n",__func__);
		elvss_lowtemp_cmds_list.cmd_desc[1].payload[1] = 0x00; // B8
	} else {
		pr_debug("%s temp <= -20 \n",__func__);
		elvss_lowtemp_cmds_list.cmd_desc[1].payload[1] = 0x94; // B8
	}
	pr_info("%s for ELVSS CONTROL acl(%d), temp(%d) B8(0x%x) \n",
		__func__, msd.dstat.acl_on, msd.dstat.temperature,
		elvss_lowtemp_cmds_list.cmd_desc[1].payload[1]);
	elvss_tempcompen_control.cmd_desc = elvss_lowtemp_cmds_list.cmd_desc;
	elvss_tempcompen_control.num_of_cmds = elvss_lowtemp_cmds_list.num_of_cmds;
	return elvss_tempcompen_control;
}
/* Second-stage low-temperature ELVSS compensation: patch the B6 21st
 * parameter with the MTP-read elvss_value, reduced by a panel-specific
 * offset (0x05 for FA2, else 0x03) when temperature <= -20C. */
static struct dsi_cmd get_elvss_tempcompen_control_set2(void)
{
	struct dsi_cmd elvss_tempcompen_control2 = {0,};
	pr_debug("%s for ELVSS CONTROL acl2(%d), temp(%d)\n",
		__func__, msd.dstat.acl_on, msd.dstat.temperature);
	/* Get the command desc */
	if (msd.dstat.temperature > -20) /*b6 21th para*/
		elvss_lowtemp2_cmds_list.cmd_desc[1].payload[1] = msd.dstat.elvss_value;
	else {
		/*temp <= -20 : b6 21th para-0x05*/
		if(msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD)
			elvss_lowtemp2_cmds_list.cmd_desc[1].payload[1] = (msd.dstat.elvss_value - 0x05);
		else
			elvss_lowtemp2_cmds_list.cmd_desc[1].payload[1] = (msd.dstat.elvss_value - 0x03);
	}
	pr_info("%s for ELVSS CONTROL acl(%d), temp(%d) B0(0x%x) B6(0x%x)\n",
		__func__, msd.dstat.acl_on, msd.dstat.temperature,
		elvss_lowtemp2_cmds_list.cmd_desc[0].payload[1],
		elvss_lowtemp2_cmds_list.cmd_desc[1].payload[1]);
	elvss_tempcompen_control2.cmd_desc = elvss_lowtemp2_cmds_list.cmd_desc;
	elvss_tempcompen_control2.num_of_cmds = elvss_lowtemp2_cmds_list.num_of_cmds;
	return elvss_tempcompen_control2;
}
#endif
/*
This function takes acl_map_table and uses cd_idx,
to get the index of the command in elvss command list.
*/
#ifdef SMART_ACL
/*
 * get_elvss_control_set() - pick the ELVSS command for candela index
 * @cd_idx, patching the B6 1st parameter for the current ACL/SIOP and
 * temperature state (SMART_ACL variant).
 *
 * Fixes: the bounds check repeated the same two conditions twice
 * (copy-paste duplication) — the duplicate was removed; the local
 * "payload" variable was assigned but never read and was dropped.
 */
static struct dsi_cmd get_elvss_control_set(int cd_idx)
{
	struct dsi_cmd elvss_control = {0,};
	int cmd_idx = 0;

	pr_debug("%s for SMART_ACL acl(%d), temp(%d)\n",
		__func__, msd.dstat.acl_on, msd.dstat.temperature);

	if (!smart_acl_elvss_map_table.size ||
	    !(cd_idx < smart_acl_elvss_map_table.size)) {
		pr_err("%s failed mapping elvss table\n",__func__);
		goto end;
	}

	cmd_idx = smart_acl_elvss_map_table.cmd_idx[cd_idx];

	/* Get the command desc */
	if(msd.dstat.acl_on || msd.dstat.siop_status) {
		if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD ||
		    msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD) {
			if (msd.dstat.temperature > 0)
				smart_acl_elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x88;
			else
				smart_acl_elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x8C;
		} else {
			smart_acl_elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x8C;
		}
		elvss_control.cmd_desc = &(smart_acl_elvss_cmds_list.cmd_desc[cmd_idx]);
		pr_debug("ELVSS for SMART_ACL cd_idx=%d, cmd_idx=%d\n", cd_idx, cmd_idx);
	} else {
		if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD ||
		    msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD) {
			if (msd.dstat.temperature > 0)
				elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x98;
			else
				elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x9C;
		} else {
			elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x9C;
			if (cd_idx < 13) /*2~19nit*/
				elvss_cmds_list.cmd_desc[cmd_idx].payload[1] = 0x8C; /*CAPS OFF*/
		}
		elvss_control.cmd_desc = &(elvss_cmds_list.cmd_desc[cmd_idx]);
		pr_debug("ELVSS for normal cd_idx=%d, cmd_idx=%d\n", cd_idx, cmd_idx);
	}

	elvss_control.num_of_cmds = 1;
	msd.dstat.curr_elvss_idx = cmd_idx;
end:
	return elvss_control;
}
#else
/* Non-SMART_ACL variant: select the ELVSS command for candela index
 * @cd_idx, skipping the send when it matches the previous one. */
static struct dsi_cmd get_elvss_control_set(int cd_idx)
{
	struct dsi_cmd elvss_control = {0,};
	int cmd_idx;
	int prev_idx = msd.dstat.curr_elvss_idx;
	char *cur_payload;

	if (!smart_acl_elvss_map_table.size ||
	    cd_idx >= smart_acl_elvss_map_table.size) {
		pr_err("%s invalid elvss mapping \n",__func__);
		return elvss_control;
	}

	/* Get index in the acl command list */
	cmd_idx = smart_acl_elvss_map_table.cmd_idx[cd_idx];
	cur_payload = elvss_cmds_list.cmd_desc[cmd_idx].payload;

	/* Skip resend when index or payload matches the previous command. */
	if (prev_idx >= 0) {
		char *prev_payload = elvss_cmds_list.cmd_desc[prev_idx].payload;
		int len = elvss_cmds_list.cmd_desc[prev_idx].dchdr.dlen;

		if (prev_idx == cmd_idx ||
		    !memcmp(prev_payload, cur_payload, len))
			return elvss_control;
	}

	elvss_control.cmd_desc = &(elvss_cmds_list.cmd_desc[cmd_idx]);
	elvss_control.num_of_cmds = 1;
	msd.dstat.curr_elvss_idx = cmd_idx;

	return elvss_control;
}
#endif
#ifdef SMART_VINT
/* Select the VINT command for candela index @cd_idx, skipping the send
 * when it matches the previously sent one (dedup via curr_vint_idx). */
static struct dsi_cmd get_vint_control_set(int cd_idx)
{
	struct dsi_cmd vint_control = {0,};
	int cmd_idx;
	int prev_idx = msd.dstat.curr_vint_idx;
	char *cur_payload;

	if (!smart_vint_map_table.size ||
	    cd_idx >= smart_vint_map_table.size) {
		pr_err("%s invalid vint mapping \n",__func__);
		return vint_control;
	}

	/* Get index in the acl command list */
	cmd_idx = smart_vint_map_table.cmd_idx[cd_idx];
	cur_payload = smart_vint_cmds_list.cmd_desc[cmd_idx].payload;

	/* Skip resend when index or payload matches the previous command. */
	if (prev_idx >= 0) {
		char *prev_payload = smart_vint_cmds_list.cmd_desc[prev_idx].payload;
		int len = smart_vint_cmds_list.cmd_desc[prev_idx].dchdr.dlen;

		if (prev_idx == cmd_idx ||
		    !memcmp(prev_payload, cur_payload, len))
			return vint_control;
	}

	vint_control.cmd_desc = &(smart_vint_cmds_list.cmd_desc[cmd_idx]);
	vint_control.num_of_cmds = 1;
	msd.dstat.curr_vint_idx = cmd_idx;

	return vint_control;
}
#endif
/* Build the gamma command for @candella cd: the smart-dimming engine
 * writes the computed gamma bytes directly into the command payload
 * (starting at payload[1], after the register byte). */
static struct dsi_cmd get_gamma_control_set(int candella)
{
	struct dsi_cmd gamma_control = {0,};
	/* Just a safety check to ensure smart dimming data is initialised well */
	BUG_ON(msd.sdimconf->generate_gamma == NULL);
	msd.sdimconf->generate_gamma(candella, &gamma_cmds_list.cmd_desc[0].payload[1]);
	gamma_control.cmd_desc = &(gamma_cmds_list.cmd_desc[0]);
	gamma_control.num_of_cmds = gamma_cmds_list.num_of_cmds;
	return gamma_control;
}
/*
 * update_bright_packet() - append @cmd_set's commands to the global
 * brightness_packet[] array starting at slot @cmd_count.
 *
 * Returns the new command count.
 *
 * Bug fix: the overflow check only tested the entry value of cmd_count,
 * so a multi-command set starting near the limit could still write past
 * the end of brightness_packet[]. The bound is now checked before every
 * append as well.
 */
static int update_bright_packet(int cmd_count, struct dsi_cmd *cmd_set)
{
	int i = 0;

	if (cmd_count > (MAX_BR_PACKET_SIZE - 1))/*cmd_count is index, if cmd_count >13 then panic*/
		panic("over max brightness_packet size(%d).. !!", MAX_BR_PACKET_SIZE);

	for (i = 0; i < cmd_set->num_of_cmds; i++) {
		/* re-check as the packet grows inside the loop */
		if (cmd_count > (MAX_BR_PACKET_SIZE - 1))
			panic("over max brightness_packet size(%d).. !!",
				MAX_BR_PACKET_SIZE);
		brightness_packet[cmd_count].payload =
			cmd_set->cmd_desc[i].payload;
		brightness_packet[cmd_count].dchdr.dlen =
			cmd_set->cmd_desc[i].dchdr.dlen;
		brightness_packet[cmd_count].dchdr.dtype =
			cmd_set->cmd_desc[i].dchdr.dtype;
		brightness_packet[cmd_count].dchdr.wait =
			cmd_set->cmd_desc[i].dchdr.wait;
		cmd_count++;
	}

	return cmd_count;
}
#if defined(HBM_RE)
static struct dsi_cmd get_hbm_etc_control_set(void)
{
struct dsi_cmd etc_hbm_control = {0,};
/* Get the command desc */
etc_hbm_control.cmd_desc = &(hbm_etc_cmds_list.cmd_desc[0]);
etc_hbm_control.num_of_cmds = hbm_etc_cmds_list.num_of_cmds;
return etc_hbm_control;
}
static struct dsi_cmd get_hbm_gamma_control_set(void)
{
struct dsi_cmd gamma_hbm_control = {0,};
/* Get the command desc */
gamma_hbm_control.cmd_desc = &(hbm_gamma_cmds_list.cmd_desc[0]);
gamma_hbm_control.num_of_cmds = hbm_gamma_cmds_list.num_of_cmds;
return gamma_hbm_control;
}
/* Assemble the HBM (high brightness mode) command packet into
 * brightness_packet[]: test-key on, (panel-dependent) HBM gamma,
 * HBM etc commands, test-key off. Returns the packet command count,
 * or 0 if HBM is already active. Also resets the cached "current"
 * indices so the next normal-brightness set is fully re-sent. */
static int make_brightcontrol_hbm_set(void)
{
	struct dsi_cmd hbm_etc_control = {0,};
	struct dsi_cmd gamma_control = {0,};
	struct dsi_cmd testKey = {0, 0, 0, 0, 0};
	int cmd_count = 0;
	if (msd.dstat.hbm_mode) {
		pr_err("%s : already hbm mode! return .. \n", __func__);
		return 0;
	}
	/* level2 enable */
	testKey = get_testKey_set(1);
	cmd_count = update_bright_packet(cmd_count, &testKey);
	/*gamma*/
	/* HBM gamma is only sent on these two panels */
	if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD ||\
		msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD){
		gamma_control = get_hbm_gamma_control_set();
		cmd_count = update_bright_packet(cmd_count, &gamma_control);
	}
	hbm_etc_control = get_hbm_etc_control_set();
	cmd_count = update_bright_packet(cmd_count, &hbm_etc_control);
	/* level2 disable */
	testKey = get_testKey_set(0);
	cmd_count = update_bright_packet(cmd_count, &testKey);
	/* for non hbm mode : reset */
	msd.dstat.curr_elvss_idx = -1;
#if defined(SMART_VINT)
	msd.dstat.curr_vint_idx = -1;
#endif
	msd.dstat.curr_acl_idx = -1;
	msd.dstat.curr_opr_idx = -1;
	msd.dstat.curr_aid_idx = -1;
	msd.dstat.curr_acl_cond = 0;
	LCD_DEBUG("HBM : %d\n", cmd_count);
	return cmd_count;
}
#endif
#if defined(CONFIG_LCD_HMT)
/* Smart-dimming configuration used for single-panel reverse HMT mode. */
static struct smartdim_conf_hmt *get_smartdim_conf_hmt(void)
{
	return msd.sdimconf_reverse_hmt_single;
}
/* Candela/lux mapping table used in reverse HMT mode. */
static struct candella_lux_map *get_candella_lux_map_hmt(void)
{
	return &candela_map_table_reverse_hmt;
}
/* Map an HMT backlight level to its candela value. */
static int get_candela_value_hmt(int bl_level)
{
	struct candella_lux_map *map = get_candella_lux_map_hmt();

	return map->lux_tab[map->bkl[bl_level]];
}
/* Map an HMT backlight level to its brightness-command index. */
static int get_cmd_idx_hmt(int bl_level)
{
	struct candella_lux_map *map = get_candella_lux_map_hmt();

	return map->cmd_idx[map->bkl[bl_level]];
}
/* Patch slot 2 (the B2/AOR command) of the HMT brightness sequence
 * with the AID command for candela index @cd_idx. */
static void get_aor_set_hmt(int cd_idx)
{
	hmt_bright_cmds_list.cmd_desc[2] = hmt_aid_cmds_list.cmd_desc[cd_idx];
}
/* Build the HMT brightness sequence (tlte variant) in place inside
 * hmt_bright_cmds_list: CA gamma (slot 1), B2 AOR (slot 2), B6 ELVSS
 * (slot 3). Out-of-range levels fall back to 150cd / index 28 and
 * additionally zero B6 payload[2]. */
static void make_brightcontrol_set_hmt_tlte(int bl_level)
{
	int cd_idx = 0, cd_level =0;
	struct smartdim_conf_hmt *conf_hmt;
	if ( bl_level < 0 || bl_level > 255 ) {
		pr_err("[HMT] bl_level(%d) is out of range! set to 150cd \n",bl_level);
		cd_level = 150;
		cd_idx = 28;
	} else {
		cd_idx = get_cmd_idx_hmt(bl_level);
		cd_level = get_candela_value_hmt(bl_level);
	}
	LCD_DEBUG("[HMT] bright_level: %d, candela_idx: %d( %d cd )\n", bl_level, cd_idx, cd_level);
	/* CA */
	conf_hmt = get_smartdim_conf_hmt();
	BUG_ON(conf_hmt->generate_gamma == NULL);
	conf_hmt->generate_gamma(cd_level, &hmt_bright_cmds_list.cmd_desc[1].payload[1]);
	/* B2 */
	get_aor_set_hmt(cd_idx);
	/* B6 */
	/* 0x8C with ACL/SIOP active, 0x9C otherwise (panel magic values) */
	if (msd.dstat.acl_on||msd.dstat.siop_status)
		hmt_bright_cmds_list.cmd_desc[3].payload[1] = 0x8C;
	else
		hmt_bright_cmds_list.cmd_desc[3].payload[1] = 0x9C;
	if ( bl_level < 0 || bl_level > 255 ) {
		hmt_bright_cmds_list.cmd_desc[3].payload[2] = 0x00;
	}
	return;
}
/* Build the HMT brightness sequence (lentis variant): only the CA gamma
 * command (slot 1) is regenerated; unlike the tlte variant there is no
 * AOR/ELVSS patching and no range clamp on bl_level. */
static void make_brightcontrol_set_hmt_lentis(int bl_level)
{
	int cd_idx = 0, cd_level =0;
	struct smartdim_conf_hmt *conf_hmt;
	cd_idx = get_cmd_idx_hmt(bl_level);
	cd_level = get_candela_value_hmt(bl_level);
	/* CA */
	conf_hmt = get_smartdim_conf_hmt();
	BUG_ON(conf_hmt->generate_gamma == NULL);
	conf_hmt->generate_gamma(cd_level, &hmt_bright_cmds_list.cmd_desc[1].payload[1]);
	LCD_DEBUG("[HMT] bright_level: %d, candela_idx: %d( %d cd ), ", bl_level, cd_idx, cd_level);
	return;
}
#endif
/* Assemble the complete brightness command packet for @bl_level into
 * brightness_packet[] via update_bright_packet(), in strict panel-spec
 * order: test-key on, (HBM off), AID/AOR, ACL on/off, ELVSS, (VINT),
 * (temperature ELVSS), gamma, test-key off. Returns the command count.
 * Order matters to the panel — do not reorder the sections. */
static int make_brightcontrol_set(int bl_level)
{
	struct dsi_cmd aid_control = {0,};
	struct dsi_cmd acl_control = {0,};
	struct dsi_cmd acl_on_cont = {0,};
	struct dsi_cmd acl_off_cont = {0,};
	struct dsi_cmd elvss_control = {0,};
	struct dsi_cmd gamma_control = {0,};
	struct dsi_cmd testKey = {0,};
#if defined(TEMPERATURE_ELVSS)
	struct dsi_cmd temperature_elvss_control = {0,};
	struct dsi_cmd temperature_elvss_control2 = {0,};
#endif
#if defined(SMART_VINT)
	struct dsi_cmd vint_control = {0,};
#endif
#if defined(HBM_RE)
	struct dsi_cmd hbm_off_control = {0,};
#endif
#if defined(CONFIG_LCD_HMT)
	int i;
	int hmt_cd_level;
#endif
	int cmd_count = 0, cd_idx = 0, cd_level =0;
	cd_idx = get_cmd_idx(bl_level);
	cd_level = get_candela_value(bl_level);
#if defined(CONFIG_LCD_HMT)
	/* With HMT on but low-persistence off, pick the normal-table entry
	 * whose candela is closest-above the HMT level. */
	if (msd.hmt_stat.hmt_on && !msd.hmt_stat.hmt_low_persistence) {
		pr_info("[HMT] [LOW PERSISTENCE OFF] - use normal aid for brightness\n");
		hmt_cd_level = get_candela_value_hmt(msd.hmt_stat.hmt_bl_level);
		for(i=0; i<candela_map_table.lux_tab_size; i++) {
			if (hmt_cd_level <= candela_map_table.lux_tab[i]) {
				pr_debug("%s: hmt_cd_level (%d), normal_cd_level (%d) idx(%d)\n",
					__func__, hmt_cd_level, candela_map_table.lux_tab[i], i);
				break;
			}
		}
		cd_idx = i;
		cd_level = candela_map_table.lux_tab[i];
		pr_info("[HMT] [LOW PERSISTENCE OFF] hmt_cd_level (%d) idx(%d) cd_level(%d)\n",
			hmt_cd_level, cd_idx, cd_level);
	}
#endif
	/* level2 enable */
	testKey = get_testKey_set(1);
	cmd_count = update_bright_packet(cmd_count, &testKey);
	/* aid/aor */
#if defined(HBM_RE)
	if (msd.panel == PANEL_WQHD_OCTA_S6E3HA2X01_CMD ||msd.panel == PANEL_WQXGA_OCTA_S6E3HA2X01_CMD){
		/*hbm off cmd*/
		if(msd.dstat.hbm_mode){
			hbm_off_control = get_hbm_off_set();/*53 00*/
			cmd_count = update_bright_packet(cmd_count, &hbm_off_control);
		}
	}
#endif
	aid_control = get_aid_aor_control_set(cd_idx);
	cmd_count = update_bright_packet(cmd_count, &aid_control);
	/* acl */
	if (msd.dstat.acl_on||msd.dstat.siop_status) {
		acl_on_cont = get_acl_control_on_set(); /*b5 51*/
		cmd_count = update_bright_packet(cmd_count, &acl_on_cont);
		acl_control = get_acl_control_set(cd_idx); /*55 02*/
		cmd_count = update_bright_packet(cmd_count, &acl_control);
	} else {
		/* acl off (hbm off) */
		acl_off_cont = get_acl_control_off_set(); /*b5 41,55 00 */
		cmd_count = update_bright_packet(cmd_count, &acl_off_cont);
	}
	/*elvss*/
	elvss_control = get_elvss_control_set(cd_idx);
	cmd_count = update_bright_packet(cmd_count, &elvss_control);
#if defined(SMART_VINT)
	vint_control = get_vint_control_set(cd_idx);
	cmd_count = update_bright_packet(cmd_count, &vint_control);
#endif
#if defined(TEMPERATURE_ELVSS)
	// ELVSS TEMPERATURE COMPENSATION
	// ELVSS for Temperature set cmd should be sent after normal elvss set cmd
	if (msd.dstat.elvss_need_update) {
		temperature_elvss_control = get_elvss_tempcompen_control_set();
		cmd_count = update_bright_packet(cmd_count, &temperature_elvss_control);
		if ((msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD)||\
			(msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)||\
			(msd.panel == PANEL_WQXGA_OCTA_S6E3HA2X01_CMD)||\
			(msd.panel == PANEL_WQHD_OCTA_S6E3HA2X01_CMD)){
			temperature_elvss_control2 = get_elvss_tempcompen_control_set2();
			cmd_count = update_bright_packet(cmd_count, &temperature_elvss_control2);
		}
		msd.dstat.elvss_need_update = 0;
	}
#endif
	/*gamma*/
	gamma_control = get_gamma_control_set(cd_level);
	cmd_count = update_bright_packet(cmd_count, &gamma_control);
	/* level2 disable */
	testKey = get_testKey_set(0);
	cmd_count = update_bright_packet(cmd_count, &testKey);
#if defined(TEMPERATURE_ELVSS)
	LCD_DEBUG("bright_level: %d, candela_idx: %d( %d cd ), "\
		"cmd_count(aid,acl,elvss,temperature,gamma)::(%d,%d,%d,%d,%d)%d,id3(0x%x)\n",
#else
	LCD_DEBUG("bright_level: %d, candela_idx: %d( %d cd ), "\
		"cmd_count(aid,acl,elvss,temperature,gamma)::(%d,%d,%d,%d)%d,id3(0x%x)\n",
#endif
		msd.dstat.bright_level, cd_idx, cd_level,
		aid_control.num_of_cmds,
		msd.dstat.acl_on | msd.dstat.siop_status,
		elvss_control.num_of_cmds,
#if defined(TEMPERATURE_ELVSS)
		temperature_elvss_control.num_of_cmds,
#endif
		gamma_control.num_of_cmds,
		cmd_count,
		msd.id3);
	return cmd_count;
}
#if !defined(CONFIG_FB_MSM_EDP_SAMSUNG)
/*
 * Kernel boot-arg parser for "androidboot.boot_recovery=":
 * a leading '1' or '2' means recovery boot, anything else normal boot.
 */
static int __init current_boot_mode(char *mode)
{
	/*
	 * 1,2 is recovery booting
	 * 0 is normal booting
	 */
	msd.dstat.recovery_boot_mode =
		(mode[0] == '1' || mode[0] == '2') ? 1 : 0;

	pr_debug("%s %s", __func__, msd.dstat.recovery_boot_mode == 1 ?
		"recovery" : "normal");

	return 1;
}
__setup("androidboot.boot_recovery=", current_boot_mode);
#endif
/*
 * mdss_dsi_panel_cmds_send() - send a panel command list under msd.lock.
 *
 * Bails out when no LCD is attached, or when per-left-ctrl dispatch is
 * configured and this is not the left controller.
 *
 * Improvement: the three duplicated "unlock and return" paths were
 * collapsed into a single exit label (standard kernel goto-cleanup
 * idiom); behavior is unchanged.
 */
static void mdss_dsi_panel_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl,
			struct dsi_panel_cmds *pcmds)
{
	struct dcs_cmd_req cmdreq;
	struct mdss_panel_info *pinfo;

	mutex_lock(&msd.lock);

	if (get_lcd_attached() == 0) {
		printk("%s: get_lcd_attached(0)!\n",__func__);
		goto unlock;
	}

	pinfo = &(ctrl->panel_data.panel_info);
	if (pinfo->partial_update_dcs_cmd_by_left &&
			ctrl->ndx != DSI_CTRL_LEFT)
		goto unlock;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = pcmds->cmds;
	cmdreq.cmds_cnt = pcmds->cmd_cnt;
	cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;

	/*Panel ON/Off commands should be sent in DSI Low Power Mode*/
	if (pcmds->link_state == DSI_LP_MODE)
		cmdreq.flags |= CMD_REQ_LP_MODE;

	cmdreq.rlen = 0;
	cmdreq.cb = NULL;

	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
unlock:
	mutex_unlock(&msd.lock);
}
/* Submit @cnt DCS commands to the controller; @flag may request a
 * single-transmission burst via CMD_REQ_SINGLE_TX. */
void mdss_dsi_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_cmd_desc *cmds, int cnt,int flag)
{
	struct dcs_cmd_req cmdreq;

	if (get_lcd_attached() == 0) {
		printk("%s: get_lcd_attached(0)!\n",__func__);
		return;
	}

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;
	if (flag & CMD_REQ_SINGLE_TX)
		cmdreq.flags |= CMD_REQ_SINGLE_TX;
	cmdreq.cmds = cmds;
	cmdreq.cmds_cnt = cnt;
	cmdreq.rlen = 0;
	cmdreq.cb = NULL;

	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
}
/* Issue a single DCS read of up to @rlen bytes into ctrl->rx_buf.
 * Returns the number of bytes actually received (rx_buf.len), or 0 when
 * no LCD is attached. */
u32 mdss_dsi_cmd_receive(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_cmd_desc *cmd, int rlen)
{
	struct dcs_cmd_req cmdreq;
	if (get_lcd_attached() == 0) {
		printk("%s: get_lcd_attached(0)!\n",__func__);
		return 0;
	}
	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = cmd;
	cmdreq.cmds_cnt = 1;
	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
	cmdreq.rlen = rlen;
	cmdreq.rbuf = ctrl->rx_buf.data;
	cmdreq.cb = NULL; /* call back */
	/*
	 * This mutex is to sync up with dynamic FPS changes
	 * so that DSI lockups shall not happen
	 */
	BUG_ON(msd.ctrl_pdata == NULL);
	/* NOTE(review): dfps_mutex locking is commented out — presumably
	 * disabled deliberately; confirm before re-enabling. */
	// mutex_lock(&msd.ctrl_pdata->dfps_mutex);
	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
	// mutex_unlock(&msd.ctrl_pdata->dfps_mutex);
	/*
	 * blocked here, untill call back called
	 */
	return ctrl->rx_buf.len;
}
/* Read @srcLength bytes of NV/MTP data for the register in desc[0],
 * starting at @startoffset, in chunks of packet_size[0] (7) bytes:
 * each iteration sets the B0h read position, receives one chunk, and
 * appends it to @destBuffer (if non-NULL) while building a hex log
 * line. Returns the number of bytes read. */
static int samsung_nv_read(struct dsi_cmd_desc *desc, char *destBuffer,
		int srcLength, struct mdss_panel_data *pdata, int startoffset)
{
	int loop_limit = 0;
	/* first byte is size of Register */
	static char packet_size[] = { 0x07, 0 };
	static struct dsi_cmd_desc s6e8aa0_packet_size_cmd = {
		{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(packet_size)},
		packet_size };
	/* second byte is Read-position */
	static char reg_read_pos[] = { 0xB0, 0x00 };
	static struct dsi_cmd_desc s6e8aa0_read_pos_cmd = {
		{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(reg_read_pos)},
		reg_read_pos };
	int read_pos = startoffset;
	int read_count = 0;
	int show_cnt;
	int i, j;
	char show_buffer[256];
	int show_buffer_pos = 0;
	int read_size = 0;
	show_buffer_pos +=
		snprintf(show_buffer, 256, "read_reg : %X[%d] : ",
		desc[0].payload[0], srcLength);
	/* ceil(srcLength / packet_size[0]) chunks */
	loop_limit = (srcLength + packet_size[0] - 1)
				/ packet_size[0];
	/* fix the max return packet size once before the loop */
	mdss_dsi_cmds_send(msd.ctrl_pdata, &(s6e8aa0_packet_size_cmd), 1, 0);
	show_cnt = 0;
	for (j = 0; j < loop_limit; j++) {
		reg_read_pos[1] = read_pos;
		/* last chunk may be shorter than packet_size[0] */
		read_size = ((srcLength - read_pos + startoffset) < packet_size[0]) ?
					(srcLength - read_pos + startoffset) : packet_size[0];
		mdss_dsi_cmds_send(msd.ctrl_pdata, &(s6e8aa0_read_pos_cmd), 1, 0);
		read_count = mdss_dsi_cmd_receive(msd.ctrl_pdata, desc, read_size);
		for (i = 0; i < read_count; i++, show_cnt++) {
			show_buffer_pos += snprintf(show_buffer +
						show_buffer_pos, (256 - show_buffer_pos), "%02x ",
						msd.ctrl_pdata->rx_buf.data[i]);
			if (destBuffer != NULL && show_cnt < srcLength) {
				destBuffer[show_cnt] =
					msd.ctrl_pdata->rx_buf.data[i];
			}
		}
		show_buffer_pos += snprintf(show_buffer +
					show_buffer_pos, (256 - show_buffer_pos), ".");
		read_pos += read_count;
		if (read_pos-startoffset >= srcLength)
			break;
	}
	pr_info("%s\n", show_buffer);
	return read_pos-startoffset;
}
static int mipi_samsung_read_nv_mem(struct mdss_panel_data *pdata, struct dsi_cmd *nv_read_cmds, char *buffer)
{
int nv_size = 0;
int nv_read_cnt = 0;
int i = 0;
mipi_samsung_disp_send_cmd(PANEL_MTP_ENABLE, true);
for (i = 0; i < nv_read_cmds->num_of_cmds; i++)
nv_size += nv_read_cmds->read_size[i];
pr_debug("nv_size= %d, nv_read_cmds->num_of_cmds = %d\n", nv_size, nv_read_cmds->num_of_cmds);
for (i = 0; i < nv_read_cmds->num_of_cmds; i++) {
int count = 0;
int read_size = nv_read_cmds->read_size[i];
int read_startoffset = nv_read_cmds->read_startoffset[i];
count = samsung_nv_read(&(nv_read_cmds->cmd_desc[i]),
&buffer[nv_read_cnt], read_size, pdata, read_startoffset);
nv_read_cnt += count;
if (count != read_size)
pr_err("Error reading LCD NV data count(%d), read_size(%d)!!!!\n",count,read_size);
}
mipi_samsung_disp_send_cmd(PANEL_MTP_DISABLE, true);
return nv_read_cnt;
}
//#endif
#ifdef DEBUG_LDI_STATUS
/*
 * read_ldi_status() - dump a few LDI status registers for debugging.
 *
 * Reads registers 0Ah, 0Eh and EAh via the DSI command lists parsed
 * from the device tree and logs their values.  Returns 0 on success,
 * 1 when no debug commands exist or the panel is off.
 *
 * Fix: the local dsi_buf pointers (tp/rp) were assigned but never
 * used — removed.
 */
int read_ldi_status(void)
{
	int i;

	if (!ldi_debug_cmds.num_of_cmds)
		return 1;

	/* Register reads go over the DSI link; the panel must be powered. */
	if(!msd.dstat.on) {
		pr_err("%s can not read because of panel off \n", __func__);
		return 1;
	}

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&ldi_debug_cmds.cmd_desc[0],
		ldi_debug_cmds.read_size[0]);
	pr_info("%s: LDI 0Ah Register Value = 0x%x (Normal Case:0x9C)\n", __func__, *msd.ctrl_pdata->rx_buf.data);

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&ldi_debug_cmds.cmd_desc[1],
		ldi_debug_cmds.read_size[1]);
	pr_info("%s: LDI 0Eh Register Value = 0x%x (Normal Case:0x80)\n", __func__, *msd.ctrl_pdata->rx_buf.data);

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&ldi_debug_cmds.cmd_desc[2],
		ldi_debug_cmds.read_size[2]);
	for(i=0 ; i<8 ; i++) {
		pr_info("%s: LDI EAh Register Value[%d] = 0x%x \n", __func__,i, msd.ctrl_pdata->rx_buf.data[i]);
	}

	return 0;
}
EXPORT_SYMBOL(read_ldi_status);
#endif
/*
 * Read the 4-byte manufacture date block from NV memory (C8h, 41st-44th
 * bytes per the comment below) and cache the decoded date/time in msd.
 */
static void mipi_samsung_manufacture_date_read(struct mdss_panel_data *pdata)
{
	char date[4];
	int year, month, day;
	int hour, min;
	int manufacture_date, manufacture_time;

	/* Read mtp (C8h 41,42,43,44th) for manufacture date */
	mipi_samsung_read_nv_mem(pdata, &manufacture_date_cmds, date);

	/* Byte 0: high nibble = year offset from 2011, low nibble = month. */
	year = ((date[0] & 0xf0) >> 4) + 2011;
	month = date[0] & 0x0f;
	/* Byte 1: day of month; bytes 2/3: hour and minute. */
	day = date[1] & 0x1f;
	hour = date[2] & 0x0f;
	min = date[3] & 0x1f;

	manufacture_date = year * 10000 + month * 100 + day;
	manufacture_time = hour * 100 + min;

	pr_info("manufacture_date = (%d%04d) - year(%d) month(%d) day(%d) hour(%d) min(%d)\n",
		manufacture_date, manufacture_time, year, month, day, hour, min);

	msd.manufacture_date = manufacture_date;
	msd.manufacture_time = manufacture_time;
}
/* Read the 5-byte DDI id (D6h, 1st-5th) from NV memory into msd.ddi_id. */
static void mipi_samsung_ddi_id_read(struct mdss_panel_data *pdata)
{
	char ddi_id[5];

	/* Read mtp (D6h 1~5th) for ddi id */
	mipi_samsung_read_nv_mem(pdata, &ddi_id_cmds, ddi_id);
	memcpy(msd.ddi_id, ddi_id, sizeof(ddi_id));

	pr_info("%s : %02x %02x %02x %02x %02x\n", __func__,
		msd.ddi_id[0], msd.ddi_id[1], msd.ddi_id[2], msd.ddi_id[3], msd.ddi_id[4]);
}
/*
 * mipi_samsung_manufacture_id() - read the 3-byte panel manufacture id.
 *
 * Issues the three DT-parsed read commands and packs the first received
 * byte of each into a 24-bit id (id1 << 16 | id2 << 8 | id3).
 * Returns 0 when no LCD is attached or no id commands were parsed.
 *
 * Fixes: removed unused dsi_buf locals (tp/rp); replaced the
 * `*(unsigned int *)rx_buf.data & 0xFF` reads — an aliasing/alignment
 * hazard on a char buffer — with a direct byte-0 read, which is the
 * identical value on the little-endian targets this driver runs on.
 */
static unsigned int mipi_samsung_manufacture_id(struct mdss_panel_data *pdata)
{
	unsigned int id = 0 ;
#if defined(CAMERA_LP)
	return 0x501401;
#endif
	if (get_lcd_attached() == 0)
	{
		printk("%s: get_lcd_attached(0)!\n",__func__);
		return id;
	}
	if (!manufacture_id_cmds.num_of_cmds) {
		pr_err("%s : manufacture id cmds num is zero..\n",__func__);
		return 0;
	}

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&manufacture_id_cmds.cmd_desc[0],
		manufacture_id_cmds.read_size[0]);
	pr_debug("%s: manufacture_id1=%x\n", __func__, *msd.ctrl_pdata->rx_buf.data);
	/* Only the first received byte of each read is meaningful. */
	id = ((unsigned char)msd.ctrl_pdata->rx_buf.data[0]);
	id <<= 8;

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&manufacture_id_cmds.cmd_desc[1],
		manufacture_id_cmds.read_size[1]);
	pr_debug("%s: manufacture_id2=%x\n", __func__, *msd.ctrl_pdata->rx_buf.data);
	id |= ((unsigned char)msd.ctrl_pdata->rx_buf.data[0]);
	id <<= 8;

	mdss_dsi_cmd_receive(msd.ctrl_pdata,
		&manufacture_id_cmds.cmd_desc[2],
		manufacture_id_cmds.read_size[2]);
	pr_debug("%s: manufacture_id3=%x\n", __func__, *msd.ctrl_pdata->rx_buf.data);
	id |= ((unsigned char)msd.ctrl_pdata->rx_buf.data[0]);

	pr_info("%s: manufacture_id=%x\n", __func__, id);

	return id;
}
/*
 * mdss_dsi_panel_bl_ctrl() - apply a new backlight level via the
 * configured backlight mechanism (WLED trigger, PWM or DCS command).
 *
 * Fix: the original NULL check was on the container_of() result, which
 * can never be NULL when pdata is non-NULL; the meaningful check is on
 * pdata itself, and it must happen before container_of() is applied.
 */
static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
							u32 bl_level)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	/*Dont need to send backlight command if display off*/
	if (msd.mfd->resume_state != MIPI_RESUME_STATE)
		return;

	if (!pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	switch (ctrl_pdata->bklt_ctrl) {
	case BL_WLED:
		led_trigger_event(bl_led_trigger, bl_level);
		break;
	case BL_PWM:
		mdss_dsi_panel_bklt_pwm(ctrl_pdata, bl_level);
		break;
	case BL_DCS_CMD:
		/* DCS path: brightness is sent as a DSI command sequence. */
		msd.dstat.bright_level = bl_level;
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
		break;
	default:
		pr_err("%s: Unknown bl_ctrl configuration\n",
			__func__);
		break;
	}
}
static int mipi_samsung_disp_send_cmd(
enum mipi_samsung_cmd_list cmd,
unsigned char lock)
{
struct dsi_cmd_desc *cmd_desc;
int cmd_size = 0;
int flag = 0;
#ifdef CMD_DEBUG
int i,j;
#endif
if (get_lcd_attached() == 0) {
printk("%s: get_lcd_attached(0)!\n",__func__);
return -ENODEV;
}
if (lock)
mutex_lock(&msd.lock);
switch (cmd) {
case PANEL_DISPLAY_ON:
cmd_desc = display_on_cmd.cmd_desc;
cmd_size = display_on_cmd.num_of_cmds;
break;
case PANEL_DISPLAY_OFF:
cmd_desc = display_off_cmd.cmd_desc;
cmd_size = display_off_cmd.num_of_cmds;
break;
case PANEL_HSYNC_ON:
cmd_desc = hsync_on_seq.cmd_desc;
cmd_size = hsync_on_seq.num_of_cmds;
break;
#if defined(CONFIG_WACOM_LCD_FREQ_COMPENSATE)
case PANEL_LDI_FPS_CHANGE:
cmd_desc = write_ldi_fps_cmds.cmd_desc;
cmd_size = write_ldi_fps_cmds.num_of_cmds;
break;
#endif
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
case PANEL_SET_TE_OSC_B:
cmd_desc = panel_set_te_osc_b.cmd_desc;
cmd_size = panel_set_te_osc_b.num_of_cmds;
break;
case PANEL_SET_TE_RESTORE:
cmd_desc = panel_set_te_restore.cmd_desc;
cmd_size = panel_set_te_restore.num_of_cmds;
break;
case PANEL_SET_TE:
cmd_desc = panel_set_te.cmd_desc;
cmd_size = panel_set_te.num_of_cmds;
break;
case PANEL_SET_TE_1:
cmd_desc = panel_set_te_1.cmd_desc;
cmd_size = panel_set_te_1.num_of_cmds;
break;
case PANEL_SET_TE_2:
cmd_desc = panel_set_te_2.cmd_desc;
cmd_size = panel_set_te_2.num_of_cmds;
break;
#endif
case PANEL_BRIGHT_CTRL:
#if defined(CONFIG_SEC_TRLTE_PROJECT)
if (system_rev < 2)
goto err;
#endif
#if defined(CAMERA_LP)
goto err;
#endif
#if defined(CONFIG_LCD_HMT)
if (msd.hmt_stat.hmt_on) {
pr_err("hmt is on!! do not set brightness..\n");
goto err;
}
#endif
cmd_desc = brightness_packet;
#if defined(CONFIG_LCD_FORCE_VIDEO_MODE)
flag = 0;
#else
/* Single Tx use for DSI_VIDEO_MODE Only */
if(msd.pdata->panel_info.mipi.mode == DSI_VIDEO_MODE)
flag = CMD_REQ_SINGLE_TX;
else
flag = 0;
#endif
msd.dstat.recent_bright_level = msd.dstat.bright_level;
#if defined(HBM_RE)
if(msd.dstat.auto_brightness == 6) {
cmd_size = make_brightcontrol_hbm_set();
msd.dstat.hbm_mode = 1;
} else {
cmd_size = make_brightcontrol_set(msd.dstat.bright_level);
msd.dstat.hbm_mode = 0;
}
#else
cmd_size = make_brightcontrol_set(msd.dstat.bright_level);
#endif
if (msd.mfd->resume_state != MIPI_RESUME_STATE) {
pr_info("%s : panel is off state!!\n", __func__);
goto unknown_command;
}
break;
case PANEL_MTP_ENABLE:
cmd_desc = nv_enable_cmds.cmd_desc;
cmd_size = nv_enable_cmds.num_of_cmds;
break;
case PANEL_MTP_DISABLE:
cmd_desc = nv_disable_cmds.cmd_desc;
cmd_size = nv_disable_cmds.num_of_cmds;
break;
case PANEL_NEED_FLIP:
/*
May be required by Panel Like Fusion3
*/
break;
case PANEL_ACL_ON:
/*
May be required by panel like D2,Commanche
*/
break;
case PANEL_ACL_OFF:
cmd_desc = acl_off_cmd.cmd_desc;
cmd_size = acl_off_cmd.num_of_cmds;
break;
#if defined(PARTIAL_UPDATE)
case PANEL_PARTIAL_ON:
cmd_desc = partialdisp_on_cmd.cmd_desc;
cmd_size = partialdisp_on_cmd.num_of_cmds;
break;
case PANEL_PARTIAL_OFF:
cmd_desc = partialdisp_off_cmd.cmd_desc;
cmd_size = partialdisp_off_cmd.num_of_cmds;
break;
#endif
#ifdef CONFIG_FB_MSM_SAMSUNG_AMOLED_LOW_POWER_MODE
case PANEL_ALPM_ON:
cmd_desc = alpm_on_seq.cmd_desc;
cmd_size = alpm_on_seq.num_of_cmds;
break;
case PANEL_ALPM_OFF:
cmd_desc = alpm_off_seq.cmd_desc;
cmd_size = alpm_off_seq.num_of_cmds;
break;
#endif
#if defined(CONFIG_LCD_CLASS_DEVICE) && defined(DDI_VIDEO_ENHANCE_TUNING)
case MDNIE_ADB_TEST:
cmd_desc = mdni_tune_cmd;
cmd_size = ARRAY_SIZE(mdni_tune_cmd);
break;
#endif
#if defined(CONFIG_LCD_HMT)
case PANEL_HMT_BRIGHT:
if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)
make_brightcontrol_set_hmt_lentis(msd.hmt_stat.hmt_bl_level);
else
make_brightcontrol_set_hmt_tlte(msd.hmt_stat.hmt_bl_level);
cmd_desc = hmt_bright_cmds_list.cmd_desc;
cmd_size = hmt_bright_cmds_list.num_of_cmds;
break;
case PANEL_LOW_PERSISTENCE_BRIGHT:
cmd_desc = brightness_packet;
/* Single Tx use for DSI_VIDEO_MODE Only */
if(msd.pdata->panel_info.mipi.mode == DSI_VIDEO_MODE)
flag = CMD_REQ_SINGLE_TX;
else
flag = 0;
cmd_size = make_brightcontrol_set(msd.dstat.bright_level);
if (msd.mfd->resume_state != MIPI_RESUME_STATE) {
pr_info("%s : panel is off state!!\n", __func__);
goto unknown_command;
}
break;
case PANEL_ENABLE:
cmd_desc = hmt_single_scan_enable.cmd_desc;
cmd_size = hmt_single_scan_enable.num_of_cmds;
break;
case PANEL_DISABLE:
cmd_desc = hmt_disable.cmd_desc;
cmd_size = hmt_disable.num_of_cmds;
break;
case PANEL_HMT_REVERSE_ENABLE:
cmd_desc = hmt_reverse_enable.cmd_desc;
cmd_size = hmt_reverse_enable.num_of_cmds;
break;
case PANEL_HMT_REVERSE_DISABLE:
cmd_desc = hmt_reverse_disable.cmd_desc;
cmd_size = hmt_reverse_disable.num_of_cmds;
break;
case PANEL_HMT_KEY_ENABLE:
cmd_desc = test_key_enable_cmds.cmd_desc;
cmd_size = test_key_enable_cmds.num_of_cmds;
break;
#endif
#ifdef LDI_ADJ_VDDM_OFFSET
case PANEL_LDI_SET_VDD_OFFSET:
cmd_desc = write_vdd_offset_cmds.cmd_desc;
cmd_size = write_vdd_offset_cmds.num_of_cmds;
break;
case PANEL_LDI_SET_VDDM_OFFSET:
cmd_desc = write_vddm_offset_cmds.cmd_desc;
cmd_size = write_vddm_offset_cmds.num_of_cmds;
break;
#endif
default:
pr_err("%s : unknown_command.. \n", __func__);
goto unknown_command;
break;
}
if (!cmd_size) {
pr_err("%s : cmd_size is zero!.. \n", __func__);
goto err;
}
#ifdef CMD_DEBUG
for (i = 0; i < cmd_size; i++) {
for (j = 0; j < cmd_desc[i].dchdr.dlen; j++)
printk("%x ",cmd_desc[i].payload[j]);
printk("\n");
}
#endif
#ifdef MDP_RECOVERY
if (!mdss_recovery_start)
mdss_dsi_cmds_send(msd.ctrl_pdata, cmd_desc, cmd_size, flag);
else
pr_err ("%s : Can't send command during mdss_recovery_start\n", __func__);
#else
mdss_dsi_cmds_send(msd.ctrl_pdata, cmd_desc, cmd_size, flag);
#endif
if (lock)
mutex_unlock(&msd.lock);
pr_debug("%s done..\n", __func__);
return 0;
unknown_command:
LCD_DEBUG("Undefined command\n");
err:
if (lock)
mutex_unlock(&msd.lock);
return -EINVAL;
}
/* Enable/disable the panel's touch-sensing command sequence. */
void mdss_dsi_panel_touchsensing(int enable)
{
	/* Touch-sensing commands only make sense on a powered panel. */
	if (!msd.dstat.on) {
		pr_err("%s: No panel on! %d\n", __func__, enable);
		return;
	}

	/* NOTE(review): PANEL_TOUCHSENSING_ON/OFF are not handled by the
	 * mipi_samsung_disp_send_cmd() switch visible in this file —
	 * confirm they are handled elsewhere, otherwise this always hits
	 * the "unknown_command" path. */
	mipi_samsung_disp_send_cmd(enable ? PANEL_TOUCHSENSING_ON
					  : PANEL_TOUCHSENSING_OFF, true);
}
/*
 * mdss_dsi_panel_read_func() - one-time read of panel NV/MTP data:
 * manufacture date, DDI id, HBM gamma bytes, ELVSS value, OSC type and
 * mDNIe white-point coordinates.  Sets msd.dstat.is_panel_read_done so
 * panel_on only runs this once.
 */
static void mdss_dsi_panel_read_func(struct mdss_panel_data *pdata)
{
#if defined(CONFIG_MDNIE_LITE_TUNING)
	char temp[4];
	int x, y;
#endif
#if defined(HBM_RE)
	char hbm_buffer[20];
#endif
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	char read_buffer[1];
#endif
	char elvss_buffer[2];
	pr_info("%s : ++\n",__func__);
#if defined(CAMERA_LP)
	return;
#endif
	if (get_lcd_attached() == 0) {
		pr_err("%s: get_lcd_attached(0)!\n",__func__);
		return;
	}
	mipi_samsung_manufacture_date_read(pdata);
	mipi_samsung_ddi_id_read(pdata);
#if defined(HBM_RE)
	if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD ||\
		msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD){
		/* Read mtp (C8h 34th ~ 40th) for HBM */
		mipi_samsung_read_nv_mem(pdata, &nv_mtp_hbm_read_cmds, hbm_buffer);
		/* First 7 bytes patch the HBM gamma command payload. */
		memcpy(&hbm_gamma_cmds_list.cmd_desc[0].payload[1], hbm_buffer, 7);
		/* octa panel Read C8h 40th -> write B6h 21th */
		if (hbm_etc_cmds_list.cmd_desc)
			memcpy(&hbm_etc_cmds_list.cmd_desc[1].payload[21], hbm_buffer+6, 1);
		/* Read mtp (C8h 73th ~ 87th) for HBM */
		mipi_samsung_read_nv_mem(pdata, &nv_mtp_hbm2_read_cmds, hbm_buffer);
		memcpy(&hbm_gamma_cmds_list.cmd_desc[0].payload[7], hbm_buffer, 15);
	}
#endif
	/* Read mtp (B6h 21th) for elvss*/
	mipi_samsung_read_nv_mem(pdata, &nv_mtp_elvss_read_cmds, elvss_buffer);
	msd.dstat.elvss_value = elvss_buffer[0];
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	/* Cache the oscillator type into the TE-restore command payload. */
	mipi_samsung_read_nv_mem(pdata, &panel_osc_type_read_cmds, read_buffer);
	panel_set_te_restore.cmd_desc[2].payload[1] = read_buffer[0];
#endif
#if defined(CONFIG_MDNIE_LITE_TUNING)
	/* MDNIe tuning initialisation*/
	if (!msd.dstat.is_mdnie_loaded) {
		/* 4 bytes: big-endian X then Y white-point coordinates. */
		mipi_samsung_read_nv_mem(pdata, &nv_mdnie_read_cmds, temp);
		x = temp[0] << 8 | temp[1];	/* X */
		y = temp[2] << 8 | temp[3];	/* Y */
		coordinate_tunning(x, y);
		msd.dstat.is_mdnie_loaded = true;
	}
#endif
	msd.dstat.is_panel_read_done = true;
	pr_info("%s : --\n",__func__);
	return;
}
/*
 * mdss_dsi_panel_dimming_init() - one-time smart-dimming setup: pick the
 * smartdim_conf for the detected panel, load the MTP gamma buffer,
 * (optionally) program LDI VDD/VDDM offsets and initialise the dimming
 * engine.  Always returns 0.
 *
 * Fixes: the first VDD offset pr_info mislabelled the value as
 * "vddm_offset"; the panel switch had no default case.
 */
static int mdss_dsi_panel_dimming_init(struct mdss_panel_data *pdata)
{
#ifdef LDI_ADJ_VDDM_OFFSET
	unsigned int vdd_offset, vddm_offset;
	char vol_ref_buffer, vol_ref_buffer2;
#endif
	pr_info("%s : ++\n",__func__);
#if defined(CAMERA_LP)
	return 0;
#endif
#if defined(CONFIG_SEC_TRLTE_PROJECT) //TEMP for T
	if (system_rev < 2)
		return 0;
#endif
	switch (msd.panel) {
	case PANEL_FHD_OCTA_S6E3FA0:
	case PANEL_FHD_OCTA_S6E3FA0_CMD:
	case PANEL_FHD_OCTA_S6E3FA2_CMD:
	case PANEL_WQHD_OCTA_S6E3HA0_CMD:
	case PANEL_WQHD_OCTA_S6E3HA2X01_CMD:
	case PANEL_WQXGA_OCTA_S6E3HA2X01_CMD:
		msd.sdimconf = smart_S6E3_get_conf();
		break;
	default:
		/* Unknown panel: sdimconf stays as-is; BUG_ON below trips
		 * if it was never assigned. */
		break;
	}
	/* Just a safety check to ensure smart dimming data is initialised well */
	BUG_ON(msd.sdimconf == NULL);
	/* Set the mtp read buffer pointer and read the NVM value*/
	mipi_samsung_read_nv_mem(pdata, &nv_mtp_read_cmds, msd.sdimconf->mtp_buffer);
#ifdef LDI_ADJ_VDDM_OFFSET
	/* Translate the factory VDD/VDDM reference readings through the
	 * LUT into the offset command payloads. */
	mipi_samsung_read_nv_mem(msd.pdata, &read_vdd_ref_cmds, &vol_ref_buffer);
	vdd_offset = (unsigned int)(vol_ref_buffer & 0x7F);
	pr_info("%s:vdd_offset = %d , ldi_vdd_lut[%d][1] = %d \n", __func__, vdd_offset, vdd_offset, ldi_vddm_lut[vdd_offset][1]);
	write_vdd_offset_cmds.cmd_desc[3].payload[1] = ldi_vddm_lut[vdd_offset][1];
	mipi_samsung_read_nv_mem(msd.pdata, &read_vddm_ref_cmds, &vol_ref_buffer2);
	vddm_offset = (unsigned int)(vol_ref_buffer2 & 0x7F);
	pr_info("%s:vddm_offset = %d , ldi_vddm_lut[%d][1] = %d \n", __func__, vddm_offset, vddm_offset, ldi_vddm_lut[vddm_offset][1]);
	write_vddm_offset_cmds.cmd_desc[2].payload[1] = ldi_vddm_lut[vddm_offset][1];
#endif
	/* Initialize smart dimming related things here */
	/* lux_tab setting for 350cd */
	msd.sdimconf->lux_tab = &candela_map_table.lux_tab[0];
	msd.sdimconf->lux_tabsize = candela_map_table.lux_tab_size;
	msd.sdimconf->man_id = msd.manufacture_id;
	/* Just a safety check to ensure smart dimming data is initialised well */
	BUG_ON(msd.sdimconf->init == NULL);
	msd.sdimconf->init();
	msd.dstat.temperature = 20; // default temperature
	msd.dstat.elvss_need_update = 1;
	msd.dstat.is_smart_dim_loaded = true;
	pr_info("%s : --\n",__func__);
	return 0;
}
#if defined(CONFIG_LCD_HMT)
/*
 * mdss_dsi_panel_make_sdimconf() - fill an HMT smartdim_conf: read the
 * MTP gamma buffer and the 150cd reference bytes, hook up the candela
 * lux table, then run the dimming engine's init().
 *
 * Fix: the return value of the first NV read was captured in an unused
 * local ("size") — removed.
 */
static void mdss_dsi_panel_make_sdimconf(struct mdss_panel_data *pdata, struct smartdim_conf_hmt *pSdimconf, struct candella_lux_map *pCandela_map_table) {
	char mtp_buffer_for_150cd[30];
	/* Just a safety check to ensure smart dimming data is initialised well */
	BUG_ON(pSdimconf == NULL);
	/* Set the mtp read buffer pointer and read the NVM value*/
	mipi_samsung_read_nv_mem(pdata, &nv_mtp_read_cmds, pSdimconf->mtp_buffer);
	/* Read mtp (B4h 2nd ~ 31th) for HMT 150cd */
	mipi_samsung_read_nv_mem(pdata, &hmt_150cd_read_cmds, mtp_buffer_for_150cd);
	/* Initialize smart dimming related things here */
	/* lux_tab setting for 350cd */
	pSdimconf->lux_tab = &(pCandela_map_table->lux_tab[0]);
	pSdimconf->lux_tabsize = pCandela_map_table->lux_tab_size;
	pSdimconf->man_id = msd.manufacture_id;
	if (pSdimconf->set_para_for_150cd)
		pSdimconf->set_para_for_150cd(mtp_buffer_for_150cd, 30);
	/* Just a safety check to ensure smart dimming data is initialised well */
	BUG_ON(pSdimconf->init == NULL);
	pSdimconf->init();
	pr_info("[HMT] smart dimming done!\n");
}
/*
 * One-time HMT smart-dimming setup: reset HMT state, pick the HMT
 * dimming configuration for the detected panel and build it.
 * Always returns 0.
 */
static int mdss_dsi_panel_dimming_init_HMT(struct mdss_panel_data *pdata)
{
	pr_info("[HMT] %s : ++\n",__func__);

	/* Start from a clean HMT state. */
	msd.hmt_stat.hmt_bl_level = 0;
	msd.hmt_stat.hmt_on = 0;
	msd.hmt_stat.hmt_reverse = 0;
	msd.hmt_stat.hmt_low_persistence = 1;

	switch (msd.panel) {
	case PANEL_WQHD_OCTA_S6E3HA2X01_CMD:
	case PANEL_WQXGA_OCTA_S6E3HA2X01_CMD:
	case PANEL_WQHD_OCTA_S6E3HA0_CMD:
		msd.sdimconf_reverse_hmt_single = smart_S6E3_get_conf_hmt();
		break;
	default:
		break;
	}

	mdss_dsi_panel_make_sdimconf(pdata, msd.sdimconf_reverse_hmt_single, &candela_map_table_reverse_hmt);
	msd.dstat.is_hmt_smart_dim_loaded = true;

	pr_debug("[HMT] %s : --\n",__func__);
	return 0;
}
#endif
/*
 * mdss_dsi_panel_registered() - framebuffer-registration hook: cache the
 * mfd/pdata/ctrl pointers in msd, kick off mDNIe tuning init and mark
 * the panel suspended until panel_on runs.
 *
 * Fix: the original logged when msd.mfd was NULL but then dereferenced
 * it unconditionally (msd.mfd->resume_state) — guaranteed oops.  The
 * dereference is now guarded.
 */
static int mdss_dsi_panel_registered(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}
	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	msd.mfd = (struct msm_fb_data_type *)registered_fb[0]->par;
	msd.pdata = pdata;
	msd.ctrl_pdata = ctrl_pdata;
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	te_set_done = TE_SET_INIT;
#endif
	if(!msd.mfd)
	{
		pr_info("%s mds.mfd is null!!\n",__func__);
	} else
		pr_info("%s mds.mfd is ok!!\n",__func__);
#if defined(CONFIG_MDNIE_LITE_TUNING)
	pr_info("[%s] CONFIG_MDNIE_LITE_TUNING ok ! mdnie_lite_tuning_init called!\n",
		__func__);
	mdnie_lite_tuning_init(&msd);
#endif
	/* Set the initial state to Suspend until it is switched on */
	if (msd.mfd)
		msd.mfd->resume_state = MIPI_SUSPEND_STATE;
	pr_info("%s:%d, Panel registered succesfully\n", __func__, __LINE__);
	return 0;
}
#if defined(CONFIG_DUAL_LCD)
/* Panel data used while flipping between the two LCDs. */
struct mdss_panel_data *mdss_dsi_switching = NULL;
/* Non-zero while samsung_switching_lcd() is mid-flip. */
int IsSwitching = 0;
/* Defined in the DSI core; indicates the DSI clocks are running. */
extern int dsi_clk_on;
#endif
/*
 * mdss_dsi_panel_on() - full panel power-on sequence: read panel id and
 * NV data on first boot, initialise smart dimming, send the DT on-cmds
 * (unless resuming from ALPM), restore brightness/ALPM/ESD state and
 * mark the panel unblanked.
 *
 * In broadcast (dual-DSI) mode only the second controller (ctrl 1)
 * performs the sequence; ctrl 0 returns early.
 */
static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	/* static: alpm_data is resolved once from the first pdata seen. */
	static struct mdss_panel_alpm_data *alpm_data = NULL;
	struct mdss_panel_info *pinfo;
	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}
	if (unlikely(!alpm_data))
		alpm_data = &pdata->alpm_data;
	pr_info("%s : ++\n", __func__);
	pinfo = &pdata->panel_info;
	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
			panel_data);
	msd.ctrl_pdata = ctrl;
	pr_debug("mdss_dsi_panel_on DSI_MODE = %d ++\n",msd.pdata->panel_info.mipi.mode);
	pr_info("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
	if (ctrl->shared_pdata.broadcast_enable) {
		if (ctrl->ndx == DSI_CTRL_0) {
			pr_info("%s: Broadcast mode. 1st ctrl(0). return..\n",__func__);
			goto end;
		}
	}
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQHD_PT_PANEL) || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQXGA_PT_PANEL) //TEMP for T
	if (system_rev >= 2)
		mipi_samsung_disp_send_cmd(PANEL_MTP_ENABLE, true);
#endif
	/* First power-on only: identify the panel and read its NV data. */
	if (!msd.manufacture_id) {
		msd.manufacture_id = mipi_samsung_manufacture_id(pdata);
		if (set_panel_rev(msd.manufacture_id) < 0)
			pr_err("%s : can't find panel id.. \n", __func__);
	}
	if (!msd.dstat.is_panel_read_done){
		mdss_dsi_panel_read_func(pdata);
	}
	if (!msd.dstat.is_smart_dim_loaded)
		mdss_dsi_panel_dimming_init(pdata);
#if defined(CONFIG_LCD_HMT)
	if (!msd.dstat.is_hmt_smart_dim_loaded)
		mdss_dsi_panel_dimming_init_HMT(pdata);
#endif
	/*
	 * Normaly the else is working for PANEL_DISP_ON_SEQ
	 * if the alpm was not enabled
	 */
	if (alpm_data->alpm_status) {
		/* Skip on-cmds when waking from ALPM (panel never fully off). */
		if (!alpm_data->alpm_status(CHECK_PREVIOUS_STATUS))
			mdss_dsi_panel_cmds_send(ctrl, &ctrl->on_cmds);
	} else {
		mdss_dsi_panel_cmds_send(ctrl, &ctrl->on_cmds);
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
		if (te_set_done == TE_SET_DONE) {
			mipi_samsung_disp_send_cmd(PANEL_SET_TE, true);
		} else
			msleep(120);
#endif
	}
	if (msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD)
		mipi_samsung_disp_send_cmd(PANEL_HSYNC_ON, true);
	if ((msd.panel == PANEL_WQHD_OCTA_S6E3HA2X01_CMD || msd.panel == PANEL_WQXGA_OCTA_S6E3HA2X01_CMD) && (msd.id2 == 0x20 ||msd.id2 == 0x40 ))
		mipi_samsung_disp_send_cmd(PANEL_HSYNC_ON, true);
	/* Recovery Mode : Set some default brightness */
	if (msd.dstat.recovery_boot_mode) {
		msd.dstat.bright_level = RECOVERY_BRIGHTNESS;
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
	}
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQHD_PT_PANEL) || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_S6E3HA2_CMD_WQXGA_PT_PANEL) //TEMP for T
	if (system_rev >= 2)
		mipi_samsung_disp_send_cmd(PANEL_MTP_DISABLE, true);
#endif
	/* Init Index Values */
	msd.dstat.curr_elvss_idx = -1;
#if defined(SMART_VINT)
	msd.dstat.curr_vint_idx = -1;
#endif
	msd.dstat.curr_acl_idx = -1;
	msd.dstat.curr_opr_idx = -1;
	msd.dstat.curr_aid_idx = -1;
	msd.dstat.hbm_mode = 0;
	msd.dstat.on = 1;
	msd.dstat.wait_disp_on = 1;
	/*default acl off(caps on :b5 41) in on seq. */
	msd.dstat.curr_acl_idx = 0;
	msd.dstat.curr_acl_cond = 0;
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	if (te_set_done == TE_SET_INIT)
		te_set_done = TE_SET_READY;
#endif
	msd.mfd->resume_state = MIPI_RESUME_STATE;
#ifdef LDI_ADJ_VDDM_OFFSET
	mipi_samsung_disp_send_cmd(PANEL_LDI_SET_VDD_OFFSET, true);
	mipi_samsung_disp_send_cmd(PANEL_LDI_SET_VDDM_OFFSET, true);
#endif
	/* ALPM Mode Change */
	if (alpm_data->alpm_status) {
		/* Requested-on and not previously on: enter ALPM. */
		if (!alpm_data->alpm_status(CHECK_PREVIOUS_STATUS)\
				&& alpm_data->alpm_status(CHECK_CURRENT_STATUS)) {
			/* Turn On ALPM Mode */
			mipi_samsung_disp_send_cmd(PANEL_ALPM_ON, true);
			alpm_data->alpm_status(STORE_CURRENT_STATUS);
			pr_info("[ALPM_DEBUG] %s: Send ALPM mode on cmds\n", __func__);
		} else if (!alpm_data->alpm_status(CHECK_CURRENT_STATUS)\
					&& alpm_data->alpm_status(CHECK_PREVIOUS_STATUS)) {
			/* Turn Off ALPM Mode */
			mipi_samsung_disp_send_cmd(PANEL_ALPM_OFF, true);
			alpm_data->alpm_status(CLEAR_MODE_STATUS);
			pr_info("[ALPM_DEBUG] %s: Send ALPM off cmds\n", __func__);
		}
	}
#if defined(CONFIG_MDNIE_LITE_TUNING)
	is_negative_on();
	/* NOTE(review): the partial-update restore and the brightness
	 * restore below are inside the MDNIE ifdef — confirm that is
	 * intentional and not an #endif placement slip. */
#if defined(PARTIAL_UPDATE)
	if (partial_disp_range[0] || partial_disp_range[1])
		mipi_samsung_disp_send_cmd(PANEL_PARTIAL_ON, true);
#endif
	// to prevent splash during wakeup
	msd.dstat.bright_level = msd.dstat.recent_bright_level;
	mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
#endif
#if defined(CONFIG_ESD_FG_RECOVERY)
	if(esd_enable){
		if (err_fg_working){
			err_fg_working = 0;
			esd_count++;
		}
		enable_irq(gpio_to_irq(err_fg_gpio));
		pr_info("%s: ESD enable irq (%d)esd_count(%d)\n", __func__,
			gpio_get_value(err_fg_gpio),esd_count);
	}
#endif
#if defined(CONFIG_WACOM_LCD_FREQ_COMPENSATE)
	/*LUT offset initalization*/
	write_ldi_fps_cmds.cmd_desc[1].payload[3] = 0xC5;
#endif
end:
	pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
	pr_info("%s : --\n", __func__);
	return 0;
}
/*
 * mdss_dsi_panel_off() - panel power-down: quiesce the ESD interrupt,
 * mark the panel suspended, and send the DT off-cmds unless ALPM is
 * active (in which case the panel stays in its low-power mode).
 *
 * In broadcast (dual-DSI) mode only ctrl 1 sends the off-cmds; ctrl 0
 * still handles ESD irq disable and state bookkeeping before returning.
 */
static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	/* static: alpm_data is resolved once from the first pdata seen. */
	static struct mdss_panel_alpm_data *alpm_data = NULL;
	struct mdss_panel_info *pinfo;
	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}
	if (unlikely(!alpm_data))
		alpm_data = &pdata->alpm_data;
	pr_info("%s : ++\n",__func__);
	pinfo = &pdata->panel_info;
	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
			panel_data);
	msd.ctrl_pdata = ctrl;
#if defined(CONFIG_ESD_FG_RECOVERY)
	if (ctrl->ndx == DSI_CTRL_0) {
		/* Stop ESD detection while the panel goes down. */
		if (esd_enable && !err_fg_working && msd.dstat.on) {
			disable_irq_nosync(gpio_to_irq(err_fg_gpio));
			cancel_work_sync(&err_fg_work);
			pr_info("%s: ESD disable irq (%d)\n", __func__, gpio_get_value(err_fg_gpio));
		}
	}
#endif
	msd.dstat.on = 0;
	msd.mfd->resume_state = MIPI_SUSPEND_STATE;
	pr_info("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
	if (ctrl->shared_pdata.broadcast_enable) {
		if (ctrl->ndx == DSI_CTRL_0) {
			pr_info("%s: Broadcast mode. 1st ctrl(0). return..\n",__func__);
			goto end;
		}
	}
	/* Keep the panel alive in ALPM; otherwise send the off sequence. */
	if (alpm_data->alpm_status && alpm_data->alpm_status(CHECK_CURRENT_STATUS))
		pr_info("[ALPM_DEBUG] %s: Skip to send panel off cmds\n", __func__);
	else
		mdss_dsi_panel_cmds_send(ctrl, &ctrl->off_cmds);
	pr_info("DISPLAY_OFF\n");
end:
	pinfo->blank_state = MDSS_PANEL_BLANK_BLANK;
	pr_info("%s : --\n",__func__);
	return 0;
}
/*
 * Record the panel's low-power blank state.  No panel commands are sent
 * here; only pinfo->blank_state is updated.
 */
static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
	int enable)
{
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	struct mdss_panel_info *pinfo;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	pinfo = &pdata->panel_info;
	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	pr_debug("%s: ctrl=%p ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
		enable);

	/* Any panel specific low power commands/config */
	pinfo->blank_state = enable ? MDSS_PANEL_BLANK_LOW_POWER
				    : MDSS_PANEL_BLANK_UNBLANK;

	pr_debug("%s:-\n", __func__);
	return 0;
}
#if defined(CONFIG_DUAL_LCD)
/*
 * Flip between the two LCDs of a dual-panel device: select the other
 * panel, then cycle the active panel off and back on (with brightness
 * restored from mfd) while IsSwitching guards the transition.
 */
int samsung_switching_lcd(int flip)
{
	int ret = 0;

	msd.dstat.lcd_sel=!flip; //Change LCD SEL

	if(mdss_dsi_switching == NULL)
		return 0;

	if (get_lcd_attached() == 0)
	{
		pr_err("%s: get_lcd_attached(0)!\n",__func__);
		return -ENODEV;
	}

	LCD_DEBUG("msd.dstat.on=%d, lcd_sel=%d +\n", msd.dstat.on, msd.dstat.lcd_sel);

	/* Panel not marked on yet but clocks running: give it up to
	 * 500 ms to finish coming up before switching. */
	if(!msd.dstat.on && dsi_clk_on) {
		int attempts_left = 5;

		while (attempts_left > 0 && !msd.dstat.on) {
			msleep(100);
			attempts_left--;
		}
	}

	if(msd.dstat.on && dsi_clk_on) {
		IsSwitching = 1;

		ret = mdss_dsi_panel_off(msd.pdata);
		if(ret)
			pr_err("%s: mdss_dsi_panel_off error\n",__func__);

		/* Carry the framebuffer's brightness over to the new panel
		 * and reset the cached command indices. */
		msd.dstat.bright_level = msd.mfd->bl_level;
		msd.dstat.curr_elvss_idx = -1;
		msd.dstat.curr_acl_idx = -1;
		msd.dstat.curr_aid_idx = -1;
		msd.dstat.hbm_mode = 0;

		ret = mdss_dsi_panel_on(msd.pdata);
		if(ret)
			pr_err("%s: mdss_dsi_panel_on error\n",__func__);

		IsSwitching = 0;
	}

	LCD_DEBUG(" -\n");
	return ret;
}
EXPORT_SYMBOL(samsung_switching_lcd);
#endif
static int mdss_samsung_parse_candella_lux_mapping_table(struct device_node *np,
struct candella_lux_map *table, char *keystring)
{
const __be32 *data;
int data_offset, len = 0 , i = 0;
int cdmap_start=0, cdmap_end=0;
data = of_get_property(np, keystring, &len);
if (!data) {
pr_err("%s:%d, Unable to read table %s \n",
__func__, __LINE__, keystring);
return -EINVAL;
}
if ((len % 4) != 0) {
pr_err("%s:%d, Incorrect table entries for %s \n",
__func__, __LINE__, keystring);
return -EINVAL;
}
table->lux_tab_size = len / (sizeof(int)*4);
table->lux_tab = kzalloc((sizeof(int) * table->lux_tab_size), GFP_KERNEL);
if (!table->lux_tab)
return -ENOMEM;
table->cmd_idx = kzalloc((sizeof(int) * table->lux_tab_size), GFP_KERNEL);
if (!table->cmd_idx)
goto error;
data_offset = 0;
for (i = 0 ; i < table->lux_tab_size; i++) {
table->cmd_idx[i]= be32_to_cpup(&data[data_offset++]); /* 1rst field => <idx> */
cdmap_start = be32_to_cpup(&data[data_offset++]); /* 2nd field => <from> */
cdmap_end = be32_to_cpup(&data[data_offset++]); /* 3rd field => <till> */
table->lux_tab[i] = be32_to_cpup(&data[data_offset++]); /* 4th field => <candella> */
/* Fill the backlight level to lux mapping array */
do{
table->bkl[cdmap_start++] = i;
}while(cdmap_start <= cdmap_end);
}
return 0;
error:
kfree(table->lux_tab);
return -ENOMEM;
}
static int mdss_samsung_parse_panel_table(struct device_node *np,
struct cmd_map *table, char *keystring)
{
const __be32 *data;
int data_offset, len = 0 , i = 0;
data = of_get_property(np, keystring, &len);
if (!data) {
pr_err("%s:%d, Unable to read table %s \n",
__func__, __LINE__, keystring);
return -EINVAL;
}
if ((len % 2) != 0) {
pr_err("%s:%d, Incorrect table entries for %s \n",
__func__, __LINE__, keystring);
return -EINVAL;
}
table->size = len / (sizeof(int)*2);
table->bl_level = kzalloc((sizeof(int) * table->size), GFP_KERNEL);
if (!table->bl_level)
return -ENOMEM;
table->cmd_idx = kzalloc((sizeof(int) * table->size), GFP_KERNEL);
if (!table->cmd_idx)
goto error;
data_offset = 0;
for (i = 0 ; i < table->size; i++) {
table->bl_level[i] = be32_to_cpup(&data[data_offset++]);
table->cmd_idx[i] = be32_to_cpup(&data[data_offset++]);
}
return 0;
error:
kfree(table->cmd_idx);
return -ENOMEM;
}
/*
 * mdss_samsung_parse_panel_cmd() - parse a DT blob of DSI command
 * packets into a dsi_cmd.  Two passes over the buffer: the first counts
 * and validates packets (read commands carry two trailing bytes: read
 * size and start offset), the second fills cmd_desc/payload pointers.
 *
 * Fixes: error1 (cmd_desc allocation failure) leaked read_startoffset;
 * the scan loop compared signed len against an unsigned sizeof, so a
 * malformed blob driving len negative would wrap and keep scanning —
 * the sizeof is now cast to int to keep the comparison signed.
 */
static int mdss_samsung_parse_panel_cmd(struct device_node *np,
		struct dsi_cmd *commands, char *keystring)
{
	const char *data;
	int type, len = 0, i = 0;
	char *bp;
	struct dsi_ctrl_hdr *dchdr;
	int is_read = 0;
	data = of_get_property(np, keystring, &len);
	if (!data) {
		pr_err("%s:%d, Unable to read %s \n",
			__func__, __LINE__, keystring);
		return -ENOMEM;
	}
	commands->cmds_buff = kzalloc(sizeof(char) * len, GFP_KERNEL);
	if (!commands->cmds_buff)
		return -ENOMEM;
	memcpy(commands->cmds_buff, data, len);
	commands->cmds_len = len;
	commands->num_of_cmds = 0;
	/* scan dcs commands */
	bp = commands->cmds_buff;
	while (len > (int)sizeof(*dchdr)) {
		dchdr = (struct dsi_ctrl_hdr *)bp;
		dchdr->dlen = ntohs(dchdr->dlen);
		if (dchdr->dlen >200)
			goto error2;
		bp += sizeof(*dchdr);
		len -= sizeof(*dchdr);
		bp += dchdr->dlen;
		len -= dchdr->dlen;
		commands->num_of_cmds++;
		type = dchdr->dtype;
		if (type == DTYPE_GEN_READ ||
			type == DTYPE_GEN_READ1 ||
			type == DTYPE_GEN_READ2 ||
			type == DTYPE_DCS_READ) {
			/* Read command :last byte contain read size, read start */
			bp += 2;
			len -= 2;
			is_read = 1;
		}
	}
	if (len != 0) {
		pr_err("%s: dcs OFF command byte Error, len=%d", __func__, len);
		commands->cmds_len = 0;
		commands->num_of_cmds = 0;
		goto error2;
	}
	if (is_read) {
		/*
			Allocate an array which will store the number
			for bytes to read for each read command
		*/
		commands->read_size = kzalloc(sizeof(char) * \
					commands->num_of_cmds, GFP_KERNEL);
		if (!commands->read_size) {
			pr_err("%s:%d, Unable to read NV cmds",
				__func__, __LINE__);
			goto error2;
		}
		commands->read_startoffset = kzalloc(sizeof(char) * \
					commands->num_of_cmds, GFP_KERNEL);
		if (!commands->read_startoffset) {
			pr_err("%s:%d, Unable to read NV cmds",
				__func__, __LINE__);
			goto error1;
		}
	}
	commands->cmd_desc = kzalloc(commands->num_of_cmds
				* sizeof(struct dsi_cmd_desc),
				GFP_KERNEL);
	if (!commands->cmd_desc)
		goto error1;
	/* Second pass: point each descriptor at its header and payload. */
	bp = commands->cmds_buff;
	len = commands->cmds_len;
	for (i = 0; i < commands->num_of_cmds; i++) {
		dchdr = (struct dsi_ctrl_hdr *)bp;
		len -= sizeof(*dchdr);
		bp += sizeof(*dchdr);
		commands->cmd_desc[i].dchdr = *dchdr;
		commands->cmd_desc[i].payload = bp;
		bp += dchdr->dlen;
		len -= dchdr->dlen;
		if (is_read)
		{
			commands->read_size[i] = *bp++;
			commands->read_startoffset[i] = *bp++;
			len -= 2;
		}
	}
	return 0;
error1:
	kfree(commands->read_size);
	kfree(commands->read_startoffset);
error2:
	kfree(commands->cmds_buff);
	return -EINVAL;
}
/*
 * Populate panel_info->te from the optional qcom,mdss-tear-check-*
 * DT properties, applying the documented defaults for absent entries.
 */
static void mdss_panel_parse_te_params(struct device_node *np,
				       struct mdss_panel_info *panel_info)
{
	u32 val;
	int rc = 0;

	pr_info("%s ++ \n", __func__);
	/*
	 * TE default: dsi byte clock calculated base on 70 fps;
	 * around 14 ms to complete a kickoff cycle if te disabled;
	 * vclk_line base on 60 fps; write is faster than read;
	 * init == start == rdptr;
	 */
	panel_info->te.tear_check_en =
		!of_property_read_bool(np, "qcom,mdss-tear-check-disable");

	rc = of_property_read_u32
		(np, "qcom,mdss-tear-check-sync-cfg-height", &val);
	panel_info->te.sync_cfg_height = rc ? 0xfff0 : val;

	rc = of_property_read_u32
		(np, "qcom,mdss-tear-check-sync-init-val", &val);
	panel_info->te.vsync_init_val = rc ? panel_info->yres : val;

	rc = of_property_read_u32
		(np, "qcom,mdss-tear-check-sync-threshold-start", &val);
	panel_info->te.sync_threshold_start = rc ? 4 : val;

	rc = of_property_read_u32
		(np, "qcom,mdss-tear-check-sync-threshold-continue", &val);
	panel_info->te.sync_threshold_continue = rc ? 4 : val;

	rc = of_property_read_u32(np, "qcom,mdss-tear-check-start-pos", &val);
	panel_info->te.start_pos = rc ? panel_info->yres : val;

	rc = of_property_read_u32
		(np, "qcom,mdss-tear-check-rd-ptr-trigger-intr", &val);
	panel_info->te.rd_ptr_irq = rc ? panel_info->yres + 1 : val;

	rc = of_property_read_u32(np, "qcom,mdss-tear-check-frame-rate", &val);
	panel_info->te.refx100 = rc ? 6000 : val;
}
/*
 * Parse the optional 6-cell qcom,panel-roi-alignment DT property into
 * pinfo's ROI alignment fields.  Missing or malformed properties are
 * logged at debug level and leave pinfo untouched.
 */
static void mdss_dsi_parse_roi_alignment(struct device_node *np,
	struct mdss_panel_info *pinfo)
{
	u32 value[6];
	int len = 0;
	int rc;
	struct property *data;

	data = of_find_property(np, "qcom,panel-roi-alignment", &len);
	len /= sizeof(u32);
	if (!data || (len != 6)) {
		pr_debug("%s: Panel roi alignment not found", __func__);
		return;
	}

	rc = of_property_read_u32_array(np,
			"qcom,panel-roi-alignment", value, len);
	if (rc) {
		pr_debug("%s: Error reading panel roi alignment values",
			__func__);
	} else {
		pinfo->xstart_pix_align = value[0];
		pinfo->width_pix_align = value[1];
		pinfo->ystart_pix_align = value[2];
		pinfo->height_pix_align = value[3];
		pinfo->min_width = value[4];
		pinfo->min_height = value[5];
	}

	pr_debug("%s: ROI alignment: [%d, %d, %d, %d, %d, %d]",
		__func__, pinfo->xstart_pix_align,
		pinfo->width_pix_align, pinfo->ystart_pix_align,
		pinfo->height_pix_align, pinfo->min_width,
		pinfo->min_height);
}
/*
 * mdss_dsi_parse_dcs_cmds() - parse a DT blob of DCS command packets
 * (dsi_ctrl_hdr + payload, repeated) into pcmds.  Two passes: first
 * validates lengths and counts packets, second fills descriptor/payload
 * pointers into the retained buffer.  link_key selects HS vs LP mode.
 * Returns 0 on success, -ENOMEM on missing property/alloc failure or a
 * malformed blob.
 */
static int mdss_dsi_parse_dcs_cmds(struct device_node *np,
		struct dsi_panel_cmds *pcmds, char *cmd_key, char *link_key)
{
	const char *data;
	int blen = 0, len;
	char *buf, *bp;
	struct dsi_ctrl_hdr *dchdr;
	int i, cnt;
	data = of_get_property(np, cmd_key, &blen);
	if (!data) {
		pr_err("%s: failed, key=%s\n", __func__, cmd_key);
		return -ENOMEM;
	}
	/* Work on a private copy: dlen fields are byte-swapped in place. */
	buf = kzalloc(sizeof(char) * blen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, data, blen);
	/* scan dcs commands */
	bp = buf;
	len = blen;
	cnt = 0;
	while (len >= sizeof(*dchdr)) {
		dchdr = (struct dsi_ctrl_hdr *)bp;
		/* DT stores dlen big-endian; convert once, here. */
		dchdr->dlen = ntohs(dchdr->dlen);
		if (dchdr->dlen > len) {
			pr_err("%s: dtsi cmd=%x error, len=%d",
				__func__, dchdr->dtype, dchdr->dlen);
			goto exit_free;
		}
		bp += sizeof(*dchdr);
		len -= sizeof(*dchdr);
		bp += dchdr->dlen;
		len -= dchdr->dlen;
		cnt++;
	}
	/* A well-formed blob is consumed exactly; leftovers mean garbage. */
	if (len != 0) {
		pr_err("%s: dcs_cmd=%x len=%d error!",
				__func__, buf[0], blen);
		goto exit_free;
	}
	pcmds->cmds = kzalloc(cnt * sizeof(struct dsi_cmd_desc),
						GFP_KERNEL);
	if (!pcmds->cmds)
		goto exit_free;
	pcmds->cmd_cnt = cnt;
	pcmds->buf = buf;
	pcmds->blen = blen;
	/* Second pass: descriptors point into the retained buf copy. */
	bp = buf;
	len = blen;
	for (i = 0; i < cnt; i++) {
		dchdr = (struct dsi_ctrl_hdr *)bp;
		len -= sizeof(*dchdr);
		bp += sizeof(*dchdr);
		pcmds->cmds[i].dchdr = *dchdr;
		pcmds->cmds[i].payload = bp;
		bp += dchdr->dlen;
		len -= dchdr->dlen;
	}
	data = of_get_property(np, link_key, NULL);
	if (data && !strcmp(data, "dsi_hs_mode"))
		pcmds->link_state = DSI_HS_MODE;
	else
		pcmds->link_state = DSI_LP_MODE;
	pr_info("%s: dcs_cmd=%x len=%d, cmd_cnt=%d link_state=%d\n", __func__,
		pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt, pcmds->link_state);
	return 0;
exit_free:
	kfree(buf);
	return -ENOMEM;
}
/*
 * Parse the full panel description from device-tree node @np into
 * @ctrl_pdata->panel_data.panel_info and the driver's Samsung command
 * tables.
 *
 * Fixes over the previous version:
 *  - of_get_property() for "qcom,mdss-pan-dest" may return NULL; it was
 *    passed straight to strlen() (NULL dereference).
 *  - bl_ctrl_type may be NULL; only the first branch of the else-if
 *    chain guarded against that before calling strncmp().
 *  - "bl_ctrl_dcs_cmds" was compared with length 12, matching any string
 *    with the "bl_ctrl_dcs_" prefix; compare the full 16 characters.
 *
 * Returns 0 on success, -EINVAL when a mandatory property is missing
 * or malformed.
 */
static int mdss_panel_parse_dt(struct device_node *np,
			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	u32 res[6], tmp;
	u32 fbc_res[7];
	int rc, i, len;
	const char *data;
	static const char *bl_ctrl_type, *pdest;
	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
	bool fbc_enabled = false;

	/* Mandatory: panel resolution. */
	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
	if (rc) {
		pr_err("%s:%d, panel resolution not specified\n",
						__func__, __LINE__);
		return -EINVAL;
	}
	pinfo->xres = (!rc ? res[0] : 640);
	pinfo->yres = (!rc ? res[1] : 480);

	/* Optional: physical dimensions (mm). */
	rc = of_property_read_u32_array(np, "qcom,mdss-pan-size", res, 2);
	if (rc == 0) {
		pinfo->physical_width = (!rc ? res[0] : -1);
		pinfo->physical_height = (!rc ? res[1] : -1);
	}
	pr_debug("Panel Physical Width=%d, Height=%d\n",
		pinfo->physical_width,
		pinfo->physical_height);

	/* Optional: active area; the remainder becomes padding. */
	rc = of_property_read_u32_array(np, "qcom,mdss-pan-active-res", res, 2);
	if (rc == 0) {
		pinfo->lcdc.xres_pad =
			pinfo->xres - res[0];
		pinfo->lcdc.yres_pad =
			pinfo->yres - res[1];
	}

	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
	if (rc) {
		pr_err("%s:%d, panel bpp not specified\n",
						__func__, __LINE__);
		return -EINVAL;
	}
	pinfo->bpp = (!rc ? tmp : 24);

	pdest = of_get_property(np, "qcom,mdss-pan-dest", NULL);
	/* of_get_property() returns NULL when the property is absent. */
	if (!pdest || strlen(pdest) != 9) {
		pr_err("%s: Unknown pdest specified\n", __func__);
		return -EINVAL;
	}
	if (!strncmp(pdest, "display_1", 9))
		pinfo->pdest = DISPLAY_1;
	else if (!strncmp(pdest, "display_2", 9))
		pinfo->pdest = DISPLAY_2;
	else {
		pr_debug("%s: pdest not specified. Set Default\n",
							__func__);
		pinfo->pdest = DISPLAY_1;
	}

#if defined(CONFIG_ESD_FG_RECOVERY)
	if (pinfo->pdest == DISPLAY_1) {
		err_fg_gpio = of_get_named_gpio(np, "qcom,esd-irq-gpio", 0);
		pr_err("%s:%d, err_fg_gpio (%d)",__func__, __LINE__,err_fg_gpio );
		if (!gpio_is_valid(err_fg_gpio)) {
			pr_err("%s:%d, esd gpio not specified\n",
							__func__, __LINE__);
		} else {
			/* ESD recovery only on board revision >= 12. */
			if (system_rev < 12)
				esd_enable = 0;
			else
				esd_enable = 1;
			rc = gpio_request(err_fg_gpio, "esd_enable");
			if (rc) {
				pr_err("request esd gpio failed, rc=%d\n",
					rc);
				gpio_free(err_fg_gpio);
				esd_enable = 0;
				/* return -ENODEV;*/
			}
		}
		pr_info("%s:%d, esd_enable(%d) system_rev(%d)\n",__func__, __LINE__,esd_enable, system_rev );
	}
#endif

	/* Porch/pulse timings with sane fallbacks. */
	rc = of_property_read_u32_array(np, "qcom,mdss-pan-porch-values", res, 6);
	pinfo->lcdc.h_back_porch = (!rc ? res[0] : 6);
	pinfo->lcdc.h_pulse_width = (!rc ? res[1] : 2);
	pinfo->lcdc.h_front_porch = (!rc ? res[2] : 6);
	pinfo->lcdc.v_back_porch = (!rc ? res[3] : 6);
	pinfo->lcdc.v_pulse_width = (!rc ? res[4] : 2);
	pinfo->lcdc.v_front_porch = (!rc ? res[5] : 6);

	rc = of_property_read_u32(np, "qcom,mdss-pan-underflow-clr", &tmp);
	pinfo->lcdc.underflow_clr = (!rc ? tmp : 0xff);

	/* Backlight control method; every branch must tolerate NULL. */
	bl_ctrl_type = of_get_property(np, "qcom,mdss-pan-bl-ctrl", NULL);
	if ((bl_ctrl_type) && (!strncmp(bl_ctrl_type, "bl_ctrl_wled", 12))) {
		led_trigger_register_simple("bkl-trigger", &bl_led_trigger);
		pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__);
		pinfo->bklt_ctrl = BL_WLED;
	} else if (bl_ctrl_type && !strncmp(bl_ctrl_type, "bl_ctrl_pwm", 11)) {
		pinfo->bklt_ctrl = BL_PWM;
		rc = of_property_read_u32(np, "qcom,dsi-pwm-period", &tmp);
		if (rc) {
			pr_err("%s:%d, Error, dsi pwm_period\n",
					__func__, __LINE__);
			return -EINVAL;
		}
		pinfo->pwm_period = tmp;
		rc = of_property_read_u32(np, "qcom,dsi-lpg-channel", &tmp);
		if (rc) {
			pr_err("%s:%d, Error, dsi lpg channel\n",
					__func__, __LINE__);
			return -EINVAL;
		}
		pinfo->pwm_lpg_chan = tmp;
		tmp = of_get_named_gpio(np, "qcom,dsi-pwm-gpio", 0);
		pinfo->pwm_pmic_gpio = tmp;
	} else if (bl_ctrl_type &&
		   !strncmp(bl_ctrl_type, "bl_ctrl_dcs_cmds", 16)) {
		pr_debug("%s: SUCCESS-> DCS CMD BACKLIGHT register\n",
			 __func__);
		pinfo->bklt_ctrl = BL_DCS_CMD;
	} else {
		pr_debug("%s: Unknown backlight control\n", __func__);
		pinfo->bklt_ctrl = UNKNOWN_CTRL;
	}

	rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp);
	pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS);

	rc = of_property_read_u32_array(np,
		"qcom,mdss-pan-bl-levels", res, 2);
	pinfo->bl_min = (!rc ? res[0] : 0);
	pinfo->bl_max = (!rc ? res[1] : 255);

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
	pinfo->mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
#if defined(CONFIG_LCD_FORCE_VIDEO_MODE)
	pinfo->mipi.mode = DSI_VIDEO_MODE;
#endif
	rc = of_property_read_u32(np, "qcom,mdss-vsync-enable", &tmp);
	pinfo->mipi.vsync_enable = (!rc ? tmp : 0);
	rc = of_property_read_u32(np, "qcom,mdss-hw-vsync-mode", &tmp);
	pinfo->mipi.hw_vsync_mode = (!rc ? tmp : 0);
	pr_info("pinfo->mipi.hw_vsync_mode = %d, pinfo->mipi.vsync_enable = %d ", pinfo->mipi.hw_vsync_mode, pinfo->mipi.vsync_enable);

	rc = of_property_read_u32(np,
		"qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
	pinfo->mipi.pulse_mode_hsa_he = (!rc ? tmp : false);

	rc = of_property_read_u32_array(np,
		"qcom,mdss-pan-dsi-h-power-stop", res, 3);
	pinfo->mipi.hbp_power_stop = (!rc ? res[0] : false);
	pinfo->mipi.hsa_power_stop = (!rc ? res[1] : false);
	pinfo->mipi.hfp_power_stop = (!rc ? res[2] : false);

	rc = of_property_read_u32_array(np,
		"qcom,mdss-pan-dsi-bllp-power-stop", res, 2);
	pinfo->mipi.bllp_power_stop =
		(!rc ? res[0] : false);
	pinfo->mipi.eof_bllp_power_stop =
		(!rc ? res[1] : false);

	rc = of_property_read_u32(np,
		"qcom,mdss-pan-dsi-traffic-mode", &tmp);
	pinfo->mipi.traffic_mode =
			(!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
	rc = of_property_read_u32(np,
		"qcom,mdss-pan-insert-dcs-cmd", &tmp);
	pinfo->mipi.insert_dcs_cmd =
			(!rc ? tmp : 1);
	rc = of_property_read_u32(np,
		"qcom,mdss-pan-wr-mem-continue", &tmp);
	pinfo->mipi.wr_mem_continue =
			(!rc ? tmp : 0x3c);
	rc = of_property_read_u32(np,
		"qcom,mdss-pan-wr-mem-start", &tmp);
	pinfo->mipi.wr_mem_start =
			(!rc ? tmp : 0x2c);
	rc = of_property_read_u32(np,
		"qcom,mdss-pan-te-sel", &tmp);
	pinfo->mipi.te_sel =
			(!rc ? tmp : 1);
	rc = of_property_read_u32(np,
		"qcom,mdss-pan-dsi-dst-format", &tmp);
	pinfo->mipi.dst_format =
			(!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
#if defined(CONFIG_LCD_FORCE_VIDEO_MODE)
	pinfo->mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
#endif
	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp);
	pinfo->mipi.vc = (!rc ? tmp : 0);
	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp);
	pinfo->mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
	rc = of_property_read_u32(np, "qcom,mdss-force-clk-lane-hs", &tmp);
	pinfo->mipi.force_clk_lane_hs = (!rc ? tmp : 0);
	rc = of_property_read_u32(np, "samsung,mdss-early-lcd-on", &tmp);
	pinfo->early_lcd_on = (!rc ? tmp : 0);

	rc = of_property_read_u32_array(np,
		"qcom,mdss-pan-dsi-data-lanes", res, 4);
	pinfo->mipi.data_lane0 = (!rc ? res[0] : true);
	pinfo->mipi.data_lane1 = (!rc ? res[1] : false);
	pinfo->mipi.data_lane2 = (!rc ? res[2] : false);
	pinfo->mipi.data_lane3 = (!rc ? res[3] : false);

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dlane-swap", &tmp);
	pinfo->mipi.dlane_swap = (!rc ? tmp : 0);

	rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
	pinfo->mipi.t_clk_pre = (!rc ? res[0] : 0x24);
	pinfo->mipi.t_clk_post = (!rc ? res[1] : 0x03);

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp);
	pinfo->mipi.stream = (!rc ? tmp : 0);

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-tx-eot-append", &tmp);
	pinfo->mipi.tx_eot_append = (!rc ? tmp : 0);

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp);
	pinfo->mipi.mdp_trigger =
			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
	if (pinfo->mipi.mdp_trigger > 6) {
		pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
						 __func__, __LINE__);
		pinfo->mipi.mdp_trigger =
					DSI_CMD_TRIGGER_SW;
	}

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp);
	pinfo->mipi.dma_trigger =
			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
	if (pinfo->mipi.dma_trigger > 6) {
		pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
						 __func__, __LINE__);
		pinfo->mipi.dma_trigger =
					DSI_CMD_TRIGGER_SW;
	}

	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
	pinfo->mipi.frame_rate = (!rc ? tmp : 60);

	rc = of_property_read_u32(np, "qcom,mdss-pan-clk-rate", &tmp);
	pinfo->clk_rate = (!rc ? tmp : 0);

	/* DSI PHY settings: all five byte-blobs are mandatory. */
	data = of_get_property(np, "qcom,panel-phy-regulatorSettings", &len);
	if ((!data) || (len != 7)) {
		pr_err("%s:%d, Unable to read Phy regulator settings",
		       __func__, __LINE__);
		return -EINVAL;
	}
	for (i = 0; i < len; i++)
		phy_params.regulator[i] = data[i];

	data = of_get_property(np, "qcom,panel-phy-timingSettings", &len);
	if ((!data) || (len != 12)) {
		pr_err("%s:%d, Unable to read Phy timing settings",
		       __func__, __LINE__);
		return -EINVAL;
	}
	for (i = 0; i < len; i++)
		phy_params.timing[i] = data[i];

	data = of_get_property(np, "qcom,panel-phy-strengthCtrl", &len);
	if ((!data) || (len != 2)) {
		pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
		       __func__, __LINE__);
		return -EINVAL;
	}
	phy_params.strength[0] = data[0];
	phy_params.strength[1] = data[1];

	data = of_get_property(np, "qcom,panel-phy-bistCtrl", &len);
	if ((!data) || (len != 6)) {
		pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
		       __func__, __LINE__);
		return -EINVAL;
	}
	for (i = 0; i < len; i++)
		phy_params.bistctrl[i] = data[i];

	data = of_get_property(np, "qcom,panel-phy-laneConfig", &len);
	if ((!data) || (len != 45)) {
		pr_err("%s:%d, Unable to read Phy lane configure settings",
		       __func__, __LINE__);
		return -EINVAL;
	}
	for (i = 0; i < len; i++)
		phy_params.lanecfg[i] = data[i];

	pinfo->mipi.dsi_phy_db = phy_params;

	/* Optional frame-buffer compression configuration. */
	fbc_enabled = of_property_read_bool(np,
			"qcom,fbc-enabled");
	if (fbc_enabled) {
		pr_debug("%s:%d FBC panel enabled.\n", __func__, __LINE__);
		pinfo->fbc.enabled = 1;
		rc = of_property_read_u32_array(np,
				"qcom,fbc-mode", fbc_res, 7);
		pinfo->fbc.target_bpp =
			(!rc ?	fbc_res[0] : pinfo->bpp);
		pinfo->fbc.comp_mode = (!rc ? fbc_res[1] : 0);
		pinfo->fbc.qerr_enable =
			(!rc ? fbc_res[2] : 0);
		pinfo->fbc.cd_bias = (!rc ? fbc_res[3] : 0);
		pinfo->fbc.pat_enable = (!rc ? fbc_res[4] : 0);
		pinfo->fbc.vlc_enable = (!rc ? fbc_res[5] : 0);
		pinfo->fbc.bflc_enable =
			(!rc ? fbc_res[6] : 0);
		rc = of_property_read_u32_array(np,
				"qcom,fbc-budget-ctl", fbc_res, 3);
		pinfo->fbc.line_x_budget =
			(!rc ? fbc_res[0] : 0);
		pinfo->fbc.block_x_budget =
			(!rc ? fbc_res[1] : 0);
		pinfo->fbc.block_budget =
			(!rc ? fbc_res[2] : 0);
		rc = of_property_read_u32_array(np,
				"qcom,fbc-lossy-mode", fbc_res, 4);
		pinfo->fbc.lossless_mode_thd =
			(!rc ? fbc_res[0] : 0);
		pinfo->fbc.lossy_mode_thd =
			(!rc ? fbc_res[1] : 0);
		pinfo->fbc.lossy_rgb_thd =
			(!rc ? fbc_res[2] : 0);
		pinfo->fbc.lossy_mode_idx =
			(!rc ? fbc_res[3] : 0);
	} else {
		pr_debug("%s:%d Panel does not support FBC.\n",
				__func__, __LINE__);
		pinfo->fbc.enabled = 0;
		pinfo->fbc.target_bpp =
			pinfo->bpp;
	}

	mdss_dsi_parse_roi_alignment(np, pinfo);
	mdss_panel_parse_te_params(np, pinfo);

	/* Generic on/off command sequences. */
	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->on_cmds,
		"qcom,mdss-dsi-on-command", "qcom,mdss-dsi-on-command-state");
	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds,
		"qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state");

	/* Samsung-specific command tables. */
	mdss_samsung_parse_panel_cmd(np, &hsync_on_seq,
				"qcom,panel-hsync-on-seq");
	mdss_samsung_parse_panel_cmd(np, &display_on_cmd,
				"qcom,panel-display-on-cmds");
	mdss_samsung_parse_panel_cmd(np, &display_off_cmd,
				"qcom,panel-display-off-cmds");
	mdss_samsung_parse_panel_cmd(np, &test_key_enable_cmds,
				"samsung,panel-test-key-enable-cmds");
	mdss_samsung_parse_panel_cmd(np, &test_key_disable_cmds,
				"samsung,panel-test-key-disable-cmds");
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	mdss_samsung_parse_panel_cmd(np, &panel_set_te_osc_b,
				"samsung,panel-set-te-osc-b");
	mdss_samsung_parse_panel_cmd(np, &panel_set_te_restore,
				"samsung,panel-set-te-restore");
	mdss_samsung_parse_panel_cmd(np, &panel_set_te_1,
				"samsung,panel-set-te1");
	mdss_samsung_parse_panel_cmd(np, &panel_set_te_2,
				"samsung,panel-set-te2");
	mdss_samsung_parse_panel_cmd(np, &panel_set_te,
				"samsung,panel-set-te");
	mdss_samsung_parse_panel_cmd(np, &panel_osc_type_read_cmds,
				"samsung,panel-osc-type-read");
#endif
#if defined(CONFIG_WACOM_LCD_FREQ_COMPENSATE)
	mdss_samsung_parse_panel_cmd(np, &write_ldi_fps_cmds,
				"samsung,panel-ldi-fps-write-cmds");
#endif
	mdss_samsung_parse_panel_cmd(np, &nv_mtp_read_cmds,
				"samsung,panel-nv-mtp-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &nv_enable_cmds,
				"samsung,panel-nv-read-enable-cmds");
	mdss_samsung_parse_panel_cmd(np, &nv_disable_cmds,
				"samsung,panel-nv-read-disable-cmds");
	mdss_samsung_parse_panel_cmd(np, &manufacture_id_cmds,
				"samsung,panel-manufacture-id-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &manufacture_date_cmds,
				"samsung,panel-manufacture-date-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &ddi_id_cmds,
				"samsung,panel-ddi-id-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &rddpm_cmds,
				"samsung,panel-rddpm-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &rddsm_cmds,
				"samsung,panel-rddsm-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &mtp_read_sysfs_cmds,
				"samsung,panel-mtp-read-sysfs-cmds");
	mdss_samsung_parse_panel_cmd(np, &acl_off_cmd,
				"samsung,panel-acl-off-cmds");
	mdss_samsung_parse_panel_cmd(np, &acl_cmds_list,
				"samsung,panel-acl-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &opr_avg_cal_cmd,
				"samsung,panel-acl-OPR-avg-cal");
	mdss_samsung_parse_panel_cmd(np, &aclcont_cmds_list,
				"samsung,panel-aclcont-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &gamma_cmds_list,
				"samsung,panel-gamma-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &elvss_cmds_list,
				"samsung,panel-elvss-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &elvss_cmds_revI_list,
				"samsung,panel-elvss-cmds-revI-list");

	/* AID command/map selection depends on panel model and LDI revision. */
	if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)&& (msd.id3 == EVT0_K_wqhd_REVE)){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revE-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-revE-table");
	}else if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)&& (msd.id3 == EVT0_K_wqhd_REVF)){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revF-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-revF-table");
	}else if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)
		&& ((msd.id3 == EVT0_K_wqhd_REVG)||(msd.id3 == EVT0_K_wqhd_REVH)||(msd.id3 == EVT0_K_wqhd_REVI))){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revG-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-revG-table");
	}else if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)&& (msd.id3 == EVT0_K_wqhd_REVJ)){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revJ-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-revJ-table");
	}else if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)&& (msd.id3 == EVT0_K_wqhd_REVK)){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revK-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-revK-table");
	}else if((msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD)&& (msd.id3 == EVT0_K_wqhd_REVL)){
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list, "samsung,panel-aid-cmds-revL-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table, "samsung,panel-aid-map-revL-table");
	}else if(msd.panel == PANEL_WQHD_OCTA_S6E3HA2X01_CMD){
		if (msd.id3 == EVT0_T_wqhd_REVF ||msd.id3 == EVT0_T_wqhd_REVG)
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revF-list");
		else if(msd.id3 == EVT0_T_wqhd_REVH || msd.id3 == EVT0_T_wqhd_REVI || msd.id3 == EVT0_T_wqhd_REVJ)
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revH-list");
		else
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-revA-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
				"samsung,panel-aid-map-revA-table");
	}else if(msd.panel == PANEL_WQXGA_OCTA_S6E3HA2X01_CMD){
		if(msd.id2 == YM4_PANEL){
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
						"samsung,panel-aid-cmds-ym4-list");
			mdss_samsung_parse_panel_table(np, &aid_map_table,
						"samsung,panel-aid-map-revA-table");
		}else if(msd.id3 == EVT0_T_wqxga_REVD || msd.id3 == EVT0_T_wqxga_REVB){
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
						"samsung,panel-aid-cmds-revD-list");
			mdss_samsung_parse_panel_table(np, &aid_map_table,
						"samsung,panel-aid-map-revA-table");
		}else if(msd.id3 == EVT0_T_wqxga_REVE){
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
						"samsung,panel-aid-cmds-revE-list");
			mdss_samsung_parse_panel_table(np, &aid_map_table,
						"samsung,panel-aid-map-revA-table");
		}else{
			mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
						"samsung,panel-aid-cmds-revA-list");
			mdss_samsung_parse_panel_table(np, &aid_map_table,
						"samsung,panel-aid-map-revA-table");
		}
	}else{
		mdss_samsung_parse_panel_cmd(np, &aid_cmds_list,
					"samsung,panel-aid-cmds-list");
		mdss_samsung_parse_panel_table(np, &aid_map_table,
					"samsung,panel-aid-map-table");
	}

	mdss_samsung_parse_panel_table(np, &acl_map_table,
				"samsung,panel-acl-map-table");

	/* Process the lux value table */
	mdss_samsung_parse_candella_lux_mapping_table(np, &candela_map_table,
				"samsung,panel-candella-mapping-table");

#ifdef LDI_ADJ_VDDM_OFFSET
	mdss_samsung_parse_panel_cmd(np, &read_vdd_ref_cmds,
				"samsung,panel-ldi-vdd-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &write_vdd_offset_cmds,
				"samsung,panel-ldi-vdd-offset-write-cmds");
	mdss_samsung_parse_panel_cmd(np, &read_vddm_ref_cmds,
				"samsung,panel-ldi-vddm-read-cmds");
	mdss_samsung_parse_panel_cmd(np, &write_vddm_offset_cmds,
				"samsung,panel-ldi-vddm-offset-write-cmds");
#endif

#if defined(HBM_RE)
	if (msd.panel == PANEL_WQHD_OCTA_S6E3HA0_CMD ||\
		msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD){
		mdss_samsung_parse_panel_cmd(np, &nv_mtp_hbm_read_cmds,
					"samsung,panel-nv-mtp-read-hbm-cmds");
		mdss_samsung_parse_panel_cmd(np, &nv_mtp_hbm2_read_cmds,
					"samsung,panel-nv-mtp-read-hbm2-cmds");
		mdss_samsung_parse_panel_cmd(np, &hbm_gamma_cmds_list,
					"samsung,panel-gamma-hbm-cmds-list");
		if(msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD) {
			mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
					"samsung,panel-etc-hbm-cmds");
		} else {
			if (msd.id3 == EVT0_K_wqhd_REVB)
				mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
						"samsung,panel-etc-hbm-revB-cmds");
			else if (msd.id3 == EVT0_K_wqhd_REVC || msd.id3 == EVT0_K_wqhd_REVD)
				mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
						"samsung,panel-etc-hbm-revC-cmds");
			else if (msd.id3 == EVT0_K_wqhd_REVE )
				mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
						"samsung,panel-etc-hbm-revE-cmds");
			else if (msd.id3 >= EVT0_K_wqhd_REVF )
				mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
						"samsung,panel-etc-hbm-revF-cmds");
		}
	}else{
		mdss_samsung_parse_panel_cmd(np, &hbm_etc_cmds_list,
				"samsung,panel-hbm-cmds-list");
		mdss_samsung_parse_panel_cmd(np, &hbm_off_cmd,
				"samsung,panel-hbm-off-cmds");
	}
#endif
	mdss_samsung_parse_panel_cmd(np, &nv_mtp_elvss_read_cmds,
				"samsung,panel-nv-mtp-read-elvss-cmds");
#if defined(CONFIG_MDNIE_LITE_TUNING)
	mdss_samsung_parse_panel_cmd(np, &nv_mdnie_read_cmds,
					"samsung,panel-nv-mdnie-read-cmds");
#endif
#ifdef DEBUG_LDI_STATUS
	mdss_samsung_parse_panel_cmd(np, &ldi_debug_cmds,
				"samsung,panel-ldi-debug-read-cmds");
#endif
#if defined(TEMPERATURE_ELVSS)
	mdss_samsung_parse_panel_cmd(np, &elvss_lowtemp_cmds_list,
				"samsung,panel-elvss-lowtemp-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &elvss_lowtemp2_cmds_list,
				"samsung,panel-elvss-lowtemp2-cmds-list");
#endif
#if defined(SMART_ACL)
	mdss_samsung_parse_panel_cmd(np, &smart_acl_elvss_cmds_list,
				"samsung,panel-smart-acl-elvss-cmds-list");
	mdss_samsung_parse_panel_table(np, &smart_acl_elvss_map_table,
				"samsung,panel-smart-acl-elvss-map-table");
#endif
#if defined(SMART_VINT)
	mdss_samsung_parse_panel_cmd(np, &smart_vint_cmds_list,
				"samsung,panel-smart-vint-cmds-list");
	mdss_samsung_parse_panel_table(np, &smart_vint_map_table,
				"samsung,panel-smart-vint-map-table");
#endif
#if defined(PARTIAL_UPDATE)
	mdss_samsung_parse_panel_cmd(np, &partialdisp_on_cmd,
				"samsung,panel-ldi-partial-disp-on");
	mdss_samsung_parse_panel_cmd(np, &partialdisp_off_cmd,
				"samsung,panel-ldi-partial-disp-off");
#endif
#ifdef CONFIG_FB_MSM_SAMSUNG_AMOLED_LOW_POWER_MODE
	mdss_samsung_parse_panel_cmd(np, &alpm_on_seq,
				"samsung,panel-alpm-on-seq");
	mdss_samsung_parse_panel_cmd(np, &alpm_off_seq,
				"samsung,panel-alpm-off-seq");
#endif
#if defined(CONFIG_LCD_HMT)
	mdss_samsung_parse_panel_cmd(np, &hmt_single_scan_enable,
				"samsung,panel-hmt-single-scan-enable");
	mdss_samsung_parse_panel_cmd(np, &hmt_disable,
				"samsung,panel-hmt-disable");
	mdss_samsung_parse_panel_cmd(np, &hmt_reverse_enable,
				"samsung,panel-hmt-reverse-enable");
	mdss_samsung_parse_panel_cmd(np, &hmt_reverse_disable,
				"samsung,panel-hmt-reverse-disable");
	mdss_samsung_parse_panel_cmd(np, &hmt_bright_cmds_list,
				"samsung,hmt-panel-bright-cmds-list");
	mdss_samsung_parse_panel_cmd(np, &hmt_aid_cmds_list,
				"samsung,panel-aid-cmds-list-hmt");
	mdss_samsung_parse_panel_table(np, &aid_map_table_reverse_hmt,
				"samsung,panel-aid-map-table-reverse-hmt");
	mdss_samsung_parse_candella_lux_mapping_table(np, &candela_map_table_reverse_hmt,
				"samsung,panel-candella-mapping-table-reverse-hmt");
	mdss_samsung_parse_panel_cmd(np, &hmt_150cd_read_cmds,
				"samsung,panel-hmt_150cd-read-cmds");
#endif
	if(lcd_attached){
		pinfo->ulps_feature_enabled = of_property_read_bool(np,
			"qcom,ulps-enabled");
		pr_info(" ulps feature %s", (pinfo->ulps_feature_enabled ? "enabled" : "disabled"));
	}
	return 0;
}
#if defined(CONFIG_HAS_EARLYSUSPEND)
/* Early-suspend hook: mark the panel driver state as suspended so other
 * paths (e.g. ldi_fps) refuse to touch the hardware while blanked. */
static void mipi_samsung_disp_early_suspend(struct early_suspend *h)
{
	msd.mfd->resume_state = MIPI_SUSPEND_STATE;
	LCD_DEBUG("------");
}
/* Late-resume hook: counterpart of the early-suspend handler above;
 * flags the driver state as resumed. */
static void mipi_samsung_disp_late_resume(struct early_suspend *h)
{
	msd.mfd->resume_state = MIPI_RESUME_STATE;
	LCD_DEBUG("------");
}
#endif
/*
 * Check @panel_name against the NULL-terminated panel_supp_cdp[] table.
 * On a match, record the name and panel code in msd and return 0;
 * otherwise return -EINVAL.  Also latches the LDI id bytes from
 * get_lcd_id() as a side effect.
 *
 * Fixes: the old bounds-only check (i < ARRAY_SIZE) also accepted the
 * NULL sentinel entry when no name matched, reporting an unsupported
 * panel as supported; and memcpy() of MAX_PANEL_NAME_SIZE bytes could
 * read past the end of the caller's string.
 */
static int is_panel_supported(const char *panel_name)
{
	int i = 0;

	if (panel_name == NULL)
		return -EINVAL;

	if (get_lcd_id()) {
		msd.id3 = (get_lcd_id() & 0xFF);
		msd.id2 = ((get_lcd_id() & 0xFF00) >> 8);
	}

	while (panel_supp_cdp[i].name != NULL) {
		if (!strcmp(panel_name, panel_supp_cdp[i].name))
			break;
		i++;
	}

	/* A real match leaves panel_supp_cdp[i].name non-NULL. */
	if (i < ARRAY_SIZE(panel_supp_cdp) && panel_supp_cdp[i].name != NULL) {
		strlcpy(msd.panel_name, panel_name, MAX_PANEL_NAME_SIZE);
		msd.panel = panel_supp_cdp[i].panel_code;
		return 0;
	}

	return -EINVAL;
}
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
/*
 * Lookup table mapping a measured TE period (first column, units as
 * produced by the TE irq measurement in 'te' — presumably tenths of a
 * microsecond, TODO confirm against the irq handler) to the FCTRL
 * register value (second column) used by check_te_step1().  Entries are
 * sorted in strictly decreasing TE order; check_te_step1() picks the
 * first interval [table[i+1].te, table[i].te) containing the sample.
 */
struct te_fctrl_lookup_table te_fctrl_lookup_table[] = {
	{33333,	0x0 },
	{22999, 0x32},
	{22861, 0x33},
	{22726, 0x34},
	{22609, 0x35},
	{22490, 0x36},
	{22363, 0x37},
	{22237, 0x38},
	{22121, 0x39},
	{22005, 0x3A},
	{21880, 0x3B},
	{21761, 0x3C},
	{21649, 0x3D},
	{21535, 0x3E},
	{21461, 0x3F},
	{21385, 0x40},
	{21275, 0x41},
	{21170, 0x42},
	{21056, 0x43},
	{20943, 0x44},
	{20834, 0x45},
	{20723, 0x46},
	{20614, 0x47},
	{20506, 0x48},
	{20406, 0x49},
	{20311, 0x4A},
	{20206, 0x4B},
	{20099, 0x4C},
	{20003, 0x4D},
	{19903, 0x4E},
	{19808, 0x4F},
	{19717, 0x50},
	{19621, 0x51},
	{19526, 0x52},
	{19428, 0x53},
	{19331, 0x54},
	{19244, 0x55},
	{19153, 0x56},
	{19056, 0x57},
	{18963, 0x58},
	{18879, 0x59},
	{18794, 0x5A},
	{18702, 0x5B},
	{18610, 0x5C},
	{18522, 0x5D},
	{18438, 0x5E},
	{18359, 0x5F},
	{18277, 0x60},
	{18196, 0x61},
	{18116, 0x62},
	{18030, 0x63},
	{17943, 0x64},
	{17862, 0x65},
	{17783, 0x66},
	{17704, 0x67},
	{17625, 0x68},
	{17545, 0x69},
	{17464, 0x6A},
	{17384, 0x6B},
	{17306, 0x6C},
	{17231, 0x6D},
	{17158, 0x6E},
	{17089, 0x6F},
	{17016, 0x70},
	{16944, 0x71},
	{16874, 0x72},
	{16797, 0x73},
	{16717, 0x74},
	{16646, 0x75},
	{16578, 0x76},
	{16505, 0x77},
	{16433, 0x78},
	{16365, 0x79},
	{16299, 0x7A},
	{16228, 0x7B},
	{16152, 0x7C},
	{16089, 0x7D},
	{16027, 0x7E},
	{15973, 0x7F},
	{15921, 0x80},
	{15855, 0x81},
	{15787, 0x82},
	{15720, 0x83},
	{15657, 0x84},
	{15594, 0x85},
	{15532, 0x86},
	{15466, 0x87},
	{15401, 0x88},
	{15342, 0x89},
	{15283, 0x8A},
	{14286, 0x8B},
};
/*
 * Lookup table mapping a measured TE period (first column, same units
 * as te_fctrl_lookup_table above) to a signed FCTRL offset (second
 * column) applied in check_te_step2().  Sorted in decreasing TE order;
 * check_te_step2() selects the first bracketing interval.
 */
struct te_offset_lookup_table te_offset_lookup_table[] = {
	{19654, 0  },
	{19551, -34},
	{19448, -33},
	{19354, -32},
	{19264, -31},
	{19164, -30},
	{19063, -29},
	{18971, -28},
	{18877, -27},
	{18786, -26},
	{18701, -25},
	{18610, -24},
	{18519, -23},
	{18427, -22},
	{18335, -21},
	{18252, -20},
	{18166, -19},
	{18074, -18},
	{17986, -17},
	{17906, -16},
	{17825, -15},
	{17738, -14},
	{17650, -13},
	{17567, -12},
	{17487, -11},
	{17412, -10},
	{17334, -9 },
	{17257, -8 },
	{17182, -7 },
	{17101, -6 },
	{17018, -5 },
	{16941, -4 },
	{16866, -3 },
	{16818, -2 },
	{16518, 0  },
	{16488, 2  },
	{16414, 3  },
	{16343, 4  },
	{16273, 5  },
	{16207, 6  },
	{16139, 7  },
	{16070, 8  },
	{16005, 9  },
	{15931, 10},
	{15855, 11},
	{15788, 12},
	{15723, 13},
	{15654, 14},
	{15586, 15},
	{15521, 16},
	{15459, 17},
	{15391, 18},
	{15320, 19},
	{15259, 20},
	{15200, 21},
	{15149, 22},
	{15100, 23},
	{15037, 24},
	{14973, 25},
	{14909, 26},
	{14849, 27},
	{14790, 28},
	{14731, 29},
	{14669, 30},
	{14607, 31},
	{14551, 32},
	{14495, 33},
	{14435, 34},
};
extern int te;
extern int te_cnt;
extern struct completion te_check_comp;
/*
 * TE calibration step 1: wait for a TE measurement, look up the FCTRL
 * register value for the measured period and send PANEL_SET_TE_1.
 *
 * Returns the FCTRL value on success, 0 on timeout / out-of-range TE.
 *
 * Fix: the table element size was computed as sizeof(int)+sizeof(char)
 * (= 5), which ignores structure padding (the element is padded to int
 * alignment), so 'size' overshot the table and the scan could read out
 * of bounds.  Use the actual element size instead.
 */
static char check_te_step1(void)
{
	int ret;
	int size = ARRAY_SIZE(te_fctrl_lookup_table) - 1;
	int i;
	char fctrl = 0;

	pr_info("[%s] ++ \n", __func__);

	INIT_COMPLETION(te_check_comp);
	te_cnt = 0;
	/* Wait up to ~4 frames for the TE irq to record a period. */
	ret = wait_for_completion_timeout(&te_check_comp,
						msecs_to_jiffies(16 * 4));
	if (!ret) {
		pr_err("[ERROR] te_check_comp timeout!!\n");
		return 0;
	}

	if (te != 0) {
		pr_info("[%s] first TE = %d\n", __func__, te);
		/* Table is sorted by decreasing TE; find bracketing interval. */
		for (i = 0; i < size; i++) {
			if (te < te_fctrl_lookup_table[i].te && te >= te_fctrl_lookup_table[i+1].te) {
				fctrl = te_fctrl_lookup_table[i+1].value;
				break;
			}
		}
		if (i == size) {
			pr_err("[%s] out of range ... (%d)\n", __func__, te);
			return 0;
		}
		pr_info("[%s] fctrl = %x\n", __func__, fctrl);
		panel_set_te_1.cmd_desc[3].payload[1] = fctrl;
		mipi_samsung_disp_send_cmd(PANEL_SET_TE_1, true);
	} else {
		pr_info("[%s] TE is 0..\n", __func__);
		return 0;
	}

	pr_info("[%s] -- \n", __func__);
	return fctrl;
}
/*
 * TE calibration step 2: wait for a fresh TE measurement, look up the
 * signed offset for the measured period and program fctrl+offset via
 * PANEL_SET_TE_2 (also patched into the PANEL_SET_TE sequence).
 *
 * Returns 0 on success, -1 on timeout / out-of-range TE.
 *
 * Fix: the element size was hand-computed as sizeof(int)*2, which is
 * only correct while the struct stays exactly two ints with no padding;
 * derive it from the array element itself.
 */
static int check_te_step2(char fctrl)
{
	int ret;
	int size = ARRAY_SIZE(te_offset_lookup_table) - 1;
	int i;
	int offset;

	pr_info("[%s] ++ \n", __func__);

	INIT_COMPLETION(te_check_comp);
	te_cnt = 0;
	ret = wait_for_completion_timeout(&te_check_comp,
						msecs_to_jiffies(16 * 4));
	if (!ret) {
		pr_err("[ERROR] te_check_comp timeout!!\n");
		return -1;
	}

	if (te != 0) {
		pr_info("[%s] second TE = %d\n", __func__, te);
		/* Table is sorted by decreasing TE; find bracketing interval. */
		for (i = 0; i < size; i++) {
			if (te < te_offset_lookup_table[i].te && te >= te_offset_lookup_table[i+1].te) {
				offset = te_offset_lookup_table[i+1].offset;
				break;
			}
		}
		if (i == size) {
			pr_err("[%s] out of range ... (%d)\n", __func__, te);
			return -1;
		}
		pr_info("[%s] offset = %d\n", __func__, offset);
		panel_set_te_2.cmd_desc[3].payload[1] = fctrl + offset;
		panel_set_te.cmd_desc[4].payload[1] = fctrl + offset;
		mipi_samsung_disp_send_cmd(PANEL_SET_TE_2, true);
	} else {
		pr_info("[%s] TE is 0..\n", __func__);
		return -1;
	}

	pr_info("[%s] -- \n", __func__);
	return 0;
}
/*
 * TE calibration step 3: take one final TE measurement and verify it
 * landed within +/-5% of the nominal 16.666ms frame period.
 * Returns 0 when the final TE is acceptable, -1 otherwise.
 */
static int check_te_step3(void)
{
	int rc;

	pr_info("[%s] ++ \n", __func__);

	INIT_COMPLETION(te_check_comp);
	te_cnt = 0;
	rc = wait_for_completion_timeout(&te_check_comp,
						msecs_to_jiffies(16 * 4));
	if (rc == 0) {
		pr_err("[ERROR] te_check_comp timeout!!\n");
		return -1;
	}

	// +5% = 17.499 , -5% = 15.832
	if (te <= (16666 * 95 / 100) || te >= (16666 * 105 / 100)) {
		pr_err("[%s] TE is not correct!! (%d) back to OSC type A..\n", __func__, te);
		return -1;
	}
	pr_info("[%s] finals TE is (%d) - OK\n", __func__, te);

	pr_info("[%s] -- \n", __func__);
	return 0;
}
#endif
/*
 * Dispatch MDSS panel events to the Samsung panel state machine.
 *
 * MDSS_EVENT_FRAME_UPDATE: if a display-on is pending, send it and read
 *   back RDDPM; on CONFIG_LCD_HMT kernels also re-apply HMT settings.
 * MDSS_EVENT_READ_LDI_STATUS: read RDDPM/RDDSM and return the RDDPM
 *   byte to the caller.
 * MDSS_EVENT_TE_UPDATE / MDSS_EVENT_TE_RESTORE (FA2 panel only): run or
 *   undo the three-step TE/OSC-B calibration.
 * Returns 0 for handled/ignored events, -1 on TE calibration failure,
 * or the RDDPM byte for the LDI-status read.
 */
static int samsung_dsi_panel_event_handler(int event)
{
	char rddpm_buf[4], rddsm_buf[4];
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	char fctrl;
#endif
	pr_debug("%s : %d",__func__,event);
	switch (event) {
		case MDSS_EVENT_FRAME_UPDATE:
			if(msd.dstat.wait_disp_on) {
				pr_info("DISPLAY_ON\n");
				mipi_samsung_disp_send_cmd(PANEL_DISPLAY_ON, true);
				mipi_samsung_read_nv_mem(msd.pdata, &rddpm_cmds, rddpm_buf);
#if defined(CONFIG_LCD_HMT)
			/* Skip HMT re-apply on the very first frame after boot. */
			if (!is_first) {
				if (msd.hmt_stat.hmt_on && msd.hmt_stat.hmt_low_persistence) {
					pr_info("hmt on (%d), setting for HMT!\n", msd.hmt_stat.hmt_on);
					hmt_enable(1);
					hmt_reverse_update(1);
				}
				hmt_bright_update();
			} else
				is_first = 0;
#endif
				msd.dstat.wait_disp_on = 0;
			}
			break;
		case MDSS_EVENT_READ_LDI_STATUS:
			mipi_samsung_read_nv_mem(msd.pdata, &rddpm_cmds, rddpm_buf);
			mipi_samsung_read_nv_mem(msd.pdata, &rddsm_cmds, rddsm_buf);
			/* Caller interprets the first RDDPM byte directly. */
			return (int)rddpm_buf[0];
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
		case MDSS_EVENT_TE_UPDATE:
			if (te_set_done == TE_SET_READY) {
				/* Three-step OSC type B calibration; abort on any failure. */
				te_set_done = TE_SET_START;
				mipi_samsung_disp_send_cmd(PANEL_SET_TE_OSC_B, true);
				fctrl = check_te_step1();
				if (!fctrl)
					return -1;
				if (check_te_step2(fctrl) < 0)
					return -1;
				if (check_te_step3() < 0)
					return -1;
				te_set_done = TE_SET_DONE;
			}
			break;
		case MDSS_EVENT_TE_RESTORE:
			pr_info("RESTORE_TE (OSC TYPE A)\n");
			mipi_samsung_disp_send_cmd(PANEL_SET_TE_RESTORE, true);
			te_set_done = TE_SET_FAIL;
			break;
#endif
#if defined(CONFIG_MDNIE_LITE_TUNING)
		case MDSS_EVENT_MDNIE_DEFAULT_UPDATE:
			pr_info("%s : send CONFIG_MDNIE_LITE_TUNING... \n",__func__);
			is_negative_on();
			break;
#endif
		default:
			pr_debug("%s: unhandled event=%d\n", __func__, event);
			break;
	}
	return 0;
}
/*
 * Blank or unblank the panel by sending the matching Samsung display
 * command sequence.  Always returns 0.
 */
static int mdss_dsi_panel_blank(struct mdss_panel_data *pdata, int blank)
{
	if (!blank) {
		pr_debug("%s:%d, unblanking panel\n", __func__, __LINE__);
		mipi_samsung_disp_send_cmd(PANEL_DISPLAY_UNBLANK, false);
	} else {
		pr_debug("%s:%d, blanking panel\n", __func__, __LINE__);
		mipi_samsung_disp_send_cmd(PANEL_DISPLAY_BLANK, false);
	}
	return 0;
}
#if defined(CONFIG_WACOM_LCD_FREQ_COMPENSATE)
/*
 * LDI frequency compensation lookup table for the Wacom digitizer:
 * each row is { lower bound (inclusive), upper bound (exclusive),
 * signed compensation offset }.  Bounds appear to be the measured LCD
 * frequency in some fixed-point unit — TODO confirm against the caller
 * of ldi_fps().
 *
 * Fix: LUT_SIZE previously expanded to "(sizeof(...)/sizeof(int)) / 3"
 * without outer parentheses, so any adjacent operator would bind into
 * the expansion; it also depended on each row being exactly 3 ints.
 * The fully parenthesized element-count form is equivalent (56) and
 * robust.
 */
#define LUT_SIZE (sizeof(freq_cal_lut_offset) / sizeof(freq_cal_lut_offset[0]))
static int freq_cal_lut_offset[][3] = {
	{56568, 56686, -25},
	{56686, 56805, -24},
	{56805, 56924, -23},
	{56924, 57044, -22},
	{57044, 57165, -21},
	{57165, 57286, -20},
	{57286, 57407, -19},
	{57407, 57529, -18},
	{57529, 57651, -17},
	{57651, 57774, -16},
	{57774, 57898, -15},
	{57898, 58022, -14},
	{58022, 58146, -13},
	{58146, 58272, -12},
	{58272, 58397, -11},
	{58397, 58523, -10},
	{58523, 58650, -9},
	{58650, 58777, -8},
	{58777, 58905, -7},
	{58905, 59034, -6},
	{59034, 59163, -5},
	{59163, 59292, -4},
	{59292, 59422, -3},
	{59422, 59553, -2},
	{59553, 59684, -1},
	{59684, 59816, 0},
	{59816, 59948, 1},
	{59948, 60081, 2},
	{60081, 60215, 3},
	{60215, 60349, 4},
	{60349, 60484, 5},
	{60484, 60619, 6},
	{60619, 60755, 7},
	{60755, 60892, 8},
	{60892, 61029, 9},
	{61029, 61167, 10},
	{61167, 61305, 11},
	{61305, 61444, 12},
	{61444, 61584, 13},
	{61584, 61724, 14},
	{61724, 61865, 15},
	{61865, 62007, 16},
	{62007, 62149, 17},
	{62149, 62292, 18},
	{62292, 62436, 19},
	{62436, 62580, 20},
	{62580, 62725, 21},
	{62725, 62871, 22},
	{62871, 63017, 23},
	{63017, 63164, 24},
	{63164, 63312, 25},
	{63312, 63460, 26},
	{63460, 63609, 27},
	{63609, 63759, 28},
	{63759, 63909, 29},
	{63909, 64010, 30},
};
/*
 * Adjust the LDI frequency-compensation register for the Wacom
 * digitizer: map @input_fps to a signed offset via freq_cal_lut_offset,
 * add it to the current compensation byte and send the write command.
 *
 * Returns 1 when the compensation was written, 0 when skipped (EVT0
 * LDI, out-of-range input, invalid result, or panel off).
 *
 * Fix: LUT_SIZE is a size_t-valued expression and was printed with %d;
 * use %zu to match the kernel printk format rules.
 */
int ldi_fps(unsigned int input_fps)
{
	int offset = 0;
	int i;
	int dest_cal_val;

	pr_info("%s :input_fps (%d), lut size (%zu)\n", __func__, input_fps, LUT_SIZE);

	/* EVT0 LDI (manufacture id low byte <= 0x01) has no compensation. */
	if ((msd.manufacture_id & 0xFF) <= 0x01) {
		pr_err("%s :LDI EVT0 Not Support. Skip!! \n",__func__);
		return 0;
	}

	for (i = 0; i < LUT_SIZE; i++) {
		if (input_fps >= freq_cal_lut_offset[i][0] &&
			input_fps < freq_cal_lut_offset[i][1]) {
			offset = freq_cal_lut_offset[i][2];
			break;
		}
	}
	if (i == LUT_SIZE) {
		pr_err("%s :can not find offset !!\n", __func__);
		return 0;
	}

	if (msd.mfd->resume_state == MIPI_RESUME_STATE) {
		pr_info("%s :current comp value(0x%x),offset(%d)\n", __func__,
			write_ldi_fps_cmds.cmd_desc[1].payload[3], offset);
		dest_cal_val = write_ldi_fps_cmds.cmd_desc[1].payload[3] + offset;
		/* The LDI only accepts compensation bytes in [0xAC, 0xE3]. */
		if ((dest_cal_val < 0xAC) || (dest_cal_val > 0xE3)) {
			pr_err("Invalid cal value(0x%x)", dest_cal_val);
			return 0;
		}
		else
			pr_info("%s :dest write value (0x%x)\n", __func__, dest_cal_val);
		write_ldi_fps_cmds.cmd_desc[1].payload[3] = dest_cal_val;
		mipi_samsung_disp_send_cmd(PANEL_LDI_FPS_CHANGE, true);
	} else {
		pr_err("%s : Panel is off state!!\n", __func__);
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(ldi_fps);
#endif
#if defined(CONFIG_LCD_CLASS_DEVICE)
#if defined(DDI_VIDEO_ENHANCE_TUNING)
/*
 * Convert two ASCII hex digits into one byte: @data1 supplies the high
 * nibble, @data2 the low nibble.  Accepts '0'-'9', 'a'-'f' and 'A'-'F';
 * no validation is performed on other characters (same as before).
 */
static char char_to_dec(char data1, char data2)
{
	char hi = data1;
	char lo = data2;

	if (hi >= 'a')
		hi = hi - 'a' + 10;
	else if (hi >= 'A')
		hi = hi - 'A' + 10;
	else
		hi -= '0';

	if (lo >= 'a')
		lo = lo - 'a' + 10;
	else if (lo >= 'A')
		lo = lo - 'A' + 10;
	else
		lo -= '0';

	return (hi << 4) | lo;
}
/*
 * sending_tune_cmd() - parse an ASCII mdnie tuning blob and apply it.
 * @src: text buffer containing "0xNN" byte tokens
 * @len: number of bytes in @src
 *
 * Scans @src for "0x"-prefixed hex pairs.  The first MDNIE_TUNE_HEAD_SIZE
 * decoded values fill mdnie_head[], subsequent ones fill mdnie_body[];
 * parsing stops when the body table is full so neither array overflows.
 * Both tables are then dumped to the log and pushed to the panel between
 * MTP enable/disable commands.
 */
static void sending_tune_cmd(char *src, int len)
{
	int data_pos;
	int cmd_step;	/* 0: filling mdnie_head[], 1: filling mdnie_body[] */
	int cmd_pos;	/* write index inside the current table */

	cmd_step = 0;
	cmd_pos = 0;
	for (data_pos = 0; data_pos < len;) {
		if (*(src + data_pos) == '0') {
			if (*(src + data_pos + 1) == 'x') {
				/* decode the two hex chars after "0x" */
				if (!cmd_step) {
					mdnie_head[cmd_pos] =
					char_to_dec(*(src + data_pos + 2),
					*(src + data_pos + 3));
				} else {
					mdnie_body[cmd_pos] =
					char_to_dec(*(src + data_pos + 2),
					*(src + data_pos + 3));
				}
				/* skip past "0xNN" (+1 more in loop reentry) */
				data_pos += 3;
				cmd_pos++;
				if (cmd_pos == MDNIE_TUNE_HEAD_SIZE && !cmd_step) {
					/* header full: switch to the body table */
					cmd_pos = 0;
					cmd_step = 1;
				}else if (cmd_pos == MDNIE_TUNE_BODY_SIZE && cmd_step) {/*blocking overflow*/
					cmd_pos = 0;
					break;
				}
			} else
				data_pos++;
		} else {
			data_pos++;
		}
	}
	/* Dump the decoded tables to the kernel log */
	printk(KERN_INFO "\n");
	for (data_pos = 0; data_pos < MDNIE_TUNE_HEAD_SIZE ; data_pos++)
		printk(KERN_INFO "0x%x ", mdnie_head[data_pos]);
	printk(KERN_INFO "\n");
	for (data_pos = 0; data_pos < MDNIE_TUNE_BODY_SIZE ; data_pos++)
		printk(KERN_INFO "0x%x ", mdnie_body[data_pos]);
	printk(KERN_INFO "\n");
	/* Apply: unlock MTP, push the ad-hoc tuning, relock */
	mipi_samsung_disp_send_cmd(PANEL_MTP_ENABLE, true);
	mipi_samsung_disp_send_cmd(MDNIE_ADB_TEST, true);
	mipi_samsung_disp_send_cmd(PANEL_MTP_DISABLE, true);
}
/*
 * load_tuning_file() - read an mdnie tuning text file and apply it.
 * @filename: absolute path of the tuning file
 *
 * Reads the whole file into a kernel buffer and hands it to
 * sending_tune_cmd().
 *
 * Fix over the original: set_fs() is now restored on *every* error path
 * (the old code returned with the KERNEL_DS address-limit override still
 * active when filp_open() or vfs_read() failed).
 */
static void load_tuning_file(char *filename)
{
	struct file *filp;
	char *dp = NULL;
	long l;
	loff_t pos;
	int ret;
	mm_segment_t fs;

	pr_info("%s called loading file name : [%s]\n", __func__,
		filename);

	fs = get_fs();
	set_fs(get_ds());	/* allow vfs_read() into a kernel buffer */

	filp = filp_open(filename, O_RDONLY, 0);
	if (IS_ERR(filp)) {
		printk(KERN_ERR "%s File open failed\n", __func__);
		goto out_restore;
	}

	l = filp->f_path.dentry->d_inode->i_size;
	pr_info("%s Loading File Size : %ld(bytes)", __func__, l);

	dp = kmalloc(l + 10, GFP_KERNEL);
	if (dp == NULL) {
		pr_info("Can't not alloc memory for tuning file load\n");
		goto out_close;
	}
	pos = 0;
	memset(dp, 0, l);

	pr_info("%s before vfs_read()\n", __func__);
	ret = vfs_read(filp, (char __user *)dp, l, &pos);
	pr_info("%s after vfs_read()\n", __func__);

	if (ret != l) {
		pr_info("vfs_read() filed ret : %d\n", ret);
		kfree(dp);
		goto out_close;
	}

	filp_close(filp, current->files);
	set_fs(fs);

	sending_tune_cmd(dp, l);
	kfree(dp);
	return;

out_close:
	filp_close(filp, current->files);
out_restore:
	set_fs(fs);
}
int mdnie_adb_test;
void copy_tuning_data_from_adb(char *body, char *head);
/* sysfs read: report the last loaded tuning file and clear the adb-test flag. */
static ssize_t tuning_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	ssize_t len = snprintf(buf, MAX_FILE_NAME, "Tunned File Name : %s\n",
			tuning_file);

	mdnie_adb_test = 0;
	return len;
}
/*
 * sysfs write: accept a tuning file name, strip the trailing CR/LF,
 * then load and apply the file.
 */
static ssize_t tuning_store(struct device *dev,
			struct device_attribute *attr, const char *buf,
			size_t size)
{
	char *p;

	memset(tuning_file, 0, sizeof(tuning_file));
	snprintf(tuning_file, MAX_FILE_NAME, "%s%s", TUNING_FILE_PATH, buf);

	/* terminate the name at the first newline/carriage return */
	for (p = tuning_file; *p; p++) {
		if (*p == '\r' || *p == '\n') {
			*p = 0;
			break;
		}
	}

	pr_info("%s:%s\n", __func__, tuning_file);
	load_tuning_file(tuning_file);
#if defined(CONFIG_MDNIE_LITE_TUNING)
	copy_tuning_data_from_adb(mdnie_body, mdnie_head);
#endif
	mdnie_adb_test = 1;
	return size;
}
static DEVICE_ATTR(tuning, 0664, tuning_show, tuning_store);
#endif
/* sysfs read: report whether the panel is currently powered on. */
static ssize_t mipi_samsung_disp_get_power(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct msm_fb_data_type *mfd = msd.mfd;
	int on;

	if (unlikely(!mfd))
		return -ENODEV;
	if (unlikely(mfd->key != MFD_KEY))
		return -EINVAL;

	on = mdss_fb_is_power_on(mfd);
	pr_info("mipi_samsung_disp_get_power(%d)\n", on);
	return snprintf((char *)buf, 4, "%d\n", on);
}
/*
 * sysfs write: force the panel on (unblank + pan + brightness) or off.
 *
 * Fix: adds the NULL check on mfd that the read side already had, so an
 * early write cannot dereference an unset msd.mfd; a malformed value
 * still returns -EINVAL as before.
 */
static ssize_t mipi_samsung_disp_set_power(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	unsigned int power;
	struct msm_fb_data_type *mfd = msd.mfd;

	if (unlikely(!mfd))
		return -ENODEV;
	if (sscanf(buf, "%u", &power) != 1)
		return -EINVAL;

	/* no-op when the requested state matches the current one */
	if (power == mdss_fb_is_power_on(mfd))
		return 0;

	if (power) {
		mfd->fbi->fbops->fb_blank(FB_BLANK_UNBLANK, mfd->fbi);
		mfd->fbi->fbops->fb_pan_display(&mfd->fbi->var, mfd->fbi);
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
	} else {
		mfd->fbi->fbops->fb_blank(FB_BLANK_POWERDOWN, mfd->fbi);
	}

	pr_info("mipi_samsung_disp_set_power\n");
	return size;
}
/*
 * sysfs read: report the LCD type string ("SDC_<manufacture id>").
 *
 * Fix: when the manufacture id has not been read yet the original left
 * temp[] completely uninitialized (its switch fallback was commented
 * out) and then strlcat'ed stack garbage into the output.  The fallback
 * now emits the parsed panel name instead.
 */
static ssize_t mipi_samsung_disp_lcdtype_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	char temp[100];

	if (msd.manufacture_id) {
		snprintf(temp, 20, "SDC_%x\n", msd.manufacture_id);
	} else {
		pr_info("no manufacture id\n");
		/* fall back to the device-tree panel name */
		snprintf(temp, sizeof(temp), "%s\n", msd.panel_name);
	}
	strlcat(buf, temp, 100);
	return strnlen(buf, 100);
}
/* sysfs read: the three manufacture-id bytes, formatted "id1 id2 id3". */
static ssize_t mipi_samsung_disp_windowtype_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	char temp[15];
	int id1 = (msd.manufacture_id & 0x00FF0000) >> 16;
	int id2 = (msd.manufacture_id & 0x0000FF00) >> 8;
	int id3 = msd.manufacture_id & 0xFF;

	snprintf(temp, sizeof(temp), "%x %x %x\n", id1, id2, id3);
	strlcat(buf, temp, 15);
	return strnlen(buf, 15);
}
/* sysfs read: manufacture date/time previously read back from the panel. */
static ssize_t mipi_samsung_disp_manufacture_date_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	char temp[60];

	snprintf(temp, sizeof(temp), "manufacture date(%d) time(%d)\n",
			msd.manufacture_date, msd.manufacture_time);
	strlcat(buf, temp, 60);
	pr_info("manufacture date(%d) time(%d)\n",
			msd.manufacture_date, msd.manufacture_time);
	return strnlen(buf, 60);
}
/* sysfs read: the 5-byte DDI id as a contiguous hex string. */
static ssize_t mipi_samsung_disp_manufacture_code_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	char temp[30];

	snprintf(temp, sizeof(temp), "%02x%02x%02x%02x%02x\n",
			msd.ddi_id[0], msd.ddi_id[1], msd.ddi_id[2],
			msd.ddi_id[3], msd.ddi_id[4]);
	strlcat(buf, temp, 30);
	pr_info("%02x%02x%02x%02x%02x\n",
			msd.ddi_id[0], msd.ddi_id[1], msd.ddi_id[2],
			msd.ddi_id[3], msd.ddi_id[4]);
	return strnlen(buf, 30);
}
#if defined(TEST_RESOLUTION)
/* sysfs read: panel resolution class; only S6E3FA2 is FHD, all else WQHD. */
static ssize_t mipi_samsung_disp_panel_res_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	char temp[100];

	if (msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD)
		snprintf(temp, 10, "FHD\n");
	else	/* PANEL_WQHD_OCTA_S6E3HA0_CMD and any other panel */
		snprintf(temp, 10, "WQHD\n");

	strlcat(buf, temp, 100);
	return strnlen(buf, 100);
}
#endif
/*
 * sysfs read: current ACL (automatic current limit) state.
 * Fix: the log printed *buf — the ASCII code of the first output
 * character (48/49) — instead of the actual state value.
 */
static ssize_t mipi_samsung_disp_acl_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 30, "%d\n", msd.dstat.acl_on);
	pr_info("acl status: %d\n", msd.dstat.acl_on);
	return rc;
}
/*
 * sysfs write: enable ("1") or disable ("0") ACL; anything else is
 * ignored with a log message.  With the panel on, the brightness chain
 * is re-sent so the panel-side ACL setting actually changes; with the
 * panel off only the cached state is updated (applied on next resume).
 * SIOP forcing ACL takes precedence over a user request in either
 * direction.
 */
static ssize_t mipi_samsung_disp_acl_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	struct msm_fb_data_type *mfd = msd.mfd;
	int acl_set;

	acl_set = msd.dstat.acl_on;	/* default: keep the current state */
	if (sysfs_streq(buf, "1"))
		acl_set = true;
	else if (sysfs_streq(buf, "0"))
		acl_set = false;
	else
		pr_info("%s: Invalid argument!!", __func__);

	/* the brightness path must refresh ELVSS after an ACL change */
	msd.dstat.elvss_need_update = 1;

	if (mdss_fb_is_power_on(mfd)) {
		if (acl_set && !(msd.dstat.acl_on||msd.dstat.siop_status)) {
			/* ACL turning on, and SIOP is not already forcing it */
			msd.dstat.acl_on = true;
			pr_info("%s: acl on : acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
			mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
		} else if (!acl_set && msd.dstat.acl_on && !msd.dstat.siop_status) {
			/* ACL turning off: invalidate cached ACL/OPR indices */
			msd.dstat.acl_on = false;
			msd.dstat.curr_acl_idx = -1;
			msd.dstat.curr_opr_idx = -1;
			pr_info("%s: acl off : acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
			if(msd.dstat.auto_brightness == 6)
				pr_info("%s: HBM mode No ACL off!!", __func__);
#ifdef SMART_ACL
			/* If SMART_ACL enabled, elvss table should be set again */
			mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
#endif
		} else {
			/* no panel-side change required, just cache the state */
			msd.dstat.acl_on = acl_set;
			pr_info("%s: skip but acl update!! acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
		}
	}else {
		pr_info("%s: panel is off state. updating state value.\n",
			__func__);
		msd.dstat.acl_on = acl_set;
	}
	return size;
}
/*
 * sysfs read: current SIOP state.
 * Fix: the log printed *buf (the ASCII code of '0'/'1') instead of the
 * actual state value.
 */
static ssize_t mipi_samsung_disp_siop_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 4, "%d\n", msd.dstat.siop_status);
	pr_info("siop status: %d\n", msd.dstat.siop_status);
	return rc;
}
/*
 * sysfs write: enable ("1") or disable ("0") SIOP; other input is
 * ignored with a log message.  Mirrors the ACL store logic: SIOP only
 * switches on when neither ACL nor SIOP is already active, and only
 * switches off when ACL is not holding the limiter on.  Panel-off
 * writes just cache the state for the next resume.
 */
static ssize_t mipi_samsung_disp_siop_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	struct msm_fb_data_type *mfd = msd.mfd;
	int siop_set;

	siop_set = msd.dstat.siop_status;	/* default: keep current state */
	if (sysfs_streq(buf, "1"))
		siop_set = true;
	else if (sysfs_streq(buf, "0"))
		siop_set = false;
	else
		pr_info("%s: Invalid argument!!", __func__);

	if (mdss_fb_is_power_on(mfd)) {
		if (siop_set && !(msd.dstat.acl_on||msd.dstat.siop_status)) {
			/* SIOP turning on while nothing else limits current */
			msd.dstat.siop_status = true;
			mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
			pr_info("%s: acl on : acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
		} else if (!siop_set && !msd.dstat.acl_on && msd.dstat.siop_status) {
			/* SIOP turning off: reset cached ACL/OPR indices */
			mutex_lock(&msd.lock);
			msd.dstat.siop_status = false;
			msd.dstat.curr_acl_idx = -1;
			msd.dstat.curr_opr_idx = -1;
			if(msd.dstat.auto_brightness == 6)
				pr_info("%s: HBM mode No ACL off!!", __func__);
#ifdef SMART_ACL
			/* If SMART_ACL enabled, elvss table should be set again.
			 * NOTE(review): the 'false' lock argument here presumably
			 * means "msd.lock already held" - confirm against
			 * mipi_samsung_disp_send_cmd(). */
			mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, false);
#endif
			mutex_unlock(&msd.lock);
			pr_info("%s: acl off : acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
		} else {
			/* no panel-side change required, just cache the state */
			msd.dstat.siop_status = siop_set;
			pr_info("%s: skip but siop update!! acl %d, siop %d", __func__,
				msd.dstat.acl_on, msd.dstat.siop_status);
		}
	}else {
		msd.dstat.siop_status = siop_set;
		pr_info("%s: panel is off state. updating state value.\n",
			__func__);
	}
	return size;
}
/* sysfs read: dump the smart-dimming AID log(s) to the kernel log. */
static ssize_t mipi_samsung_aid_log_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	if (msd.dstat.is_smart_dim_loaded)
		msd.sdimconf->print_aid_log();
	else
		pr_err("smart dim is not loaded..\n");
#if defined(CONFIG_LCD_HMT)
	if (msd.dstat.is_hmt_smart_dim_loaded)
		msd.sdimconf_reverse_hmt_single->print_aid_log();
	else
		pr_err("smart dim for HMT is not loaded..\n");
#endif
	return 0;
}
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE)
/*
 * sysfs read: current auto-brightness level.
 * Fix: the log printed *buf (ASCII code of the first digit) instead of
 * the actual level.
 */
static ssize_t mipi_samsung_auto_brightness_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 30, "%d\n",
			msd.dstat.auto_brightness);
	pr_info("auto_brightness: %d\n", msd.dstat.auto_brightness);
	return rc;
}
/*
 * sysfs write: auto-brightness level "0".."7" (6 == HBM mode); any other
 * input leaves the level unchanged.  The very first store after boot is
 * deliberately skipped (only cached) to avoid fighting the boot-time
 * brightness; later stores re-send the brightness chain and refresh the
 * mDNIe mode when the panel is resumed.
 */
static ssize_t mipi_samsung_auto_brightness_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	static int first_auto_br = 0;	/* skip-once latch for the boot store */

	if (sysfs_streq(buf, "0"))
		msd.dstat.auto_brightness = 0;
	else if (sysfs_streq(buf, "1"))
		msd.dstat.auto_brightness = 1;
	else if (sysfs_streq(buf, "2"))
		msd.dstat.auto_brightness = 2;
	else if (sysfs_streq(buf, "3"))
		msd.dstat.auto_brightness = 3;
	else if (sysfs_streq(buf, "4"))
		msd.dstat.auto_brightness = 4;
	else if (sysfs_streq(buf, "5"))
		msd.dstat.auto_brightness = 5;
	else if (sysfs_streq(buf, "6")) // HBM mode
		msd.dstat.auto_brightness = 6;
	else if (sysfs_streq(buf, "7"))
		msd.dstat.auto_brightness = 7;
	else
		pr_info("%s: Invalid argument!!", __func__);

	if (!first_auto_br) {
		pr_info("%s : skip first auto brightness store (%d) (%d)!!\n",
			__func__, msd.dstat.auto_brightness, msd.dstat.bright_level);
		first_auto_br++;
		return size;
	}

	/* brightness path must refresh ELVSS for the new level */
	msd.dstat.elvss_need_update = 1;

	if (msd.mfd->resume_state == MIPI_RESUME_STATE) {
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
		mDNIe_Set_Mode(); // LOCAL CE tuning
		pr_info("%s %d %d\n", __func__, msd.dstat.auto_brightness, msd.dstat.bright_level);
	} else {
		pr_info("%s : panel is off state!!\n", __func__);
	}
	return size;
}
/*
 * sysfs write: "<addr> <len> <start>" - read back @len bytes of panel
 * NV memory from register @addr at offset @start (debug aid; the data
 * only appears in the kernel log via mipi_samsung_read_nv_mem()).
 *
 * Fixes: the sscanf result and every allocation are now checked, and
 * read_size/read_startoffset are freed again - the original kzalloc'ed
 * them on every write and leaked both.
 */
static ssize_t mipi_samsung_read_mtp_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int addr, len, start;
	char *read_buf;

	if (sscanf(buf, "%x %d %x", &addr, &len, &start) != 3)
		return -EINVAL;
	if (len <= 0)
		return -EINVAL;
	pr_info("%x %d %x\n", addr, len, start);

	read_buf = kmalloc(len, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	mtp_read_sysfs_cmds.cmd_desc[0].payload[0] = addr;	/* addr */
	mtp_read_sysfs_cmds.cmd_desc[0].payload[1] = len;	/* size */
	mtp_read_sysfs_cmds.cmd_desc[0].payload[2] = start;	/* start */

	mtp_read_sysfs_cmds.read_size = kzalloc(sizeof(char) *
			mtp_read_sysfs_cmds.num_of_cmds, GFP_KERNEL);
	mtp_read_sysfs_cmds.read_startoffset = kzalloc(sizeof(char) *
			mtp_read_sysfs_cmds.num_of_cmds, GFP_KERNEL);
	if (!mtp_read_sysfs_cmds.read_size ||
			!mtp_read_sysfs_cmds.read_startoffset)
		goto out;

	mtp_read_sysfs_cmds.read_size[0] = len;
	mtp_read_sysfs_cmds.read_startoffset[0] = start;

	pr_info("%x %x %x %x %x %x %x %x %x\n",
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.dtype,
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.last,
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.vc,
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.ack,
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.wait,
		mtp_read_sysfs_cmds.cmd_desc[0].dchdr.dlen,
		mtp_read_sysfs_cmds.cmd_desc[0].payload[0],
		mtp_read_sysfs_cmds.cmd_desc[0].payload[1],
		mtp_read_sysfs_cmds.cmd_desc[0].payload[2]);

	mipi_samsung_read_nv_mem(msd.pdata, &mtp_read_sysfs_cmds, read_buf);

out:
	kfree(mtp_read_sysfs_cmds.read_size);
	mtp_read_sysfs_cmds.read_size = NULL;
	kfree(mtp_read_sysfs_cmds.read_startoffset);
	mtp_read_sysfs_cmds.read_startoffset = NULL;
	kfree(read_buf);
	return size;
}
#endif
#if defined(TEMPERATURE_ELVSS)
/* sysfs read: advertise the supported temperature break points. */
static ssize_t mipi_samsung_temperature_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc = snprintf((char *)buf, 40, "-20, -19, 0, 1, 30, 40\n");

	pr_info("%s msd.mpd->temperature : %d msd.mpd->temperature_value : 0x%x", __func__,
			msd.dstat.temperature, msd.dstat.temperature_value);
	return rc;
}
/*
 * sysfs write: set the panel temperature used for ELVSS compensation.
 * Negative temperatures are encoded as magnitude with bit 7 set.
 *
 * Fix: the sscanf result is checked so a malformed write returns
 * -EINVAL instead of silently re-applying a stale temperature.
 */
static ssize_t mipi_samsung_temperature_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int temp;

	if (sscanf(buf, "%d", &msd.dstat.temperature) != 1)
		return -EINVAL;

	temp = msd.dstat.temperature;
	if (temp > 0) {
		msd.dstat.temperature_value = (char)temp;
	} else {
		/* magnitude plus sign flag in bit 7 */
		temp *= -1;
		msd.dstat.temperature_value = (char)temp;
		msd.dstat.temperature_value |= 0x80;
	}

	msd.dstat.elvss_need_update = 1;

	if (msd.mfd->resume_state == MIPI_RESUME_STATE) {
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
		pr_info("mipi_samsung_temperature_store %d\n", msd.dstat.bright_level);
		pr_info("%s msd.dstat.temperature : %d msd.dstat.temperature_value : 0x%x", __func__,
			msd.dstat.temperature, msd.dstat.temperature_value);
	} else {
		pr_info("%s: skip but temperature update!! temperature %d, temperature_value %d", __func__,
			msd.dstat.temperature, msd.dstat.temperature_value);
	}
	return size;
}
#endif
#if defined(PARTIAL_UPDATE)
/* sysfs read: report the currently programmed partial-display rows. */
static ssize_t mipi_samsung_disp_partial_disp_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc = snprintf((char *)buf, 40, "partial display range %d to %d \n",
			partial_disp_range[0], partial_disp_range[1]);

	pr_info("partial display range %d to %d \n",
			partial_disp_range[0], partial_disp_range[1]);
	return rc;
}
/*
 * sysfs write: "<start> <end>" rows for partial display; "0 0" disables
 * partial mode.
 *
 * Fixes: the sscanf result is now checked, and negative rows are
 * rejected along with rows beyond the panel height (the old check only
 * caught the upper bound).
 */
static ssize_t mipi_samsung_disp_partial_disp_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	if (sscanf(buf, "%d %d", &partial_disp_range[0],
				&partial_disp_range[1]) != 2)
		return -EINVAL;
	pr_info("%s: partial_disp range[0] = 0x%x, range[1] = 0x%x\n",
		__func__, partial_disp_range[0], partial_disp_range[1]);

	if (partial_disp_range[0] < 0 || partial_disp_range[1] < 0 ||
	    partial_disp_range[0] > msd.pdata->panel_info.yres - 1 ||
	    partial_disp_range[1] > msd.pdata->panel_info.yres - 1) {
		pr_err("%s:Invalid Input\n", __func__);
		return -EINVAL;
	}

	/* program the MSB/LSB pairs of the partial-area command */
	partialdisp_on_cmd.cmd_desc[0].payload[1] = (partial_disp_range[0] >> 8) & 0xFF;
	partialdisp_on_cmd.cmd_desc[0].payload[2] = partial_disp_range[0] & 0xFF;
	partialdisp_on_cmd.cmd_desc[0].payload[3] = (partial_disp_range[1] >> 8) & 0xFF;
	partialdisp_on_cmd.cmd_desc[0].payload[4] = partial_disp_range[1] & 0xFF;

	if (msd.dstat.on) {
		if (partial_disp_range[0] || partial_disp_range[1])
			mipi_samsung_disp_send_cmd(PANEL_PARTIAL_ON, true);
		else
			mipi_samsung_disp_send_cmd(PANEL_PARTIAL_OFF, true);
	} else {
		pr_info("%s : LCD is off state\n", __func__);
		return -EINVAL;
	}

	pr_info("%s: partialdisp_on_cmd = 0x%x\n", __func__, partialdisp_on_cmd.cmd_desc[0].payload[1]);
	pr_info("%s: partialdisp_on_cmd = 0x%x\n", __func__, partialdisp_on_cmd.cmd_desc[0].payload[2]);
	pr_info("%s: partialdisp_on_cmd = 0x%x\n", __func__, partialdisp_on_cmd.cmd_desc[0].payload[3]);
	pr_info("%s: partialdisp_on_cmd = 0x%x\n", __func__, partialdisp_on_cmd.cmd_desc[0].payload[4]);
	return size;
}
#endif
/* sysfs read: current ALPM mode as reported by the alpm_status hook. */
static ssize_t mipi_samsung_alpm_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	static struct mdss_panel_alpm_data *alpm_data = NULL;
	u8 current_status = 0;
	int rc;

	/* resolve and cache the alpm_data pointer on first use */
	if (unlikely(!alpm_data) && msd.pdata)
		alpm_data = &msd.pdata->alpm_data;
	if (likely(alpm_data) && alpm_data->alpm_status)
		current_status = alpm_data->alpm_status(CHECK_CURRENT_STATUS);

	rc = snprintf((char *)buf, 30, "%d\n", current_status);
	pr_info("[ALPM_DEBUG] %s: current status : %d \n",\
		__func__, (int)current_status);
	return rc;
}
/*
 * sysfs write: switch ALPM (ambient low-power mode) on (non-zero) or
 * off (0).
 *
 * Fix: alpm_data is now checked for NULL before ->alpm_status is
 * dereferenced — if msd.pdata was still unset, the original crashed on
 * "alpm_data->alpm_status" with alpm_data == NULL.
 */
static ssize_t mipi_samsung_alpm_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int mode = 0;
	static struct mdss_panel_alpm_data *alpm_data = NULL;
	struct display_status *dstat = &msd.dstat;

	sscanf(buf, "%d", &mode);
	pr_info("[ALPM_DEBUG] %s: mode : %d\n", __func__, mode);

	/* resolve and cache the alpm_data pointer on first use */
	if (unlikely(!alpm_data) && msd.pdata)
		alpm_data = &msd.pdata->alpm_data;
	if (!alpm_data || !alpm_data->alpm_status)
		return size;
	/*
	 * Possible mode status for Blank(0) or Unblank(1)
	 *	* Blank *
	 *		1) ALPM_MODE_ON
	 *			-> That will set during wakeup
	 *	* Unblank *
	 *		1) NORMAL_MODE_ON
	 *			-> That will send partial update commands
	 */
	alpm_data->alpm_status(mode ? ALPM_MODE_ON : MODE_OFF);
	if (mode == ALPM_MODE_ON) {
		/*
		 * This will work if the ALPM must be on or change partial
		 * area if that already in the status of unblank
		 */
		if (dstat->on) {
			if (!alpm_data->alpm_status(CHECK_PREVIOUS_STATUS) &&
			    alpm_data->alpm_status(CHECK_CURRENT_STATUS)) {
				/* Turn On ALPM Mode */
				mipi_samsung_disp_send_cmd(PANEL_ALPM_ON, true);
				if (dstat->wait_disp_on == 0) {
					msleep(20); /* wait 1 frame(more than 16ms) */
					mipi_samsung_disp_send_cmd(PANEL_DISPLAY_ON, true);
				}
				alpm_data->alpm_status(STORE_CURRENT_STATUS);
				pr_info("[ALPM_DEBUG] %s: Send ALPM mode on cmds\n", __func__);
			}
		}
	} else if (mode == MODE_OFF) {
		if (alpm_data->alpm_status(CHECK_PREVIOUS_STATUS) == ALPM_MODE_ON) {
			if (dstat->on) {
				mipi_samsung_disp_send_cmd(PANEL_ALPM_OFF, true);
				msleep(20); /* wait 1 frame(more than 16ms) */
				mipi_samsung_disp_send_cmd(PANEL_DISPLAY_ON, true);
				alpm_data->alpm_status(CLEAR_MODE_STATUS);
			}
			pr_info("[ALPM_DEBUG] %s: Send ALPM off cmds\n", __func__);
		}
	} else
		pr_info("[ALPM_DEBUG] %s: no operation \n:", __func__);

	return size;
}
#if defined(DYNAMIC_FPS_USE_TE_CTRL)
/* sysfs read: whether dynamic-fps switching is gated on the TE signal. */
static ssize_t dynamic_fps_use_te_ctrl_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc = snprintf((char *)buf, 40, "dynamic_fps_use_te_ctrl %d \n",
			dynamic_fps_use_te_ctrl);

	pr_info("dynamic_fps_use_te_ctrl %d \n", dynamic_fps_use_te_ctrl);
	return rc;
}
/*
 * sysfs write: set the dynamic-fps TE gating flag.
 * Fix: the sscanf result is checked so malformed input returns -EINVAL
 * instead of being silently ignored.
 */
static ssize_t dynamic_fps_use_te_ctrl_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	if (sscanf(buf, "%d", &dynamic_fps_use_te_ctrl) != 1)
		return -EINVAL;
	return size;
}
#endif
/*
 * lcd class device ops.  Panel power is controlled through the sysfs
 * nodes above (lcd_power), so no get/set hooks are provided here.
 */
static struct lcd_ops mipi_samsung_disp_props = {
	.get_power = NULL,
	.set_power = NULL,
};
#ifdef CONFIG_FB_MSM_SAMSUNG_AMOLED_LOW_POWER_MODE
/*
* This will use to enable/disable or check the status of ALPM
* * Description for STATUS_OR_EVENT_FLAG *
* 1) ALPM_MODE_ON
* 2) NORMAL_MODE_ON
* -> Set by user using sysfs(/sys/class/lcd/panel/alpm)
* The value will save to current_status
* 3) CHECK_CURRENT_STATUS
* -> Check current status
* that will return current status like ALPM_MODE_ON, NORMAL_MODE_ON or MODE_OFF
* 4) CHECK_PREVIOUS_STATUS
* -> Check previous status that will return previous status like
* ALPM_MODE_ON, NORMAL_MODE_ON or MODE_OFF
* 5) STORE_CURRENT_STATUS
* -> Store current status to previous status because that will use
* for next turn on sequence
* 6) CLEAR_MODE_STATUS
* -> Clear current and previous status as MODE_OFF status that can use with
* * Usage *
* Call function "mdss_dsi_panel_alpm_status_func(STATUS_FLAG)"
*/
/*
 * Track and query the ALPM state machine (the block comment above this
 * function describes each flag).  Current and previous mode are kept in
 * static storage; query flags return the requested status, otherwise 0.
 */
u8 mdss_dsi_panel_alpm_status_func(u8 flag)
{
	static u8 cur_status;
	static u8 prev_status;
	u8 result = 0;

	switch (flag) {
	case ALPM_MODE_ON:
		cur_status = ALPM_MODE_ON;
		break;
	case NORMAL_MODE_ON:
		/* intentionally not recorded: cur_status = NORMAL_MODE_ON */
		break;
	case MODE_OFF:
		cur_status = MODE_OFF;
		break;
	case CHECK_CURRENT_STATUS:
		result = cur_status;
		break;
	case CHECK_PREVIOUS_STATUS:
		result = prev_status;
		break;
	case STORE_CURRENT_STATUS:
		prev_status = cur_status;
		break;
	case CLEAR_MODE_STATUS:
		prev_status = 0;
		cur_status = 0;
		break;
	default:
		break;
	}
	pr_debug("[ALPM_DEBUG] current_status : %d, previous_status : %d, ret : %d\n",\
		cur_status, prev_status, result);
	return result;
}
#endif
/* Hook the ALPM status state machine into the panel's alpm_data. */
void mdss_dsi_panel_alpm_register(struct mdss_panel_alpm_data *alpm_data)
{
	if (alpm_data)
		alpm_data->alpm_status = mdss_dsi_panel_alpm_status_func;
	else
		pr_err("%s: pdata is null\n", __func__);
}
#if defined(CONFIG_ESD_FG_RECOVERY)
/*
 * ESD fault-gpio ISR: ignored while a recovery is already running, the
 * panel is off, or before the first frame commit; otherwise masks the
 * irq and schedules the recovery worker.
 */
static irqreturn_t err_fg_irq_handler(int irq, void *handle)
{
	struct msm_fb_data_type *mfd = msd.mfd;

	if (err_fg_working || !mdss_fb_is_power_on(mfd) ||
			mdss_fb_get_first_cmt_flag())
		return IRQ_HANDLED;

	pr_info("%s handler + irq(%d) state(%d)", __func__, irq,
			gpio_get_value(err_fg_gpio));
	err_fg_working = 1;
	disable_irq_nosync(gpio_to_irq(err_fg_gpio));
	schedule_work(&err_fg_work);
	pr_info("%s : handler - state(%d)", __func__,
			gpio_get_value(err_fg_gpio));
	return IRQ_HANDLED;
}
void err_fg_work_func(struct work_struct *work)
{
struct msm_fb_data_type *mfd = msd.mfd;
struct mdss_panel_data *pdata = msd.pdata;
char *envp[2] = {"PANEL_ALIVE=0", NULL};
struct device *dev = msd.mfd->fbi->dev;
pr_info("%s : start irqstate(%d)", __func__, gpio_get_value(err_fg_gpio));
if(msd.mfd == NULL){
pr_err("%s: mfd not initialized Skip ESD recovery\n", __func__);
return;
}
if (mdss_fb_is_power_on(mfd)) {
pdata->panel_info.panel_dead = true; /*for cmd mode panel only*/
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
pr_err("Panel has gone bad, sending uevent - %s\n", envp[0]);
}
return;
}
#ifdef ESD_DEBUG
/* Debug sysfs read: number of ESD events counted so far. */
static ssize_t mipi_samsung_esd_check_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return snprintf((char *)buf, 20, "esd count %d\n", esd_count);
}
/*
 * Debug sysfs write: simulate an ESD interrupt by invoking the handler
 * directly.
 * Fix: return the consumed size instead of the hard-coded 1 — a sysfs
 * store returning a short count makes userspace retry the remaining
 * bytes, re-triggering the fake ESD event repeatedly.
 */
static ssize_t mipi_samsung_esd_check_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	struct msm_fb_data_type *mfd = msd.mfd;

	err_fg_irq_handler(0, mfd);
	return size;
}
static DEVICE_ATTR(esd_check, /*S_IRUGO*/0664 , mipi_samsung_esd_check_show,\
mipi_samsung_esd_check_store);
#endif
#endif
#if defined(CONFIG_LCD_HMT)
/* Re-send the brightness command matching the current HMT state. */
int hmt_bright_update(void)
{
	msleep(20);
	if (!msd.hmt_stat.hmt_on)
		mipi_samsung_disp_send_cmd(PANEL_BRIGHT_CTRL, true);
	else if (msd.hmt_stat.hmt_low_persistence)
		mipi_samsung_disp_send_cmd(PANEL_HMT_BRIGHT, true);
	else
		mipi_samsung_disp_send_cmd(PANEL_LOW_PERSISTENCE_BRIGHT, true);
	return 0;
}
/* Enable (single-scan) or disable HMT mode on the panel. */
int hmt_enable(int enable)
{
	msleep(20);
	if (enable) {
		pr_info("Single Scan Enable ++ \n");
		mipi_samsung_disp_send_cmd(PANEL_ENABLE, true);
		pr_info("Single Scan Enable -- \n");
		return 0;
	}
	pr_info("HMT OFF.. \n");
	mipi_samsung_disp_send_cmd(PANEL_DISABLE, true);
	return 0;
}
/* Enable or disable the reversed (mirrored) scan used for HMT. */
int hmt_reverse_update(int enable)
{
	msleep(20);
	if (enable) {
		pr_info("REVERSE ENABLE ++\n");
		mipi_samsung_disp_send_cmd(PANEL_HMT_REVERSE_ENABLE, true);
		pr_info("REVERSE ENABLE --\n");
		return 0;
	}
	pr_info("REVERSE DISABLE ++ \n");
	mipi_samsung_disp_send_cmd(PANEL_HMT_REVERSE_DISABLE, true);
	pr_info("REVERSE DISABLE -- \n");
	return 0;
}
/* sysfs read: current HMT brightness level.
 * Fix (all three _show handlers below): the log printed *buf, i.e. the
 * ASCII code of the first output character, instead of the value. */
static ssize_t mipi_samsung_hmt_bright_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 30, "%d\n", msd.hmt_stat.hmt_bl_level);
	pr_info("[HMT] hmt bright : %d\n", msd.hmt_stat.hmt_bl_level);
	return rc;
}
/* sysfs write: set HMT brightness.
 * Fix (all three _store handlers): the sscanf result is now checked -
 * on a malformed write the old code read the uninitialized 'input'
 * local and stored that garbage into the HMT state. */
static ssize_t mipi_samsung_hmt_bright_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int input;

	if (sscanf(buf, "%d", &input) != 1)
		return -EINVAL;
	pr_info("[HMT] %s: input (%d) ++ \n", __func__, input);

	if (!msd.hmt_stat.hmt_on) {
		pr_info("[HMT] hmt is off!\n");
		return size;
	}
	if (!msd.dstat.on) {
		/* panel off: only cache the level for later */
		pr_err("[HMT] panel is off!\n");
		msd.hmt_stat.hmt_bl_level = input;
		return size;
	}
	if (msd.hmt_stat.hmt_bl_level == input) {
		pr_err("[HMT] hmt bright already %d!\n", msd.hmt_stat.hmt_bl_level);
		return size;
	}

	msd.hmt_stat.hmt_bl_level = input;
	hmt_bright_update();
	pr_info("[HMT] %s: input (%d) -- \n", __func__, input);
	return size;
}
/* sysfs read: whether HMT mode is on. */
static ssize_t mipi_samsung_hmt_on_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 30, "%d\n", msd.hmt_stat.hmt_on);
	pr_info("[HMT] hmt on input : %d\n", msd.hmt_stat.hmt_on);
	return rc;
}
/* sysfs write: turn HMT mode on/off and reprogram scan + brightness. */
static ssize_t mipi_samsung_hmt_on_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int input;

	if (sscanf(buf, "%d", &input) != 1)
		return -EINVAL;
	pr_info("[HMT] %s: input (%d) ++ \n", __func__, input);

	if (!msd.dstat.on) {
		/* panel off: only cache the state for later */
		pr_err("[HMT] panel is off!\n");
		msd.hmt_stat.hmt_on = input;
		return size;
	}
	if (msd.hmt_stat.hmt_on == input) {
		pr_info("[HMT] hmt already %s !\n", msd.hmt_stat.hmt_on?"ON":"OFF");
		return size;
	}

	msd.hmt_stat.hmt_on = input;
	if (msd.hmt_stat.hmt_on && msd.hmt_stat.hmt_low_persistence) {
		hmt_enable(1);
		hmt_reverse_update(1);
	} else {
		hmt_enable(0);
		hmt_reverse_update(0);
	}
	hmt_bright_update();
	pr_info("[HMT] %s: input hmt (%d) hmt lp (%d)-- \n",
		__func__, input, msd.hmt_stat.hmt_low_persistence);
	return size;
}
/* sysfs read: whether HMT low-persistence mode is selected. */
static ssize_t mipi_samsung_hmt_low_persistence_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int rc;

	rc = snprintf((char *)buf, 30, "%d\n", msd.hmt_stat.hmt_low_persistence);
	pr_info("[HMT] hmt low persistence : %d\n", msd.hmt_stat.hmt_low_persistence);
	return rc;
}
/* sysfs write: toggle low persistence and reprogram scan + brightness. */
static ssize_t mipi_samsung_hmt_low_persistence_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	int input;

	if (sscanf(buf, "%d", &input) != 1)
		return -EINVAL;
	pr_info("[HMT] %s: input (%d) ++ \n", __func__, input);

	if (!msd.hmt_stat.hmt_on) {
		pr_info("[HMT] hmt is off!\n");
		return size;
	}
	if (!msd.dstat.on) {
		/* panel off: only cache the state for later */
		pr_err("[HMT] panel is off!\n");
		msd.hmt_stat.hmt_low_persistence = input;
		return size;
	}

	msd.hmt_stat.hmt_low_persistence = input;
	if (!msd.hmt_stat.hmt_low_persistence) {
		hmt_enable(0);
		hmt_reverse_update(0);
	} else {
		hmt_enable(1);
		hmt_reverse_update(1);
	}
	hmt_bright_update();
	pr_info("[HMT] %s: input hmt (%d) hmt lp (%d)-- \n",
		__func__, msd.hmt_stat.hmt_on, msd.hmt_stat.hmt_low_persistence);
	return size;
}
#endif
/* sysfs attribute declarations for the lcd ("panel") class device. */
static DEVICE_ATTR(lcd_power, S_IRUGO | S_IWUSR,
			mipi_samsung_disp_get_power,
			mipi_samsung_disp_set_power);
static DEVICE_ATTR(lcd_type, S_IRUGO,
			mipi_samsung_disp_lcdtype_show,
			NULL);
static DEVICE_ATTR(window_type, S_IRUGO,
			mipi_samsung_disp_windowtype_show, NULL);
static DEVICE_ATTR(manufacture_date, S_IRUGO,
			mipi_samsung_disp_manufacture_date_show, NULL);
static DEVICE_ATTR(manufacture_code, S_IRUGO,
			mipi_samsung_disp_manufacture_code_show, NULL);
static DEVICE_ATTR(power_reduce, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_disp_acl_show,
			mipi_samsung_disp_acl_store);
static DEVICE_ATTR(siop_enable, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_disp_siop_show,
			mipi_samsung_disp_siop_store);
static DEVICE_ATTR(read_mtp, S_IRUGO | S_IWUSR | S_IWGRP,
			NULL,
			mipi_samsung_read_mtp_store);
/*
 * Build fix: aid_log is referenced unconditionally in
 * panel_sysfs_attributes[], so its attribute must not live inside the
 * TEMPERATURE_ELVSS conditional (where the original defined it).
 */
static DEVICE_ATTR(aid_log, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_aid_log_show,
			NULL);
#if defined(TEMPERATURE_ELVSS)
static DEVICE_ATTR(temperature, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_temperature_show,
			mipi_samsung_temperature_store);
#endif
#if defined(PARTIAL_UPDATE)
static DEVICE_ATTR(partial_disp, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_disp_partial_disp_show,
			mipi_samsung_disp_partial_disp_store);
#endif
static DEVICE_ATTR(alpm, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_alpm_show,
			mipi_samsung_alpm_store);
#if defined(DYNAMIC_FPS_USE_TE_CTRL)
static DEVICE_ATTR(dynamic_fps_use_te, S_IRUGO | S_IWUSR | S_IWGRP,
			dynamic_fps_use_te_ctrl_show,
			dynamic_fps_use_te_ctrl_store);
#endif
#if defined(TEST_RESOLUTION)
static DEVICE_ATTR(panel_res, S_IRUGO,
			mipi_samsung_disp_panel_res_show,
			NULL);
#endif
#if defined(CONFIG_LCD_HMT)
/* 0660 (octal, rw-rw----): the original used decimal 660, which sets
 * nonsense mode bits instead of owner/group read-write. */
static DEVICE_ATTR(hmt_bright, 0660,
			mipi_samsung_hmt_bright_show,
			mipi_samsung_hmt_bright_store);
static DEVICE_ATTR(hmt_on, 0660,
			mipi_samsung_hmt_on_show,
			mipi_samsung_hmt_on_store);
static DEVICE_ATTR(hmt_low_persistence, 0660,
			mipi_samsung_hmt_low_persistence_show,
			mipi_samsung_hmt_low_persistence_store);
#endif
/* Attribute set installed on the lcd class device ("panel"). */
static struct attribute *panel_sysfs_attributes[] = {
	&dev_attr_lcd_power.attr,
	&dev_attr_lcd_type.attr,
	&dev_attr_window_type.attr,
	&dev_attr_manufacture_date.attr,
	&dev_attr_manufacture_code.attr,
	&dev_attr_power_reduce.attr,
	&dev_attr_siop_enable.attr,
	&dev_attr_aid_log.attr,
	&dev_attr_read_mtp.attr,
#if defined(TEMPERATURE_ELVSS)
	&dev_attr_temperature.attr,
#endif
#if defined(PARTIAL_UPDATE)
	&dev_attr_partial_disp.attr,
#endif
	&dev_attr_alpm.attr,
#if defined(DYNAMIC_FPS_USE_TE_CTRL)
	&dev_attr_dynamic_fps_use_te.attr,
#endif
#if defined(TEST_RESOLUTION)
	&dev_attr_panel_res.attr,
#endif
#if defined(CONFIG_LCD_HMT)
	&dev_attr_hmt_bright.attr,
	&dev_attr_hmt_on.attr,
	&dev_attr_hmt_low_persistence.attr,
#endif
	NULL
};
/* Group registered in mdss_samsung_create_sysfs(). */
static const struct attribute_group panel_sysfs_group = {
	.attrs = panel_sysfs_attributes,
};
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE)
/* auto_brightness node exposed on the backlight class device. */
static DEVICE_ATTR(auto_brightness, S_IRUGO | S_IWUSR | S_IWGRP,
			mipi_samsung_auto_brightness_show,
			mipi_samsung_auto_brightness_store);
/* Attribute set installed on the backlight class device. */
static struct attribute *bl_sysfs_attributes[] = {
	&dev_attr_auto_brightness.attr,
	NULL
};
static const struct attribute_group bl_sysfs_group = {
	.attrs = bl_sysfs_attributes,
};
#endif
static int sysfs_enable;
/*
 * mdss_samsung_create_sysfs() - register the lcd (and backlight) class
 * devices plus their sysfs groups.  Guarded by sysfs_enable so it runs
 * only once in dual-DSI mode.
 *
 * Fixes over the original: every later failure path now tears down what
 * was already registered (the old code leaked the lcd device — and the
 * backlight device — on partial failure, and "cleaned up" a group whose
 * creation had just failed).
 */
static int mdss_samsung_create_sysfs(void)
{
	int rc = 0;
	struct lcd_device *lcd_device;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE)
	struct backlight_device *bd = NULL;
#endif

	/* sysfs creat func should be called one time in dual dsi mode */
	if (sysfs_enable)
		return 0;

	lcd_device = lcd_device_register("panel", NULL, NULL,
					&mipi_samsung_disp_props);
	if (IS_ERR(lcd_device)) {
		rc = PTR_ERR(lcd_device);
		pr_err("Failed to register lcd device..\n");
		return rc;
	}
	/* lcd_power is exposed through the panel group instead */
	sysfs_remove_file(&lcd_device->dev.kobj,
			&dev_attr_lcd_power.attr);

	rc = sysfs_create_group(&lcd_device->dev.kobj, &panel_sysfs_group);
	if (rc) {
		pr_err("Failed to create panel sysfs group..\n");
		lcd_device_unregister(lcd_device);
		return rc;
	}
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE)
	bd = backlight_device_register("panel", &lcd_device->dev,
					NULL, NULL, NULL);
	if (IS_ERR(bd)) {
		rc = PTR_ERR(bd);
		pr_err("backlight : failed to register device\n");
		sysfs_remove_group(&lcd_device->dev.kobj, &panel_sysfs_group);
		lcd_device_unregister(lcd_device);
		return rc;
	}
	rc = sysfs_create_group(&bd->dev.kobj, &bl_sysfs_group);
	if (rc) {
		pr_err("Failed to create backlight sysfs group..\n");
		backlight_device_unregister(bd);
		sysfs_remove_group(&lcd_device->dev.kobj, &panel_sysfs_group);
		lcd_device_unregister(lcd_device);
		return rc;
	}
#endif
#if defined(CONFIG_ESD_FG_RECOVERY)
#ifdef ESD_DEBUG
	/* best-effort debug node, failure is only logged (as before) */
	rc = sysfs_create_file(&lcd_device->dev.kobj,
			&dev_attr_esd_check.attr);
	if (rc) {
		pr_info("sysfs create fail-%s\n",
				dev_attr_esd_check.attr.name);
	}
	rc = 0;
#endif
#endif
#if defined(DDI_VIDEO_ENHANCE_TUNING)
	rc = sysfs_create_file(&lcd_device->dev.kobj,
			&dev_attr_tuning.attr);
	if (rc) {
		pr_err("sysfs create fail-%s\n",
				dev_attr_tuning.attr.name);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE)
		sysfs_remove_group(&bd->dev.kobj, &bl_sysfs_group);
		backlight_device_unregister(bd);
#endif
		sysfs_remove_group(&lcd_device->dev.kobj, &panel_sysfs_group);
		lcd_device_unregister(lcd_device);
		return rc;
	}
#endif
	sysfs_enable = 1;
	pr_info("%s: done!! \n", __func__);
	return rc;
}
#endif
/*
 * mdss_dsi_panel_init() - parse the panel device-tree node and wire the
 * Samsung panel callbacks into @ctrl_pdata.
 *
 * @node:                panel device-tree node (required).
 * @ctrl_pdata:          DSI controller platform data to populate.
 * @cmd_cfg_cont_splash: when false, continuous splash is forced off
 *                       regardless of the DT property.
 *
 * Returns 0 on success or a negative errno from DT parsing / sysfs setup.
 */
int mdss_dsi_panel_init(struct device_node *node, struct mdss_dsi_ctrl_pdata *ctrl_pdata,
	bool cmd_cfg_cont_splash)
{
	int rc = 0;
	static const char *panel_name;
	bool cont_splash_enabled;
	bool partial_update_enabled;

	pr_debug("%s: ++ \n", __func__);

	if (!node)
		return -ENODEV;

	panel_name = of_get_property(node, "label", NULL);
	if (!panel_name)
		pr_info("%s:%d, panel name not specified\n",
				__func__, __LINE__);
	else
		pr_info("%s: Panel Name = %s\n", __func__, panel_name);

	/* NOTE(review): an unsupported panel is only logged, never failed —
	 * confirm this is intentional. */
	if (is_panel_supported(panel_name))
		LCD_DEBUG("Panel : %s is not supported:",panel_name);

	rc = mdss_panel_parse_dt(node, ctrl_pdata);
	if (rc)
		return rc;

	if (cmd_cfg_cont_splash)
		cont_splash_enabled = of_property_read_bool(node,
				"qcom,cont-splash-enabled");
	else
		cont_splash_enabled = false;
	if (!cont_splash_enabled) {
		pr_info("%s:%d Continuous splash flag not found.\n",
				__func__, __LINE__);
		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 0;
	} else {
		pr_info("%s:%d Continuous splash flag enabled.\n",
				__func__, __LINE__);
		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 1;
	}

#if defined(CONFIG_ESD_FG_RECOVERY)
	/* ESD fail-detect IRQ: registered here but kept disabled until the
	 * panel is actually powered on. */
	if (ctrl_pdata->panel_data.panel_info.pdest == DISPLAY_1 && esd_enable) {
		INIT_WORK(&err_fg_work, err_fg_work_func);
		rc = request_threaded_irq(gpio_to_irq(err_fg_gpio),
			NULL, err_fg_irq_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "esd_detect", NULL);
		if (rc) {
			pr_err("%s : Failed to request_irq.:ret=%d", __func__, rc);
		}
		disable_irq(gpio_to_irq(err_fg_gpio));
	}
#endif

	/* Hook the Samsung-specific panel operations. */
	ctrl_pdata->on = mdss_dsi_panel_on;
	ctrl_pdata->off = mdss_dsi_panel_off;
	ctrl_pdata->low_power_config = mdss_dsi_panel_low_power_config;
	ctrl_pdata->event_handler = samsung_dsi_panel_event_handler;
	ctrl_pdata->bl_fnc = mdss_dsi_panel_bl_ctrl;
	ctrl_pdata->panel_reset = mdss_dsi_samsung_panel_reset;
	ctrl_pdata->registered = mdss_dsi_panel_registered;
	ctrl_pdata->dimming_init = mdss_dsi_panel_dimming_init;
	ctrl_pdata->panel_blank = mdss_dsi_panel_blank;
	ctrl_pdata->bklt_ctrl = ctrl_pdata->panel_data.panel_info.bklt_ctrl;
	ctrl_pdata->panel_data.set_backlight = mdss_dsi_panel_bl_ctrl;
	ctrl_pdata->panel_data.alpm_data.alpm_register =\
		mdss_dsi_panel_alpm_register;

	mutex_init(&msd.lock);
#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
	init_completion(&te_check_comp);
#endif
	msd.dstat.on = 0;
	msd.dstat.recent_bright_level = 255;

#if 1
	partial_update_enabled =of_property_read_bool(node,"qcom,partial-update-enabled");
#else
	partial_update_enabled = 0;
#endif
	if (partial_update_enabled) {
		pr_info("%s:%d Partial update enabled.\n", __func__, __LINE__);
		ctrl_pdata->panel_data.panel_info.partial_update_enabled = 1;
		if(msd.panel == PANEL_FHD_OCTA_S6E3FA2_CMD) // temp for K FHD
			ctrl_pdata->set_col_page_addr = NULL;
		else
			ctrl_pdata->set_col_page_addr = mdss_dsi_set_col_page_addr;
		ctrl_pdata->panel_data.panel_info.partial_update_dcs_cmd_by_left =
			of_property_read_bool(node, "qcom,partial-update-dcs-cmd-by-left");
		ctrl_pdata->panel_data.panel_info.partial_update_roi_merge =
			of_property_read_bool(node, "qcom,partial-update-roi-merge");
	} else {
		pr_info("%s:%d Partial update disabled.\n", __func__, __LINE__);
		ctrl_pdata->panel_data.panel_info.partial_update_enabled = 0;
		ctrl_pdata->panel_data.panel_info.partial_update_dcs_cmd_by_left = 0;
		ctrl_pdata->panel_data.panel_info.partial_update_roi_merge = 0;
		ctrl_pdata->set_col_page_addr = NULL;
	}

#if defined(CONFIG_LCD_CLASS_DEVICE)
	rc = mdss_samsung_create_sysfs();
	if (rc) {
		pr_err("Failed to create sysfs for lcd driver..\n");
		return rc;
	}
#endif
#if defined(CONFIG_MDNIE_LITE_TUNING)
	pr_info("[%s] CONFIG_MDNIE_LITE_TUNING ok ! init class called!\n",
		__func__);
	init_mdnie_class();
#endif
#if defined(CONFIG_HAS_EARLYSUSPEND)
	msd.early_suspend.suspend = mipi_samsung_disp_early_suspend;
	msd.early_suspend.resume = mipi_samsung_disp_late_resume;
	msd.early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN-1;
	register_early_suspend(&msd.early_suspend);
#endif

	pr_debug("%s : --\n",__func__);
	return 0;
}
/* Return the raw LCD ID parsed from the "lcd_id=0x..." boot argument. */
int get_lcd_id(void)
{
	return lcd_id;
}
EXPORT_SYMBOL(get_lcd_id);

/* Return non-zero when the bootloader detected an attached panel
 * (low 24 bits of lcd_id non-zero). */
int get_samsung_lcd_attached(void)
{
	return lcd_attached;
}
EXPORT_SYMBOL(get_samsung_lcd_attached);
/*
 * Parse the hex digits following "lcd_id=0x" on the kernel command line
 * into the global lcd_id, and derive lcd_attached from the low 24 bits
 * (non-zero means the bootloader found a panel).
 *
 * Note: every character shifts the accumulator by a nibble; characters
 * outside [0-9a-fA-F] contribute 0 (same as the original switch ranges).
 *
 * Returns 1 when no value is present, 0 otherwise.
 */
static int __init get_lcd_id_cmdline(char *mode)
{
	char *p;

	lcd_id = 0;
	if (mode == NULL)
		return 1;

	for (p = mode; *p != '\0'; p++) {
		lcd_id <<= 4;
		if (*p >= '0' && *p <= '9')
			lcd_id += *p - '0';
		else if (*p >= 'a' && *p <= 'f')
			lcd_id += 10 + (*p - 'a');
		else if (*p >= 'A' && *p <= 'F')
			lcd_id += 10 + (*p - 'A');
	}

	lcd_attached = ((lcd_id&0xFFFFFF)!=0x000000);

	pr_info( "%s: LCD_ID = 0x%X, lcd_attached =%d", __func__,lcd_id, lcd_attached);

	return 0;
}
/* Consume "lcd_id=0x<hex>" from the kernel command line at early boot. */
__setup( "lcd_id=0x", get_lcd_id_cmdline );

MODULE_DESCRIPTION("Samsung DSI panel driver");
MODULE_AUTHOR("Krishna Kishor Jha <krishna.jha@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
frustreated/linux | drivers/mfd/max77620.c | 265 | 19184 | // SPDX-License-Identifier: GPL-2.0-only
/*
* Maxim MAX77620 MFD Driver
*
* Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Laxman Dewangan <ldewangan@nvidia.com>
* Chaitanya Bandi <bandik@nvidia.com>
* Mallikarjun Kasoju <mkasoju@nvidia.com>
*/
/****************** Teminology used in driver ********************
* Here are some terminology used from datasheet for quick reference:
* Flexible Power Sequence (FPS):
* The Flexible Power Sequencer (FPS) allows each regulator to power up under
* hardware or software control. Additionally, each regulator can power on
* independently or among a group of other regulators with an adjustable
* power-up and power-down delays (sequencing). GPIO1, GPIO2, and GPIO3 can
* be programmed to be part of a sequence allowing external regulators to be
* sequenced along with internal regulators. 32KHz clock can be programmed to
* be part of a sequence.
* There is 3 FPS confguration registers and all resources are configured to
* any of these FPS or no FPS.
*/
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77620.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
/* Chip handle stashed for max77620_pm_power_off(), which takes no context. */
static struct max77620_chip *max77620_scratch;

/* IRQ resources handed to the MFD child devices declared below. */
static const struct resource gpio_resources[] = {
	DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
};

static const struct resource power_resources[] = {
	DEFINE_RES_IRQ(MAX77620_IRQ_LBT_MBATLOW),
};

static const struct resource rtc_resources[] = {
	DEFINE_RES_IRQ(MAX77620_IRQ_TOP_RTC),
};

static const struct resource thermal_resources[] = {
	DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM1),
	DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM2),
};

/* Top-level interrupt map: status register index 0 is IRQTOP,
 * index 1 is the LBT (low-battery/thermal) status register. */
static const struct regmap_irq max77620_top_irqs[] = {
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_GLBL, 0, MAX77620_IRQ_TOP_GLBL_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_SD, 0, MAX77620_IRQ_TOP_SD_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_LDO, 0, MAX77620_IRQ_TOP_LDO_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_GPIO, 0, MAX77620_IRQ_TOP_GPIO_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_RTC, 0, MAX77620_IRQ_TOP_RTC_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_32K, 0, MAX77620_IRQ_TOP_32K_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_TOP_ONOFF, 0, MAX77620_IRQ_TOP_ONOFF_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_LBT_MBATLOW, 1, MAX77620_IRQ_LBM_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_LBT_TJALRM1, 1, MAX77620_IRQ_TJALRM1_MASK),
	REGMAP_IRQ_REG(MAX77620_IRQ_LBT_TJALRM2, 1, MAX77620_IRQ_TJALRM2_MASK),
};
/* MFD sub-devices for the MAX77620 variant. */
static const struct mfd_cell max77620_children[] = {
	{ .name = "max77620-pinctrl", },
	{ .name = "max77620-clock", },
	{ .name = "max77620-pmic", },
	{ .name = "max77620-watchdog", },
	{
		.name = "max77620-gpio",
		.resources = gpio_resources,
		.num_resources = ARRAY_SIZE(gpio_resources),
	}, {
		.name = "max77620-rtc",
		.resources = rtc_resources,
		.num_resources = ARRAY_SIZE(rtc_resources),
	}, {
		.name = "max77620-power",
		.resources = power_resources,
		.num_resources = ARRAY_SIZE(power_resources),
	}, {
		.name = "max77620-thermal",
		.resources = thermal_resources,
		.num_resources = ARRAY_SIZE(thermal_resources),
	},
};

/* MFD sub-devices for the MAX20024 variant (no thermal cell). */
static const struct mfd_cell max20024_children[] = {
	{ .name = "max20024-pinctrl", },
	{ .name = "max77620-clock", },
	{ .name = "max20024-pmic", },
	{ .name = "max77620-watchdog", },
	{
		.name = "max77620-gpio",
		.resources = gpio_resources,
		.num_resources = ARRAY_SIZE(gpio_resources),
	}, {
		.name = "max77620-rtc",
		.resources = rtc_resources,
		.num_resources = ARRAY_SIZE(rtc_resources),
	}, {
		.name = "max20024-power",
		.resources = power_resources,
		.num_resources = ARRAY_SIZE(power_resources),
	},
};

/* MFD sub-devices for the MAX77663 variant (no thermal cell). */
static const struct mfd_cell max77663_children[] = {
	{ .name = "max77620-pinctrl", },
	{ .name = "max77620-clock", },
	{ .name = "max77663-pmic", },
	{ .name = "max77620-watchdog", },
	{
		.name = "max77620-gpio",
		.resources = gpio_resources,
		.num_resources = ARRAY_SIZE(gpio_resources),
	}, {
		.name = "max77620-rtc",
		.resources = rtc_resources,
		.num_resources = ARRAY_SIZE(rtc_resources),
	}, {
		.name = "max77663-power",
		.resources = power_resources,
		.num_resources = ARRAY_SIZE(power_resources),
	},
};
/* Readable register window for the MAX77620. */
static const struct regmap_range max77620_readable_ranges[] = {
	regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};

static const struct regmap_access_table max77620_readable_table = {
	.yes_ranges = max77620_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(max77620_readable_ranges),
};

/* MAX20024 additionally exposes one extra register at the top. */
static const struct regmap_range max20024_readable_ranges[] = {
	regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
	regmap_reg_range(MAX20024_REG_MAX_ADD, MAX20024_REG_MAX_ADD),
};

static const struct regmap_access_table max20024_readable_table = {
	.yes_ranges = max20024_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(max20024_readable_ranges),
};

static const struct regmap_range max77620_writable_ranges[] = {
	regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};

static const struct regmap_access_table max77620_writable_table = {
	.yes_ranges = max77620_writable_ranges,
	.n_yes_ranges = ARRAY_SIZE(max77620_writable_ranges),
};

/* Ranges safe to cache; everything else is treated as volatile
 * (volatile_table below uses these as "no" ranges). */
static const struct regmap_range max77620_cacheable_ranges[] = {
	regmap_reg_range(MAX77620_REG_SD0_CFG, MAX77620_REG_LDO_CFG3),
	regmap_reg_range(MAX77620_REG_FPS_CFG0, MAX77620_REG_FPS_SD3),
};

static const struct regmap_access_table max77620_volatile_table = {
	.no_ranges = max77620_cacheable_ranges,
	.n_no_ranges = ARRAY_SIZE(max77620_cacheable_ranges),
};

/* 8-bit register / 8-bit value I2C regmap, one config per chip variant. */
static const struct regmap_config max77620_regmap_config = {
	.name = "power-slave",
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX77620_REG_DVSSD4 + 1,
	.cache_type = REGCACHE_RBTREE,
	.rd_table = &max77620_readable_table,
	.wr_table = &max77620_writable_table,
	.volatile_table = &max77620_volatile_table,
	.use_single_write = true,
};

static const struct regmap_config max20024_regmap_config = {
	.name = "power-slave",
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX20024_REG_MAX_ADD + 1,
	.cache_type = REGCACHE_RBTREE,
	.rd_table = &max20024_readable_table,
	.wr_table = &max77620_writable_table,
	.volatile_table = &max77620_volatile_table,
};

static const struct regmap_range max77663_readable_ranges[] = {
	regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
};

static const struct regmap_access_table max77663_readable_table = {
	.yes_ranges = max77663_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(max77663_readable_ranges),
};

static const struct regmap_range max77663_writable_ranges[] = {
	regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_CID5),
};

static const struct regmap_access_table max77663_writable_table = {
	.yes_ranges = max77663_writable_ranges,
	.n_yes_ranges = ARRAY_SIZE(max77663_writable_ranges),
};

static const struct regmap_config max77663_regmap_config = {
	.name = "power-slave",
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX77620_REG_CID5 + 1,
	.cache_type = REGCACHE_RBTREE,
	.rd_table = &max77663_readable_table,
	.wr_table = &max77663_writable_table,
	.volatile_table = &max77620_volatile_table,
};
/*
 * MAX77620 and MAX20024 has the following steps of the interrupt handling
 * for TOP interrupts:
 * 1. When interrupt occurs from PMIC, mask the PMIC interrupt by setting GLBLM.
 * 2. Read IRQTOP and service the interrupt.
 * 3. Once all interrupts has been checked and serviced, the interrupt service
 *    routine un-masks the hardware interrupt line by clearing GLBLM.
 */
static int max77620_irq_global_mask(void *irq_drv_data)
{
	struct max77620_chip *chip = irq_drv_data;
	int err;

	/* Raise GLBLM so the PMIC holds further interrupts while we service. */
	err = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
				 MAX77620_GLBLM_MASK, MAX77620_GLBLM_MASK);
	if (err < 0)
		dev_err(chip->dev, "Failed to set GLBLM: %d\n", err);

	return err;
}
/* Clear GLBLM again once all TOP interrupts have been serviced. */
static int max77620_irq_global_unmask(void *irq_drv_data)
{
	struct max77620_chip *chip = irq_drv_data;
	int err;

	err = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
				 MAX77620_GLBLM_MASK, 0);
	if (err < 0)
		dev_err(chip->dev, "Failed to reset GLBLM: %d\n", err);

	return err;
}
/* regmap-irq chip descriptor. The pre/post hooks implement the GLBLM
 * mask/unmask sequence described above; irq_drv_data is assigned in
 * max77620_probe() before the chip is added. */
static struct regmap_irq_chip max77620_top_irq_chip = {
	.name = "max77620-top",
	.irqs = max77620_top_irqs,
	.num_irqs = ARRAY_SIZE(max77620_top_irqs),
	.num_regs = 2,
	.status_base = MAX77620_REG_IRQTOP,
	.mask_base = MAX77620_REG_IRQTOPM,
	.handle_pre_irq = max77620_irq_global_mask,
	.handle_post_irq = max77620_irq_global_unmask,
};
/* max77620_get_fps_period_reg_value: Get FPS bit field value from
* requested periods.
* MAX77620 supports the FPS period of 40, 80, 160, 320, 540, 1280, 2560
* and 5120 microseconds. MAX20024 supports the FPS period of 20, 40, 80,
* 160, 320, 540, 1280 and 2560 microseconds.
* The FPS register has 3 bits field to set the FPS period as
* bits max77620 max20024
* 000 40 20
* 001 80 40
* :::
*/
/*
 * Convert a requested FPS period (us) to the 3-bit register encoding:
 * each step doubles the chip's minimum period; values above the largest
 * step saturate at 7. MAX77663 shares the MAX20024 minimum period.
 * Returns the encoding, or -EINVAL for an unknown chip ID.
 */
static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
					     int tperiod)
{
	int period_us;
	int bits;

	switch (chip->chip_id) {
	case MAX20024:
	case MAX77663:
		period_us = MAX20024_FPS_PERIOD_MIN_US;
		break;
	case MAX77620:
		period_us = MAX77620_FPS_PERIOD_MIN_US;
		break;
	default:
		return -EINVAL;
	}

	for (bits = 0; bits < 7; bits++) {
		if (period_us >= tperiod)
			return bits;
		period_us *= 2;
	}

	return bits;
}
/* max77620_config_fps: Configure FPS configuration registers
 * based on platform specific information.
 *
 * @fps_np must be named "fps0".."fps2"; the name selects which of the
 * MAX77620_FPS_COUNT configuration registers is programmed. Parsed
 * periods are clamped to the chip's maximum and cached on @chip for
 * use by the suspend/resume handlers. Returns 0 or a negative errno.
 */
static int max77620_config_fps(struct max77620_chip *chip,
			       struct device_node *fps_np)
{
	struct device *dev = chip->dev;
	unsigned int mask = 0, config = 0;
	u32 fps_max_period;
	u32 param_val;
	int tperiod, fps_id;
	int ret;
	char fps_name[10];

	switch (chip->chip_id) {
	case MAX20024:
		fps_max_period = MAX20024_FPS_PERIOD_MAX_US;
		break;
	case MAX77620:
		fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
		break;
	case MAX77663:
		/* MAX77663 uses the MAX20024 period range. */
		fps_max_period = MAX20024_FPS_PERIOD_MAX_US;
		break;
	default:
		return -EINVAL;
	}

	/* Map the node name "fpsN" onto an FPS register index. */
	for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
		sprintf(fps_name, "fps%d", fps_id);
		if (of_node_name_eq(fps_np, fps_name))
			break;
	}

	if (fps_id == MAX77620_FPS_COUNT) {
		dev_err(dev, "FPS node name %pOFn is not valid\n", fps_np);
		return -EINVAL;
	}

	ret = of_property_read_u32(fps_np, "maxim,shutdown-fps-time-period-us",
				   &param_val);
	if (!ret) {
		mask |= MAX77620_FPS_TIME_PERIOD_MASK;
		chip->shutdown_fps_period[fps_id] = min(param_val,
							fps_max_period);
		tperiod = max77620_get_fps_period_reg_value(chip,
				chip->shutdown_fps_period[fps_id]);
		config |= tperiod << MAX77620_FPS_TIME_PERIOD_SHIFT;
	}

	ret = of_property_read_u32(fps_np, "maxim,suspend-fps-time-period-us",
				   &param_val);
	if (!ret)
		chip->suspend_fps_period[fps_id] = min(param_val,
						       fps_max_period);

	/* 0: EN0 pin, 1: EN1 pin, 2: software (also sets the SW enable bit). */
	ret = of_property_read_u32(fps_np, "maxim,fps-event-source",
				   &param_val);
	if (!ret) {
		if (param_val > 2) {
			dev_err(dev, "FPS%d event-source invalid\n", fps_id);
			return -EINVAL;
		}
		mask |= MAX77620_FPS_EN_SRC_MASK;
		config |= param_val << MAX77620_FPS_EN_SRC_SHIFT;
		if (param_val == 2) {
			mask |= MAX77620_FPS_ENFPS_SW_MASK;
			config |= MAX77620_FPS_ENFPS_SW;
		}
	}

	/* First FPS node to specify it decides sleep vs. global-LPM mode. */
	if (!chip->sleep_enable && !chip->enable_global_lpm) {
		ret = of_property_read_u32(fps_np,
				"maxim,device-state-on-disabled-event",
				&param_val);
		if (!ret) {
			if (param_val == 0)
				chip->sleep_enable = true;
			else if (param_val == 1)
				chip->enable_global_lpm = true;
		}
	}

	ret = regmap_update_bits(chip->rmap, MAX77620_REG_FPS_CFG0 + fps_id,
				 mask, config);
	if (ret < 0) {
		dev_err(dev, "Failed to update FPS CFG: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * Parse the optional "fps" device-tree node, program each child FPS
 * configuration, then apply the resulting global low-power / sleep and
 * wake settings. Periods default to -1 ("not configured") so that
 * suspend/resume skip unprogrammed slots. Returns 0 or a negative errno.
 */
static int max77620_initialise_fps(struct max77620_chip *chip)
{
	struct device *dev = chip->dev;
	struct device_node *fps_np, *fps_child;
	u8 config;
	int fps_id;
	int ret;

	for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
		chip->shutdown_fps_period[fps_id] = -1;
		chip->suspend_fps_period[fps_id] = -1;
	}

	fps_np = of_get_child_by_name(dev->of_node, "fps");
	if (!fps_np)
		goto skip_fps;

	for_each_child_of_node(fps_np, fps_child) {
		ret = max77620_config_fps(chip, fps_child);
		if (ret < 0) {
			/* Drop the reference the iterator still holds. */
			of_node_put(fps_child);
			return ret;
		}
	}

	config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
	ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
				 MAX77620_ONOFFCNFG2_SLP_LPM_MSK, config);
	if (ret < 0) {
		dev_err(dev, "Failed to update SLP_LPM: %d\n", ret);
		return ret;
	}

skip_fps:
	if (chip->chip_id == MAX77663)
		return 0;

	/* Enable wake on EN0 pin */
	ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
				 MAX77620_ONOFFCNFG2_WK_EN0,
				 MAX77620_ONOFFCNFG2_WK_EN0);
	if (ret < 0) {
		dev_err(dev, "Failed to update WK_EN0: %d\n", ret);
		return ret;
	}

	/* For MAX20024, SLPEN will be POR reset if CLRSE is b11 */
	if ((chip->chip_id == MAX20024) && chip->sleep_enable) {
		config = MAX77620_ONOFFCNFG1_SLPEN | MAX20024_ONOFFCNFG1_CLRSE;
		ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
					 config, config);
		if (ret < 0) {
			dev_err(dev, "Failed to update SLPEN: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
/*
 * Read the six chip-identification registers (CID0..CID5), log each at
 * debug level, and report the OTP (CID4) and ES (CID5) versions.
 * Returns 0 on success or the failing regmap_read() error.
 */
static int max77620_read_es_version(struct max77620_chip *chip)
{
	u8 cid_val[6];
	unsigned int val;
	int reg;
	int ret = 0;

	for (reg = MAX77620_REG_CID0; reg <= MAX77620_REG_CID5; reg++) {
		ret = regmap_read(chip->rmap, reg, &val);
		if (ret < 0) {
			dev_err(chip->dev, "Failed to read CID: %d\n", ret);
			return ret;
		}
		dev_dbg(chip->dev, "CID%d: 0x%02x\n",
			reg - MAX77620_REG_CID0, val);
		cid_val[reg - MAX77620_REG_CID0] = val;
	}

	/* CID4 is OTP Version and CID5 is ES version */
	dev_info(chip->dev, "PMIC Version OTP:0x%02X and ES:0x%X\n",
		 cid_val[4], MAX77620_CID5_DIDM(cid_val[5]));

	return ret;
}
/* pm_power_off hook: issue a software reset via ONOFFCNFG1.SFT_RST on the
 * chip saved in max77620_scratch (set in probe when the DT marks this
 * device as the system power controller). */
static void max77620_pm_power_off(void)
{
	struct max77620_chip *chip = max77620_scratch;

	regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
			   MAX77620_ONOFFCNFG1_SFT_RST,
			   MAX77620_ONOFFCNFG1_SFT_RST);
}
/*
 * I2C probe: select the per-variant regmap config and MFD cell list,
 * initialise the regmap and the TOP irq chip, program the FPS blocks,
 * register the child devices, and optionally install the power-off hook.
 * All resources are devm-managed; returns 0 or a negative errno.
 */
static int max77620_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	const struct regmap_config *rmap_config;
	struct max77620_chip *chip;
	const struct mfd_cell *mfd_cells;
	int n_mfd_cells;
	bool pm_off;
	int ret;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	i2c_set_clientdata(client, chip);
	chip->dev = &client->dev;
	chip->chip_irq = client->irq;
	chip->chip_id = (enum max77620_chip_id)id->driver_data;

	switch (chip->chip_id) {
	case MAX77620:
		mfd_cells = max77620_children;
		n_mfd_cells = ARRAY_SIZE(max77620_children);
		rmap_config = &max77620_regmap_config;
		break;
	case MAX20024:
		mfd_cells = max20024_children;
		n_mfd_cells = ARRAY_SIZE(max20024_children);
		rmap_config = &max20024_regmap_config;
		break;
	case MAX77663:
		mfd_cells = max77663_children;
		n_mfd_cells = ARRAY_SIZE(max77663_children);
		rmap_config = &max77663_regmap_config;
		break;
	default:
		dev_err(chip->dev, "ChipID is invalid %d\n", chip->chip_id);
		return -EINVAL;
	}

	chip->rmap = devm_regmap_init_i2c(client, rmap_config);
	if (IS_ERR(chip->rmap)) {
		ret = PTR_ERR(chip->rmap);
		dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
		return ret;
	}

	/* Also serves as a first sanity check that the chip responds. */
	ret = max77620_read_es_version(chip);
	if (ret < 0)
		return ret;

	/* Give the pre/post GLBLM hooks access to this chip instance. */
	max77620_top_irq_chip.irq_drv_data = chip;
	ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
				       IRQF_ONESHOT | IRQF_SHARED, 0,
				       &max77620_top_irq_chip,
				       &chip->top_irq_data);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to add regmap irq: %d\n", ret);
		return ret;
	}

	ret = max77620_initialise_fps(chip);
	if (ret < 0)
		return ret;

	ret = devm_mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE,
				   mfd_cells, n_mfd_cells, NULL, 0,
				   regmap_irq_get_domain(chip->top_irq_data));
	if (ret < 0) {
		dev_err(chip->dev, "Failed to add MFD children: %d\n", ret);
		return ret;
	}

	/* Install the power-off hook only if nobody claimed it already. */
	pm_off = of_device_is_system_power_controller(client->dev.of_node);
	if (pm_off && !pm_power_off) {
		max77620_scratch = chip;
		pm_power_off = max77620_pm_power_off;
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * Program the time-period field of FPS register @fps_id from a period
 * in microseconds. Returns 0 or a negative errno.
 */
static int max77620_set_fps_period(struct max77620_chip *chip,
				   int fps_id, int time_period)
{
	int period = max77620_get_fps_period_reg_value(chip, time_period);
	int ret;

	/*
	 * Propagate -EINVAL from an unknown chip ID instead of left-shifting
	 * a negative value into the register field (undefined behaviour and
	 * a corrupt register write in the original code).
	 */
	if (period < 0)
		return period;

	ret = regmap_update_bits(chip->rmap, MAX77620_REG_FPS_CFG0 + fps_id,
				 MAX77620_FPS_TIME_PERIOD_MASK,
				 period << MAX77620_FPS_TIME_PERIOD_SHIFT);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to update FPS period: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * System suspend: switch every configured FPS slot to its suspend
 * period, program SLPEN (except on MAX20024, where init handles it),
 * disable EN0 wake where applicable, and mask the chip IRQ.
 */
static int max77620_i2c_suspend(struct device *dev)
{
	struct max77620_chip *chip = dev_get_drvdata(dev);
	struct i2c_client *client = to_i2c_client(dev);
	unsigned int config;
	int fps;
	int ret;

	for (fps = 0; fps < MAX77620_FPS_COUNT; fps++) {
		/* -1 means this slot was never configured from DT. */
		if (chip->suspend_fps_period[fps] < 0)
			continue;

		ret = max77620_set_fps_period(chip, fps,
					      chip->suspend_fps_period[fps]);
		if (ret < 0)
			return ret;
	}

	/*
	 * For MAX20024: No need to configure SLPEN on suspend as
	 * it will be configured on Init.
	 */
	if (chip->chip_id == MAX20024)
		goto out;

	config = (chip->sleep_enable) ? MAX77620_ONOFFCNFG1_SLPEN : 0;
	ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
				 MAX77620_ONOFFCNFG1_SLPEN,
				 config);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sleep in suspend: %d\n", ret);
		return ret;
	}

	if (chip->chip_id == MAX77663)
		goto out;

	/* Disable WK_EN0 */
	ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
				 MAX77620_ONOFFCNFG2_WK_EN0, 0);
	if (ret < 0) {
		dev_err(dev, "Failed to configure WK_EN in suspend: %d\n", ret);
		return ret;
	}

out:
	disable_irq(client->irq);

	return 0;
}
/*
 * System resume: restore the active (shutdown) FPS periods that suspend
 * replaced, re-enable EN0 wake where applicable, and unmask the chip IRQ.
 */
static int max77620_i2c_resume(struct device *dev)
{
	struct max77620_chip *chip = dev_get_drvdata(dev);
	struct i2c_client *client = to_i2c_client(dev);
	int ret;
	int fps;

	for (fps = 0; fps < MAX77620_FPS_COUNT; fps++) {
		/* -1 means this slot was never configured from DT. */
		if (chip->shutdown_fps_period[fps] < 0)
			continue;

		ret = max77620_set_fps_period(chip, fps,
					      chip->shutdown_fps_period[fps]);
		if (ret < 0)
			return ret;
	}

	/*
	 * For MAX20024: No need to configure WKEN0 on resume as
	 * it is configured on Init.
	 */
	if (chip->chip_id == MAX20024 || chip->chip_id == MAX77663)
		goto out;

	/* Enable WK_EN0 */
	ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
				 MAX77620_ONOFFCNFG2_WK_EN0,
				 MAX77620_ONOFFCNFG2_WK_EN0);
	if (ret < 0) {
		/* Fixed garbled log text: was "WK_EN0 n resume". */
		dev_err(dev, "Failed to configure WK_EN0 in resume: %d\n", ret);
		return ret;
	}

out:
	enable_irq(client->irq);

	return 0;
}
#endif
/* I2C IDs: driver_data carries the enum max77620_chip_id variant. */
static const struct i2c_device_id max77620_id[] = {
	{"max77620", MAX77620},
	{"max20024", MAX20024},
	{"max77663", MAX77663},
	{},
};

static const struct dev_pm_ops max77620_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(max77620_i2c_suspend, max77620_i2c_resume)
};

/* Built-in only driver (no module unload path, hence no .remove). */
static struct i2c_driver max77620_driver = {
	.driver = {
		.name = "max77620",
		.pm = &max77620_pm_ops,
	},
	.probe = max77620_probe,
	.id_table = max77620_id,
};
builtin_i2c_driver(max77620_driver);
| gpl-2.0 |
shobhitka/linux-kernel | sound/soc/jz4740/jz4740-i2s.c | 521 | 14104 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/dmaengine_pcm.h>

#include "jz4740-i2s.h"
/* DMA request line numbers of the AIC on the JZ4740. */
#define JZ4740_DMA_TYPE_AIC_TRANSMIT 24
#define JZ4740_DMA_TYPE_AIC_RECEIVE 25

/* AIC register offsets. */
#define JZ_REG_AIC_CONF 0x00
#define JZ_REG_AIC_CTRL 0x04
#define JZ_REG_AIC_I2S_FMT 0x10
#define JZ_REG_AIC_FIFO_STATUS 0x14
#define JZ_REG_AIC_I2S_STATUS 0x1c
#define JZ_REG_AIC_CLK_DIV 0x30
#define JZ_REG_AIC_FIFO 0x34

/* AIC_CONF bits (JZ4740 FIFO threshold field layout). */
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12)
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8)
#define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6)
#define JZ_AIC_CONF_INTERNAL_CODEC BIT(5)
#define JZ_AIC_CONF_I2S BIT(4)
#define JZ_AIC_CONF_RESET BIT(3)
#define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2)
#define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1)
#define JZ_AIC_CONF_ENABLE BIT(0)
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8

/* The JZ4780 moved/widened the FIFO threshold fields. */
#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 24
#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 16
#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_MASK \
	(0xf << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET)
#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_MASK \
	(0x1f << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET)

/* AIC_CTRL bits. */
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19)
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16)
#define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15)
#define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14)
#define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11)
#define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10)
#define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9)
#define JZ_AIC_CTRL_FLUSH BIT(8)
#define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6)
#define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5)
#define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4)
#define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3)
#define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2)
#define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1)
#define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0)
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16

/* AIC_I2S_FMT bits. */
#define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12)
#define JZ_AIC_I2S_FMT_DISABLE_BIT_ICLK BIT(13)
#define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4)
#define JZ_AIC_I2S_FMT_MSB BIT(0)

#define JZ_AIC_I2S_STATUS_BUSY BIT(2)
#define JZ_AIC_CLK_DIV_MASK 0xf
/*
 * I2SCDR divider fields: the playback divider (DV) occupies bits 3:0;
 * on the JZ4780 the capture divider (IDV) is a separate field at bits
 * 11:8. The previous DV shift of 8 collided with IDV and wrote the
 * playback divider into the capture field (same defect fixed upstream
 * by "ASoC: jz4740-i2s: Fix divider written at incorrect offset in
 * register").
 */
#define I2SDIV_DV_SHIFT 0
#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
#define I2SDIV_IDV_SHIFT 8
#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
/* SoC generation; JZ4780 differs in FIFO threshold layout and has a
 * dedicated capture clock divider. */
enum jz47xx_i2s_version {
	JZ_I2S_JZ4740,
	JZ_I2S_JZ4780,
};

/* Per-device driver state. */
struct jz4740_i2s {
	struct resource *mem;		/* MMIO resource of the AIC block */
	void __iomem *base;		/* ioremapped AIC registers */
	dma_addr_t phys_base;		/* physical base, used for DMA FIFO addr */

	struct clk *clk_aic;		/* controller (register) clock */
	struct clk *clk_i2s;		/* bit/sample clock source */

	struct snd_dmaengine_dai_dma_data playback_dma_data;
	struct snd_dmaengine_dai_dma_data capture_dma_data;

	enum jz47xx_i2s_version version;
};
/* 32-bit MMIO accessors for the AIC register block. */
static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s,
	unsigned int reg)
{
	return readl(i2s->base + reg);
}

static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s,
	unsigned int reg, uint32_t value)
{
	writel(value, i2s->base + reg);
}
/* DAI startup: on the first open (dai->active == 0) flush the FIFO,
 * enable the bit clock, and enable the controller. */
static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf, ctrl;

	if (dai->active)
		return 0;

	/* Discard any stale FIFO contents before enabling. */
	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
	ctrl |= JZ_AIC_CTRL_FLUSH;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);

	clk_prepare_enable(i2s->clk_i2s);

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf |= JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	return 0;
}
/* DAI shutdown: when the last substream closes (dai->active already 0),
 * disable the controller and gate the bit clock. */
static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	if (dai->active)
		return;

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf &= ~JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	clk_disable_unprepare(i2s->clk_i2s);
}
/*
 * Start/stop the stream by toggling the direction-specific enable and
 * DMA-request bits in AIC_CTRL. Runs in atomic context.
 */
static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t bits;
	uint32_t val;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		bits = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA;
	else
		bits = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA;

	val = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		val |= bits;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		val &= ~bits;
		break;
	default:
		return -EINVAL;
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, val);

	return 0;
}
/*
 * Configure DAI format: clock mastering (CBS/CBM x CFS/CFM), data format
 * (I2S or MSB-justified) and polarity (only normal frame/bit clock is
 * supported). Returns -EINVAL for unsupported combinations.
 */
static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t format = 0;
	uint32_t conf;

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER);

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* Codec is full slave: we master both clocks and drive SYS_CLK. */
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER;
		format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		conf |= JZ_AIC_CONF_SYNC_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_MSB:
		format |= JZ_AIC_I2S_FMT_MSB;
		break;
	case SND_SOC_DAIFMT_I2S:
		break;
	default:
		return -EINVAL;
	}

	/* Hardware only supports normal bit/frame clock polarity. */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	default:
		return -EINVAL;
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format);

	return 0;
}
/*
 * Configure sample size, mono-to-stereo duplication and the I2S clock
 * divider for the stream. The divider assumes 64 bit-clock cycles per
 * frame (div = clk / (64 * rate)). On the JZ4780 capture uses its own
 * IDV divider field; older SoCs share the single DV field.
 */
static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	unsigned int sample_size;
	uint32_t ctrl, div_reg;
	int div;

	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);

	div_reg = jz4740_i2s_read(i2s, JZ_REG_AIC_CLK_DIV);
	div = clk_get_rate(i2s->clk_i2s) / (64 * params_rate(params));

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		sample_size = 0;
		break;
	case SNDRV_PCM_FORMAT_S16:
		sample_size = 1;
		break;
	default:
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET;
		/* Duplicate mono samples to both channels in hardware. */
		if (params_channels(params) == 1)
			ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO;
		else
			ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;

		div_reg &= ~I2SDIV_DV_MASK;
		div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
	} else {
		ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;

		if (i2s->version >= JZ_I2S_JZ4780) {
			div_reg &= ~I2SDIV_IDV_MASK;
			div_reg |= (div - 1) << I2SDIV_IDV_SHIFT;
		} else {
			div_reg &= ~I2SDIV_DV_MASK;
			div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
		}
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
	jz4740_i2s_write(i2s, JZ_REG_AIC_CLK_DIV, div_reg);

	return 0;
}
/*
 * Select the I2S clock parent (external oscillator or PLL/2) and, for
 * the PLL source, set the requested rate.
 *
 * Fix: clk_get() can return an ERR_PTR; the original passed that
 * straight into clk_set_parent()/clk_put(). Check with IS_ERR() and
 * propagate the error instead.
 */
static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	struct clk *parent;
	int ret = 0;

	switch (clk_id) {
	case JZ4740_I2S_CLKSRC_EXT:
		parent = clk_get(NULL, "ext");
		if (IS_ERR(parent))
			return PTR_ERR(parent);
		clk_set_parent(i2s->clk_i2s, parent);
		break;
	case JZ4740_I2S_CLKSRC_PLL:
		parent = clk_get(NULL, "pll half");
		if (IS_ERR(parent))
			return PTR_ERR(parent);
		clk_set_parent(i2s->clk_i2s, parent);
		ret = clk_set_rate(i2s->clk_i2s, freq);
		break;
	default:
		return -EINVAL;
	}
	clk_put(parent);

	return ret;
}
/* DAI suspend: disable the controller and bit clock if a stream is
 * active, then always gate the register clock. */
static int jz4740_i2s_suspend(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	if (dai->active) {
		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf &= ~JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

		clk_disable_unprepare(i2s->clk_i2s);
	}

	clk_disable_unprepare(i2s->clk_aic);

	return 0;
}
/* PM resume: ungate the clocks and re-enable the AIC if a stream was live. */
static int jz4740_i2s_resume(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	clk_prepare_enable(i2s->clk_aic);

	if (dai->active) {
		uint32_t conf;

		clk_prepare_enable(i2s->clk_i2s);

		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf |= JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	}

	return 0;
}
/*
 * Fill in the dmaengine parameters for both stream directions.
 * NOTE(review): the "i2c" in the name looks like a historical typo for
 * "i2s"; kept because the symbol is referenced elsewhere in this file.
 */
static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s)
{
	struct snd_dmaengine_dai_dma_data *tx = &i2s->playback_dma_data;
	struct snd_dmaengine_dai_dma_data *rx = &i2s->capture_dma_data;

	/* Both directions use the same FIFO register and burst length. */
	tx->addr = i2s->phys_base + JZ_REG_AIC_FIFO;
	tx->maxburst = 16;
	tx->slave_id = JZ4740_DMA_TYPE_AIC_TRANSMIT;

	rx->addr = i2s->phys_base + JZ_REG_AIC_FIFO;
	rx->maxburst = 16;
	rx->slave_id = JZ4740_DMA_TYPE_AIC_RECEIVE;
}
/*
 * DAI probe: take the controller clock, publish the DMA configuration
 * and program a default AIC setup (FIFO thresholds, I2S mode, internal
 * codec) after resetting the block.
 */
static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	clk_prepare_enable(i2s->clk_aic);

	jz4740_i2c_init_pcm_config(i2s);
	snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
		&i2s->capture_dma_data);

	/* The JZ4780 variant keeps its threshold fields at different
	 * offsets; the remaining configuration bits are shared. */
	if (i2s->version >= JZ_I2S_JZ4780) {
		conf = (7 << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
			(8 << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
			JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
			JZ_AIC_CONF_I2S |
			JZ_AIC_CONF_INTERNAL_CODEC;
	} else {
		conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
			(8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
			JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
			JZ_AIC_CONF_I2S |
			JZ_AIC_CONF_INTERNAL_CODEC;
	}

	/* Pulse the reset bit, then apply the configuration. */
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET);
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	return 0;
}
/* DAI teardown: release the clock taken in jz4740_i2s_dai_probe(). */
static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *priv = snd_soc_dai_get_drvdata(dai);

	clk_disable_unprepare(priv->clk_aic);
	return 0;
}
/* DAI callbacks shared by the JZ4740 and JZ4780 DAI drivers below. */
static const struct snd_soc_dai_ops jz4740_i2s_dai_ops = {
	.startup = jz4740_i2s_startup,
	.shutdown = jz4740_i2s_shutdown,
	.trigger = jz4740_i2s_trigger,
	.hw_params = jz4740_i2s_hw_params,
	.set_fmt = jz4740_i2s_set_fmt,
	.set_sysclk = jz4740_i2s_set_sysclk,
};
/* Sample formats handled by jz4740_i2s_hw_params(): 8- and 16-bit only. */
#define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \
		SNDRV_PCM_FMTBIT_S16_LE)

/*
 * JZ4740 DAI description.  A single clock divider serves both stream
 * directions on this part, hence symmetric_rates: playback and capture
 * must run at the same sample rate.
 */
static struct snd_soc_dai_driver jz4740_i2s_dai = {
	.probe = jz4740_i2s_dai_probe,
	.remove = jz4740_i2s_dai_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.symmetric_rates = 1,
	.ops = &jz4740_i2s_dai_ops,
	.suspend = jz4740_i2s_suspend,
	.resume = jz4740_i2s_resume,
};
/*
 * JZ4780 DAI description.  This variant has an independent capture
 * divider (see jz4740_i2s_hw_params()), so rates need not be symmetric.
 */
static struct snd_soc_dai_driver jz4780_i2s_dai = {
	.probe = jz4740_i2s_dai_probe,
	.remove = jz4740_i2s_dai_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.ops = &jz4740_i2s_dai_ops,
	.suspend = jz4740_i2s_suspend,
	.resume = jz4740_i2s_resume,
};
/* ASoC component wrapper registered alongside the DAI. */
static const struct snd_soc_component_driver jz4740_i2s_component = {
	.name = "jz4740-i2s",
};

#ifdef CONFIG_OF
/* Device-tree match table; .data carries the controller variant. */
static const struct of_device_id jz4740_of_matches[] = {
	{ .compatible = "ingenic,jz4740-i2s", .data = (void *)JZ_I2S_JZ4740 },
	{ .compatible = "ingenic,jz4780-i2s", .data = (void *)JZ_I2S_JZ4780 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jz4740_of_matches);
#endif
static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
struct jz4740_i2s *i2s;
struct resource *mem;
int ret;
const struct of_device_id *match;
i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
match = of_match_device(jz4740_of_matches, &pdev->dev);
if (match)
i2s->version = (enum jz47xx_i2s_version)match->data;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2s->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
i2s->phys_base = mem->start;
i2s->clk_aic = devm_clk_get(&pdev->dev, "aic");
if (IS_ERR(i2s->clk_aic))
return PTR_ERR(i2s->clk_aic);
i2s->clk_i2s = devm_clk_get(&pdev->dev, "i2s");
if (IS_ERR(i2s->clk_i2s))
return PTR_ERR(i2s->clk_i2s);
platform_set_drvdata(pdev, i2s);
if (i2s->version == JZ_I2S_JZ4780)
ret = devm_snd_soc_register_component(&pdev->dev,
&jz4740_i2s_component, &jz4780_i2s_dai, 1);
else
ret = devm_snd_soc_register_component(&pdev->dev,
&jz4740_i2s_component, &jz4740_i2s_dai, 1);
if (ret)
return ret;
return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
SND_DMAENGINE_PCM_FLAG_COMPAT);
}
/* Platform driver glue and module metadata. */
static struct platform_driver jz4740_i2s_driver = {
	.probe = jz4740_i2s_dev_probe,
	.driver = {
		.name = "jz4740-i2s",
		.of_match_table = of_match_ptr(jz4740_of_matches)
	},
};

module_platform_driver(jz4740_i2s_driver);

MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>");
MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-i2s");
| gpl-2.0 |
CandyDevices/kernel_mediatek_sprout | fs/autofs4/root.c | 1545 | 23791 | /* -*- c -*- --------------------------------------------------------------- *
*
* linux/fs/autofs/root.c
*
* Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
* Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
* Copyright 2001-2006 Ian Kent <raven@themaw.net>
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* ------------------------------------------------------------------------- */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include "autofs_i.h"
/* Forward declarations for the operation tables defined below. */
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
static int autofs4_dir_mkdir(struct inode *,struct dentry *,umode_t);
static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
#ifdef CONFIG_COMPAT
static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
#endif
static int autofs4_dir_open(struct inode *inode, struct file *file);
static struct dentry *autofs4_lookup(struct inode *,struct dentry *, unsigned int);
static struct vfsmount *autofs4_d_automount(struct path *);
static int autofs4_d_manage(struct dentry *, bool);
static void autofs4_dentry_release(struct dentry *);
/* File operations for the autofs root directory (ioctl entry points). */
const struct file_operations autofs4_root_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.llseek		= dcache_dir_lseek,
	.unlocked_ioctl	= autofs4_root_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= autofs4_root_compat_ioctl,
#endif
};

/* File operations for subdirectories; open is intercepted (see below). */
const struct file_operations autofs4_dir_operations = {
	.open		= autofs4_dir_open,
	.release	= dcache_dir_close,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.llseek		= dcache_dir_lseek,
};

/* Inode operations for autofs directories. */
const struct inode_operations autofs4_dir_inode_operations = {
	.lookup		= autofs4_lookup,
	.unlink		= autofs4_dir_unlink,
	.symlink	= autofs4_dir_symlink,
	.mkdir		= autofs4_dir_mkdir,
	.rmdir		= autofs4_dir_rmdir,
};

/* Dentry operations: automount triggering and transit management. */
const struct dentry_operations autofs4_dentry_operations = {
	.d_automount	= autofs4_d_automount,
	.d_manage	= autofs4_d_manage,
	.d_release	= autofs4_dentry_release,
};
/* Record @dentry on the per-sb list of lookups in flight (refcounted). */
static void autofs4_add_active(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);

	if (!ino)
		return;

	spin_lock(&sbi->lookup_lock);
	/* The first reference links the entry onto the active list. */
	if (!ino->active_count && list_empty(&ino->active))
		list_add(&ino->active, &sbi->active_list);
	ino->active_count++;
	spin_unlock(&sbi->lookup_lock);
}
/* Drop one active-lookup reference; unlink on the last one. */
static void autofs4_del_active(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);

	if (!ino)
		return;

	spin_lock(&sbi->lookup_lock);
	ino->active_count--;
	/* The last reference removes the entry from the active list. */
	if (!ino->active_count && !list_empty(&ino->active))
		list_del_init(&ino->active);
	spin_unlock(&sbi->lookup_lock);
}
/*
 * Open a directory within the autofs file system.  For non-daemon
 * openers an empty, unmounted-on directory means the mount failed,
 * so report -ENOENT rather than showing an empty dir.
 */
static int autofs4_dir_open(struct inode *inode, struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);

	DPRINTK("file=%p dentry=%p %.*s",
		file, dentry, dentry->d_name.len, dentry->d_name.name);

	/* The daemon (oz mode) always gets plain libfs behaviour. */
	if (autofs4_oz_mode(sbi))
		goto out;

	/*
	 * An empty directory in an autofs file system is always a
	 * mount point. The daemon must have failed to mount this
	 * during lookup so it doesn't exist. This can happen, for
	 * example, if user space returns an incorrect status for a
	 * mount request. Otherwise we're doing a readdir on the
	 * autofs file system so just let the libfs routines handle
	 * it.
	 */
	spin_lock(&sbi->lookup_lock);
	if (!d_mountpoint(dentry) && simple_empty(dentry)) {
		spin_unlock(&sbi->lookup_lock);
		return -ENOENT;
	}
	spin_unlock(&sbi->lookup_lock);

out:
	return dcache_dir_open(inode, file);
}
static void autofs4_dentry_release(struct dentry *de)
{
struct autofs_info *ino = autofs4_dentry_ino(de);
struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
DPRINTK("releasing %p", de);
if (!ino)
return;
if (sbi) {
spin_lock(&sbi->lookup_lock);
if (!list_empty(&ino->active))
list_del(&ino->active);
if (!list_empty(&ino->expiring))
list_del(&ino->expiring);
spin_unlock(&sbi->lookup_lock);
}
autofs4_free_ino(ino);
}
/*
 * Search sbi->active_list for an unhashed but still-referenced dentry
 * with the same parent and name as @dentry - i.e. a lookup already in
 * progress.  Returns it with an extra reference held, or NULL.
 */
static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct dentry *parent = dentry->d_parent;
	struct qstr *name = &dentry->d_name;
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct list_head *p, *head;

	spin_lock(&sbi->lookup_lock);
	head = &sbi->active_list;
	list_for_each(p, head) {
		struct autofs_info *ino;
		struct dentry *active;
		struct qstr *qstr;

		ino = list_entry(p, struct autofs_info, active);
		active = ino->dentry;

		spin_lock(&active->d_lock);

		/* Already gone? */
		if (active->d_count == 0)
			goto next;

		/* Cheap comparisons (hash, parent, length) before memcmp. */
		qstr = &active->d_name;

		if (active->d_name.hash != hash)
			goto next;
		if (active->d_parent != parent)
			goto next;

		if (qstr->len != len)
			goto next;
		if (memcmp(qstr->name, str, len))
			goto next;

		/* Only an unhashed dentry counts as "in progress". */
		if (d_unhashed(active)) {
			dget_dlock(active);
			spin_unlock(&active->d_lock);
			spin_unlock(&sbi->lookup_lock);
			return active;
		}
next:
		spin_unlock(&active->d_lock);
	}
	spin_unlock(&sbi->lookup_lock);

	return NULL;
}
/*
 * Search sbi->expiring_list for an unhashed dentry matching @dentry's
 * parent and name - i.e. an expire in progress for the same path.
 * Returns it with an extra reference held, or NULL.
 */
static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct dentry *parent = dentry->d_parent;
	struct qstr *name = &dentry->d_name;
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct list_head *p, *head;

	spin_lock(&sbi->lookup_lock);
	head = &sbi->expiring_list;
	list_for_each(p, head) {
		struct autofs_info *ino;
		struct dentry *expiring;
		struct qstr *qstr;

		ino = list_entry(p, struct autofs_info, expiring);
		expiring = ino->dentry;

		spin_lock(&expiring->d_lock);

		/* Bad luck, we've already been dentry_iput */
		if (!expiring->d_inode)
			goto next;

		/* Cheap comparisons (hash, parent, length) before memcmp. */
		qstr = &expiring->d_name;

		if (expiring->d_name.hash != hash)
			goto next;
		if (expiring->d_parent != parent)
			goto next;

		if (qstr->len != len)
			goto next;
		if (memcmp(qstr->name, str, len))
			goto next;

		if (d_unhashed(expiring)) {
			dget_dlock(expiring);
			spin_unlock(&expiring->d_lock);
			spin_unlock(&sbi->lookup_lock);
			return expiring;
		}
next:
		spin_unlock(&expiring->d_lock);
	}
	spin_unlock(&sbi->lookup_lock);

	return NULL;
}
/*
 * Block until a pending mount request for @dentry completes (no-op if
 * none is pending) and refresh the expiry timestamp.
 */
static int autofs4_mount_wait(struct dentry *dentry)
{
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	int status = 0;

	if (ino->flags & AUTOFS_INF_PENDING) {
		struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);

		DPRINTK("waiting for mount name=%.*s",
			dentry->d_name.len, dentry->d_name.name);
		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
		DPRINTK("mount wait done status=%d", status);
	}

	/* Touch the LRU stamp whether or not we had to wait. */
	ino->last_used = jiffies;
	return status;
}
/* Wait out any expire that races with us on @dentry (or its twin). */
static int do_expire_wait(struct dentry *dentry)
{
	struct dentry *expiring = autofs4_lookup_expiring(dentry);

	if (expiring) {
		/*
		 * Racing with an expire: the request may not be quite
		 * complete, but the directory has been removed, so the
		 * expire must have been successful - just wait for it.
		 */
		autofs4_expire_wait(expiring);
		autofs4_del_expiring(expiring);
		dput(expiring);
		return 0;
	}

	return autofs4_expire_wait(dentry);
}
/*
 * After a successful mount, swap path->dentry for a replacement if an
 * expire/re-create race replaced the original.  Returns the (possibly
 * updated) dentry, or NULL when no replacement can be found.
 */
static struct dentry *autofs4_mountpoint_changed(struct path *path)
{
	struct dentry *dentry = path->dentry;
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);

	/*
	 * If this is an indirect mount the dentry could have gone away
	 * as a result of an expire and a new one created.
	 */
	if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
		struct dentry *parent = dentry->d_parent;
		struct autofs_info *ino;
		struct dentry *new = d_lookup(parent, &dentry->d_name);
		if (!new)
			return NULL;
		ino = autofs4_dentry_ino(new);
		ino->last_used = jiffies;
		/* d_lookup() took a reference; hand it over to the path. */
		dput(path->dentry);
		path->dentry = new;
	}
	return path->dentry;
}
/*
 * ->d_automount(): invoked by the VFS when a path walk reaches this
 * dentry and it needs mounting.  Asks the daemon to mount (or waits on
 * a mount already in flight) and returns NULL so the walk proceeds on
 * the now mounted-on dentry, or an ERR_PTR on failure.
 */
static struct vfsmount *autofs4_d_automount(struct path *path)
{
	struct dentry *dentry = path->dentry;
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	int status;

	DPRINTK("dentry=%p %.*s",
		dentry, dentry->d_name.len, dentry->d_name.name);

	/* The daemon never triggers a mount. */
	if (autofs4_oz_mode(sbi))
		return NULL;

	/*
	 * If an expire request is pending everyone must wait.
	 * If the expire fails we're still mounted so continue
	 * the follow and return. A return of -EAGAIN (which only
	 * happens with indirect mounts) means the expire completed
	 * and the directory was removed, so just go ahead and try
	 * the mount.
	 */
	status = do_expire_wait(dentry);
	if (status && status != -EAGAIN)
		return NULL;

	/* Callback to the daemon to perform the mount or wait */
	spin_lock(&sbi->fs_lock);
	if (ino->flags & AUTOFS_INF_PENDING) {
		spin_unlock(&sbi->fs_lock);
		status = autofs4_mount_wait(dentry);
		if (status)
			return ERR_PTR(status);
		goto done;
	}
	/*
	 * If the dentry is a symlink it's equivalent to a directory
	 * having d_mountpoint() true, so there's no need to call back
	 * to the daemon.
	 */
	if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
		spin_unlock(&sbi->fs_lock);
		goto done;
	}
	if (!d_mountpoint(dentry)) {
		/*
		 * It's possible that user space hasn't removed directories
		 * after umounting a rootless multi-mount, although it
		 * should. For v5 have_submounts() is sufficient to handle
		 * this because the leaves of the directory tree under the
		 * mount never trigger mounts themselves (they have an autofs
		 * trigger mount mounted on them). But v4 pseudo direct mounts
		 * do need the leaves to trigger mounts. In this case we
		 * have no choice but to use the list_empty() check and
		 * require user space behave.
		 */
		if (sbi->version > 4) {
			if (have_submounts(dentry)) {
				spin_unlock(&sbi->fs_lock);
				goto done;
			}
		} else {
			if (!simple_empty(dentry)) {
				spin_unlock(&sbi->fs_lock);
				goto done;
			}
		}
		/* Ask the daemon to mount; PENDING makes others wait on us. */
		ino->flags |= AUTOFS_INF_PENDING;
		spin_unlock(&sbi->fs_lock);
		status = autofs4_mount_wait(dentry);
		spin_lock(&sbi->fs_lock);
		ino->flags &= ~AUTOFS_INF_PENDING;
		if (status) {
			spin_unlock(&sbi->fs_lock);
			return ERR_PTR(status);
		}
	}
	spin_unlock(&sbi->fs_lock);
done:
	/* Mount succeeded, check if we ended up with a new dentry */
	dentry = autofs4_mountpoint_changed(path);
	if (!dentry)
		return ERR_PTR(-ENOENT);

	return NULL;
}
/*
 * ->d_manage(): decide whether a path walk through this dentry may
 * proceed, must block, or should skip the automount (-EISDIR).
 * @rcu_walk: true when called in RCU-walk mode, where we cannot sleep.
 */
static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	int status;

	DPRINTK("dentry=%p %.*s",
		dentry, dentry->d_name.len, dentry->d_name.name);

	/* The daemon never waits. */
	if (autofs4_oz_mode(sbi)) {
		if (rcu_walk)
			return 0;
		if (!d_mountpoint(dentry))
			return -EISDIR;
		return 0;
	}

	/* We need to sleep, so we need pathwalk to be in ref-mode */
	if (rcu_walk)
		return -ECHILD;

	/* Wait for pending expires */
	do_expire_wait(dentry);

	/*
	 * This dentry may be under construction so wait on mount
	 * completion.
	 */
	status = autofs4_mount_wait(dentry);
	if (status)
		return status;

	spin_lock(&sbi->fs_lock);
	/*
	 * If the dentry has been selected for expire while we slept
	 * on the lock then it might go away. We'll deal with that in
	 * ->d_automount() and wait on a new mount if the expire
	 * succeeds or return here if it doesn't (since there's no
	 * mount to follow with a rootless multi-mount).
	 */
	if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
		/*
		 * Any needed mounting has been completed and the path
		 * updated so check if this is a rootless multi-mount so
		 * we can avoid needless calls ->d_automount() and avoid
		 * an incorrect ELOOP error return.
		 */
		if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
		    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
			status = -EISDIR;
	}
	spin_unlock(&sbi->fs_lock);

	return status;
}
/* Lookups in the root directory */
/*
 * ->lookup(): reuse an identical lookup already in flight when one
 * exists; otherwise attach a fresh autofs_info, mark root entries as
 * mount triggers and instantiate a negative dentry for the walk.
 */
static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	struct autofs_sb_info *sbi;
	struct autofs_info *ino;
	struct dentry *active;

	DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name);

	/* File name too long to exist */
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	sbi = autofs4_sbi(dir->i_sb);

	DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
		current->pid, task_pgrp_nr(current), sbi->catatonic,
		autofs4_oz_mode(sbi));

	active = autofs4_lookup_active(dentry);
	if (active) {
		return active;
	} else {
		/*
		 * A dentry that is not within the root can never trigger a
		 * mount operation, unless the directory already exists, so we
		 * can return fail immediately. The daemon however does need
		 * to create directories within the file system.
		 */
		if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
			return ERR_PTR(-ENOENT);

		/* Mark entries in the root as mount triggers */
		if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent))
			__managed_dentry_set_managed(dentry);

		ino = autofs4_new_ino(sbi);
		if (!ino)
			return ERR_PTR(-ENOMEM);

		dentry->d_fsdata = ino;
		ino->dentry = dentry;

		autofs4_add_active(dentry);

		d_instantiate(dentry, NULL);
	}
	return NULL;
}
/*
 * ->symlink(): daemon-only creation of a symlink inside the autofs
 * file system.  The target string is stored in inode->i_private.
 *
 * Returns 0 on success, -EACCES for non-daemon callers, -ENOMEM on
 * allocation failure.
 */
static int autofs4_dir_symlink(struct inode *dir,
			       struct dentry *dentry,
			       const char *symname)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	struct autofs_info *p_ino;
	struct inode *inode;
	size_t size = strlen(symname);
	char *cp;

	DPRINTK("%s <- %.*s", symname,
		dentry->d_name.len, dentry->d_name.name);

	if (!autofs4_oz_mode(sbi))
		return -EACCES;

	BUG_ON(!ino);

	autofs4_clean_ino(ino);

	autofs4_del_active(dentry);

	/* kstrdup() replaces the open-coded kmalloc() + strcpy(). */
	cp = kstrdup(symname, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
	if (!inode) {
		kfree(cp);
		if (!dentry->d_fsdata)
			kfree(ino);
		return -ENOMEM;
	}
	inode->i_private = cp;
	inode->i_size = size;
	d_add(dentry, inode);

	dget(dentry);
	atomic_inc(&ino->count);
	p_ino = autofs4_dentry_ino(dentry->d_parent);
	if (p_ino && dentry->d_parent != dentry)
		atomic_inc(&p_ino->count);

	dir->i_mtime = CURRENT_TIME;

	return 0;
}
/*
* NOTE!
*
* Normal filesystems would do a "d_delete()" to tell the VFS dcache
* that the file no longer exists. However, doing that means that the
* VFS layer can turn the dentry into a negative dentry. We don't want
* this, because the unlink is probably the result of an expire.
 * We simply d_drop it and add it to an expiring list in the super block,
* which allows the dentry lookup to check for an incomplete expire.
*
* If a process is blocked on the dentry waiting for the expire to finish,
* it will invalidate the dentry and try to mount with a new one.
*
* Also see autofs4_dir_rmdir()..
*/
/*
 * ->unlink(): drop the symlink's references and move the dentry onto
 * the expiring list instead of d_delete() - see the comment above.
 */
static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	struct autofs_info *p_ino;

	/* This allows root to remove symlinks */
	if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Mirror the refcounting done at creation time. */
	if (atomic_dec_and_test(&ino->count)) {
		p_ino = autofs4_dentry_ino(dentry->d_parent);
		if (p_ino && dentry->d_parent != dentry)
			atomic_dec(&p_ino->count);
	}
	dput(ino->dentry);

	dentry->d_inode->i_size = 0;
	clear_nlink(dentry->d_inode);

	dir->i_mtime = CURRENT_TIME;

	spin_lock(&sbi->lookup_lock);
	__autofs4_add_expiring(dentry);
	d_drop(dentry);
	spin_unlock(&sbi->lookup_lock);

	return 0;
}
/*
* Version 4 of autofs provides a pseudo direct mount implementation
* that relies on directories at the leaves of a directory tree under
* an indirect mount to trigger mounts. To allow for this we need to
* set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
* of the directory tree. There is no need to clear the automount flag
* following a mount or restore it after an expire because these mounts
* are always covered. However, it is necessary to ensure that these
* flags are clear on non-empty directories to avoid unnecessary calls
* during path walks.
*/
/* v4 pseudo direct mounts: mark a new leaf managed, unmark its parent. */
static void autofs_set_leaf_automount_flags(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;

	/* root and dentrys in the root are already handled */
	if (IS_ROOT(parent))
		return;

	managed_dentry_set_managed(dentry);

	/* only consider parents below dentrys in the root */
	if (!IS_ROOT(parent->d_parent))
		managed_dentry_clear_managed(parent);
}
/*
 * v4 pseudo direct mounts: on rmdir, unmark the removed leaf and
 * re-mark the parent if this child was its last one.
 */
static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
{
	struct list_head *d_child;
	struct dentry *parent;

	/* flags for dentrys in the root are handled elsewhere */
	if (IS_ROOT(dentry->d_parent))
		return;

	managed_dentry_clear_managed(dentry);

	parent = dentry->d_parent;
	/* only consider parents below dentrys in the root */
	if (IS_ROOT(parent->d_parent))
		return;
	d_child = &dentry->d_u.d_child;
	/* Set parent managed if it's becoming empty */
	/* (i.e. this dentry is the sole entry on parent's child list) */
	if (d_child->next == &parent->d_subdirs &&
	    d_child->prev == &parent->d_subdirs)
		managed_dentry_set_managed(parent);
}
/*
 * ->rmdir(): daemon-only.  Like unlink, the dentry is moved to the
 * expiring list rather than deleted outright (see comment above
 * autofs4_dir_unlink()).
 */
static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	struct autofs_info *p_ino;

	DPRINTK("dentry %p, removing %.*s",
		dentry, dentry->d_name.len, dentry->d_name.name);

	if (!autofs4_oz_mode(sbi))
		return -EACCES;

	spin_lock(&sbi->lookup_lock);
	if (!simple_empty(dentry)) {
		spin_unlock(&sbi->lookup_lock);
		return -ENOTEMPTY;
	}
	__autofs4_add_expiring(dentry);
	d_drop(dentry);
	spin_unlock(&sbi->lookup_lock);

	/* v4 pseudo direct mounts track leaf flags by hand. */
	if (sbi->version < 5)
		autofs_clear_leaf_automount_flags(dentry);

	/* Mirror the refcounting done in autofs4_dir_mkdir(). */
	if (atomic_dec_and_test(&ino->count)) {
		p_ino = autofs4_dentry_ino(dentry->d_parent);
		if (p_ino && dentry->d_parent != dentry)
			atomic_dec(&p_ino->count);
	}
	dput(ino->dentry);
	dentry->d_inode->i_size = 0;
	clear_nlink(dentry->d_inode);

	if (dir->i_nlink)
		drop_nlink(dir);

	return 0;
}
/*
 * ->mkdir(): daemon-only creation of a directory in the autofs tree,
 * taking references on the new dentry and its parent's autofs_info.
 */
static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	struct autofs_info *p_ino;
	struct inode *inode;

	if (!autofs4_oz_mode(sbi))
		return -EACCES;

	DPRINTK("dentry %p, creating %.*s",
		dentry, dentry->d_name.len, dentry->d_name.name);

	BUG_ON(!ino);

	autofs4_clean_ino(ino);

	autofs4_del_active(dentry);

	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
	if (!inode)
		return -ENOMEM;
	d_add(dentry, inode);

	/* v4 pseudo direct mounts track leaf flags by hand. */
	if (sbi->version < 5)
		autofs_set_leaf_automount_flags(dentry);

	dget(dentry);
	atomic_inc(&ino->count);
	p_ino = autofs4_dentry_ino(dentry->d_parent);
	if (p_ino && dentry->d_parent != dentry)
		atomic_inc(&p_ino->count);
	inc_nlink(dir);
	dir->i_mtime = CURRENT_TIME;

	return 0;
}
/* Get/set timeout ioctl() operation */
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit) variant: read the new timeout from userspace, write
 * the previous timeout back to the same slot, then install the new
 * value.  A value too large to represent in seconds disables expiry.
 */
static inline int autofs4_compat_get_set_timeout(struct autofs_sb_info *sbi,
						 compat_ulong_t __user *p)
{
	int rv;
	unsigned long ntimeout;

	/* Fetch the requested timeout, then echo back the old one. */
	if ((rv = get_user(ntimeout, p)) ||
	     (rv = put_user(sbi->exp_timeout/HZ, p)))
		return rv;

	if (ntimeout > UINT_MAX/HZ)
		sbi->exp_timeout = 0;
	else
		sbi->exp_timeout = ntimeout * HZ;

	return 0;
}
#endif
/*
 * Native variant of the get/set timeout ioctl: returns the previous
 * timeout (in seconds) through @p while installing the new one.
 */
static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi,
					  unsigned long __user *p)
{
	int rv;
	unsigned long ntimeout;

	/* Fetch the requested timeout, then echo back the old one. */
	if ((rv = get_user(ntimeout, p)) ||
	     (rv = put_user(sbi->exp_timeout/HZ, p)))
		return rv;

	/* Overflow when converted to jiffies disables expiry. */
	if (ntimeout > ULONG_MAX/HZ)
		sbi->exp_timeout = 0;
	else
		sbi->exp_timeout = ntimeout * HZ;

	return 0;
}
/* Return protocol version */
static inline int autofs4_get_protover(struct autofs_sb_info *sbi, int __user *p)
{
	return put_user(sbi->version, p);
}

/* Return protocol sub version */
static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi, int __user *p)
{
	return put_user(sbi->sub_version, p);
}
/*
 * Tells the daemon whether it can umount the autofs mount.
 * Writes 1 to @p when no submounts pin the mount, 0 otherwise.
 */
static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
{
	int status = may_umount(mnt) ? 1 : 0;

	DPRINTK("returning %d", status);

	return put_user(status, p);
}
/* Identify autofs4_dentries - this is so we can tell if there's
   an extra dentry refcount or not. We only hold a refcount on the
   dentry if its non-negative (ie, d_inode != NULL)
*/
int is_autofs4_dentry(struct dentry *dentry)
{
	if (!dentry)
		return 0;
	return dentry->d_inode != NULL &&
	       dentry->d_op == &autofs4_dentry_operations &&
	       dentry->d_fsdata != NULL;
}
/*
 * ioctl()'s on the root directory is the chief method for the daemon to
 * generate kernel reactions
 */
static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
				       unsigned int cmd, unsigned long arg)
{
	struct autofs_sb_info *sbi = autofs4_sbi(inode->i_sb);
	void __user *p = (void __user *)arg;

	DPRINTK("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u",
		cmd,arg,sbi,task_pgrp_nr(current));

	/* Reject anything outside the autofs ioctl number range. */
	if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) ||
	     _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT)
		return -ENOTTY;

	/* Only the daemon (oz mode) or a privileged caller may proceed. */
	if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch(cmd) {
	case AUTOFS_IOC_READY:	/* Wait queue: go ahead and retry */
		return autofs4_wait_release(sbi,(autofs_wqt_t)arg,0);
	case AUTOFS_IOC_FAIL:	/* Wait queue: fail with ENOENT */
		return autofs4_wait_release(sbi,(autofs_wqt_t)arg,-ENOENT);
	case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */
		autofs4_catatonic_mode(sbi);
		return 0;
	case AUTOFS_IOC_PROTOVER: /* Get protocol version */
		return autofs4_get_protover(sbi, p);
	case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */
		return autofs4_get_protosubver(sbi, p);
	case AUTOFS_IOC_SETTIMEOUT:
		return autofs4_get_set_timeout(sbi, p);
#ifdef CONFIG_COMPAT
	case AUTOFS_IOC_SETTIMEOUT32:
		return autofs4_compat_get_set_timeout(sbi, p);
#endif
	case AUTOFS_IOC_ASKUMOUNT:
		return autofs4_ask_umount(filp->f_path.mnt, p);

	/* return a single thing to expire */
	case AUTOFS_IOC_EXPIRE:
		return autofs4_expire_run(inode->i_sb,filp->f_path.mnt,sbi, p);
	/* same as above, but can send multiple expires through pipe */
	case AUTOFS_IOC_EXPIRE_MULTI:
		return autofs4_expire_multi(inode->i_sb,filp->f_path.mnt,sbi, p);

	default:
		return -ENOSYS;
	}
}
/* unlocked_ioctl entry point: thin shim over the common handler. */
static long autofs4_root_ioctl(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	return autofs4_root_ioctl_unlocked(file_inode(filp), filp, cmd, arg);
}
#ifdef CONFIG_COMPAT
/* compat_ioctl entry point: translate pointer arguments for 32-bit callers. */
static long autofs4_root_compat_ioctl(struct file *filp,
				      unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned long raw = arg;

	/* READY/FAIL carry a wait-queue token, not a pointer: pass as-is. */
	if (cmd != AUTOFS_IOC_READY && cmd != AUTOFS_IOC_FAIL)
		raw = (unsigned long) compat_ptr(arg);

	return autofs4_root_ioctl_unlocked(inode, filp, cmd, raw);
}
#endif
| gpl-2.0 |
thanhphat11/Kernel_N4_N910SLK | drivers/iio/industrialio-event.c | 1801 | 11719 | /* Industrial I/O event handling
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Based on elements of hwmon and input subsystems.
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @read_lock:		serializes concurrent readers draining the fifo
 * @det_events:		kfifo holding detected events (16 entries)
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	struct mutex		read_lock;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
};
/**
 * iio_push_event() - queue an event for userspace consumption
 * @indio_dev:	device the event originates from
 * @ev_code:	packed event identifier
 * @timestamp:	time of the event
 *
 * Events are dropped silently unless a reader holds the chrdev open
 * (busy bit set).  Always returns 0.
 */
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;

	/* Does anyone care? */
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		/* Wake pollers only if the event actually fit in the fifo. */
		copied = kfifo_put(&ev_int->det_events, &ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
/**
* iio_event_poll() - poll the event queue to find out if it has data
*/
static unsigned int iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_event_interface *ev_int = filep->private_data;
unsigned int events = 0;
poll_wait(filep, &ev_int->wait, wait);
spin_lock_irq(&ev_int->wait.lock);
if (!kfifo_is_empty(&ev_int->det_events))
events = POLLIN | POLLRDNORM;
spin_unlock_irq(&ev_int->wait.lock);
return events;
}
/*
 * Read one or more iio_event_data records from the fifo, blocking
 * unless O_NONBLOCK is set.  read_lock serializes concurrent readers
 * so kfifo_to_user() is not entered from two threads at once.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	/* Partial records are never returned. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	if (mutex_lock_interruptible(&ev_int->read_lock))
		return -ERESTARTSYS;

	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	mutex_unlock(&ev_int->read_lock);

	return ret ? ret : copied;
}
/* Release the event chrdev: clear the busy bit and drain stale events. */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;

	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * Drain anything still queued so the next open starts clean;
	 * the cleared busy bit stops __iio_push_event from refilling
	 * the fifo in the meantime.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);

	return 0;
}
/* File operations backing the anonymous "iio:event" fd. */
static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
/*
 * Hand out an anonymous fd for the device's event queue.  The busy bit
 * enforces a single concurrent reader; it is rolled back if fd
 * allocation fails.  Returns the fd, -ENODEV when the device has no
 * event interface, or -EBUSY when it is already open.
 */
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		/* Roll back the busy claim so a later open can succeed. */
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
	}
	return fd;
}
/* Names for event types, indexed by the IIO_EV_TYPE_* constants; used to
 * build the "<type>_<dir>_en" / "<type>_<dir>_value" sysfs names. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

/* Names for event directions, indexed by the IIO_EV_DIR_* constants. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
/**
 * iio_ev_state_store() - sysfs write handler for the "<event>_en" attribute
 *
 * Parses a boolean from @buf and forwards it to the driver's
 * write_event_config() callback to enable/disable the event.
 *
 * Returns @len on success or a negative errno.
 */
static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	/* Guard against drivers that declare events but provide no
	 * write_event_config() callback; mirrors the write_event_value
	 * check in iio_ev_value_store() and avoids a NULL call. */
	if (!indio_dev->info->write_event_config)
		return -EINVAL;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}
/* sysfs read handler for the "<event>_en" attribute: reports whether the
 * event identified by the attribute's address is currently enabled. */
static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;

	ret = indio_dev->info->read_event_config(indio_dev,
						 this_attr->address);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
/* sysfs read handler for the "<event>_value" attribute: fetches the
 * event's threshold/level from the driver. */
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int value;
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &value);
	return (ret < 0) ? ret : sprintf(buf, "%d\n", value);
}
/* sysfs write handler for the "<event>_value" attribute: parses a decimal
 * integer and hands it to the driver's write_event_value() callback. */
static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int value;
	int ret;

	/* Writing the value is optional for drivers. */
	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = kstrtoint(buf, 10, &value);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev,
						 this_attr->address, value);
	return (ret < 0) ? ret : len;
}
/**
 * iio_device_add_event_sysfs() - create sysfs event attributes for a channel
 *
 * For every bit set in @chan->event_mask two attributes are added:
 * "<type>_<dir>_en" and "<type>_<dir>_value".  Each bit encodes a
 * (type, direction) pair as type * IIO_EV_DIR_MAX + direction.
 *
 * Returns the number of attributes created, or a negative errno.
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;

	/* Channels without events need no attributes. */
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		/* enable/disable attribute name */
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* Build the event code used as the attribute address; the
		 * encoding depends on whether the channel is modified,
		 * differential or plain. */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);
		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		/* value (threshold) attribute name */
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	/* Success: report how many attributes were added. */
	ret = attrcount;

error_ret:
	return ret;
}
/* Free every dynamically created event attribute: both the kasprintf'd
 * name string and the attribute container itself. */
static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *cur, *tmp;

	list_for_each_entry_safe(cur, tmp,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(cur->dev_attr.attr.name);
		kfree(cur);
	}
}
/* Walk the channel array and create the per-channel event attributes.
 * Returns the total number of attributes added, or a negative errno. */
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int i, ret, total = 0;

	/* Dynamically created from the channels array. */
	for (i = 0; i < indio_dev->num_channels; i++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[i]);
		if (ret < 0)
			return ret;
		total += ret;
	}

	return total;
}
/* True when at least one channel declares an event, i.e. dynamic event
 * attributes will have to be created. */
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].event_mask)
			return true;

	return false;
}
/* Initialize the fifo, waitqueue and reader lock of a freshly allocated
 * event interface. */
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}
static const char *iio_event_group_name = "events";
/**
 * iio_device_register_eventset() - set up the "events" sysfs group
 *
 * Allocates the event interface, creates dynamic per-channel event
 * attributes, merges them with any static driver-supplied attributes,
 * and registers the combined group on the device.  A device without any
 * events (static or dynamic) gets no interface at all.
 *
 * Returns 0 on success or a negative errno.
 */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	/* Nothing to do if no static attrs nor dynamic events. */
	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);

	/* Count the driver's static event attributes. */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	/* Combined attribute array, NULL-terminated (hence the +1). */
	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	/* Static attributes first... */
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	mutex_destroy(&indio_dev->event_interface->read_lock);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
if (indio_dev->event_interface == NULL)
return;
__iio_remove_event_config_attrs(indio_dev);
kfree(indio_dev->event_interface->group.attrs);
mutex_destroy(&indio_dev->event_interface->read_lock);
kfree(indio_dev->event_interface);
}
| gpl-2.0 |
ystk/linux-poky-debian | sound/pci/emu10k1/emu10k1_callback.c | 1801 | 14663 | /*
* synth callback routines for Emu10k1
*
* Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include "emu10k1_synth_local.h"
#include <sound/asoundef.h>
/* voice status, also used as an index into a struct best_voice[V_END]
 * array: one "best candidate" slot per status category. */
enum {
	V_FREE=0, V_OFF, V_RELEASED, V_PLAYING, V_END
};

/* Keeps track of what we are finding */
struct best_voice {
	unsigned int time;	/* lowest vp->time seen in this category */
	int voice;		/* voice index, or -1 if none found */
};
/*
* prototypes
*/
static void lookup_voices(struct snd_emux *emux, struct snd_emu10k1 *hw,
struct best_voice *best, int active_only);
static struct snd_emux_voice *get_voice(struct snd_emux *emux,
struct snd_emux_port *port);
static int start_voice(struct snd_emux_voice *vp);
static void trigger_voice(struct snd_emux_voice *vp);
static void release_voice(struct snd_emux_voice *vp);
static void update_voice(struct snd_emux_voice *vp, int update);
static void terminate_voice(struct snd_emux_voice *vp);
static void free_voice(struct snd_emux_voice *vp);
static void set_fmmod(struct snd_emu10k1 *hw, struct snd_emux_voice *vp);
static void set_fm2frq2(struct snd_emu10k1 *hw, struct snd_emux_voice *vp);
static void set_filterQ(struct snd_emu10k1 *hw, struct snd_emux_voice *vp);
/*
 * Ensure a value is between two points
 * macro evaluates its args more than once, so changed to upper-case.
 */
/* Clamp x into the inclusive range [a, b]. */
#define LIMITVALUE(x, a, b) do { if ((x) < (a)) (x) = (a); else if ((x) > (b)) (x) = (b); } while (0)
/* Clamp x to at most a. */
#define LIMITMAX(x, a) do {if ((x) > (a)) (x) = (a); } while (0)
/*
 * set up operators
 */
/* Callback table handed to the emux synth core; these route the generic
 * soft-synth voice operations to the emu10k1 hardware helpers below. */
static struct snd_emux_operators emu10k1_ops = {
	.owner =	THIS_MODULE,
	.get_voice =	get_voice,
	.prepare =	start_voice,
	.trigger =	trigger_voice,
	.release =	release_voice,
	.update =	update_voice,
	.terminate =	terminate_voice,
	.free_voice =	free_voice,
	.sample_new =	snd_emu10k1_sample_new,
	.sample_free =	snd_emu10k1_sample_free,
};
/* Install the emu10k1-specific callbacks on an emux synth instance. */
void
snd_emu10k1_ops_setup(struct snd_emux *emux)
{
	emux->ops = emu10k1_ops;
}
/*
 * get more voice for pcm
 *
 * terminate most inactive voice and give it as a pcm voice.
 *
 * Returns the freed hardware channel number, or -ENOMEM if no voice
 * could be stolen.
 */
int
snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
{
	struct snd_emux *emu;
	struct snd_emux_voice *vp;
	struct best_voice best[V_END];
	unsigned long flags;
	int i;

	emu = hw->synth;

	spin_lock_irqsave(&emu->voice_lock, flags);
	lookup_voices(emu, hw, best, 1); /* no OFF voices */
	/* Prefer the categories in enum order: free first, playing last. */
	for (i = 0; i < V_END; i++) {
		if (best[i].voice >= 0) {
			int ch;
			vp = &emu->voices[best[i].voice];
			if ((ch = vp->ch) < 0) {
				/*
				printk(KERN_WARNING
				       "synth_get_voice: ch < 0 (%d) ??", i);
				*/
				continue;
			}
			/* Detach the voice from the synth so the channel can
			 * be handed to the PCM side. */
			vp->emu->num_voices--;
			vp->ch = -1;
			vp->state = SNDRV_EMUX_ST_OFF;
			spin_unlock_irqrestore(&emu->voice_lock, flags);
			return ch;
		}
	}
	spin_unlock_irqrestore(&emu->voice_lock, flags);

	/* not found */
	return -ENOMEM;
}
/*
 * turn off the voice (not terminated)
 *
 * Writes the release rates into the modulation (DCYSUSM) and volume
 * (DCYSUSV) envelope registers; the channel stays enabled so the
 * release tail remains audible.  The 0x8000 bit presumably switches the
 * envelope into its release phase -- TODO confirm against the emu10k1
 * register documentation.
 */
static void
release_voice(struct snd_emux_voice *vp)
{
	int dcysusv;
	struct snd_emu10k1 *hw;
	
	hw = vp->hw;
	dcysusv = 0x8000 | (unsigned char)vp->reg.parm.modrelease;
	snd_emu10k1_ptr_write(hw, DCYSUSM, vp->ch, dcysusv);
	dcysusv = 0x8000 | (unsigned char)vp->reg.parm.volrelease | DCYSUSV_CHANNELENABLE_MASK;
	snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, dcysusv);
}
/*
 * terminate the voice immediately
 */
static void
terminate_voice(struct snd_emux_voice *vp)
{
	struct snd_emu10k1 *hw;
	struct snd_emu10k1_memblk *emem;

	if (snd_BUG_ON(!vp))
		return;

	hw = vp->hw;
	snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch,
			      0x807f | DCYSUSV_CHANNELENABLE_MASK);

	/* Drop the mapping pin taken in start_voice(), if any. */
	if (vp->block) {
		emem = (struct snd_emu10k1_memblk *)vp->block;
		if (emem->map_locked > 0)
			emem->map_locked--;
	}
}
/*
 * release the voice to system
 *
 * Silences and disables the hardware channel, returns it to the voice
 * allocator, and detaches it from the emux voice.
 */
static void
free_voice(struct snd_emux_voice *vp)
{
	struct snd_emu10k1 *hw;
	
	hw = vp->hw;
	/* FIXME: emu10k1_synth is broken. */
	/* This can get called with hw == 0 */
	/* Problem apparent on plug, unplug then plug */
	/* on the Audigy 2 ZS Notebook. */
	if (hw && (vp->ch >= 0)) {
		/* Maximum attenuation, envelope to release, volume/filter
		 * targets cleared -- channel goes silent before it is freed. */
		snd_emu10k1_ptr_write(hw, IFATN, vp->ch, 0xff00);
		snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, 0x807f | DCYSUSV_CHANNELENABLE_MASK);
		// snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, 0);
		snd_emu10k1_ptr_write(hw, VTFT, vp->ch, 0xffff);
		snd_emu10k1_ptr_write(hw, CVCF, vp->ch, 0xffff);
		snd_emu10k1_voice_free(hw, &hw->voices[vp->ch]);
		vp->emu->num_voices--;
		vp->ch = -1;
	}
}
/*
 * update registers
 *
 * Pushes only the parameter groups flagged in the 'update' bitmask to
 * the hardware, so a single changed controller does not rewrite every
 * register.
 */
static void
update_voice(struct snd_emux_voice *vp, int update)
{
	struct snd_emu10k1 *hw;
	
	hw = vp->hw;
	if (update & SNDRV_EMUX_UPDATE_VOLUME)
		snd_emu10k1_ptr_write(hw, IFATN_ATTENUATION, vp->ch, vp->avol);
	if (update & SNDRV_EMUX_UPDATE_PITCH)
		snd_emu10k1_ptr_write(hw, IP, vp->ch, vp->apitch);
	if (update & SNDRV_EMUX_UPDATE_PAN) {
		/* Pan is split across the two FX send amounts (A/B). */
		snd_emu10k1_ptr_write(hw, PTRX_FXSENDAMOUNT_A, vp->ch, vp->apan);
		snd_emu10k1_ptr_write(hw, PTRX_FXSENDAMOUNT_B, vp->ch, vp->aaux);
	}
	if (update & SNDRV_EMUX_UPDATE_FMMOD)
		set_fmmod(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_TREMFREQ)
		snd_emu10k1_ptr_write(hw, TREMFRQ, vp->ch, vp->reg.parm.tremfrq);
	if (update & SNDRV_EMUX_UPDATE_FM2FRQ2)
		set_fm2frq2(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_Q)
		set_filterQ(hw, vp);
}
/*
 * look up voice table - get the best voice in order of preference
 *
 * Fills best[V_FREE..V_PLAYING] with, for each category, the voice that
 * has the smallest vp->time (i.e. the oldest).  With active_only set,
 * voices that have no hardware channel are skipped entirely.
 */
/* spinlock held! */
static void
lookup_voices(struct snd_emux *emu, struct snd_emu10k1 *hw,
	      struct best_voice *best, int active_only)
{
	struct snd_emux_voice *vp;
	struct best_voice *bp;
	int  i;

	for (i = 0; i < V_END; i++) {
		best[i].time = (unsigned int)-1; /* XXX MAX_?INT really */
		best[i].voice = -1;
	}

	/*
	 * Go through them all and get a best one to use.
	 * NOTE: could also look at volume and pick the quietest one.
	 */
	for (i = 0; i < emu->max_voices; i++) {
		int state, val;

		vp = &emu->voices[i];
		state = vp->state;
		/* Classify the voice into one of the best[] categories. */
		if (state == SNDRV_EMUX_ST_OFF) {
			if (vp->ch < 0) {
				if (active_only)
					continue;
				bp = best + V_FREE;
			} else
				bp = best + V_OFF;
		}
		else if (state == SNDRV_EMUX_ST_RELEASED ||
			 state == SNDRV_EMUX_ST_PENDING) {
			bp = best + V_RELEASED;
#if 1
			/* If the current volume already reached zero, the
			 * release has effectively finished: treat as OFF. */
			val = snd_emu10k1_ptr_read(hw, CVCF_CURRENTVOL, vp->ch);
			if (! val)
				bp = best + V_OFF;
#endif
		}
		else if (state == SNDRV_EMUX_ST_STANDBY)
			continue;
		else if (state & SNDRV_EMUX_ST_ON)
			bp = best + V_PLAYING;
		else
			continue;

		/* check if sample is finished playing (non-looping only) */
		if (bp != best + V_OFF && bp != best + V_FREE &&
		    (vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
			val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch);
			if (val >= vp->reg.loopstart)
				bp = best + V_OFF;
		}

		/* Keep the oldest voice per category. */
		if (vp->time < bp->time) {
			bp->time = vp->time;
			bp->voice = i;
		}
	}
}
/*
 * get an empty voice
 *
 * emu->voice_lock is already held.
 *
 * Picks the best candidate across all categories (free preferred) and,
 * if the voice has no hardware channel yet, allocates one.  Returns
 * NULL if no voice is usable.
 */
static struct snd_emux_voice *
get_voice(struct snd_emux *emu, struct snd_emux_port *port)
{
	struct snd_emu10k1 *hw;
	struct snd_emux_voice *vp;
	struct best_voice best[V_END];
	int i;

	hw = emu->hw;

	lookup_voices(emu, hw, best, 0);
	for (i = 0; i < V_END; i++) {
		if (best[i].voice >= 0) {
			vp = &emu->voices[best[i].voice];
			if (vp->ch < 0) {
				/* allocate a voice */
				struct snd_emu10k1_voice *hwvoice;
				if (snd_emu10k1_voice_alloc(hw, EMU10K1_SYNTH, 1, &hwvoice) < 0 || hwvoice == NULL)
					continue;
				vp->ch = hwvoice->number;
				emu->num_voices++;
			}
			return vp;
		}
	}

	/* not found */
	return NULL;
}
/*
 * prepare envelopes and LFOs
 *
 * Maps the sample memory block, relocates the sample addresses by the
 * mapped offset, and programs the channel's routing, envelope, LFO and
 * address registers.  The voice is left silent; trigger_voice() starts
 * it.  Returns 0 on success or a negative errno.
 */
static int
start_voice(struct snd_emux_voice *vp)
{
	unsigned int temp;
	int ch;
	unsigned int addr, mapped_offset;
	struct snd_midi_channel *chan;
	struct snd_emu10k1 *hw;
	struct snd_emu10k1_memblk *emem;
	
	hw = vp->hw;
	ch = vp->ch;
	if (snd_BUG_ON(ch < 0))
		return -EINVAL;
	chan = vp->chan;

	emem = (struct snd_emu10k1_memblk *)vp->block;
	if (emem == NULL)
		return -EINVAL;
	/* Pin the mapping; released again in terminate_voice(). */
	emem->map_locked++;
	if (snd_emu10k1_memblk_map(hw, emem) < 0) {
		/* printk(KERN_ERR "emu: cannot map!\n"); */
		return -ENOMEM;
	}
	/* Addresses are in 16-bit sample units, hence the >> 1. */
	mapped_offset = snd_emu10k1_memblk_offset(emem) >> 1;
	vp->reg.start += mapped_offset;
	vp->reg.end += mapped_offset;
	vp->reg.loopstart += mapped_offset;
	vp->reg.loopend += mapped_offset;

	/* set channel routing */
	/* A = left(0), B = right(1), C = reverb(c), D = chorus(d) */
	if (hw->audigy) {
		temp = FXBUS_MIDI_LEFT | (FXBUS_MIDI_RIGHT << 8) |
			(FXBUS_MIDI_REVERB << 16) | (FXBUS_MIDI_CHORUS << 24);
		snd_emu10k1_ptr_write(hw, A_FXRT1, ch, temp);
	} else {
		temp = (FXBUS_MIDI_LEFT << 16) | (FXBUS_MIDI_RIGHT << 20) |
			(FXBUS_MIDI_REVERB << 24) | (FXBUS_MIDI_CHORUS << 28);
		snd_emu10k1_ptr_write(hw, FXRT, ch, temp);
	}

	/* channel to be silent and idle */
	snd_emu10k1_ptr_write(hw, DCYSUSV, ch, 0x0000);
	snd_emu10k1_ptr_write(hw, VTFT, ch, 0x0000FFFF);
	snd_emu10k1_ptr_write(hw, CVCF, ch, 0x0000FFFF);
	snd_emu10k1_ptr_write(hw, PTRX, ch, 0);
	snd_emu10k1_ptr_write(hw, CPF, ch, 0);

	/* set pitch offset */
	snd_emu10k1_ptr_write(hw, IP, vp->ch, vp->apitch);

	/* set envelope parameters */
	snd_emu10k1_ptr_write(hw, ENVVAL, ch, vp->reg.parm.moddelay);
	snd_emu10k1_ptr_write(hw, ATKHLDM, ch, vp->reg.parm.modatkhld);
	snd_emu10k1_ptr_write(hw, DCYSUSM, ch, vp->reg.parm.moddcysus);
	snd_emu10k1_ptr_write(hw, ENVVOL, ch, vp->reg.parm.voldelay);
	snd_emu10k1_ptr_write(hw, ATKHLDV, ch, vp->reg.parm.volatkhld);
	/* decay/sustain parameter for volume envelope is used
	   for triggering the voice (written in trigger_voice()) */

	/* cutoff and volume */
	temp = (unsigned int)vp->acutoff << 8 | (unsigned char)vp->avol;
	snd_emu10k1_ptr_write(hw, IFATN, vp->ch, temp);

	/* modulation envelope heights */
	snd_emu10k1_ptr_write(hw, PEFE, ch, vp->reg.parm.pefe);

	/* lfo1/2 delay */
	snd_emu10k1_ptr_write(hw, LFOVAL1, ch, vp->reg.parm.lfo1delay);
	snd_emu10k1_ptr_write(hw, LFOVAL2, ch, vp->reg.parm.lfo2delay);

	/* lfo1 pitch & cutoff shift */
	set_fmmod(hw, vp);
	/* lfo1 volume & freq */
	snd_emu10k1_ptr_write(hw, TREMFRQ, vp->ch, vp->reg.parm.tremfrq);
	/* lfo2 pitch & freq */
	set_fm2frq2(hw, vp);

	/* reverb and loop start (reverb 8bit, MSB) */
	/* MIDI reverb controller is scaled by 9/10 and added on top. */
	temp = vp->reg.parm.reverb;
	temp += (int)vp->chan->control[MIDI_CTL_E1_REVERB_DEPTH] * 9 / 10;
	LIMITMAX(temp, 255);
	addr = vp->reg.loopstart;
	snd_emu10k1_ptr_write(hw, PSST, vp->ch, (temp << 24) | addr);

	/* chorus & loop end (chorus 8bit, MSB) */
	addr = vp->reg.loopend;
	temp = vp->reg.parm.chorus;
	temp += (int)chan->control[MIDI_CTL_E3_CHORUS_DEPTH] * 9 / 10;
	LIMITMAX(temp, 255);
	temp = (temp <<24) | addr;
	snd_emu10k1_ptr_write(hw, DSL, ch, temp);

	/* clear filter delay memory */
	snd_emu10k1_ptr_write(hw, Z1, ch, 0);
	snd_emu10k1_ptr_write(hw, Z2, ch, 0);

	/* invalidate maps */
	temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
	snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
	snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
#if 0
	/* cache */
	{
		unsigned int val, sample;
		val = 32;
		if (vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_8BITS)
			sample = 0x80808080;
		else {
			sample = 0;
			val *= 2;
		}

		/* cache */
		snd_emu10k1_ptr_write(hw, CCR, ch, 0x1c << 16);
		snd_emu10k1_ptr_write(hw, CDE, ch, sample);
		snd_emu10k1_ptr_write(hw, CDF, ch, sample);

		/* invalidate maps */
		temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
		snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
		snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
		
		/* fill cache */
		val -= 4;
		val <<= 25;
		val |= 0x1c << 16;
		snd_emu10k1_ptr_write(hw, CCR, ch, val);
	}
#endif

	/* Q & current address (Q 4bit value, MSB) */
	addr = vp->reg.start;
	temp = vp->reg.parm.filterQ;
	temp = (temp<<28) | addr;
	/* Interpolation ROM selection depends on the playback pitch;
	 * presumably chooses a filter bank suited to the resample ratio
	 * -- TODO confirm against emu10k1 documentation. */
	if (vp->apitch < 0xe400)
		temp |= CCCA_INTERPROM_0;
	else {
		unsigned int shift = (vp->apitch - 0xe000) >> 10;
		temp |= shift << 25;
	}
	if (vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_8BITS)
		temp |= CCCA_8BITSELECT;
	snd_emu10k1_ptr_write(hw, CCCA, ch, temp);

	/* reset volume */
	temp = (unsigned int)vp->vtarget << 16;
	snd_emu10k1_ptr_write(hw, VTFT, ch, temp | vp->ftarget);
	snd_emu10k1_ptr_write(hw, CVCF, ch, temp | 0xff00);
	return 0;
}
/*
 * Start envelope
 *
 * Writes the pitch/pan targets and then the volume envelope
 * decay/sustain register with the channel-enable bit, which actually
 * starts the voice prepared by start_voice().
 */
static void
trigger_voice(struct snd_emux_voice *vp)
{
	unsigned int temp, ptarget;
	struct snd_emu10k1 *hw;
	struct snd_emu10k1_memblk *emem;
	
	hw = vp->hw;

	emem = (struct snd_emu10k1_memblk *)vp->block;
	if (! emem || emem->mapped_page < 0)
		return; /* not mapped */

#if 0
	ptarget = (unsigned int)vp->ptarget << 16;
#else
	ptarget = IP_TO_CP(vp->apitch);
#endif
	/* set pitch target and pan (volume) */
	temp = ptarget | (vp->apan << 8) | vp->aaux;
	snd_emu10k1_ptr_write(hw, PTRX, vp->ch, temp);

	/* pitch target */
	snd_emu10k1_ptr_write(hw, CPF, vp->ch, ptarget);

	/* trigger voice */
	snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, vp->reg.parm.voldcysus|DCYSUSV_CHANNELENABLE_MASK);
}
#define MOD_SENSE 18
/* set lfo1 modulation height and cutoff
 *
 * The high byte of reg.parm.fmmod holds a signed pitch-modulation
 * amount, the low byte the cutoff modulation.  MIDI modulation wheel
 * and channel pressure are added on top, clamped to the signed 8-bit
 * range, and the result is written back to the FMMOD register.
 */
static void
set_fmmod(struct snd_emu10k1 *hw, struct snd_emux_voice *vp)
{
	unsigned short fmmod;
	short pitch;
	unsigned char cutoff;
	int modulation;

	/* Cast through 'signed char', not plain 'char': the signedness of
	 * plain char is implementation-defined (unsigned on PowerPC/ARM),
	 * and the arithmetic below relies on sign extension of this byte. */
	pitch = (signed char)(vp->reg.parm.fmmod >> 8);
	cutoff = (vp->reg.parm.fmmod & 0xff);
	modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
	pitch += (MOD_SENSE * modulation) / 1200;
	LIMITVALUE(pitch, -128, 127);
	fmmod = ((unsigned char)pitch << 8) | cutoff;
	snd_emu10k1_ptr_write(hw, FMMOD, vp->ch, fmmod);
}
/* set lfo2 pitch & frequency
 *
 * Same layout as FMMOD: signed pitch amount in the high byte, LFO
 * frequency in the low byte, with MIDI modulation added and clamped.
 */
static void
set_fm2frq2(struct snd_emu10k1 *hw, struct snd_emux_voice *vp)
{
	unsigned short fm2frq2;
	short pitch;
	unsigned char freq;
	int modulation;

	/* 'signed char' cast: plain char may be unsigned on this arch,
	 * which would break the required sign extension (see set_fmmod). */
	pitch = (signed char)(vp->reg.parm.fm2frq2 >> 8);
	freq = vp->reg.parm.fm2frq2 & 0xff;
	modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
	pitch += (MOD_SENSE * modulation) / 1200;
	LIMITVALUE(pitch, -128, 127);
	fm2frq2 = ((unsigned char)pitch << 8) | freq;
	snd_emu10k1_ptr_write(hw, FM2FRQ2, vp->ch, fm2frq2);
}
/* set filterQ: read-modify-write the resonance field of CCCA while
 * preserving the current-address bits. */
static void
set_filterQ(struct snd_emu10k1 *hw, struct snd_emux_voice *vp)
{
	unsigned int ccca;

	ccca = snd_emu10k1_ptr_read(hw, CCCA, vp->ch);
	ccca &= ~CCCA_RESONANCE;
	ccca |= vp->reg.parm.filterQ << 28;
	snd_emu10k1_ptr_write(hw, CCCA, vp->ch, ccca);
}
| gpl-2.0 |
qqzwc/LG_G3_D858_Android4.4_Kernel | kernel/power/autosleep.c | 1801 | 2645 | /*
* kernel/power/autosleep.c
*
* Opportunistic sleep support.
*
* Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
*/
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_wakeup.h>
#include "power.h"
static suspend_state_t autosleep_state;
static struct workqueue_struct *autosleep_wq;
/*
* Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source
* is active, otherwise a deadlock with try_to_suspend() is possible.
* Alternatively mutex_lock_interruptible() can be used. This will then fail
* if an auto_sleep cycle tries to freeze processes.
*/
static DEFINE_MUTEX(autosleep_lock);
static struct wakeup_source *autosleep_ws;
/**
 * try_to_suspend() - opportunistic suspend work function
 *
 * Attempts one suspend (or hibernate) cycle if no wakeup events arrived
 * since the wakeup count was sampled, then re-queues itself.  Bails out
 * without re-queuing when autosleep has been switched off.
 */
static void try_to_suspend(struct work_struct *work)
{
	unsigned int initial_count, final_count;

	/* A wakeup event is currently being processed: retry later. */
	if (!pm_get_wakeup_count(&initial_count, true))
		goto out;

	mutex_lock(&autosleep_lock);

	if (!pm_save_wakeup_count(initial_count)) {
		mutex_unlock(&autosleep_lock);
		goto out;
	}

	/* Autosleep was disabled while we were waiting for the lock. */
	if (autosleep_state == PM_SUSPEND_ON) {
		mutex_unlock(&autosleep_lock);
		return;
	}
	if (autosleep_state >= PM_SUSPEND_MAX)
		hibernate();
	else
		pm_suspend(autosleep_state);

	mutex_unlock(&autosleep_lock);

	if (!pm_get_wakeup_count(&final_count, false))
		goto out;

	/*
	 * If the wakeup occurred for an unknown reason, wait to prevent the
	 * system from trying to suspend and waking up in a tight loop.
	 */
	if (final_count == initial_count)
		schedule_timeout_uninterruptible(HZ / 2);

 out:
	queue_up_suspend_work();
}
static DECLARE_WORK(suspend_work, try_to_suspend);
/* Schedule the next suspend attempt, but only while autosleep is enabled
 * and no attempt is already pending. */
void queue_up_suspend_work(void)
{
	if (autosleep_state > PM_SUSPEND_ON && !work_pending(&suspend_work))
		queue_work(autosleep_wq, &suspend_work);
}
/* Report the currently configured autosleep target state. */
suspend_state_t pm_autosleep_state(void)
{
	return autosleep_state;
}

/* Take the autosleep lock.  Interruptible on purpose: see the comment on
 * autosleep_lock above regarding deadlock with try_to_suspend(). */
int pm_autosleep_lock(void)
{
	return mutex_lock_interruptible(&autosleep_lock);
}

/* Release the autosleep lock taken by pm_autosleep_lock(). */
void pm_autosleep_unlock(void)
{
	mutex_unlock(&autosleep_lock);
}
/**
 * pm_autosleep_set_state() - change the autosleep target state
 * @state: new target (PM_SUSPEND_ON disables autosleep)
 *
 * Holds a wakeup source across the transition so a suspend attempt
 * cannot race with the state change, then kicks off (or stops) the
 * suspend work.
 */
int pm_autosleep_set_state(suspend_state_t state)
{

#ifndef CONFIG_HIBERNATION
	/* Without hibernation support, states beyond mem are invalid. */
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif

	__pm_stay_awake(autosleep_ws);

	mutex_lock(&autosleep_lock);

	autosleep_state = state;

	__pm_relax(autosleep_ws);

	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		queue_up_suspend_work();
	} else {
		pm_wakep_autosleep_enabled(false);
	}

	mutex_unlock(&autosleep_lock);
	return 0;
}
/* Allocate the wakeup source and ordered workqueue used by autosleep.
 * Returns 0 on success, -ENOMEM if either allocation fails. */
int __init pm_autosleep_init(void)
{
	autosleep_ws = wakeup_source_register("autosleep");
	if (!autosleep_ws)
		return -ENOMEM;

	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
	if (!autosleep_wq) {
		wakeup_source_unregister(autosleep_ws);
		return -ENOMEM;
	}

	return 0;
}
| gpl-2.0 |
shukiz/VAR-SOM-AM33-Kernel-3-15 | arch/powerpc/crypto/sha1.c | 2057 | 3610 | /*
* Cryptographic API.
*
* powerpc implementation of the SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface.
*
* Derived from "crypto/sha1.c"
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);
static int sha1_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
partial = sctx->count & 0x3f;
sctx->count += len;
done = 0;
src = data;
if ((partial + len) > 63) {
u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
memcpy(sctx->buffer + partial, data, done + 64);
src = sctx->buffer;
}
do {
powerpc_sha_transform(sctx->state, src, temp);
done += 64;
src = data + done;
} while (done + 63 < len);
memset(temp, 0, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
return 0;
}
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	u32 i, index, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	/* Message length in bits, big-endian, appended after the padding. */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 */
	index = sctx->count & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	sha1_update(desc, padding, padlen);

	/* Append length */
	sha1_update(desc, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof *sctx);

	return 0;
}
/* Export the partial hash state as a raw copy of the context. */
static int sha1_export(struct shash_desc *desc, void *out)
{
	memcpy(out, shash_desc_ctx(desc), sizeof(struct sha1_state));
	return 0;
}
/* Restore a partial hash state previously saved by sha1_export(). */
static int sha1_import(struct shash_desc *desc, const void *in)
{
	memcpy(shash_desc_ctx(desc), in, sizeof(struct sha1_state));
	return 0;
}
/* Registration descriptor for the powerpc-accelerated SHA-1 shash. */
static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_init,
	.update		=	sha1_update,
	.final		=	sha1_final,
	.export		=	sha1_export,
	.import		=	sha1_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-powerpc",
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};
/* Register the accelerated SHA-1 implementation with the crypto API. */
static int __init sha1_powerpc_mod_init(void)
{
	return crypto_register_shash(&alg);
}

/* Unregister on module unload. */
static void __exit sha1_powerpc_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}
module_init(sha1_powerpc_mod_init);
module_exit(sha1_powerpc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS("sha1-powerpc");
| gpl-2.0 |
DirtyUnicorns/android_kernel_samsung_trlte | sound/soc/pxa/ttc-dkb.c | 2313 | 5066 | /*
* linux/sound/soc/pxa/ttc_dkb.c
*
* Copyright (C) 2012 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
#include <sound/pcm_params.h>
#include "../codecs/88pm860x-codec.h"
/* Jack objects for headset and microphone detection (see ttc_pm860x_init). */
static struct snd_soc_jack hs_jack, mic_jack;

/* DAPM pin toggled when the headphone jack state changes. */
static struct snd_soc_jack_pin hs_jack_pins[] = {
	{ .pin = "Headset Stereophone",	.mask = SND_JACK_HEADPHONE, },
};

/* DAPM pin toggled when the microphone jack state changes. */
static struct snd_soc_jack_pin mic_jack_pins[] = {
	{ .pin = "Headset Mic 2",	.mask = SND_JACK_MICROPHONE, },
};
/* ttc machine dapm widgets */
static const struct snd_soc_dapm_widget ttc_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 1", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 2", NULL),
	SND_SOC_DAPM_SPK("Ext Speaker", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 1", NULL),
	SND_SOC_DAPM_MIC("Headset Mic 2", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 3", NULL),
};

/* ttc machine audio map: routes from 88pm860x codec pins (right-hand
 * side) to the board-level widgets declared above. */
static const struct snd_soc_dapm_route ttc_audio_map[] = {
	{"Headset Stereophone", NULL, "HS1"},
	{"Headset Stereophone", NULL, "HS2"},

	{"Ext Speaker", NULL, "LSP"},
	{"Ext Speaker", NULL, "LSN"},

	{"Lineout Out 1", NULL, "LINEOUT1"},
	{"Lineout Out 2", NULL, "LINEOUT2"},

	{"MIC1P", NULL, "Mic1 Bias"},
	{"MIC1N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Ext Mic 1"},

	{"MIC2P", NULL, "Mic1 Bias"},
	{"MIC2N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Headset Mic 2"},

	{"MIC3P", NULL, "Mic3 Bias"},
	{"MIC3N", NULL, "Mic3 Bias"},
	{"Mic3 Bias", NULL, "Ext Mic 3"},
};
/**
 * ttc_pm860x_init() - machine-level codec setup
 *
 * Enables the pins that are hard-wired on the board, creates the
 * headphone and microphone jacks, and hooks them up to the 88pm860x
 * jack-detection hardware.
 */
static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	/* connected pins */
	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
	/* Headset pins start disabled; jack detection enables them. */
	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");

	/* Headset jack detection */
	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE
			| SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
			&hs_jack);
	snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
			      hs_jack_pins);
	snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE,
			 &mic_jack);
	snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
			      mic_jack_pins);

	/* headphone, microphone detection & headset short detection */
	pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE,
			      SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2);
	pm860x_mic_jack_detect(codec, &hs_jack, SND_JACK_MICROPHONE);

	return 0;
}
/* ttc/td-dkb digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link ttc_pm860x_hifi_dai[] = {
{
	 .name = "88pm860x i2s",
	 .stream_name = "audio playback",
	 .codec_name = "88pm860x-codec",
	 .platform_name = "mmp-pcm-audio",
	 .cpu_dai_name = "pxa-ssp-dai.1",
	 .codec_dai_name = "88pm860x-i2s",
	 /* Codec is I2S bus/frame master. */
	 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			SND_SOC_DAIFMT_CBM_CFM,
	 .init = ttc_pm860x_init,
},
};

/* ttc/td audio machine driver */
static struct snd_soc_card ttc_dkb_card = {
	.name = "ttc-dkb-hifi",
	.dai_link = ttc_pm860x_hifi_dai,
	.num_links = ARRAY_SIZE(ttc_pm860x_hifi_dai),

	.dapm_widgets = ttc_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(ttc_dapm_widgets),
	.dapm_routes = ttc_audio_map,
	.num_dapm_routes = ARRAY_SIZE(ttc_audio_map),
};
/* Bind the sound card to the platform device and register it. */
static int ttc_dkb_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &ttc_dkb_card;
	int err;

	card->dev = &pdev->dev;

	err = snd_soc_register_card(card);
	if (err)
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			err);

	return err;
}
/* Undo the registration performed in ttc_dkb_probe(). */
static int ttc_dkb_remove(struct platform_device *pdev)
{
	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);

	snd_soc_unregister_card(soc_card);

	return 0;
}
/* Platform driver glue; matched by name against the "ttc-dkb-audio" device */
static struct platform_driver ttc_dkb_driver = {
	.driver		= {
		.name	= "ttc-dkb-audio",
		.owner	= THIS_MODULE,
	},
	.probe		= ttc_dkb_probe,
	.remove		= ttc_dkb_remove,
};
/* Generates module init/exit that register/unregister the driver */
module_platform_driver(ttc_dkb_driver);
/* Module information */
MODULE_AUTHOR("Qiao Zhou, <zhouqiao@marvell.com>");
MODULE_DESCRIPTION("ALSA SoC TTC DKB");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ttc-dkb-audio");
| gpl-2.0 |
dev-elixir/elixir-redmi2 | drivers/tty/goldfish.c | 2313 | 8724 | /*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
/*
 * MMIO register offsets and command codes for the goldfish virtual
 * TTY device.  The driver points the device at kernel buffers via
 * DATA_PTR/DATA_LEN and then issues a command through CMD.
 */
enum {
	GOLDFISH_TTY_PUT_CHAR		= 0x00,	/* write: single byte out */
	GOLDFISH_TTY_BYTES_READY	= 0x04,	/* read: input bytes pending */
	GOLDFISH_TTY_CMD		= 0x08,	/* write: command below */
	GOLDFISH_TTY_DATA_PTR		= 0x10,	/* buffer address for xfers */
	GOLDFISH_TTY_DATA_LEN		= 0x14,	/* buffer length for xfers */

	GOLDFISH_TTY_CMD_INT_DISABLE	= 0,
	GOLDFISH_TTY_CMD_INT_ENABLE	= 1,
	GOLDFISH_TTY_CMD_WRITE_BUFFER	= 2,	/* device reads DATA_LEN bytes */
	GOLDFISH_TTY_CMD_READ_BUFFER	= 3,	/* device fills DATA_LEN bytes */
};

/* Per-line state; one instance per probed platform device. */
struct goldfish_tty {
	struct tty_port port;
	spinlock_t lock;	/* serializes the DATA_PTR/LEN/CMD sequence */
	void __iomem *base;	/* remapped MMIO base */
	u32 irq;
	int opencount;		/* NOTE(review): not referenced in this file */
	struct console console;	/* each line doubles as a console */
};

/* Protects driver/line bookkeeping across probe/remove. */
static DEFINE_MUTEX(goldfish_tty_lock);
static struct tty_driver *goldfish_tty_driver;
static u32 goldfish_tty_line_count = 8;		/* max supported lines */
static u32 goldfish_tty_current_line_count;	/* lines currently probed */
static struct goldfish_tty *goldfish_ttys;	/* array of line_count entries */
/*
 * Hand 'count' bytes at 'buf' to the device for TTY line 'line'.
 * The device consumes the buffer synchronously when CMD_WRITE_BUFFER
 * is written; the spinlock keeps the three-register sequence atomic.
 *
 * NOTE(review): the (u32)buf cast assumes kernel virtual addresses fit
 * in 32 bits -- confirm this driver is only built for 32-bit targets.
 */
static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
{
	unsigned long irq_flags;
	struct goldfish_tty *qtty = &goldfish_ttys[line];
	void __iomem *base = qtty->base;
	spin_lock_irqsave(&qtty->lock, irq_flags);
	writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
	writel(count, base + GOLDFISH_TTY_DATA_LEN);
	writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
	spin_unlock_irqrestore(&qtty->lock, irq_flags);
}
/*
 * Shared IRQ handler: the device has input pending.  Ask the TTY layer
 * for a flip-string buffer, point the device at it, issue
 * CMD_READ_BUFFER (which fills the buffer synchronously), then schedule
 * the flip to push the bytes up to the line discipline.
 * Returns IRQ_NONE when no bytes are pending, since the line is shared.
 */
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
	void __iomem *base = qtty->base;
	unsigned long irq_flags;
	unsigned char *buf;
	u32 count;
	count = readl(base + GOLDFISH_TTY_BYTES_READY);
	if(count == 0)
		return IRQ_NONE;
	/* May shrink count to what the flip buffer can actually take */
	count = tty_prepare_flip_string(&qtty->port, &buf, count);
	spin_lock_irqsave(&qtty->lock, irq_flags);
	writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
	writel(count, base + GOLDFISH_TTY_DATA_LEN);
	writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
	spin_unlock_irqrestore(&qtty->lock, irq_flags);
	tty_schedule_flip(&qtty->port);
	return IRQ_HANDLED;
}
static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_CMD);
return 0;
}
static void goldfish_tty_shutdown(struct tty_port *port)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_CMD);
}
/* tty open: delegate to the tty_port helper for this line. */
static int goldfish_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct goldfish_tty *line = goldfish_ttys + tty->index;

	return tty_port_open(&line->port, tty, filp);
}
/* tty close: delegate to the tty_port helper. */
static void goldfish_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct tty_port *port = tty->port;

	tty_port_close(port, tty, filp);
}
/* tty hangup: delegate to the tty_port helper. */
static void goldfish_tty_hangup(struct tty_struct *tty)
{
	struct tty_port *port = tty->port;

	tty_port_hangup(port);
}
/* tty write: the device accepts everything synchronously, so the whole
 * count is always consumed. */
static int goldfish_tty_write(struct tty_struct *tty, const unsigned char *buf,
			      int count)
{
	int line = tty->index;

	goldfish_tty_do_write(line, buf, count);
	return count;
}
/* tty write_room: writes complete immediately, so advertise a large
 * fixed amount of room. */
static int goldfish_tty_write_room(struct tty_struct *tty)
{
	const int room = 0x10000;

	return room;
}
/* tty chars_in_buffer: report the device's pending input byte count. */
static int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct goldfish_tty *line = goldfish_ttys + tty->index;

	return readl(line->base + GOLDFISH_TTY_BYTES_READY);
}
/* Console write hook: reuse the normal write path for this line. */
static void goldfish_tty_console_write(struct console *co, const char *b,
				       unsigned count)
{
	int line = co->index;

	goldfish_tty_do_write(line, b, count);
}
/* Console device hook: map the console back to our tty driver/line. */
static struct tty_driver *goldfish_tty_console_device(struct console *c,
						      int *index)
{
	struct tty_driver *drv = goldfish_tty_driver;

	*index = c->index;
	return drv;
}
static int goldfish_tty_console_setup(struct console *co, char *options)
{
if((unsigned)co->index > goldfish_tty_line_count)
return -ENODEV;
if(goldfish_ttys[co->index].base == 0)
return -ENODEV;
return 0;
}
static struct tty_port_operations goldfish_port_ops = {
.activate = goldfish_tty_activate,
.shutdown = goldfish_tty_shutdown
};
static struct tty_operations goldfish_tty_ops = {
.open = goldfish_tty_open,
.close = goldfish_tty_close,
.hangup = goldfish_tty_hangup,
.write = goldfish_tty_write,
.write_room = goldfish_tty_write_room,
.chars_in_buffer = goldfish_tty_chars_in_buffer,
};
/*
 * Allocate the per-line state array and register the tty driver.
 * Called under goldfish_tty_lock when the first line is probed.
 * Returns 0 on success or a negative errno; on failure all partial
 * allocations are rolled back via the goto chain.
 */
static int goldfish_tty_create_driver(void)
{
	int ret;
	struct tty_driver *tty;

	/* kcalloc zero-initializes and checks the count * size
	 * multiplication for overflow, unlike open-coded kzalloc(n * sz). */
	goldfish_ttys = kcalloc(goldfish_tty_line_count,
				sizeof(*goldfish_ttys), GFP_KERNEL);
	if (goldfish_ttys == NULL) {
		ret = -ENOMEM;
		goto err_alloc_goldfish_ttys_failed;
	}
	tty = alloc_tty_driver(goldfish_tty_line_count);
	if (tty == NULL) {
		ret = -ENOMEM;
		goto err_alloc_tty_driver_failed;
	}
	tty->driver_name = "goldfish";
	tty->name = "ttyGF";
	tty->type = TTY_DRIVER_TYPE_SERIAL;
	tty->subtype = SERIAL_TYPE_NORMAL;
	tty->init_termios = tty_std_termios;
	/* DYNAMIC_DEV: devices are registered per-line in probe() */
	tty->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
		     TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(tty, &goldfish_tty_ops);
	ret = tty_register_driver(tty);
	if (ret)
		goto err_tty_register_driver_failed;
	goldfish_tty_driver = tty;
	return 0;

err_tty_register_driver_failed:
	put_tty_driver(tty);
err_alloc_tty_driver_failed:
	kfree(goldfish_ttys);
	goldfish_ttys = NULL;
err_alloc_goldfish_ttys_failed:
	return ret;
}
/*
 * Undo goldfish_tty_create_driver(): unregister and drop the tty
 * driver, then release the per-line array.  Called under
 * goldfish_tty_lock when the last line is removed.
 */
static void goldfish_tty_delete_driver(void)
{
	struct tty_driver *drv = goldfish_tty_driver;

	goldfish_tty_driver = NULL;
	tty_unregister_driver(drv);
	put_tty_driver(drv);

	kfree(goldfish_ttys);
	goldfish_ttys = NULL;
}
static int goldfish_tty_probe(struct platform_device *pdev)
{
struct goldfish_tty *qtty;
int ret = -EINVAL;
int i;
struct resource *r;
struct device *ttydev;
void __iomem *base;
u32 irq;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if(r == NULL)
return -EINVAL;
base = ioremap(r->start, 0x1000);
if (base == NULL)
pr_err("goldfish_tty: unable to remap base\n");
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if(r == NULL)
goto err_unmap;
irq = r->start;
if(pdev->id >= goldfish_tty_line_count)
goto err_unmap;
mutex_lock(&goldfish_tty_lock);
if(goldfish_tty_current_line_count == 0) {
ret = goldfish_tty_create_driver();
if(ret)
goto err_create_driver_failed;
}
goldfish_tty_current_line_count++;
qtty = &goldfish_ttys[pdev->id];
spin_lock_init(&qtty->lock);
tty_port_init(&qtty->port);
qtty->port.ops = &goldfish_port_ops;
qtty->base = base;
qtty->irq = irq;
writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, "goldfish_tty", pdev);
if(ret)
goto err_request_irq_failed;
ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
pdev->id, &pdev->dev);
if(IS_ERR(ttydev)) {
ret = PTR_ERR(ttydev);
goto err_tty_register_device_failed;
}
strcpy(qtty->console.name, "ttyGF");
qtty->console.write = goldfish_tty_console_write;
qtty->console.device = goldfish_tty_console_device;
qtty->console.setup = goldfish_tty_console_setup;
qtty->console.flags = CON_PRINTBUFFER;
qtty->console.index = pdev->id;
register_console(&qtty->console);
mutex_unlock(&goldfish_tty_lock);
return 0;
tty_unregister_device(goldfish_tty_driver, i);
err_tty_register_device_failed:
free_irq(irq, pdev);
err_request_irq_failed:
goldfish_tty_current_line_count--;
if(goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
err_create_driver_failed:
mutex_unlock(&goldfish_tty_lock);
err_unmap:
iounmap(base);
return ret;
}
static int goldfish_tty_remove(struct platform_device *pdev)
{
struct goldfish_tty *qtty;
mutex_lock(&goldfish_tty_lock);
qtty = &goldfish_ttys[pdev->id];
unregister_console(&qtty->console);
tty_unregister_device(goldfish_tty_driver, pdev->id);
iounmap(qtty->base);
qtty->base = 0;
free_irq(qtty->irq, pdev);
goldfish_tty_current_line_count--;
if(goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
mutex_unlock(&goldfish_tty_lock);
return 0;
}
/* Platform driver glue; matched by name against "goldfish_tty" devices */
static struct platform_driver goldfish_tty_platform_driver = {
	.probe = goldfish_tty_probe,
	.remove = goldfish_tty_remove,
	.driver = {
		.name = "goldfish_tty"
	}
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(goldfish_tty_platform_driver);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
1N4148/agni | drivers/gpio/gpio-s5pv210.c | 2313 | 6060 | /* linux/arch/arm/mach-s5pv210/gpiolib.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - GPIOlib support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
#include <mach/map.h>
/* Default bank config.  Currently identical to gpio_cfg_noint below;
 * the distinction matters in s5pv210_gpiolib_init(), which assigns a
 * GPIO-interrupt group only to banks left with config == NULL (i.e.
 * the ones that receive this default). */
static struct s3c_gpio_cfg gpio_cfg = {
	.set_config	= s3c_gpio_setcfg_s3c64xx_4bit,
	.set_pull	= s3c_gpio_setpull_updown,
	.get_pull	= s3c_gpio_getpull_updown,
};
/* Config for banks that must not get a GPIO-interrupt group (see the
 * config == NULL check in s5pv210_gpiolib_init()). */
static struct s3c_gpio_cfg gpio_cfg_noint = {
	.set_config	= s3c_gpio_setcfg_s3c64xx_4bit,
	.set_pull	= s3c_gpio_setpull_updown,
	.get_pull	= s3c_gpio_getpull_updown,
};
/* GPIO bank's base address given the index of the bank in the
* list of all gpio banks.
*/
#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
/*
* Following are the gpio banks in v210.
*
* The 'config' member when left to NULL, is initialized to the default
* structure gpio_cfg in the init function below.
*
* The 'base' member is also initialized in the init function below.
* Note: The initialization of 'base' member of s3c_gpio_chip structure
* uses the above macro and depends on the banks being listed in order here.
*/
/*
 * S5PV210 GPIO bank table.
 *
 * Entries with no explicit .base get S5PV210_BANK_BASE(index) in the
 * init function, so ordinary banks MUST stay listed in register order
 * (0x20 stride).  Entries with no .config get the default gpio_cfg and
 * an interrupt group; .config = &gpio_cfg_noint opts a bank out.  The
 * GPH0..GPH3 banks live at explicit offsets and route to the external
 * interrupt lines via samsung_gpiolib_to_irq().
 */
static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
	{
		.chip	= {
			.base	= S5PV210_GPA0(0),
			.ngpio	= S5PV210_GPIO_A0_NR,
			.label	= "GPA0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPA1(0),
			.ngpio	= S5PV210_GPIO_A1_NR,
			.label	= "GPA1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPB(0),
			.ngpio	= S5PV210_GPIO_B_NR,
			.label	= "GPB",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPC0(0),
			.ngpio	= S5PV210_GPIO_C0_NR,
			.label	= "GPC0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPC1(0),
			.ngpio	= S5PV210_GPIO_C1_NR,
			.label	= "GPC1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPD0(0),
			.ngpio	= S5PV210_GPIO_D0_NR,
			.label	= "GPD0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPD1(0),
			.ngpio	= S5PV210_GPIO_D1_NR,
			.label	= "GPD1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPE0(0),
			.ngpio	= S5PV210_GPIO_E0_NR,
			.label	= "GPE0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPE1(0),
			.ngpio	= S5PV210_GPIO_E1_NR,
			.label	= "GPE1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPF0(0),
			.ngpio	= S5PV210_GPIO_F0_NR,
			.label	= "GPF0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPF1(0),
			.ngpio	= S5PV210_GPIO_F1_NR,
			.label	= "GPF1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPF2(0),
			.ngpio	= S5PV210_GPIO_F2_NR,
			.label	= "GPF2",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPF3(0),
			.ngpio	= S5PV210_GPIO_F3_NR,
			.label	= "GPF3",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPG0(0),
			.ngpio	= S5PV210_GPIO_G0_NR,
			.label	= "GPG0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPG1(0),
			.ngpio	= S5PV210_GPIO_G1_NR,
			.label	= "GPG1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPG2(0),
			.ngpio	= S5PV210_GPIO_G2_NR,
			.label	= "GPG2",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPG3(0),
			.ngpio	= S5PV210_GPIO_G3_NR,
			.label	= "GPG3",
		},
	}, {
		/* GPI: no GPIO-interrupt group */
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_GPI(0),
			.ngpio	= S5PV210_GPIO_I_NR,
			.label	= "GPI",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPJ0(0),
			.ngpio	= S5PV210_GPIO_J0_NR,
			.label	= "GPJ0",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPJ1(0),
			.ngpio	= S5PV210_GPIO_J1_NR,
			.label	= "GPJ1",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPJ2(0),
			.ngpio	= S5PV210_GPIO_J2_NR,
			.label	= "GPJ2",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPJ3(0),
			.ngpio	= S5PV210_GPIO_J3_NR,
			.label	= "GPJ3",
		},
	}, {
		.chip	= {
			.base	= S5PV210_GPJ4(0),
			.ngpio	= S5PV210_GPIO_J4_NR,
			.label	= "GPJ4",
		},
	}, {
		/* MP01..MP05: memory-port banks, no interrupt group */
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_MP01(0),
			.ngpio	= S5PV210_GPIO_MP01_NR,
			.label	= "MP01",
		},
	}, {
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_MP02(0),
			.ngpio	= S5PV210_GPIO_MP02_NR,
			.label	= "MP02",
		},
	}, {
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_MP03(0),
			.ngpio	= S5PV210_GPIO_MP03_NR,
			.label	= "MP03",
		},
	}, {
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_MP04(0),
			.ngpio	= S5PV210_GPIO_MP04_NR,
			.label	= "MP04",
		},
	}, {
		.config	= &gpio_cfg_noint,
		.chip	= {
			.base	= S5PV210_MP05(0),
			.ngpio	= S5PV210_GPIO_MP05_NR,
			.label	= "MP05",
		},
	}, {
		/* GPH0..GPH3: external-interrupt banks at explicit register
		 * offsets, mapped to IRQ_EINT(0..31) */
		.base	= (S5P_VA_GPIO + 0xC00),
		.config	= &gpio_cfg_noint,
		.irq_base = IRQ_EINT(0),
		.chip	= {
			.base	= S5PV210_GPH0(0),
			.ngpio	= S5PV210_GPIO_H0_NR,
			.label	= "GPH0",
			.to_irq	= samsung_gpiolib_to_irq,
		},
	}, {
		.base	= (S5P_VA_GPIO + 0xC20),
		.config	= &gpio_cfg_noint,
		.irq_base = IRQ_EINT(8),
		.chip	= {
			.base	= S5PV210_GPH1(0),
			.ngpio	= S5PV210_GPIO_H1_NR,
			.label	= "GPH1",
			.to_irq	= samsung_gpiolib_to_irq,
		},
	}, {
		.base	= (S5P_VA_GPIO + 0xC40),
		.config	= &gpio_cfg_noint,
		.irq_base = IRQ_EINT(16),
		.chip	= {
			.base	= S5PV210_GPH2(0),
			.ngpio	= S5PV210_GPIO_H2_NR,
			.label	= "GPH2",
			.to_irq	= samsung_gpiolib_to_irq,
		},
	}, {
		.base	= (S5P_VA_GPIO + 0xC60),
		.config	= &gpio_cfg_noint,
		.irq_base = IRQ_EINT(24),
		.chip	= {
			.base	= S5PV210_GPH3(0),
			.ngpio	= S5PV210_GPIO_H3_NR,
			.label	= "GPH3",
			.to_irq	= samsung_gpiolib_to_irq,
		},
	},
};
/*
 * Register all S5PV210 GPIO banks with the Samsung gpiolib core.
 *
 * Banks whose .config was left NULL in the table get the default
 * gpio_cfg and an incrementing GPIO-interrupt group number; banks with
 * an explicit config (gpio_cfg_noint / the GPHx banks) get neither.
 * Bank register bases default to S5PV210_BANK_BASE(i), which depends
 * on the table being in register order.
 */
static __init int s5pv210_gpiolib_init(void)
{
	struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
	int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
	int gpioint_group = 0;
	int i = 0;

	for (i = 0; i < nr_chips; i++, chip++) {
		if (chip->config == NULL) {
			chip->config = &gpio_cfg;
			/* only default-config banks join a gpioint group */
			chip->group = gpioint_group++;
		}
		if (chip->base == NULL)
			chip->base = S5PV210_BANK_BASE(i);
	}

	samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
	s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);

	return 0;
}
core_initcall(s5pv210_gpiolib_init);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.